Browse Source

Cull all the early queue requests since we request every time work is popped now.

nfactor-troky
Con Kolivas 12 years ago
parent
commit
c3e32274ee
  1. 26
      cgminer.c
  2. 24
      driver-bitforce.c
  3. 1
      miner.h

26
cgminer.c

@@ -2630,6 +2630,8 @@ static struct pool *priority_pool(int choice)
return ret; return ret;
} }
static bool queue_request(struct thr_info *thr, bool needed);
void switch_pools(struct pool *selected) void switch_pools(struct pool *selected)
{ {
struct pool *pool, *last_pool; struct pool *pool, *last_pool;
@@ -2719,8 +2721,6 @@ static void discard_work(struct work *work)
free_work(work); free_work(work);
} }
bool queue_request(struct thr_info *thr, bool needed);
static void discard_stale(void) static void discard_stale(void)
{ {
struct work *work, *tmp; struct work *work, *tmp;
@@ -2781,8 +2781,6 @@ static void restart_threads(void)
/* Discard staged work that is now stale */ /* Discard staged work that is now stale */
discard_stale(); discard_stale();
queue_request(NULL, true);
for (i = 0; i < mining_threads; i++) for (i = 0; i < mining_threads; i++)
thr_info[i].work_restart = true; thr_info[i].work_restart = true;
@@ -3930,7 +3928,7 @@ static void pool_resus(struct pool *pool)
switch_pools(NULL); switch_pools(NULL);
} }
bool queue_request(struct thr_info *thr, bool needed) static bool queue_request(struct thr_info *thr, bool needed)
{ {
struct workio_cmd *wc; struct workio_cmd *wc;
@@ -4370,22 +4368,6 @@ void *miner_thread(void *userdata)
} }
timersub(&tv_end, &tv_workstart, &wdiff); timersub(&tv_end, &tv_workstart, &wdiff);
if (!requested) {
if (wdiff.tv_sec > request_interval || work->blk.nonce > request_nonce) {
thread_reportout(mythr);
if (unlikely(!queue_request(mythr, false))) {
applog(LOG_ERR, "Failed to queue_request in miner_thread %d", thr_id);
cgpu->device_last_not_well = time(NULL);
cgpu->device_not_well_reason = REASON_THREAD_FAIL_QUEUE;
cgpu->thread_fail_queue_count++;
goto out;
}
thread_reportin(mythr);
requested = true;
}
}
if (unlikely((long)sdiff.tv_sec < cycle)) { if (unlikely((long)sdiff.tv_sec < cycle)) {
int mult; int mult;
@@ -4721,8 +4703,6 @@ static void *watchdog_thread(void __maybe_unused *userdata)
discard_stale(); discard_stale();
queue_request(NULL, false);
hashmeter(-1, &zero_tv, 0); hashmeter(-1, &zero_tv, 0);
#ifdef HAVE_CURSES #ifdef HAVE_CURSES

24
driver-bitforce.c

@@ -602,35 +602,15 @@ static void biforce_thread_enable(struct thr_info *thr)
static int64_t bitforce_scanhash(struct thr_info *thr, struct work *work, int64_t __maybe_unused max_nonce) static int64_t bitforce_scanhash(struct thr_info *thr, struct work *work, int64_t __maybe_unused max_nonce)
{ {
struct cgpu_info *bitforce = thr->cgpu; struct cgpu_info *bitforce = thr->cgpu;
unsigned int sleep_time;
bool send_ret; bool send_ret;
int64_t ret; int64_t ret;
send_ret = bitforce_send_work(thr, work); send_ret = bitforce_send_work(thr, work);
if (!bitforce->nonce_range) { if (!restart_wait(bitforce->sleep_ms))
/* Initially wait 2/3 of the average cycle time so we can request more
work before full scan is up */
sleep_time = (2 * bitforce->sleep_ms) / 3;
if (!restart_wait(sleep_time))
return 0;
bitforce->wait_ms = sleep_time;
queue_request(thr, false);
/* Now wait the final 1/3rd; no bitforce should be finished by now */
sleep_time = bitforce->sleep_ms - sleep_time;
if (!restart_wait(sleep_time))
return 0; return 0;
bitforce->wait_ms += sleep_time; bitforce->wait_ms = bitforce->sleep_ms;
} else {
sleep_time = bitforce->sleep_ms;
if (!restart_wait(sleep_time))
return 0;
bitforce->wait_ms = sleep_time;
}
if (send_ret) { if (send_ret) {
bitforce->polling = true; bitforce->polling = true;

1
miner.h

@@ -599,7 +599,6 @@ extern pthread_mutex_t restart_lock;
extern pthread_cond_t restart_cond; extern pthread_cond_t restart_cond;
extern void thread_reportin(struct thr_info *thr); extern void thread_reportin(struct thr_info *thr);
extern bool queue_request(struct thr_info *thr, bool needed);
extern int restart_wait(unsigned int mstime); extern int restart_wait(unsigned int mstime);
extern void kill_work(void); extern void kill_work(void);

Loading…
Cancel
Save