diff --git a/cgminer.c b/cgminer.c
index b68bc10f..09570037 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -5775,11 +5775,14 @@ static void fill_queue(struct thr_info *mythr, struct cgpu_info *cgpu, struct de
 {
 	thread_reportout(mythr);
 	do {
-		struct work *work = get_work(mythr, thr_id);
+		struct work *work;
 
-		work->device_diff = MIN(drv->max_diff, work->work_difficulty);
 		wr_lock(&cgpu->qlock);
-		HASH_ADD_INT(cgpu->queued_work, id, work);
+		if (HASH_COUNT(cgpu->queued_work) == cgpu->queued_count) {
+			work = get_work(mythr, thr_id);
+			work->device_diff = MIN(drv->max_diff, work->work_difficulty);
+			HASH_ADD_INT(cgpu->queued_work, id, work);
+		}
 		wr_unlock(&cgpu->qlock);
 		/* The queue_full function should be used by the driver to
 		 * actually place work items on the physical device if it
@@ -5799,6 +5802,7 @@ struct work *get_queued(struct cgpu_info *cgpu)
 	HASH_ITER(hh, cgpu->queued_work, work, tmp) {
 		if (!work->queued) {
 			work->queued = true;
+			cgpu->queued_count++;
 			ret = work;
 			break;
 		}
@@ -5849,6 +5853,8 @@ struct work *find_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate,
 void work_completed(struct cgpu_info *cgpu, struct work *work)
 {
 	wr_lock(&cgpu->qlock);
+	if (work->queued)
+		cgpu->queued_count--;
 	HASH_DEL(cgpu->queued_work, work);
 	wr_unlock(&cgpu->qlock);
 	free_work(work);
diff --git a/miner.h b/miner.h
index 754d5f81..7f565827 100644
--- a/miner.h
+++ b/miner.h
@@ -534,6 +534,7 @@ struct cgpu_info {
 
 	pthread_rwlock_t qlock;
 	struct work *queued_work;
+	unsigned int queued_count;
 };
 
 extern bool add_cgpu(struct cgpu_info*);
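
For reference, a minimal sketch of the driver-side flow this accounting supports. Only get_queued(), work_completed() and the cgpu fields touched by this patch come from the source; the helper name and the surrounding flow are illustrative assumptions, not part of the patch.

#include "miner.h"

/* Illustrative only: get_queued() marks an item as queued and bumps
 * cgpu->queued_count, so fill_queue() only fetches fresh work once every
 * hashed item has been claimed, i.e. when
 * HASH_COUNT(cgpu->queued_work) == cgpu->queued_count. */
static void example_take_and_complete(struct cgpu_info *cgpu)
{
	struct work *work = get_queued(cgpu);

	if (!work)
		return;	/* nothing unqueued yet; fill_queue() will top the list up */

	/* ... hand 'work' to the physical device and collect results ... */

	/* Removes the item from cgpu->queued_work and, because work->queued is
	 * set, drops queued_count so a replacement can be fetched. */
	work_completed(cgpu, work);
}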