@@ -6341,55 +6341,39 @@ static void hash_sole_work(struct thr_info *mythr)
 	cgpu->deven = DEV_DISABLED;
 }
 
-/* Create a hashtable of work items for devices with a queue. The device
- * driver must have a custom queue_full function or it will default to true
- * and put only one work item in the queue. Work items should not be removed
- * from this hashtable until they are no longer in use anywhere. Once a work
- * item is physically queued on the device itself, the work->queued flag
- * should be set under cgpu->qlock write lock to prevent it being dereferenced
- * while still in use. */
+/* Put a new unqueued work item in cgpu->unqueued_work under cgpu->qlock till
+ * the driver tells us it's full so that it may extract the work item using
+ * the get_queued() function which adds it to the hashtable on
+ * cgpu->queued_work. */
 static void fill_queue(struct thr_info *mythr, struct cgpu_info *cgpu, struct device_drv *drv, const int thr_id)
 {
 	do {
-		bool need_work;
-
-		rd_lock(&cgpu->qlock);
-		need_work = (HASH_COUNT(cgpu->queued_work) == cgpu->queued_count);
-		rd_unlock(&cgpu->qlock);
-
-		if (need_work) {
-			struct work *work = get_work(mythr, thr_id);
-
-			wr_lock(&cgpu->qlock);
-			HASH_ADD_INT(cgpu->queued_work, id, work);
-			wr_unlock(&cgpu->qlock);
-		}
+		wr_lock(&cgpu->qlock);
+		if (!cgpu->unqueued_work)
+			cgpu->unqueued_work = get_work(mythr, thr_id);
+		wr_unlock(&cgpu->qlock);
 		/* The queue_full function should be used by the driver to
 		 * actually place work items on the physical device if it
 		 * does have a queue. */
 	} while (!drv->queue_full(cgpu));
 }
 
-/* This function is for retrieving one work item from the queued hashtable of
- * available work items that are not yet physically on a device (which is
- * flagged with the work->queued bool). Code using this function must be able
- * to handle NULL as a return which implies there is no work available. */
+/* This function is for retrieving one work item from the unqueued pointer and
+ * adding it to the hashtable of queued work. Code using this function must be
+ * able to handle NULL as a return which implies there is no work available. */
 struct work *get_queued(struct cgpu_info *cgpu)
 {
-	struct work *work, *tmp, *ret = NULL;
+	struct work *work = NULL;
 
 	wr_lock(&cgpu->qlock);
-	HASH_ITER(hh, cgpu->queued_work, work, tmp) {
-		if (!work->queued) {
-			work->queued = true;
-			cgpu->queued_count++;
-			ret = work;
-			break;
-		}
+	if (cgpu->unqueued_work) {
+		work = cgpu->unqueued_work;
+		HASH_ADD_INT(cgpu->queued_work, id, work);
+		cgpu->unqueued_work = NULL;
 	}
 	wr_unlock(&cgpu->qlock);
 
-	return ret;
+	return work;
 }
 
 /* This function is for finding an already queued work item in the
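
The new comments in this hunk describe a two-step handshake: fill_queue() parks at
most one fresh work item in cgpu->unqueued_work, and the driver claims it from its
queue_full callback via get_queued(), which migrates the item into the
cgpu->queued_work hashtable. A minimal driver-side sketch of that handshake,
assuming hypothetical helpers mydev_fifo_full() and mydev_send_work() (nothing
below is part of this patch):

#include "miner.h"

static bool mydev_queue_full(struct cgpu_info *cgpu)
{
	struct work *work;

	/* Hypothetical hardware check: reporting full stops fill_queue()'s
	 * loop; a pending item simply stays in cgpu->unqueued_work. */
	if (mydev_fifo_full(cgpu))
		return true;

	/* Claim the offered item; under cgpu->qlock this moves it from
	 * cgpu->unqueued_work into the cgpu->queued_work hashtable. */
	work = get_queued(cgpu);
	if (work)
		mydev_send_work(cgpu, work);	/* hypothetical: queue on hardware */

	return false;	/* not full yet: fill_queue() will offer another item */
}
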
@@ -6402,8 +6386,7 @@ struct work *__find_work_bymidstate(struct work *que, char *midstate, size_t mid
 	struct work *work, *tmp, *ret = NULL;
 
 	HASH_ITER(hh, que, work, tmp) {
-		if (work->queued &&
-		    memcmp(work->midstate, midstate, midstatelen) == 0 &&
+		if (memcmp(work->midstate, midstate, midstatelen) == 0 &&
 		    memcmp(work->data + offset, data, datalen) == 0) {
 			ret = work;
 			break;
@@ -6443,7 +6426,6 @@ struct work *clone_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate
 
 void __work_completed(struct cgpu_info *cgpu, struct work *work)
 {
-	if (work->queued)
-		cgpu->queued_count--;
+	cgpu->queued_count--;
 	HASH_DEL(cgpu->queued_work, work);
 }
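
With the unqueued/queued split, everything in the cgpu->queued_work hashtable is
by definition already on a device, so __find_work_bymidstate() no longer needs
the work->queued test and __work_completed() can decrement queued_count
unconditionally. A hedged sketch of the result path a driver might build on top
of these helpers, assuming the locked wrappers find_queued_work_bymidstate() and
work_completed() plus submit_nonce() from the surrounding code (none of which
are shown in these hunks), with illustrative length/offset values:

#include "miner.h"

static void mydev_handle_nonce(struct thr_info *thr, char *midstate, char *data,
			       uint32_t nonce)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct work *work;

	/* Match the device's result back to its queued work item by midstate
	 * plus a slice of the block header; 32/64/12 are illustrative only. */
	work = find_queued_work_bymidstate(cgpu, midstate, 32, data, 64, 12);
	if (!work)
		return;	/* already completed or flushed: ignore stale result */

	submit_nonce(thr, work, nonce);
	work_completed(cgpu, work);	/* removes it from cgpu->queued_work */
}
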
@@ -6475,23 +6457,17 @@ struct work *take_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate,
 
 static void flush_queue(struct cgpu_info *cgpu)
 {
-	struct work *work, *tmp;
-	int discarded = 0;
+	struct work *work = NULL;
 
 	wr_lock(&cgpu->qlock);
-	HASH_ITER(hh, cgpu->queued_work, work, tmp) {
-		/* Can only discard the work items if they're not physically
-		 * queued on the device. */
-		if (!work->queued) {
-			HASH_DEL(cgpu->queued_work, work);
-			discard_work(work);
-			discarded++;
-		}
-	}
+	work = cgpu->unqueued_work;
+	cgpu->unqueued_work = NULL;
 	wr_unlock(&cgpu->qlock);
 
-	if (discarded)
-		applog(LOG_DEBUG, "Discarded %d queued work items", discarded);
+	if (work) {
+		free_work(work);
+		applog(LOG_DEBUG, "Discarded queued work item");
+	}
 }
 
 /* This version of hash work is for devices that are fast enough to always
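
Note the semantic shift in flush_queue(): it now frees only the single item still
parked in cgpu->unqueued_work, because anything in cgpu->queued_work is by
construction physically queued on the device and may still be referenced by
in-flight results. Discarding device-side work on a block change therefore
becomes the driver's job; a minimal sketch, assuming a hypothetical
mydev_abort_all() helper behind the driver's flush_work hook:

static void mydev_flush_work(struct cgpu_info *cgpu)
{
	/* Hypothetical: tell the hardware to abandon its queued work. */
	mydev_abort_all(cgpu);

	/* The matching work items stay in cgpu->queued_work until the device
	 * acknowledges the abort, after which they are reaped one at a time
	 * via work_completed(); freeing them here could leave results in
	 * flight pointing at freed memory. */
}
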