Mirror of https://github.com/GOSTSec/sgminer
Avoid recursive locks in fill_queue.
commit 29f0ac77da
parent ec9390dc4e
cgminer.c: 16 changed lines (10 additions, 6 deletions)
@@ -5728,15 +5728,19 @@ static void fill_queue(struct thr_info *mythr, struct cgpu_info *cgpu, struct de
 {
     thread_reportout(mythr);
     do {
-        struct work *work;
+        bool need_work;
 
-        wr_lock(&cgpu->qlock);
-        if (HASH_COUNT(cgpu->queued_work) == cgpu->queued_count) {
-            work = get_work(mythr, thr_id);
-            work->device_diff = MIN(drv->max_diff, work->work_difficulty);
+        rd_lock(&cgpu->qlock);
+        need_work = (HASH_COUNT(cgpu->queued_work) == cgpu->queued_count);
+        rd_unlock(&cgpu->qlock);
+
+        if (need_work) {
+            struct work *work = get_work(mythr, thr_id);
+
+            wr_lock(&cgpu->qlock);
             HASH_ADD_INT(cgpu->queued_work, id, work);
+            wr_unlock(&cgpu->qlock);
         }
-        wr_unlock(&cgpu->qlock);
         /* The queue_full function should be used by the driver to
          * actually place work items on the physical device if it
          * does have a queue. */
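For readers following the locking change, below is a minimal standalone sketch of the pattern the new code adopts: check the queue state under a shared (read) lock, release the lock before fetching new work, and take the exclusive (write) lock only for the insertion itself. The queue structure, the fetch_work() helper, and the condition used here are hypothetical stand-ins built on plain pthread rwlocks, not sgminer's actual definitions; this illustrates the check-then-insert pattern rather than reproducing the project's code.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical stand-ins for a work queue protected by a rwlock. */
struct work_item {
    struct work_item *next;
    int id;
};

struct work_queue {
    pthread_rwlock_t qlock;   /* plays the role of cgpu->qlock */
    struct work_item *head;
    int wanted_count;         /* how many items the device wants queued */
    int current_count;        /* how many items are queued right now */
};

/* Placeholder for the work-fetching call: it may block or take other
 * locks, so it must not run while qlock is held. */
static struct work_item *fetch_work(void)
{
    return calloc(1, sizeof(struct work_item));
}

static void fill_one(struct work_queue *q)
{
    bool need_work;

    /* Step 1: only the counters are read, so a read lock suffices. */
    pthread_rwlock_rdlock(&q->qlock);
    need_work = (q->current_count < q->wanted_count);
    pthread_rwlock_unlock(&q->qlock);

    if (need_work) {
        /* Step 2: fetch new work with no lock held. */
        struct work_item *work = fetch_work();

        /* Step 3: take the write lock only for the insertion. */
        pthread_rwlock_wrlock(&q->qlock);
        work->next = q->head;
        q->head = work;
        q->current_count++;
        pthread_rwlock_unlock(&q->qlock);
    }
}

Splitting the check from the insertion means the two no longer happen in one critical section, so another thread could queue work in between; the pattern trades that small window for never calling into work-fetching code while the queue lock is held.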