@@ -3943,9 +3943,12 @@ int restart_wait(struct thr_info *thr, unsigned int mstime)
 	return rc;
 }
 
+static void flush_queue(struct cgpu_info *cgpu);
+
 static void restart_threads(void)
 {
 	struct pool *cp = current_pool();
+	struct cgpu_info *cgpu;
 	int i;
 
 	/* Artificially set the lagging flag to avoid pool not providing work
@@ -3956,8 +3959,12 @@ static void restart_threads(void)
 	discard_stale();
 
 	rd_lock(&mining_thr_lock);
-	for (i = 0; i < mining_threads; i++)
+	for (i = 0; i < mining_threads; i++) {
+		cgpu = mining_thr[i]->cgpu;
 		mining_thr[i]->work_restart = true;
+		flush_queue(cgpu);
+		cgpu->drv->flush_work(cgpu);
+	}
 	rd_unlock(&mining_thr_lock);
 
 	mutex_lock(&restart_lock);
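
For readability, a sketch of how the restart path reads once the two hunks above are applied; the helper names come straight from the diff, while the comments are this editor's reading of their roles and the elided body is marked with "...":

static void restart_threads(void)
{
	struct pool *cp = current_pool();
	struct cgpu_info *cgpu;
	int i;

	...

	rd_lock(&mining_thr_lock);
	for (i = 0; i < mining_threads; i++) {
		cgpu = mining_thr[i]->cgpu;
		mining_thr[i]->work_restart = true;	/* hashing loops still see the flag */
		flush_queue(cgpu);			/* drop work already queued for this device */
		cgpu->drv->flush_work(cgpu);		/* per-driver flush of in-flight work */
	}
	rd_unlock(&mining_thr_lock);

	...
}
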
@@ -6557,10 +6564,7 @@ void hash_queued_work(struct thr_info *mythr)
 		if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
 			mt_disable(mythr, thr_id, drv);
 
-		if (unlikely(mythr->work_restart)) {
-			flush_queue(cgpu);
-			drv->flush_work(cgpu);
-		} else if (mythr->work_update)
+		if (mythr->work_update)
 			drv->update_work(cgpu);
 	}
 	cgpu->deven = DEV_DISABLED;
@@ -6607,9 +6611,7 @@ void hash_driver_work(struct thr_info *mythr)
 		if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
 			mt_disable(mythr, thr_id, drv);
 
-		if (unlikely(mythr->work_restart))
-			drv->flush_work(cgpu);
-		else if (mythr->work_update)
+		if (mythr->work_update)
 			drv->update_work(cgpu);
 	}
 	cgpu->deven = DEV_DISABLED;
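
With the flush moved into restart_threads(), the inner-loop handling in hash_queued_work() and hash_driver_work() reduces to the following sketch (only the lines touched by the diff are shown; the surrounding while loop is elided):

		/* Restart flushing is now done centrally in restart_threads(),
		 * so the hashing loops only forward work updates to the driver. */
		if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
			mt_disable(mythr, thr_id, drv);

		if (mythr->work_update)
			drv->update_work(cgpu);
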