1
0
mirror of https://github.com/GOSTSec/sgminer synced 2025-01-23 13:04:29 +00:00

Flush queued work on a restart from the hash database and discard the work structs.

This commit is contained in:
Con Kolivas 2013-02-16 12:35:16 +11:00
parent e8e88beff1
commit 294cda2eb2

View File

@ -5624,6 +5624,27 @@ void work_completed(struct cgpu_info *cgpu, struct work *work)
free_work(work); free_work(work);
} }
/* Drop all work entries still pending in a device's hash-table queue.
 * Entries already handed to the hardware (work->queued set) are left
 * alone; everything else is unlinked and discarded under the queue
 * write lock.  Logs a debug summary when anything was dropped. */
static void flush_queue(struct cgpu_info *cgpu)
{
	struct work *entry, *next;
	int dropped = 0;

	wr_lock(&cgpu->qlock);
	/* HASH_ITER with a second cursor (next) permits safe HASH_DEL
	 * of the current entry while iterating. */
	HASH_ITER(hh, cgpu->queued_work, entry, next) {
		/* Items physically queued on the device cannot be
		 * reclaimed here — skip them. */
		if (entry->queued)
			continue;
		HASH_DEL(cgpu->queued_work, entry);
		discard_work(entry);
		dropped++;
	}
	wr_unlock(&cgpu->qlock);

	if (dropped)
		applog(LOG_DEBUG, "Discarded %d queued work items", dropped);
}
/* This version of hash work is for devices that are fast enough to always /* This version of hash work is for devices that are fast enough to always
* perform a full nonce range and need a queue to maintain the device busy. * perform a full nonce range and need a queue to maintain the device busy.
* Work creation and destruction is not done from within this function * Work creation and destruction is not done from within this function
@ -5663,8 +5684,8 @@ static void hash_queued_work(struct thr_info *mythr)
memcpy(&tv_start, &tv_end, sizeof(struct timeval)); memcpy(&tv_start, &tv_end, sizeof(struct timeval));
} }
//if (unlikely(mythr->work_restart)) if (unlikely(mythr->work_restart))
// flush_queue(mythr, cgpu); flush_queue(cgpu);
if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED)) if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
mt_disable(mythr, thr_id, drv); mt_disable(mythr, thr_id, drv);