
Simplify queued hashtable by storing unqueued work separately in a single pointer.

Branch: nfactor-troky
ckolivas committed 11 years ago
commit 680f014c85
4 changed files:
  1. cgminer.c (76 changes)
  2. driver-bflsc.c (2 changes)
  3. driver-klondike.c (14 changes)
  4. miner.h (2 changes)
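The commit replaces the per-item work->queued flag (and the hash scans that hunted for unflagged items) with a single cgpu->unqueued_work staging pointer: the filler thread stages at most one new item, and the driver moves it into the queued_work hashtable when it takes it. Below is a minimal, self-contained sketch of that handoff pattern using toy types, not the real cgminer structs:

#include <pthread.h>
#include <stddef.h>

struct work { int id; struct work *next; };

struct dev_queue {
	pthread_rwlock_t qlock;
	struct work *unqueued;	/* at most one staged, not-yet-queued item */
	struct work *queued;	/* stand-in for the uthash queued_work table */
};

/* Filler side: stage one new item only if the slot is empty. */
static void stage_work(struct dev_queue *q, struct work *w)
{
	pthread_rwlock_wrlock(&q->qlock);
	if (!q->unqueued)
		q->unqueued = w;
	pthread_rwlock_unlock(&q->qlock);
}

/* Driver side: take the staged item and move it into the queued set,
 * mirroring what get_queued() now does with HASH_ADD_INT(). A NULL
 * return means no work is available. */
static struct work *take_staged(struct dev_queue *q)
{
	struct work *w = NULL;

	pthread_rwlock_wrlock(&q->qlock);
	if (q->unqueued) {
		w = q->unqueued;
		w->next = q->queued;
		q->queued = w;
		q->unqueued = NULL;
	}
	pthread_rwlock_unlock(&q->qlock);
	return w;
}

Finding free work thus drops from an O(n) scan of the hashtable to a single pointer test, and the hashtable only ever holds work that is actually on the device.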

cgminer.c (76 changes)

@@ -6341,55 +6341,39 @@ static void hash_sole_work(struct thr_info *mythr)
 		cgpu->deven = DEV_DISABLED;
 }
 
-/* Create a hashtable of work items for devices with a queue. The device
- * driver must have a custom queue_full function or it will default to true
- * and put only one work item in the queue. Work items should not be removed
- * from this hashtable until they are no longer in use anywhere. Once a work
- * item is physically queued on the device itself, the work->queued flag
- * should be set under cgpu->qlock write lock to prevent it being dereferenced
- * while still in use. */
+/* Put a new unqueued work item in cgpu->unqueued_work under cgpu->qlock till
+ * the driver tells us it's full so that it may extract the work item using
+ * the get_queued() function which adds it to the hashtable on
+ * cgpu->queued_work. */
 static void fill_queue(struct thr_info *mythr, struct cgpu_info *cgpu, struct device_drv *drv, const int thr_id)
 {
 	do {
-		bool need_work;
-
-		rd_lock(&cgpu->qlock);
-		need_work = (HASH_COUNT(cgpu->queued_work) == cgpu->queued_count);
-		rd_unlock(&cgpu->qlock);
-
-		if (need_work) {
-			struct work *work = get_work(mythr, thr_id);
-
-			wr_lock(&cgpu->qlock);
-			HASH_ADD_INT(cgpu->queued_work, id, work);
-			wr_unlock(&cgpu->qlock);
-		}
+		wr_lock(&cgpu->qlock);
+		if (!cgpu->unqueued_work)
+			cgpu->unqueued_work = get_work(mythr, thr_id);
+		wr_unlock(&cgpu->qlock);
+
 		/* The queue_full function should be used by the driver to
 		 * actually place work items on the physical device if it
 		 * does have a queue. */
 	} while (!drv->queue_full(cgpu));
 }
 
-/* This function is for retrieving one work item from the queued hashtable of
- * available work items that are not yet physically on a device (which is
- * flagged with the work->queued bool). Code using this function must be able
- * to handle NULL as a return which implies there is no work available. */
+/* This function is for retrieving one work item from the unqueued pointer and
+ * adding it to the hashtable of queued work. Code using this function must be
+ * able to handle NULL as a return which implies there is no work available. */
 struct work *get_queued(struct cgpu_info *cgpu)
 {
-	struct work *work, *tmp, *ret = NULL;
+	struct work *work = NULL;
 
 	wr_lock(&cgpu->qlock);
-	HASH_ITER(hh, cgpu->queued_work, work, tmp) {
-		if (!work->queued) {
-			work->queued = true;
-			cgpu->queued_count++;
-			ret = work;
-			break;
-		}
+	if (cgpu->unqueued_work) {
+		work = cgpu->unqueued_work;
+		HASH_ADD_INT(cgpu->queued_work, id, work);
+		cgpu->unqueued_work = NULL;
 	}
 	wr_unlock(&cgpu->qlock);
 
-	return ret;
+	return work;
 }
 
 /* This function is for finding an already queued work item in the
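For orientation, this is roughly how a queued driver consumes the new API from its queue_full callback: it extracts the staged item with get_queued() and feeds it to the hardware. A hedged sketch, not code from this commit; the mydev_* names and the device I/O helpers are hypothetical, and only get_queued() and the queue_full contract come from cgminer itself.

#include "miner.h"

/* Hypothetical queued-driver callback (sketch only): fill_queue() keeps
 * staging one item and calling this until it returns true ("queue full"). */
static bool mydev_queue_full(struct cgpu_info *cgpu)
{
	struct work *work = get_queued(cgpu);

	if (!work)
		return false;		/* nothing staged; let fill_queue retry */
	mydev_send_work(cgpu, work);	/* hypothetical device I/O helper */
	return mydev_slots_free(cgpu) == 0;	/* hypothetical capacity check */
}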
@@ -6402,8 +6386,7 @@ struct work *__find_work_bymidstate(struct work *que, char *midstate, size_t mid
 	struct work *work, *tmp, *ret = NULL;
 
 	HASH_ITER(hh, que, work, tmp) {
-		if (work->queued &&
-		    memcmp(work->midstate, midstate, midstatelen) == 0 &&
+		if (memcmp(work->midstate, midstate, midstatelen) == 0 &&
 		    memcmp(work->data + offset, data, datalen) == 0) {
 			ret = work;
 			break;
@@ -6443,8 +6426,7 @@ struct work *clone_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate
 void __work_completed(struct cgpu_info *cgpu, struct work *work)
 {
-	if (work->queued)
-		cgpu->queued_count--;
+	cgpu->queued_count--;
 	HASH_DEL(cgpu->queued_work, work);
 }
 
 /* This function should be used by queued device drivers when they're sure
@@ -6475,23 +6457,17 @@ struct work *take_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate,
 static void flush_queue(struct cgpu_info *cgpu)
 {
-	struct work *work, *tmp;
-	int discarded = 0;
+	struct work *work = NULL;
 
 	wr_lock(&cgpu->qlock);
-	HASH_ITER(hh, cgpu->queued_work, work, tmp) {
-		/* Can only discard the work items if they're not physically
-		 * queued on the device. */
-		if (!work->queued) {
-			HASH_DEL(cgpu->queued_work, work);
-			discard_work(work);
-			discarded++;
-		}
-	}
+	work = cgpu->unqueued_work;
+	cgpu->unqueued_work = NULL;
 	wr_unlock(&cgpu->qlock);
 
-	if (discarded)
-		applog(LOG_DEBUG, "Discarded %d queued work items", discarded);
+	if (work) {
+		free_work(work);
+		applog(LOG_DEBUG, "Discarded queued work item");
+	}
 }
 
 /* This version of hash work is for devices that are fast enough to always
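The comment above __work_completed() still applies: a driver releases a work item only once the device is done with it. As a hedged illustration of that completion path (a hypothetical mydev_handle_result handler; the midstate/offset lookup values mirror the usual 80-byte block-header layout used by existing queued drivers, while take_queued_work_bymidstate() and submit_nonce() are existing cgminer calls):

#include <stdint.h>
#include "miner.h"

/* Hypothetical result handler (sketch): match the reported result to a
 * queued work item, submit the nonce, and release the item. */
static void mydev_handle_result(struct cgpu_info *cgpu, char *midstate,
				char *blockdata, uint32_t nonce)
{
	/* 32-byte midstate plus 12 header bytes at offset 64 identify the
	 * work item; take_* also removes it from cgpu->queued_work. */
	struct work *work = take_queued_work_bymidstate(cgpu, midstate, 32,
							blockdata, 64, 12);

	if (!work)
		return;		/* already flushed or completed elsewhere */
	submit_nonce(cgpu->thr[0], work, nonce);
	free_work(work);
}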

driver-bflsc.c (2 changes)

@@ -948,7 +948,7 @@ static void flush_one_dev(struct cgpu_info *bflsc, int dev)
 	rd_lock(&bflsc->qlock);
 	HASH_ITER(hh, bflsc->queued_work, work, tmp) {
-		if (work->queued && work->subid == dev) {
+		if (work->subid == dev) {
 			// devflag is used to flag stale work
 			work->devflag = true;
 			did = true;

driver-klondike.c (14 changes)

@@ -720,7 +720,7 @@ static void klondike_check_nonce(struct cgpu_info *klncgpu, KLIST *kitem)
 	cgtime(&tv_now);
 	rd_lock(&(klncgpu->qlock));
 	HASH_ITER(hh, klncgpu->queued_work, look, tmp) {
-		if (look->queued && ms_tdiff(&tv_now, &(look->tv_stamp)) < OLD_WORK_MS &&
+		if (ms_tdiff(&tv_now, &(look->tv_stamp)) < OLD_WORK_MS &&
 		    (look->subid == (kline->wr.dev*256 + kline->wr.workid))) {
 			work = look;
 			break;
@@ -1026,13 +1026,11 @@ static bool klondike_send_work(struct cgpu_info *klncgpu, int dev, struct work *
 	cgtime(&tv_old);
 	wr_lock(&klncgpu->qlock);
 	HASH_ITER(hh, klncgpu->queued_work, look, tmp) {
-		if (look->queued) {
-			if (ms_tdiff(&tv_old, &(look->tv_stamp)) > OLD_WORK_MS) {
-				__work_completed(klncgpu, look);
-				free_work(look);
-			} else
-				wque_size++;
-		}
+		if (ms_tdiff(&tv_old, &(look->tv_stamp)) > OLD_WORK_MS) {
+			__work_completed(klncgpu, look);
+			free_work(look);
+		} else
+			wque_size++;
 	}
 	wr_unlock(&klncgpu->qlock);

miner.h (2 changes)

@@ -574,6 +574,7 @@ struct cgpu_info {
 	pthread_rwlock_t qlock;
 	struct work *queued_work;
+	struct work *unqueued_work;
 	unsigned int queued_count;
 	bool shutdown;
@@ -1381,7 +1382,6 @@ struct work {
 	bool stale;
 	bool mandatory;
 	bool block;
-	bool queued;
 
 	bool stratum;
 	char *job_id;
