mirror of https://github.com/GOSTSec/sgminer synced 2025-01-11 07:17:58 +00:00

Create a fill_queue function that builds a hashtable of as many work items as the device driver requires, until the driver flags the queue full.

Con Kolivas 2013-02-16 12:14:13 +11:00
parent 5e3253a7cf
commit 95b2020263
2 changed files with 30 additions and 1 deletion

cgminer.c

@@ -5595,6 +5595,25 @@ static void hash_sole_work(struct thr_info *mythr)
 	}
 }
 
+/* Create a hashtable of work items for devices with a queue. The device
+ * driver must have a custom queue_full function or it will default to true
+ * and put only one work item in the queue. Work items should not be removed
+ * from this hashtable until they are no longer in use anywhere. Once a work
+ * item is physically queued on the device itself, the work->queued flag
+ * should be set under cgpu->qlock write lock to prevent it being dereferenced
+ * while still in use. */
+static void fill_queue(struct thr_info *mythr, struct cgpu_info *cgpu, struct device_drv *drv, const int thr_id)
+{
+	thread_reportout(mythr);
+	do {
+		struct work *work = get_work(mythr, thr_id);
+
+		wr_lock(&cgpu->qlock);
+		HASH_ADD_INT(cgpu->queued_work, id, work);
+		wr_unlock(&cgpu->qlock);
+	} while (!drv->queue_full(cgpu));
+}
+
 /* This version of hash work is for devices that are fast enough to always
  * perform a full nonce range and need a queue to maintain the device busy.
  * Work creation and destruction is not done from within this function
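The new queue_full callback is the device driver's back-pressure signal: fill_queue keeps calling get_work and adding items to the cgpu->queued_work hashtable under the write lock until the callback reports the queue full (the callback itself runs after the lock is released). As an illustrative sketch only, a driver could count the queued items with uthash's HASH_COUNT against a fixed depth; mydev_queue_full and MYDEV_QUEUE_DEPTH are invented names, not part of this commit:

/* Hypothetical driver callback: report the queue full once it holds
 * MYDEV_QUEUE_DEPTH items. Counting is read-only, so a read lock on
 * cgpu->qlock is sufficient. */
#define MYDEV_QUEUE_DEPTH 8

static bool mydev_queue_full(struct cgpu_info *cgpu)
{
	bool full;

	rd_lock(&cgpu->qlock);
	full = HASH_COUNT(cgpu->queued_work) >= MYDEV_QUEUE_DEPTH;
	rd_unlock(&cgpu->qlock);

	return full;
}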
@@ -5614,7 +5633,7 @@ static void hash_queued_work(struct thr_info *mythr)
 
 		mythr->work_restart = false;
 
-		//fill_queue(mythr, cgpu, drv, thr_id);
+		fill_queue(mythr, cgpu, drv, thr_id);
 
 		thread_reportin(mythr);
 		hashes = drv->scanwork(mythr);
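With the call now live, every pass of hash_queued_work tops the queue up before handing over to the driver's scanwork. The comment above fill_queue also spells out the driver side of the contract: a work->queued flag (referenced there, but not yet added to struct work by this commit) should be set under the write lock once an item is physically committed to the device. A hedged sketch of what that could look like in a driver's scanwork path; mydev_queue_one and mydev_send_work are invented:

/* Invented device-I/O call, assumed to hand one work item to the
 * hardware. */
static void mydev_send_work(struct cgpu_info *cgpu, struct work *work);

/* Hypothetical scanwork helper: pick an item not yet on the hardware,
 * push it, then flag it under the write lock so nothing frees it while
 * the device still references it. */
static void mydev_queue_one(struct cgpu_info *cgpu)
{
	struct work *work, *tmp, *found = NULL;

	rd_lock(&cgpu->qlock);
	HASH_ITER(hh, cgpu->queued_work, work, tmp) {
		if (!work->queued) {
			found = work;
			break;
		}
	}
	rd_unlock(&cgpu->qlock);

	if (!found)
		return;

	mydev_send_work(cgpu, found);

	wr_lock(&cgpu->qlock);
	found->queued = true;	/* the flag the fill_queue comment describes */
	wr_unlock(&cgpu->qlock);
}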
@@ -6589,6 +6608,8 @@ void fill_device_api(struct cgpu_info *cgpu)
 		drv->thread_shutdown = &noop_thread_shutdown;
 	if (!drv->thread_enable)
 		drv->thread_enable = &noop_thread_enable;
+	if (!drv->queue_full)
+		drv->queue_full = &noop_get_stats;
 }
 
 void enable_device(struct cgpu_info *cgpu)
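Rather than adding a new noop, the commit borrows noop_get_stats, whose bool (*)(struct cgpu_info *) signature happens to match queue_full and which simply returns true. That yields exactly the default promised in the fill_queue comment: a driver without its own queue_full sees the queue report full after the first item. Conceptually the default amounts to the following (noop_queue_full is an illustrative name, not code from this commit):

/* Equivalent of the borrowed default: always "full", so fill_queue's
 * do/while loop queues exactly one work item. */
static bool noop_queue_full(struct cgpu_info *cgpu)
{
	return true;
}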
@@ -6612,6 +6633,9 @@ void enable_device(struct cgpu_info *cgpu)
 	}
 #endif
 	fill_device_api(cgpu);
+
+	rwlock_init(&cgpu->qlock);
+	cgpu->queued_work = NULL;
 }
 
 struct _cgpu_devid_counter {
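enable_device now initialises the lock and the empty hashtable head (uthash treats a NULL head as an empty table). The other half of the lifecycle is deliberately absent from this commit: per the fill_queue comment, items must stay in the table until nothing references them. A sketch of what a later cleanup path might look like, assuming a hypothetical completion hook; HASH_DEL is uthash, and free_work is cgminer's existing destructor:

/* Hypothetical completion path: once the device is known to be done
 * with a work item, drop it from the table and free it. */
static void mydev_work_completed(struct cgpu_info *cgpu, struct work *work)
{
	wr_lock(&cgpu->qlock);
	HASH_DEL(cgpu->queued_work, work);
	wr_unlock(&cgpu->qlock);

	free_work(work);
}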

miner.h

@@ -299,6 +299,8 @@ struct device_drv {
 	bool (*prepare_work)(struct thr_info *, struct work *);
 	int64_t (*scanhash)(struct thr_info *, struct work *, int64_t);
 	int64_t (*scanwork)(struct thr_info *);
+
+	bool (*queue_full)(struct cgpu_info *);
 	void (*hw_error)(struct thr_info *);
 	void (*thread_shutdown)(struct thr_info *);
 	void (*thread_enable)(struct thr_info *);
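A driver opts in by pointing the new member at its own callback in its device_drv definition; anything left NULL is backfilled by fill_device_api. A hedged sketch, with mydev_* names invented and the field list trimmed to the members relevant here:

static struct device_drv mydev_drv = {
	.dname = "mydev",
	.name = "MYD",
	.scanwork = mydev_scanwork,	/* invented, queue-based scanwork */
	.queue_full = mydev_queue_full,	/* the callback sketched earlier */
	/* hw_error, thread_shutdown, thread_enable left NULL here so
	 * fill_device_api substitutes the noop defaults */
};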
@@ -500,6 +502,9 @@ struct cgpu_info {
 	int dev_throttle_count;
 
 	struct cgminer_stats cgminer_stats;
+
+	pthread_rwlock_t qlock;
+	struct work *queued_work;
 };
 
 extern bool add_cgpu(struct cgpu_info*);
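Because fill_queue keys the table with HASH_ADD_INT on work->id, a driver can later map an id reported by the hardware back to its work item with the matching uthash lookup. A sketch under the same assumptions as above (mydev_find_queued is invented):

/* Hypothetical result path: resolve a device-reported id to the queued
 * work item. Lookup is read-only, so a read lock suffices. */
static struct work *mydev_find_queued(struct cgpu_info *cgpu, int id)
{
	struct work *work;

	rd_lock(&cgpu->qlock);
	HASH_FIND_INT(cgpu->queued_work, &id, work);
	rd_unlock(&cgpu->qlock);

	return work;
}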