1
0
mirror of https://github.com/GOSTSec/sgminer synced 2025-02-05 11:34:16 +00:00

Abstract out work cloning and clone $mining_threads copies whenever a rollable work item is found and return a clone instead.

This commit is contained in:
Con Kolivas 2012-06-24 18:10:17 +10:00
parent a8ae1a43ea
commit 610302afcb

View File

@ -3618,6 +3618,48 @@ static bool reuse_work(struct work *work)
	return false;
}
/* Produce a heap-allocated shallow copy of a work item, flagged as a
 * clone so the rest of the pipeline knows it must not be re-staged as
 * an original.  Ownership of the returned copy passes to the caller. */
static struct work *make_clone(struct work *work)
{
	struct work *copy = make_work();

	memcpy(copy, work, sizeof(*copy));
	/* A clone never carries the longpoll flag of its source. */
	copy->clone = true;
	copy->longpoll = false;
	return copy;
}
/* Clones work by rolling it if possible, and returning a clone instead of the
 * original work item which gets staged again to possibly be rolled again in
 * the future.
 *
 * Up to mining_threads rolled clones are pushed to the stage thread; the
 * ordering matters: each clone is staged BEFORE the source is rolled, and a
 * fresh clone of the rolled source is made for the next iteration.  On
 * success the (rolled) original is re-staged and the last clone is handed to
 * the caller; otherwise the spare clone is freed and the caller keeps the
 * original. */
static struct work *clone_work(struct work *work)
{
	struct work *work_clone = make_clone(work);
	bool staged_any = false;
	int rolls;

	for (rolls = 0; rolls < mining_threads; rolls++) {
		if (!(can_roll(work) && should_roll(work)))
			break;
		applog(LOG_DEBUG, "Pushing rolled converted work to stage thread");
		if (unlikely(!stage_work(work_clone))) {
			/* The unstaged clone is released below; previously
			 * staged clones are already owned by the queue. */
			staged_any = false;
			break;
		}
		roll_work(work);
		work_clone = make_clone(work);
		staged_any = true;
	}

	if (staged_any) {
		/* NOTE(review): return of stage_work() is unchecked here —
		 * confirm the original is never lost on a staging failure. */
		stage_work(work);
		return work_clone;
	}

	free_work(work_clone);
	return work;
}
static bool get_work(struct work *work, bool requested, struct thr_info *thr,
		     const int thr_id)
{
@ -3702,18 +3744,11 @@ retry:
			pool_resus(pool);
	}

	work_heap = clone_work(work_heap);
	memcpy(work, work_heap, sizeof(struct work));
	free_work(work_heap);
	if (!work->clone)
		dec_queued();

	ret = true;
out:
@ -4039,8 +4074,7 @@ enum {
/* Stage another work item from the work returned in a longpoll */
static void convert_to_work(json_t *val, int rolltime, struct pool *pool)
{
	struct work *work;
	bool rc;

	work = make_work();
@ -4073,18 +4107,7 @@ static void convert_to_work(json_t *val, int rolltime, struct pool *pool)
		return;
	}

	work = clone_work(work);

	applog(LOG_DEBUG, "Pushing converted work to stage thread");