
Rewrite the convoluted get_work() function to be much simpler and roll work as much as possible with each new work item.

Branch: nfactor-troky
Con Kolivas, 13 years ago
commit dec99ab739

Changed file: cgminer.c (165 lines changed)
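
The whole point of the rewrite is to squeeze as many work items as possible out of every getwork response by "rolling" it. Rolling, done by roll_work() (its body is outside this diff), derives a fresh and equally valid work item from an existing one, in practice by bumping the ntime field of the 80-byte block header when the pool permits it via the rolltime flag. A minimal sketch of the idea against a bare header buffer; this is an illustration only, not cgminer's struct work, and it assumes ntime is stored big-endian, which is how cgminer keeps it internally:

#include <stdint.h>

/* Illustration: advance ntime (bytes 68..71 of the 80-byte block header)
 * so the same job covers a brand-new nonce search space.  cgminer's real
 * roll_work() additionally refreshes the work ID (see the inc_totalwork()
 * hunk below) and related bookkeeping. */
static void roll_header_ntime(unsigned char header[80])
{
	uint32_t ntime = (uint32_t)header[68] << 24 | (uint32_t)header[69] << 16 |
			 (uint32_t)header[70] << 8  | (uint32_t)header[71];

	ntime++;	/* one second later: a different, still-valid header */
	header[68] = ntime >> 24;
	header[69] = ntime >> 16;
	header[70] = ntime >> 8;
	header[71] = ntime;
}

reuse_work(), also outside this diff, is the guard that decides whether an item can still be rolled; the new roll_cloned() helper below leans on it to stage a clone for every successful roll.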

--- a/cgminer.c
+++ b/cgminer.c
@@ -1525,19 +1525,30 @@ out:
 	return rc;
 }
 
+static int inc_totalwork(void)
+{
+	int ret;
+
+	mutex_lock(&control_lock);
+	ret = total_work++;
+	mutex_unlock(&control_lock);
+	return ret;
+}
+
 static struct work *make_work(void)
 {
 	struct work *work = calloc(1, sizeof(struct work));
 
 	if (unlikely(!work))
 		quit(1, "Failed to calloc work in make_work");
-	work->id = total_work++;
+	work->id = inc_totalwork();
 	return work;
 }
 
 static void free_work(struct work *work)
 {
 	free(work);
+	work = NULL;
 }
 
 static void workio_cmd_free(struct workio_cmd *wc)
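
The hunk above, together with the next one in roll_work(), replaces a bare total_work++ with inc_totalwork(), so concurrent threads that create or roll work can no longer race on the counter and hand out duplicate work IDs. cgminer's mutex_lock()/mutex_unlock() are thin wrappers around the pthread mutex calls; a standalone sketch of the same serialized-counter pattern written directly against POSIX threads:

#include <pthread.h>

static pthread_mutex_t control_lock = PTHREAD_MUTEX_INITIALIZER;
static int total_work;

/* Post-increment under the lock so every caller receives a unique value,
 * no matter how many threads ask at the same time. */
static int inc_totalwork(void)
{
	int ret;

	pthread_mutex_lock(&control_lock);
	ret = total_work++;
	pthread_mutex_unlock(&control_lock);
	return ret;
}

An atomic fetch-and-add would do the same job, but reusing the existing control_lock keeps the change small and avoids introducing a second synchronization primitive.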
@@ -2984,7 +2995,7 @@ static void roll_work(struct work *work)
 
 	/* This is now a different work item so it needs a different ID for the
 	 * hashtable */
-	work->id = total_work++;
+	work->id = inc_totalwork();
 }
 
 static bool reuse_work(struct work *work)
@@ -2996,103 +3007,83 @@ static bool reuse_work(struct work *work)
 	return false;
 }
 
-static bool get_work(struct work *work, bool requested, struct thr_info *thr,
-		     const int thr_id)
+static void roll_cloned(struct work *work)
 {
-	bool newreq = false, ret = false;
-	struct timespec abstime = {};
+	struct work *work_clone = make_work();
+
+	memcpy(work_clone, work, sizeof(struct work));
+	while (reuse_work(work)) {
+		work_clone->clone = true;
+		if (opt_debug)
+			applog(LOG_DEBUG, "Pushing rolled cloned work to stage thread");
+		if (unlikely(!stage_work(work_clone)))
+			break;
+		work_clone = make_work();
+		memcpy(work_clone, work, sizeof(struct work));
+	}
+	free_work(work_clone);
+}
+
+static struct work *get_work(bool requested, struct thr_info *thr, const int thr_id)
+{
+	struct timespec abstime = {0, 0};
+	struct work *work = NULL;
+	bool newreq = false;
 	struct timeval now;
-	struct work *work_heap;
 	struct pool *pool;
-	int failures = 0;
 
 	/* Tell the watchdog thread this thread is waiting on getwork and
 	 * should not be restarted */
 	thread_reportout(thr);
 
-retry:
-	pool = current_pool();
-	if (!requested || requests_queued() < opt_queue) {
-		if (unlikely(!queue_request(thr, true))) {
-			applog(LOG_WARNING, "Failed to queue_request in get_work");
-			goto out;
-		}
-		newreq = true;
-	}
-
-	if (reuse_work(work)) {
-		ret = true;
-		goto out;
-	}
-
-	if (requested && !newreq && !requests_staged() && requests_queued() >= mining_threads &&
-	    !pool_tset(pool, &pool->lagging)) {
-		applog(LOG_WARNING, "Pool %d not providing work fast enough", pool->pool_no);
-		pool->getfail_occasions++;
-		total_go++;
-	}
-
-	newreq = requested = false;
-	gettimeofday(&now, NULL);
-	abstime.tv_sec = now.tv_sec + 60;
-
-	if (opt_debug)
-		applog(LOG_DEBUG, "Popping work from get queue to get work");
-
-	/* wait for 1st response, or get cached response */
-	work_heap = hash_pop(&abstime);
-	if (unlikely(!work_heap)) {
-		/* Attempt to switch pools if this one times out */
-		pool_died(pool);
-		goto retry;
-	}
-
-	if (stale_work(work_heap, false)) {
-		dec_queued();
-		discard_work(work_heap);
-		goto retry;
-	}
+	while (!work) {
+		pool = current_pool();
+		if (!requested || requests_queued() < opt_queue) {
+			if (unlikely(!queue_request(thr, true)))
+				quit(1, "Failed to queue_request in get_work");
+			newreq = true;
+		}
+
+		if (requested && !newreq && !requests_staged() && requests_queued() >= mining_threads &&
+		    !pool_tset(pool, &pool->lagging)) {
+			applog(LOG_WARNING, "Pool %d not providing work fast enough", pool->pool_no);
+			pool->getfail_occasions++;
+			total_go++;
+		}
+
+		newreq = requested = false;
+		gettimeofday(&now, NULL);
+		abstime.tv_sec = now.tv_sec + 60;
+
+		if (opt_debug)
+			applog(LOG_DEBUG, "Popping work from get queue to get work");
+
+		/* wait for 1st response, or get cached response */
+		work = hash_pop(&abstime);
+		if (unlikely(!work)) {
+			/* Attempt to switch pools if this one times out */
+			pool_died(pool);
+		} else if (stale_work(work, false)) {
+			dec_queued();
+			discard_work(work);
+		}
+	}
 
-	pool = work_heap->pool;
+	pool = work->pool;
 	/* If we make it here we have succeeded in getting fresh work */
-	if (!work_heap->mined) {
+	if (likely(pool)) {
 		pool_tclear(pool, &pool->lagging);
 		if (pool_tclear(pool, &pool->idle))
 			pool_resus(pool);
 	}
 
-	memcpy(work, work_heap, sizeof(*work));
-
-	/* Hand out a clone if we can roll this work item */
-	if (reuse_work(work_heap)) {
-		if (opt_debug)
-			applog(LOG_DEBUG, "Pushing divided work to get queue head");
-		stage_work(work_heap);
-		work->clone = true;
-	} else {
-		dec_queued();
-		free_work(work_heap);
-	}
-
-	ret = true;
-out:
-	if (unlikely(ret == false)) {
-		if ((opt_retries >= 0) && (++failures > opt_retries)) {
-			applog(LOG_ERR, "Failed %d times to get_work");
-			return ret;
-		}
-		applog(LOG_DEBUG, "Retrying after %d seconds", fail_pause);
-		sleep(fail_pause);
-		fail_pause += opt_fail_pause;
-		goto retry;
-	}
-	fail_pause = opt_fail_pause;
+	roll_cloned(work);
+	dec_queued();
 
 	work->thr_id = thr_id;
+	work->mined = true;
 	thread_reportin(thr);
-	if (ret)
-		work->mined = true;
-	return ret;
+	return work;
 }
 
 bool submit_work_sync(struct thr_info *thr, const struct work *work_in)
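
The rewritten get_work() drops the retry and out labels, the failures counter, and the fail_pause back-off: it now spins in while (!work) until hash_pop() returns a usable item, treats a failed queue_request() as fatal via quit(), and hands back a freshly popped work pointer instead of filling in a caller-supplied one. The abstime built inside the loop is an absolute deadline of now plus 60 seconds; hash_pop() itself is not in this diff, but that kind of deadline normally feeds a timed wait on a condition variable. A self-contained sketch of that wait pattern, assuming POSIX threads; the names and the single work_staged flag are stand-ins, not cgminer's real staging queue:

#include <pthread.h>
#include <stdbool.h>
#include <sys/time.h>
#include <time.h>

static pthread_mutex_t stage_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t stage_cond = PTHREAD_COND_INITIALIZER;
static bool work_staged;	/* stand-in for "the staged queue is non-empty" */

/* Wait up to 'seconds' for staged work, mirroring how get_work() builds a
 * now+60s absolute deadline and passes it to hash_pop(). */
static bool timed_pop(int seconds)
{
	struct timespec abstime = {0, 0};
	struct timeval now;
	bool got;

	gettimeofday(&now, NULL);
	abstime.tv_sec = now.tv_sec + seconds;
	abstime.tv_nsec = now.tv_usec * 1000;

	pthread_mutex_lock(&stage_lock);
	while (!work_staged) {
		/* returns nonzero (ETIMEDOUT) once the deadline passes */
		if (pthread_cond_timedwait(&stage_cond, &stage_lock, &abstime))
			break;
	}
	got = work_staged;
	if (got)
		work_staged = false;	/* consume the staged item */
	pthread_mutex_unlock(&stage_lock);
	return got;
}

A producer would set work_staged and signal stage_cond under the same lock. In get_work() the timeout is exactly what triggers pool_died(): a pool that stages nothing within 60 seconds is marked dead, and the next loop iteration asks current_pool() again, which may by then point at a different pool.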
@@ -3211,11 +3202,8 @@ void *miner_thread(void *userdata)
 		work_restart[thr_id].restart = 0;
 		if (api->free_work && likely(work->pool))
 			api->free_work(mythr, work);
-		if (unlikely(!get_work(work, requested, mythr, thr_id))) {
-			applog(LOG_ERR, "work retrieval failed, exiting "
-				"mining thread %d", thr_id);
-			break;
-		}
+		free_work(work);
+		work = get_work(requested, mythr, thr_id);
 		requested = false;
 		cycle = (can_roll(work) && should_roll(work)) ? 1 : def_cycle;
 		gettimeofday(&tv_workstart, NULL);
@@ -3332,7 +3320,7 @@ enum {
 /* Stage another work item from the work returned in a longpoll */
 static void convert_to_work(json_t *val, bool rolltime, struct pool *pool)
 {
-	struct work *work, *work_clone;
+	struct work *work;
 	bool rc;
 
 	work = make_work();
@@ -3350,18 +3338,7 @@ static void convert_to_work(json_t *val, bool rolltime, struct pool *pool)
 	 * allows testwork to know whether LP discovered the block or not. */
 	test_work_current(work, true);
 
-	work_clone = make_work();
-	memcpy(work_clone, work, sizeof(struct work));
-	while (reuse_work(work)) {
-		work_clone->clone = true;
-		if (opt_debug)
-			applog(LOG_DEBUG, "Pushing rolled converted work to stage thread");
-		if (unlikely(!stage_work(work_clone)))
-			break;
-		work_clone = make_work();
-		memcpy(work_clone, work, sizeof(struct work));
-	}
-	free_work(work_clone);
+	roll_cloned(work);
 
 	if (opt_debug)
 		applog(LOG_DEBUG, "Pushing converted work to stage thread");
