
Properly detect stale work based on the time since staging and discard it instead of handing it on, but be more lax about how long work can be divided, allowing division for up to the full scantime.
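
In outline: work now goes stale once more than opt_scantime seconds have passed since it was staged, regardless of which pool it came from, and stale work is discarded rather than handed on to a miner thread. A minimal compile-and-run sketch of that age test, reusing the field and option names from the diff below (struct work_stub and the default value here are stand-ins, not cgminer's actual definitions):

#include <stdbool.h>
#include <stddef.h>
#include <sys/time.h>

/* Stand-in for the relevant part of cgminer's struct work. */
struct work_stub {
	struct timeval tv_staged;	/* set when the work was staged */
};

/* Stand-in for the --scan-time option (opt_scantime in main.c);
 * the default value here is assumed for the sketch. */
static int opt_scantime = 60;

/* Work older than opt_scantime seconds since staging is stale. */
static bool stale_by_age(const struct work_stub *work)
{
	struct timeval now;

	gettimeofday(&now, NULL);
	return (now.tv_sec - work->tv_staged.tv_sec) > opt_scantime;
}

int main(void)
{
	struct work_stub work;

	gettimeofday(&work.tv_staged, NULL);	/* freshly staged */
	return stale_by_age(&work) ? 1 : 0;	/* 0: still fresh */
}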

nfactor-troky
Con Kolivas, 13 years ago
parent commit 6cc8d22beb
1 changed file: main.c (34 lines changed)

@@ -1212,15 +1212,15 @@ static bool stale_work(struct work *work)
 	bool ret = false;
 	char *hexstr;
 
+	gettimeofday(&now, NULL);
+	if ((now.tv_sec - work->tv_staged.tv_sec) > opt_scantime)
+		return true;
+
 	/* Only use the primary pool for determination as the work may
 	 * interleave at times of new blocks */
 	if (work->pool != current_pool())
 		return ret;
 
-	gettimeofday(&now, NULL);
-	if ((now.tv_sec - work->tv_staged.tv_sec) > opt_scantime)
-		return ret;
-
 	hexstr = bin2hex(work->data, 36);
 	if (unlikely(!hexstr)) {
 		applog(LOG_ERR, "submit_work_thread OOM");
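
(The age test previously sat below the primary-pool check and returned ret, which is initialised to false, so over-age work was never actually flagged stale, and work from non-primary pools skipped the test entirely. It now runs first and returns true, so age alone marks work stale for every pool; this appears to be what the commit message means by "properly detect".)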
@@ -2203,12 +2203,24 @@ static bool queue_request(void)
 	return true;
 }
 
+static void discard_work(struct work *work)
+{
+	if (!work->clone) {
+		if (work->pool)
+			work->pool->discarded_work++;
+		total_discarded++;
+		if (opt_debug)
+			applog(LOG_DEBUG, "Discarded work");
+	} else if (opt_debug)
+		applog(LOG_DEBUG, "Discarded cloned work");
+	free(work);
+}
+
 static void discard_staged(void)
 {
 	struct timespec abstime = {};
 	struct timeval now;
 	struct work *work_heap;
-	struct pool *pool;
 
 	/* Just in case we fell in a hole and missed a queue filling */
 	if (unlikely(!requests_staged()))
@@ -2221,11 +2233,8 @@ static void discard_staged(void)
 	if (unlikely(!work_heap))
 		return;
 
-	pool = work_heap->pool;
-	free(work_heap);
+	discard_work(work_heap);
 	dec_queued();
-	pool->discarded_work++;
-	total_discarded++;
 }
 
 static void flush_requests(void)
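
(The new discard_work() helper centralises the bookkeeping discard_staged() previously did inline: for original work it bumps the per-pool and global discard counters, for clones it only logs, presumably so a clone and its original are not counted twice, and it frees the work either way. The same helper is reused when stale work is caught in the fetch path below.)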
@@ -2290,7 +2299,7 @@ static bool divide_work(struct timeval *now, struct work *work, uint32_t hash_di
 	if ((uint64_t)work->blk.nonce + hash_inc < MAXTHREADS) {
 		/* Don't keep handing it out if it's getting old, but try to
 		 * roll it instead */
-		if ((now->tv_sec - work->tv_staged.tv_sec) > opt_scantime * 2 / 3) {
+		if ((now->tv_sec - work->tv_staged.tv_sec) > opt_scantime) {
 			if (!can_roll(work))
 				return false;
 			else {
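
(Work could previously only be divided among threads until it was two thirds of the scantime old; it can now be divided right up to the full scantime before cgminer falls back to trying to roll it. This is the "more lax" half of the commit message.)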
@@ -2369,6 +2378,11 @@ retry:
 		goto retry;
 	}
 
+	if (stale_work(work_heap)) {
+		discard_work(work_heap);
+		goto retry;
+	}
+
 	pool = work_heap->pool;
 	/* If we make it here we have succeeded in getting fresh work */
 	if (pool_tclear(pool, &pool->idle))
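
To show the shape of the new control flow in isolation, here is a compile-only sketch of the pattern the last hunk adds to the work-fetch retry loop; get_staged_work() is a hypothetical stand-in, and stale_work()/discard_work() are only declared, not defined, so this illustrates the flow rather than linking as-is:

#include <stdbool.h>
#include <stddef.h>

struct work;

/* Declaration-only stand-ins for the functions in the diff above;
 * get_staged_work() is hypothetical, the other two exist in main.c. */
extern struct work *get_staged_work(void);
extern bool stale_work(struct work *work);
extern void discard_work(struct work *work);

/* Keep pulling staged work until a fresh item turns up: anything that
 * went stale while queued is discarded on the spot and the fetch is
 * retried, instead of the stale work being handed to a miner thread. */
static struct work *get_fresh_work(void)
{
	struct work *work;

	while ((work = get_staged_work())) {
		if (!stale_work(work))
			return work;
		discard_work(work);
	}
	return NULL;	/* nothing staged */
}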
