@@ -1289,15 +1289,18 @@ static bool workio_get_work(struct workio_cmd *wc)
 	return true;
 }
 
-static bool stale_work(struct work *work)
+static bool stale_work(struct work *work, bool rolling)
 {
-	struct timeval now;
 	bool ret = false;
 	char *hexstr;
 
-	gettimeofday(&now, NULL);
-	if ((now.tv_sec - work->tv_staged.tv_sec) > opt_scantime)
-		return true;
+	if (!rolling) {
+		struct timeval now;
+
+		gettimeofday(&now, NULL);
+		if ((now.tv_sec - work->tv_staged.tv_sec) > opt_scantime)
+			return true;
+	}
 
 	/* Only use the primary pool for determination as the work may
 	 * interleave at times of new blocks */
@@ -1326,7 +1329,7 @@ static void *submit_work_thread(void *userdata)
 
 	pthread_detach(pthread_self());
 
-	if (stale_work(work)) {
+	if (stale_work(work, false)) {
 		applog(LOG_WARNING, "Stale share detected, discarding");
 		total_stale++;
 		pool->stale_shares++;
@@ -1335,7 +1338,7 @@ static void *submit_work_thread(void *userdata)
 
 	/* submit solution to bitcoin via JSON-RPC */
 	while (!submit_upstream_work(work)) {
-		if (stale_work(work)) {
+		if (stale_work(work, false)) {
 			applog(LOG_WARNING, "Stale share detected, discarding");
 			total_stale++;
 			pool->stale_shares++;
@@ -2412,7 +2415,7 @@ static void flush_requests(void)
 
 static inline bool can_roll(struct work *work)
 {
-	return (work->pool && !stale_work(work) && work->pool->has_rolltime &&
+	return (work->pool && !stale_work(work, true) && work->pool->has_rolltime &&
 		work->rolls < 11 && !work->clone);
 }
 
@@ -2520,7 +2523,7 @@ retry:
 		goto retry;
 	}
 
-	if (stale_work(work_heap)) {
+	if (stale_work(work_heap, false)) {
 		dec_queued();
 		discard_work(work_heap);
 		goto retry;
@@ -2826,7 +2829,7 @@ static void *miner_thread(void *userdata)
 			decay_time(&hash_divfloat , (double)((MAXTHREADS / total_hashes) ? : 1));
 			hash_div = hash_divfloat;
 			needs_work = true;
-		} else if (work_restart[thr_id].restart || stale_work(work) ||
+		} else if (work_restart[thr_id].restart || stale_work(work, false) ||
 			work->blk.nonce >= MAXTHREADS - hashes_done)
 				needs_work = true;
 	}
@@ -3051,7 +3054,7 @@ static void *gpuminer_thread(void *userdata)
 		if (diff.tv_sec > opt_scantime ||
 		    work->blk.nonce >= MAXTHREADS - hashes ||
 		    work_restart[thr_id].restart ||
-		    stale_work(work)) {
+		    stale_work(work, false)) {
 			/* Ignore any reads since we're getting new work and queue a clean buffer */
 			status = clEnqueueWriteBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0,
 				BUFFERSIZE, blank_res, 0, NULL, NULL);
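
For context, a minimal standalone sketch of the behaviour the hunks above introduce, written against simplified stand-in types rather than cgminer's real structures: stale_work() only applies the opt_scantime age test when rolling is false, so can_roll() (which passes true) no longer treats old-but-rollable work as stale, while share submission and the miner threads (which pass false) still do. The struct fields, opt_scantime value and main() here are placeholders for illustration only.

/* Sketch only; not cgminer code. Build with: cc -std=c99 sketch.c */
#include <stdbool.h>
#include <stdio.h>
#include <sys/time.h>

static int opt_scantime = 60;		/* seconds; placeholder value */

struct work {
	struct timeval tv_staged;	/* when the work was staged */
	int rolls;			/* how many times ntime was rolled */
};

static bool stale_work(struct work *work, bool rolling)
{
	/* Apply the wall-clock scantime test only for real submissions;
	 * a rolling check skips it, as in the patch above. */
	if (!rolling) {
		struct timeval now;

		gettimeofday(&now, NULL);
		if ((now.tv_sec - work->tv_staged.tv_sec) > opt_scantime)
			return true;
	}
	/* Further pool/block checks would follow here in the real code. */
	return false;
}

static bool can_roll(struct work *work)
{
	/* Mirrors the patched can_roll(): the relaxed check lets work
	 * older than opt_scantime still be rolled. */
	return !stale_work(work, true) && work->rolls < 11;
}

int main(void)
{
	struct work w = { .rolls = 0 };

	gettimeofday(&w.tv_staged, NULL);
	w.tv_staged.tv_sec -= 120;	/* pretend it was staged 2 minutes ago */

	printf("stale for submission: %d\n", stale_work(&w, false));	/* 1 */
	printf("can still roll:       %d\n", can_roll(&w));		/* 1 */
	return 0;
}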