@@ -70,7 +70,7 @@ struct workio_cmd {
 	enum workio_commands	cmd;
 	struct thr_info		*thr;
 	struct work		*work;
-	bool			needed;
+	struct pool		*pool;
 };
 
 struct strategies strategies[] = {
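
For readability, the request message reassembled after this hunk: the per-request needed hint is dropped, and each getwork request now names the pool it is destined for.

struct workio_cmd {
	enum workio_commands	cmd;
	struct thr_info		*thr;
	struct work		*work;
	struct pool		*pool;
};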
@@ -1955,11 +1955,9 @@ static inline struct pool *select_pool(bool lagging)
 	if (pool_strategy == POOL_BALANCE)
 		return select_balanced(cp);
 
-	if (pool_strategy != POOL_LOADBALANCE && (!lagging || opt_fail_only)) {
-		if (cp->prio != 0)
-			switch_pools(NULL);
-		pool = current_pool();
-	} else
+	if (pool_strategy != POOL_LOADBALANCE && (!lagging || opt_fail_only))
+		pool = cp;
+	else
 		pool = NULL;
 
 	while (!pool) {
@@ -2251,17 +2249,19 @@ static void push_curl_entry(struct curl_ent *ce, struct pool *pool)
 
 /* This is overkill, but at least we'll know accurately how much work is
  * queued to prevent ever being left without work */
-static void inc_queued(void)
+static void inc_queued(struct pool *pool)
 {
 	mutex_lock(&qd_lock);
 	total_queued++;
+	pool->queued++;
 	mutex_unlock(&qd_lock);
 }
 
-static void dec_queued(void)
+static void dec_queued(struct pool *pool)
 {
 	mutex_lock(&qd_lock);
 	total_queued--;
+	pool->queued--;
 	mutex_unlock(&qd_lock);
 }
 
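A note on the counters this hunk introduces: total_queued and the new pool->queued are both guarded by qd_lock, and every inc_queued(pool) must eventually be matched by a dec_queued() on the same pool (in hash_push, or on the failure paths in get_work_thread below), otherwise that pool's count drifts upward and the maxq checks in the new queue_request() will starve it. A minimal debug sketch of that invariant; sanity_check_queued() is hypothetical and not part of this patch, though it only uses existing cgminer globals (pools[], total_pools, qd_lock, applog):

/* Hypothetical helper, not in this patch: verify the per-pool queued
 * counts still sum to the global total_queued. */
static void sanity_check_queued(void)
{
	int i, sum = 0;

	mutex_lock(&qd_lock);
	for (i = 0; i < total_pools; i++)
		sum += pools[i]->queued;
	if (sum != total_queued)
		applog(LOG_ERR, "queued mismatch: pools sum %d, total %d",
		       sum, total_queued);
	mutex_unlock(&qd_lock);
}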
@@ -2380,68 +2380,46 @@ out:
 	return cloned;
 }
 
+static bool queue_request(void);
+
 static void *get_work_thread(void *userdata)
 {
 	struct workio_cmd *wc = (struct workio_cmd *)userdata;
-	int ts, tq, maxq = opt_queue + mining_threads;
 	struct pool *pool = current_pool();
 	struct work *ret_work= NULL;
 	struct curl_ent *ce = NULL;
-	bool lagging = false;
 
 	pthread_detach(pthread_self());
 
 	applog(LOG_DEBUG, "Creating extra get work thread");
 
-retry:
-	tq = global_queued();
-	ts = total_staged();
-
-	if (ts >= maxq)
-		goto out;
-
-	if (ts >= opt_queue && tq >= maxq)
-		goto out;
+	pool = wc->pool;
 
-	if (clone_available())
+	if (clone_available()) {
+		dec_queued(pool);
 		goto out;
+	}
 
 	ret_work = make_work();
-	if (wc->thr)
-		ret_work->thr = wc->thr;
-	else
-		ret_work->thr = NULL;
+	ret_work->thr = NULL;
 
 	if (opt_benchmark) {
 		get_benchmark_work(ret_work);
 		ret_work->queued = true;
 	} else {
-		if (!ts)
-			lagging = true;
-		pool = ret_work->pool = select_pool(lagging);
-		inc_queued();
+		ret_work->pool = wc->pool;
 
 		if (!ce)
 			ce = pop_curl_entry(pool);
 
-		/* Check that we haven't staged work via other threads while
-		 * waiting for a curl entry */
-		if (total_staged() >= maxq) {
-			dec_queued();
-			free_work(ret_work);
-			goto out;
-		}
-
 		/* obtain new work from bitcoin via JSON-RPC */
 		if (!get_upstream_work(ret_work, ce->curl)) {
 			/* pause, then restart work-request loop */
 			applog(LOG_DEBUG, "json_rpc_call failed on get work, retrying");
-			lagging = true;
-			dec_queued();
+			dec_queued(pool);
+			queue_request();
 			free_work(ret_work);
-			goto retry;
+			goto out;
 		}
 
 		ret_work->queued = true;
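The retry loop is gone from get_work_thread: on a failed fetch the thread now gives back its queued slot, queues a fresh request and exits instead of looping itself. One request's lifecycle after this change, sketched as a comment (simplified, drawn from the hunks in this patch):

/*
 * Lifecycle of one getwork request after this patch (simplified):
 *
 *   queue_request()    picks a pool, inc_queued(pool), sends a
 *                      WC_GET_WORK message naming that pool
 *   get_work_thread()  fetches work for wc->pool; on failure it does
 *                      dec_queued(pool), queue_request(), goto out
 *   hash_push()        stages the result: pool->staged++ and, for
 *                      queued work, dec_queued(work->pool)
 *   hash_pop() /       consume staged work: pool->staged-- (and
 *   discard_stale()    hash_pop queues a replacement request)
 */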
@@ -2620,8 +2598,6 @@ static struct pool *priority_pool(int choice)
 	return ret;
 }
 
-static bool queue_request(struct thr_info *thr, bool needed);
-
 void switch_pools(struct pool *selected)
 {
 	struct pool *pool, *last_pool;
@@ -2696,8 +2672,6 @@ void switch_pools(struct pool *selected)
 	mutex_lock(&lp_lock);
 	pthread_cond_broadcast(&lp_cond);
 	mutex_unlock(&lp_lock);
-
-	queue_request(NULL, false);
 }
 
 static void discard_work(struct work *work)
@@ -2721,6 +2695,7 @@ static void discard_stale(void)
 	HASH_ITER(hh, staged_work, work, tmp) {
 		if (stale_work(work, false)) {
 			HASH_DEL(staged_work, work);
+			work->pool->staged--;
 			discard_work(work);
 			stale++;
 		}
@@ -2730,7 +2705,7 @@ static void discard_stale(void)
 	if (stale) {
 		applog(LOG_DEBUG, "Discarded %d stales that didn't match current hash", stale);
 		while (stale-- > 0)
-			queue_request(NULL, false);
+			queue_request();
 	}
 }
 
@@ -2927,9 +2902,11 @@ static bool hash_push(struct work *work)
 	pthread_cond_signal(&getq->cond);
 	mutex_unlock(stgd_lock);
 
+	work->pool->staged++;
+
 	if (work->queued) {
 		work->queued = false;
-		dec_queued();
+		dec_queued(work->pool);
 	}
 
 	return rc;
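pool->staged is the counterpart of pool->queued: hash_push moves an item from queued to staged in one step. The two counters live under different locks, queued under qd_lock and staged under stgd_lock, so each is individually consistent but the pair is never read atomically; summarised (illustrative, from the hunks in this patch):

/*
 * Counter pairing in this patch:
 *
 *   pool->queued  (qd_lock)    ++ in queue_request() via inc_queued(),
 *                              -- in hash_push() or on a failed fetch
 *                              in get_work_thread()
 *   pool->staged  (stgd_lock)  ++ in hash_push(), -- in hash_pop()
 *                              and discard_stale()
 */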
@@ -3919,9 +3896,28 @@ static void pool_resus(struct pool *pool)
 	switch_pools(NULL);
 }
 
-static bool queue_request(struct thr_info *thr, bool needed)
+static bool queue_request(void)
 {
+	int ts, tq, maxq = opt_queue + mining_threads;
+	struct pool *pool, *cp;
 	struct workio_cmd *wc;
+	bool lagging;
+
+	ts = total_staged();
+	tq = global_queued();
+	if (ts && ts + tq >= maxq)
+		return true;
+
+	cp = current_pool();
+	lagging = !opt_fail_only && cp->lagging && !ts && cp->queued >= maxq;
+	if (!lagging && cp->staged + cp->queued >= maxq)
+		return true;
+
+	pool = select_pool(lagging);
+	if (pool->staged + pool->queued >= maxq)
+		return true;
+
+	inc_queued(pool);
 
 	/* fill out work request message */
 	wc = calloc(1, sizeof(*wc));
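queue_request() now does its own throttling instead of trusting the callers' thr/needed hints, which is why every call site collapses to a bare queue_request(). The chosen pool travels with the message (wc->pool, next hunk), so get_work_thread no longer re-selects it. A worked example of the three early returns, with assumed values opt_queue = 1 and mining_threads = 4, giving maxq = 5:

/*
 * Worked example (assumed: opt_queue = 1, mining_threads = 4, maxq = 5):
 *
 *   ts = 3, tq = 2          ts && ts + tq >= maxq: enough work staged
 *                           or in flight globally, return true.
 *   ts = 0, cp->queued = 5, cp->lagging set, not --failover-only
 *                           the current pool has a full queue but
 *                           nothing staged, so lagging is true and
 *                           select_pool(true) may pick another pool.
 *   pool->staged + pool->queued >= maxq
 *                           the chosen pool is already saturated:
 *                           return true without queueing more for it.
 */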
@@ -3931,8 +3927,7 @@ static bool queue_request(struct thr_info *thr, bool needed)
 	}
 
 	wc->cmd = WC_GET_WORK;
-	wc->thr = thr;
-	wc->needed = needed;
+	wc->pool = pool;
 
 	applog(LOG_DEBUG, "Queueing getwork request to work thread");
 
@@ -3967,12 +3962,13 @@ static struct work *hash_pop(const struct timespec *abstime)
 		} else
 			work = staged_work;
 		HASH_DEL(staged_work, work);
+		work->pool->staged--;
 		if (work_rollable(work))
 			staged_rollable--;
 	}
 	mutex_unlock(stgd_lock);
 
-	queue_request(NULL, false);
+	queue_request();
 
 	return work;
 }
@@ -4089,7 +4085,10 @@ retry:
 	pool = work_heap->pool;
 	/* If we make it here we have succeeded in getting fresh work */
 	if (!work_heap->mined) {
-		pool_tclear(pool, &pool->lagging);
+		/* Only clear the lagging flag if we are staging them at a
+		 * rate faster then we're using them */
+		if (pool->lagging && total_staged())
+			pool_tclear(pool, &pool->lagging);
 		if (pool_tclear(pool, &pool->idle))
 			pool_resus(pool);
 	}
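pool->lagging is now only cleared while work remains staged, i.e. when fetches are outpacing consumption; a single fresh work item arriving into an empty queue no longer clears it. Illustrative comment, not part of the patch:

/*
 * Example: fresh work arrives but total_staged() == 0. The miners are
 * consuming work as fast as it is fetched, so pool->lagging stays set
 * and select_pool(lagging) remains free to draw work from other pools.
 */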
@@ -5722,7 +5721,7 @@ begin_bench:
 #endif
 
 	for (i = 0; i < mining_threads + opt_queue; i++)
-		queue_request(NULL, false);
+		queue_request();
 
 	/* main loop - simply wait for workio thread to exit. This is not the
 	 * normal exit path and only occurs should the workio_thread die