
Clean up the pool switching so that it no longer depends on whether the work can be rolled, by setting a lagging flag first and only then the idle flag.

Branch: nfactor-troky
Author: Con Kolivas, 13 years ago
Commit: ced4a9ac92

Changed files (2):
  1. main.c  (48 lines changed)
  2. miner.h (1 line changed)
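
Both main.c hunks key off cgminer's pool_tset()/pool_tclear() helpers, which flip a per-pool flag under the pool lock and return its previous value, so the "not responding" / "not providing work fast enough" warnings fire only on the first transition and pool_resus() only on actual recovery. The sketch below is a minimal, self-contained illustration of that test-and-set behaviour, not a copy of the tree at this commit; the real struct pool and mutex wrappers live in miner.h/main.c.

/* Illustrative sketch: test-and-set / test-and-clear on a per-pool flag.
 * Each helper returns the flag's previous value under the pool lock, so
 * callers can act only on the first transition (e.g. log "not responding"
 * once rather than on every failed retry). */
#include <pthread.h>
#include <stdbool.h>

struct pool {
	pthread_mutex_t pool_lock;
	bool idle;	/* pool has stopped responding */
	bool lagging;	/* pool responds, but not fast enough */
};

static bool pool_tset(struct pool *pool, bool *var)
{
	bool ret;

	pthread_mutex_lock(&pool->pool_lock);
	ret = *var;
	*var = true;
	pthread_mutex_unlock(&pool->pool_lock);

	return ret;
}

static bool pool_tclear(struct pool *pool, bool *var)
{
	bool ret;

	pthread_mutex_lock(&pool->pool_lock);
	ret = *var;
	*var = false;
	pthread_mutex_unlock(&pool->pool_lock);

	return ret;
}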

main.c

@@ -2852,9 +2852,11 @@ static bool pool_active(struct pool *pool, bool pinging)
 
 static void pool_died(struct pool *pool)
 {
-	applog(LOG_WARNING, "Pool %d %s not responding!", pool->pool_no, pool->rpc_url);
-	gettimeofday(&pool->tv_idle, NULL);
-	switch_pools(NULL);
+	if (!pool_tset(pool, &pool->idle)) {
+		applog(LOG_WARNING, "Pool %d %s not responding!", pool->pool_no, pool->rpc_url);
+		gettimeofday(&pool->tv_idle, NULL);
+		switch_pools(NULL);
+	}
 }
 
 static void pool_resus(struct pool *pool)
@@ -3053,30 +3055,18 @@ retry:
 	}
 	requested = false;
 
-	if (!requests_staged() && can_roll(work)) {
-		/* Only print this message once each time we shift to localgen */
-		if (!pool_tset(pool, &pool->idle)) {
-			applog(LOG_WARNING, "Pool %d not providing work fast enough, generating work locally",
+	if (!requests_staged()) {
+		if (!pool_tset(pool, &pool->lagging)) {
+			applog(LOG_WARNING, "Pool %d not providing work fast enough",
 				pool->pool_no);
 			pool->localgen_occasions++;
 			total_lo++;
-			gettimeofday(&pool->tv_idle, NULL);
-		} else {
-			struct timeval tv_now, diff;
-
-			gettimeofday(&tv_now, NULL);
-			timeval_subtract(&diff, &tv_now, &pool->tv_idle);
-			/* Attempt to switch pools if this one has been unresponsive for >half
-			 * a block's duration */
-			if (diff.tv_sec > 300) {
-				pool_died(pool);
-				goto retry;
-			}
 		}
-
-		roll_work(work);
-		ret = true;
-		goto out;
+		if (can_roll(work)) {
+			roll_work(work);
+			ret = true;
+			goto out;
+		}
 	}
 
 	gettimeofday(&now, NULL);
@@ -3088,10 +3078,7 @@ retry:
 	/* wait for 1st response, or get cached response */
 	work_heap = tq_pop(getq, &abstime);
 	if (unlikely(!work_heap)) {
-		/* Attempt to switch pools if this one has mandatory work that
-		 * has timed out or does not support rolltime */
-		pool->localgen_occasions++;
-		total_lo++;
+		/* Attempt to switch pools if this one times out */
 		pool_died(pool);
 		goto retry;
 	}
@@ -3104,8 +3091,11 @@ retry:
 	pool = work_heap->pool;
 	/* If we make it here we have succeeded in getting fresh work */
-	if (pool_tclear(pool, &pool->idle))
-		pool_resus(pool);
+	if (!work_heap->mined) {
+		pool_tclear(pool, &pool->lagging);
+		if (pool_tclear(pool, &pool->idle))
+			pool_resus(pool);
+	}
 
 	memcpy(work, work_heap, sizeof(*work));

miner.h

@@ -310,6 +310,7 @@ struct pool {
 	int accepted, rejected;
 	bool submit_fail;
 	bool idle;
+	bool lagging;
 	bool probed;
 	bool enabled;
