Getwork fail was not being detected. Remove a vast amount of unused variables and functions used in the old queue request mechanism and redefine the getfail testing.
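
In outline, the commit collapses the old per-pool queued/staged bookkeeping into the pre-existing global counters. Below is a minimal, self-contained sketch of the pattern the diff leaves behind; the plain pthread mutex, the main() driver and the build line are stand-ins invented for illustration (cgminer wraps its own qd_lock behind mutex_lock()/mutex_unlock()):

/* Sketch only: global work-queue accounting after this commit.
 * pthread_mutex_* calls stand in for cgminer's mutex_lock()/mutex_unlock()
 * wrappers; the per-pool pool->queued counter and its NULL-pool guards
 * are gone entirely. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t qd_lock = PTHREAD_MUTEX_INITIALIZER;
static int total_queued;

static void inc_queued(void)
{
	pthread_mutex_lock(&qd_lock);
	total_queued++;
	pthread_mutex_unlock(&qd_lock);
}

static void dec_queued(void)
{
	pthread_mutex_lock(&qd_lock);
	total_queued--;
	pthread_mutex_unlock(&qd_lock);
}

static int global_queued(void)
{
	int ret;

	pthread_mutex_lock(&qd_lock);
	ret = total_queued;
	pthread_mutex_unlock(&qd_lock);
	return ret;
}

int main(void)	/* build: cc -pthread sketch.c */
{
	inc_queued();
	printf("queued: %d\n", global_queued());
	dec_queued();
	return 0;
}

With only these globals left, the redefined getfail test in get_work() (see the last cgminer.c hunks below) reduces to: a full complement of requests outstanding (global_queued() >= mining_threads + opt_queue) yet nothing staged (!total_staged()) means the pool is suspected of silently failing to return work, so the stall probe runs.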

nfactor-troky · Con Kolivas, 12 years ago · commit d8c76bbd08

2 changed files:
  1. cgminer.c (94 lines changed)
  2. miner.h (2 lines changed)

--- a/cgminer.c
+++ b/cgminer.c
@@ -1334,32 +1334,12 @@ void decay_time(double *f, double fadd)
 	*f = (fadd + *f * 0.58) / 1.58;
 }
 
-static int __total_staged(void)
-{
-	return HASH_COUNT(staged_work);
-}
-
 static int total_staged(void)
 {
 	int ret;
 
 	mutex_lock(stgd_lock);
-	ret = __total_staged();
-	mutex_unlock(stgd_lock);
-	return ret;
-}
-
-static int __pool_staged(struct pool *pool)
-{
-	return pool->staged;
-}
-
-static int pool_staged(struct pool *pool)
-{
-	int ret;
-
-	mutex_lock(stgd_lock);
-	ret = pool->staged;
+	ret = HASH_COUNT(staged_work);
 	mutex_unlock(stgd_lock);
 	return ret;
 }
@@ -2232,44 +2212,20 @@ static void push_curl_entry(struct curl_ent *ce, struct pool *pool)
 
 /* This is overkill, but at least we'll know accurately how much work is
  * queued to prevent ever being left without work */
-static void inc_queued(struct pool *pool)
+static void inc_queued(void)
 {
-	if (unlikely(!pool))
-		return;
-
 	mutex_lock(&qd_lock);
-	pool->queued++;
 	total_queued++;
 	mutex_unlock(&qd_lock);
 }
 
-static void dec_queued(struct pool *pool)
+static void dec_queued(void)
 {
-	if (unlikely(!pool))
-		return;
-
 	mutex_lock(&qd_lock);
-	pool->queued--;
 	total_queued--;
 	mutex_unlock(&qd_lock);
 }
 
-static int __pool_queued(struct pool *pool)
-{
-	return pool->queued;
-}
-
-static int current_queued(void)
-{
-	struct pool *pool = current_pool();
-	int ret;
-
-	mutex_lock(&qd_lock);
-	ret = __pool_queued(pool);
-	mutex_unlock(&qd_lock);
-	return ret;
-}
-
 static int __global_queued(void)
 {
 	return total_queued;
@@ -2388,7 +2344,7 @@ out:
 static void *get_work_thread(void *userdata)
 {
 	struct workio_cmd *wc = (struct workio_cmd *)userdata;
-	int cq, cs, ts, tq, maxq = opt_queue + mining_threads;
+	int ts, tq, maxq = opt_queue + mining_threads;
 	struct pool *pool = current_pool();
 	struct curl_ent *ce = NULL;
 	struct work *ret_work;
@@ -2398,15 +2354,8 @@ static void *get_work_thread(void *userdata)
 
 	applog(LOG_DEBUG, "Creating extra get work thread");
 
-	mutex_lock(&qd_lock);
-	cq = __pool_queued(pool);
-	tq = __global_queued();
-	mutex_unlock(&qd_lock);
-
-	mutex_lock(stgd_lock);
-	cs = __pool_staged(pool);
-	ts = __total_staged();
-	mutex_unlock(stgd_lock);
+	tq = global_queued();
+	ts = total_staged();
 
 	if (ts >= maxq)
 		goto out;
@@ -2431,7 +2380,7 @@ static void *get_work_thread(void *userdata)
 	if (ts <= opt_queue)
 		lagging = true;
 	pool = ret_work->pool = select_pool(lagging);
-	inc_queued(pool);
+	inc_queued();
 
 	ce = pop_curl_entry(pool);
 
@@ -2735,7 +2684,6 @@ static void discard_stale(void)
 	HASH_ITER(hh, staged_work, work, tmp) {
 		if (stale_work(work, false)) {
 			HASH_DEL(staged_work, work);
-			work->pool->staged--;
 			discard_work(work);
 			stale++;
 		}
@@ -2922,7 +2870,6 @@ static bool hash_push(struct work *work)
 		staged_rollable++;
 	if (likely(!getq->frozen)) {
 		HASH_ADD_INT(staged_work, id, work);
-		work->pool->staged++;
 		HASH_SORT(staged_work, tv_sort);
 	} else
 		rc = false;
@@ -2930,7 +2877,7 @@ static bool hash_push(struct work *work)
 	mutex_unlock(stgd_lock);
 
 	if (dec)
-		dec_queued(work->pool);
+		dec_queued();
 
 	return rc;
 }
@@ -3981,7 +3928,6 @@ static struct work *hash_pop(const struct timespec *abstime)
 		} else
 			work = staged_work;
 		HASH_DEL(staged_work, work);
-		work->pool->staged--;
 		if (work_rollable(work))
 			staged_rollable--;
 	}
@@ -4039,15 +3985,14 @@ static struct work *clone_work(struct work *work)
 	return work;
 }
 
-static bool get_work(struct work *work, bool requested, struct thr_info *thr,
-		     const int thr_id)
+static bool get_work(struct work *work, struct thr_info *thr, const int thr_id)
 {
-	bool newreq = false, ret = false;
 	struct timespec abstime = {0, 0};
-	struct timeval now;
 	struct work *work_heap;
-	int failures = 0, cq;
+	struct timeval now;
 	struct pool *pool;
+	int failures = 0;
+	bool ret = false;
 
 	/* Tell the watchdog thread this thread is waiting on getwork and
 	 * should not be restarted */
@@ -4059,23 +4004,15 @@ static bool get_work(struct work *work, bool requested, struct thr_info *thr,
 		return true;
 	}
 
-	cq = current_queued();
 retry:
 	pool = current_pool();
-	if (!requested || cq < opt_queue) {
-		if (unlikely(!queue_request(thr, true))) {
-			applog(LOG_WARNING, "Failed to queue_request in get_work");
-			goto out;
-		}
-		newreq = true;
-	}
 
 	if (reuse_work(work)) {
 		ret = true;
 		goto out;
 	}
 
-	if (!pool->lagging && requested && !newreq && !pool_staged(pool) && cq >= mining_threads + opt_queue) {
+	if (!pool->lagging && !total_staged() && global_queued() >= mining_threads + opt_queue) {
 		struct cgpu_info *cgpu = thr->cgpu;
 		bool stalled = true;
 		int i;
@@ -4097,7 +4034,6 @@ retry:
 		}
 	}
 
-	newreq = requested = false;
 	gettimeofday(&now, NULL);
 	abstime.tv_sec = now.tv_sec + 60;
 
@@ -4275,7 +4211,6 @@ void *miner_thread(void *userdata)
 	int64_t hashes_done = 0;
 	int64_t hashes;
 	struct work *work = make_work();
-	bool requested = false;
 	const bool primary = (!mythr->device_thread) || mythr->primary_thread;
 
 	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
@@ -4301,12 +4236,11 @@ void *miner_thread(void *userdata)
 		mythr->work_restart = false;
 		if (api->free_work && likely(work->pool))
 			api->free_work(mythr, work);
-		if (unlikely(!get_work(work, requested, mythr, thr_id))) {
+		if (unlikely(!get_work(work, mythr, thr_id))) {
 			applog(LOG_ERR, "work retrieval failed, exiting "
 				"mining thread %d", thr_id);
 			break;
 		}
-		requested = false;
 		gettimeofday(&tv_workstart, NULL);
 		work->blk.nonce = 0;
 		cgpu->max_hashes = 0;

--- a/miner.h
+++ b/miner.h
@@ -723,8 +723,6 @@ struct pool {
 	int accepted, rejected;
 	int seq_rejects;
 	int solved;
-	int queued;
-	int staged;
 	bool submit_fail;
 	bool idle;
 
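
As an aside on the staged side of the same cleanup: uthash's HASH_COUNT() reads a stored item count in O(1), which is why total_staged() can count directly under the lock and the __total_staged() helper could go. A hypothetical self-contained illustration follows; the struct work shape, the mutex, and main() are invented for the example and differ from cgminer's real stgd_lock and work type:

/* Illustration only: counting a uthash table under a mutex. */
#include <pthread.h>
#include <stdio.h>
#include "uthash.h"

struct work {
	int id;			/* hash key */
	UT_hash_handle hh;	/* makes the struct hashable */
};

static struct work *staged_work;	/* empty table == NULL head */
static pthread_mutex_t stgd_lock = PTHREAD_MUTEX_INITIALIZER;

static int total_staged(void)
{
	int ret;

	pthread_mutex_lock(&stgd_lock);
	ret = HASH_COUNT(staged_work);	/* O(1): reads tbl->num_items */
	pthread_mutex_unlock(&stgd_lock);
	return ret;
}

int main(void)
{
	struct work w = { .id = 1 };

	pthread_mutex_lock(&stgd_lock);
	HASH_ADD_INT(staged_work, id, &w);
	pthread_mutex_unlock(&stgd_lock);
	printf("staged: %d\n", total_staged());
	return 0;
}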
