
Minimise locking and unlocking when getting counts by reusing shared mutex lock functions.
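The diff below splits each count accessor into a locked public wrapper and an unlocked double-underscore helper, so a caller that needs several related counts can take the protecting mutex once, read them all, and unlock once. Here is a minimal standalone sketch of that pattern, assuming plain pthreads and simplified counter names; it is illustrative only, not cgminer code.

/*
 * Minimal sketch of the locked-wrapper / unlocked-helper pattern
 * (plain pthreads; the counter names are illustrative, not cgminer globals).
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t qd_lock = PTHREAD_MUTEX_INITIALIZER;
static int total_queued;	/* protected by qd_lock */
static int pool_queued;		/* protected by qd_lock */

/* Unlocked readers: the caller must already hold qd_lock. */
static int __global_queued(void)
{
	return total_queued;
}

static int __pool_queued(void)
{
	return pool_queued;
}

/* Locked wrapper for callers that only need a single value. */
static int global_queued(void)
{
	int ret;

	pthread_mutex_lock(&qd_lock);
	ret = __global_queued();
	pthread_mutex_unlock(&qd_lock);
	return ret;
}

int main(void)
{
	int tq, cq;

	/* A caller needing both counts pays one lock/unlock pair, not two. */
	pthread_mutex_lock(&qd_lock);
	tq = __global_queued();
	cq = __pool_queued();
	pthread_mutex_unlock(&qd_lock);

	printf("total=%d pool=%d single=%d\n", tq, cq, global_queued());
	return 0;
}

The commit applies the same idea to the qd_lock and stgd_lock critical sections shown in the hunks that follow.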

nfactor-troky
Con Kolivas, 12 years ago
commit d66742a8c1
cgminer.c (61 changes)

@@ -1334,16 +1334,26 @@ void decay_time(double *f, double fadd)
 	*f = (fadd + *f * 0.58) / 1.58;
 }
 
+static int __total_staged(void)
+{
+	return HASH_COUNT(staged_work);
+}
+
 static int total_staged(void)
 {
 	int ret;
 
 	mutex_lock(stgd_lock);
-	ret = HASH_COUNT(staged_work);
+	ret = __total_staged();
 	mutex_unlock(stgd_lock);
 	return ret;
 }
 
+static int __pool_staged(struct pool *pool)
+{
+	return pool->staged;
+}
+
 static int pool_staged(struct pool *pool)
 {
 	int ret;
@@ -1354,13 +1364,6 @@ static int pool_staged(struct pool *pool)
 	return ret;
 }
 
-static int current_staged(void)
-{
-	struct pool *pool = current_pool();
-
-	return pool_staged(pool);
-}
-
 #ifdef HAVE_CURSES
 WINDOW *mainwin, *statuswin, *logwin;
 #endif
@@ -2251,23 +2254,33 @@ static void dec_queued(struct pool *pool)
 	mutex_unlock(&qd_lock);
 }
 
+static int __pool_queued(struct pool *pool)
+{
+	return pool->queued;
+}
+
 static int current_queued(void)
 {
 	struct pool *pool = current_pool();
 	int ret;
 
 	mutex_lock(&qd_lock);
-	ret = pool->queued;
+	ret = __pool_queued(pool);
 	mutex_unlock(&qd_lock);
 	return ret;
 }
 
+static int __global_queued(void)
+{
+	return total_queued;
+}
+
 static int global_queued(void)
 {
 	int ret;
 
 	mutex_lock(&qd_lock);
-	ret = total_queued;
+	ret = __global_queued();
 	mutex_unlock(&qd_lock);
 	return ret;
 }
@@ -2275,11 +2288,17 @@ static int global_queued(void)
 static bool enough_work(void)
 {
 	int cq, cs, ts, tq, maxq = opt_queue + mining_threads;
+	struct pool *pool = current_pool();
 
-	cq = current_queued();
-	cs = current_staged();
-	ts = total_staged();
-	tq = global_queued();
+	mutex_lock(&qd_lock);
+	cq = __pool_queued(pool);
+	tq = __global_queued();
+	mutex_unlock(&qd_lock);
+
+	mutex_lock(stgd_lock);
+	cs = __pool_staged(pool);
+	ts = __total_staged();
+	mutex_unlock(stgd_lock);
 
 	if (((cs || cq >= opt_queue) && ts >= maxq) ||
 	    ((cs || cq) && tq >= maxq))
@@ -3753,13 +3772,19 @@ static void pool_resus(struct pool *pool)
 bool queue_request(struct thr_info *thr, bool needed)
 {
 	int cq, cs, ts, tq, maxq = opt_queue + mining_threads;
+	struct pool *pool = current_pool();
 	struct workio_cmd *wc;
 	bool lag = false;
 
-	cq = current_queued();
-	cs = current_staged();
-	ts = total_staged();
-	tq = global_queued();
+	mutex_lock(&qd_lock);
+	cq = __pool_queued(pool);
+	tq = __global_queued();
+	mutex_unlock(&qd_lock);
+
+	mutex_lock(stgd_lock);
+	cs = __pool_staged(pool);
+	ts = __total_staged();
+	mutex_unlock(stgd_lock);
 
 	if (needed && cq >= maxq && !ts && !opt_fail_only) {
 		/* If we're queueing work faster than we can stage it, consider
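Taken together, the enough_work() and queue_request() hunks replace four locked accessor calls (current_queued(), current_staged(), total_staged() and global_queued(), each of which took and released a mutex) with two explicit critical sections, one on qd_lock and one on stgd_lock, so each call site now locks and unlocks exactly twice instead of four times.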
