From 057a38eb1cce78654f6187a5b18e9d09f757f026 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Fri, 22 Jun 2012 13:37:32 +1000 Subject: [PATCH 01/47] When disabling fanspeed monitoring on ADL failure, remove any twin GPU association. This could have been leading to hangs on machines with dual GPU cards when ADL failed. --- adl.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/adl.c b/adl.c index 473cba0d..c7393f48 100644 --- a/adl.c +++ b/adl.c @@ -695,6 +695,11 @@ int gpu_fanpercent(int gpu) applog(LOG_WARNING, "You will need to start cgminer from scratch to correct this"); applog(LOG_WARNING, "Disabling fanspeed monitoring on this device"); ga->has_fanspeed = false; + if (ga->twin) { + applog(LOG_WARNING, "Disabling fanspeed linking on GPU twins"); + ga->twin->twin = NULL; + ga->twin = NULL; + } } return ret; } From c5a21fabf0e61c02125650c5099dfdfd70602b0a Mon Sep 17 00:00:00 2001 From: ckolivas Date: Sat, 23 Jun 2012 23:43:22 +1000 Subject: [PATCH 02/47] Extend nrolltime to support the expiry= parameter. Do this by turning the rolltime bool into an integer set to the expiry time. If the pool supports rolltime but not expiry= then set the expiry time to the standard scantime. --- cgminer.c | 17 ++++++++++------- miner.h | 4 ++-- util.c | 17 +++++++++++------ 3 files changed, 23 insertions(+), 15 deletions(-) diff --git a/cgminer.c b/cgminer.c index 81e29ecf..3f315d8c 100644 --- a/cgminer.c +++ b/cgminer.c @@ -1622,7 +1622,7 @@ static bool submit_upstream_work(const struct work *work, CURL *curl) int thr_id = work->thr_id; struct cgpu_info *cgpu = thr_info[thr_id].cgpu; struct pool *pool = work->pool; - bool rolltime; + int rolltime; uint32_t *hash32; char hashshow[64+1] = ""; @@ -2163,6 +2163,9 @@ static bool stale_work(struct work *work, bool share) if (share) { if ((now.tv_sec - work->tv_staged.tv_sec) >= opt_expiry) return true; + } else if (work->rolls) { + if ((now.tv_sec - work->tv_staged.tv_sec) >= work->rolltime) + return true; } else if ((now.tv_sec - work->tv_staged.tv_sec) >= opt_scantime) return true; @@ -3380,7 +3383,7 @@ static bool pool_active(struct pool *pool, bool pinging) bool ret = false; json_t *val; CURL *curl; - bool rolltime; + int rolltime; curl = curl_easy_init(); if (unlikely(!curl)) { @@ -3566,8 +3569,7 @@ static inline bool should_roll(struct work *work) static inline bool can_roll(struct work *work) { - return (work->pool && !stale_work(work, false) && work->rolltime && - work->rolls < 11 && !work->clone); + return (work->pool && !stale_work(work, false) && work->rolltime && !work->clone); } static void roll_work(struct work *work) @@ -4015,9 +4017,10 @@ enum { }; /* Stage another work item from the work returned in a longpoll */ -static void convert_to_work(json_t *val, bool rolltime, struct pool *pool) +static void convert_to_work(json_t *val, int rolltime, struct pool *pool) { struct work *work, *work_clone; + int rolled = 0; bool rc; work = make_work(); @@ -4052,7 +4055,7 @@ static void convert_to_work(json_t *val, bool rolltime, struct pool *pool) work_clone = make_work(); memcpy(work_clone, work, sizeof(struct work)); - while (reuse_work(work)) { + while (reuse_work(work) && rolled++ < mining_threads) { work_clone->clone = true; work_clone->longpoll = false; applog(LOG_DEBUG, "Pushing rolled converted work to stage thread"); @@ -4113,7 +4116,7 @@ static void *longpoll_thread(void *userdata) struct timeval start, end; CURL *curl = NULL; int failures = 0; - bool rolltime; + int rolltime; curl = curl_easy_init(); if (unlikely(!curl)) { diff --git
a/miner.h b/miner.h index 69cdcf24..8675195f 100644 --- a/miner.h +++ b/miner.h @@ -530,7 +530,7 @@ extern pthread_rwlock_t netacc_lock; extern const uint32_t sha256_init_state[]; extern json_t *json_rpc_call(CURL *curl, const char *url, const char *userpass, - const char *rpc_req, bool, bool, bool *, + const char *rpc_req, bool, bool, int *, struct pool *pool, bool); extern char *bin2hex(const unsigned char *p, size_t len); extern bool hex2bin(unsigned char *p, const char *hexstr, size_t len); @@ -732,7 +732,7 @@ struct work { bool mined; bool clone; bool cloned; - bool rolltime; + int rolltime; bool longpoll; bool stale; bool mandatory; diff --git a/util.c b/util.c index 509d9381..703be545 100644 --- a/util.c +++ b/util.c @@ -56,7 +56,7 @@ struct upload_buffer { struct header_info { char *lp_path; - bool has_rolltime; + int rolltime; char *reason; }; @@ -160,8 +160,13 @@ static size_t resp_hdr_cb(void *ptr, size_t size, size_t nmemb, void *user_data) if (!strncasecmp("N", val, 1)) { applog(LOG_DEBUG, "X-Roll-Ntime: N found"); } else { - applog(LOG_DEBUG, "X-Roll-Ntime found"); - hi->has_rolltime = true; + /* Check to see if expire= is supported and if not, set + * the rolltime to the default scantime */ + if (strlen(val) > 7 && !strncasecmp("expire=", val, 7)) + sscanf(val + 7, "%d", &hi->rolltime); + else + hi->rolltime = opt_scantime; + applog(LOG_DEBUG, "X-Roll-Ntime expiry set to %d", hi->rolltime); } } @@ -248,7 +253,7 @@ static void set_nettime(void) json_t *json_rpc_call(CURL *curl, const char *url, const char *userpass, const char *rpc_req, - bool probe, bool longpoll, bool *rolltime, + bool probe, bool longpoll, int *rolltime, struct pool *pool, bool share) { json_t *val, *err_val, *res_val; @@ -260,7 +265,7 @@ json_t *json_rpc_call(CURL *curl, const char *url, char len_hdr[64], user_agent_hdr[128]; char curl_err_str[CURL_ERROR_SIZE]; long timeout = longpoll ? (60 * 60) : 60; - struct header_info hi = {NULL, false, NULL}; + struct header_info hi = {NULL, 0, NULL}; bool probing = false; memset(&err, 0, sizeof(err)); @@ -375,7 +380,7 @@ json_t *json_rpc_call(CURL *curl, const char *url, hi.lp_path = NULL; } - *rolltime = hi.has_rolltime; + *rolltime = hi.rolltime; val = JSON_LOADS(all_data.buf, &err); if (!val) { From eb36f8d271f5c3644718d2c9ef9d806625b4475c Mon Sep 17 00:00:00 2001 From: ckolivas Date: Sun, 24 Jun 2012 00:12:11 +1000 Subject: [PATCH 03/47] Walk through the thread list instead of searching for them when disabling threads for dynamic mode. --- driver-opencl.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/driver-opencl.c b/driver-opencl.c index 521a0635..9dbaef1a 100644 --- a/driver-opencl.c +++ b/driver-opencl.c @@ -538,15 +538,11 @@ struct cgpu_info *cpus; void pause_dynamic_threads(int gpu) { struct cgpu_info *cgpu = &gpus[gpu]; - int i, thread_no = 0; + int i; - for (i = 0; i < mining_threads; i++) { + for (i = 1; i < cgpu->threads; i++) { struct thr_info *thr = &thr_info[i]; - if (thr->cgpu != cgpu) - continue; - if (!thread_no++) - continue; if (!thr->pause && cgpu->dynamic) { applog(LOG_WARNING, "Disabling extra threads due to dynamic mode."); applog(LOG_WARNING, "Tune dynamic intensity with --gpu-dyninterval"); From 4e60a62ae226b36595114b722fab55aa73131691 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 12:55:56 +1000 Subject: [PATCH 04/47] Getwork delay in stats should include retries for each getwork call. 
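The point of this patch is where the clock starts: tv_start is now taken once, before the retry loop, so the recorded delay covers every failed attempt rather than only the last call. A minimal self-contained sketch of the pattern, with a hypothetical fetch_work() standing in for json_rpc_call():

#include <stdbool.h>
#include <sys/time.h>

extern bool fetch_work(void);	/* hypothetical stand-in for json_rpc_call() */

static bool timed_get_work(struct timeval *elapsed)
{
	struct timeval tv_start, tv_end;
	int retries = 0;
	bool ok = false;

	gettimeofday(&tv_start, NULL);	/* before the loop, so retries count */
	while (!ok && retries++ < 3)
		ok = fetch_work();
	gettimeofday(&tv_end, NULL);	/* after the loop, success or not */

	timersub(&tv_end, &tv_start, elapsed);
	return ok;
}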
--- cgminer.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cgminer.c b/cgminer.c index 7b6b436b..9384a167 100644 --- a/cgminer.c +++ b/cgminer.c @@ -1837,16 +1837,15 @@ static bool get_upstream_work(struct work *work, CURL *curl) url = pool->rpc_url; + gettimeofday(&tv_start, NULL); retry: /* A single failure response here might be reported as a dead pool and * there may be temporary denied messages etc. falsely reporting * failure so retry a few times before giving up */ while (!val && retries++ < 3) { pool_stats->getwork_attempts++; - gettimeofday(&tv_start, NULL); val = json_rpc_call(curl, url, pool->rpc_userpass, rpc_req, false, false, &work->rolltime, pool, false); - gettimeofday(&tv_end, NULL); } if (unlikely(!val)) { applog(LOG_DEBUG, "Failed json_rpc_call in get_upstream_work"); @@ -1856,11 +1855,13 @@ retry: rc = work_decode(json_object_get(val, "result"), work); if (!rc && retries < 3) goto retry; + work->pool = pool; work->longpoll = false; total_getworks++; pool->getwork_requested++; + gettimeofday(&tv_end, NULL); timersub(&tv_end, &tv_start, &tv_elapsed); timeradd(&tv_elapsed, &(pool_stats->getwork_wait), &(pool_stats->getwork_wait)); if (timercmp(&tv_elapsed, &(pool_stats->getwork_wait_max), >)) { From f32ffb8718492aa33858f8f60af22962dbc0e481 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 13:20:17 +1000 Subject: [PATCH 05/47] Work out a rolling average getwork delay stored in pool_stats. --- api.c | 5 +++-- cgminer.c | 13 ++++++++----- miner.h | 1 + 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/api.c b/api.c index 7eb684dc..078c1579 100644 --- a/api.c +++ b/api.c @@ -2114,11 +2114,12 @@ static int itemstats(int i, char *id, struct cgminer_stats *stats, struct cgmine if (pool_stats) { sprintf(buf, isjson ? 
",\"Pool Calls\":%d,\"Pool Attempts\":%d,\"Pool Wait\":%ld.%06ld,\"Pool Max\":%ld.%06ld,\"Pool Min\":%ld.%06ld" - : ",Pool Calls=%d,Pool Attempts=%d,Pool Wait=%ld.%06ld,Pool Max=%ld.%06ld,Pool Min=%ld.%06ld", + : ",Pool Calls=%d,Pool Attempts=%d,Pool Wait=%ld.%06ld,Pool Max=%ld.%06ld,Pool Min=%ld.%06ld,Pool Av=%f", pool_stats->getwork_calls, pool_stats->getwork_attempts, pool_stats->getwork_wait.tv_sec, pool_stats->getwork_wait.tv_usec, pool_stats->getwork_wait_max.tv_sec, pool_stats->getwork_wait_max.tv_usec, - pool_stats->getwork_wait_min.tv_sec, pool_stats->getwork_wait_min.tv_usec); + pool_stats->getwork_wait_min.tv_sec, pool_stats->getwork_wait_min.tv_usec, + pool_stats->getwork_wait_rolling); strcat(io_buffer, buf); } diff --git a/cgminer.c b/cgminer.c index 9384a167..c7a6d39d 100644 --- a/cgminer.c +++ b/cgminer.c @@ -1856,13 +1856,11 @@ retry: if (!rc && retries < 3) goto retry; - work->pool = pool; - work->longpoll = false; - total_getworks++; - pool->getwork_requested++; - gettimeofday(&tv_end, NULL); timersub(&tv_end, &tv_start, &tv_elapsed); + pool_stats->getwork_wait_rolling += ((double)tv_elapsed.tv_sec + ((double)tv_elapsed.tv_usec / 1000000)) * 0.63; + pool_stats->getwork_wait_rolling /= 1.63; + timeradd(&tv_elapsed, &(pool_stats->getwork_wait), &(pool_stats->getwork_wait)); if (timercmp(&tv_elapsed, &(pool_stats->getwork_wait_max), >)) { pool_stats->getwork_wait_max.tv_sec = tv_elapsed.tv_sec; @@ -1874,6 +1872,11 @@ retry: } pool_stats->getwork_calls++; + work->pool = pool; + work->longpoll = false; + total_getworks++; + pool->getwork_requested++; + json_decref(val); out: diff --git a/miner.h b/miner.h index b1840e98..9eb1b6d8 100644 --- a/miner.h +++ b/miner.h @@ -298,6 +298,7 @@ struct cgminer_pool_stats { struct timeval getwork_wait; struct timeval getwork_wait_max; struct timeval getwork_wait_min; + double getwork_wait_rolling; }; struct cgpu_info { From c20a89d998437dc82dfb6468584b4027cdce8b9f Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 14:20:29 +1000 Subject: [PATCH 06/47] Take into account average getwork delay as a marker of pool communications when considering work stale. 
--- cgminer.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/cgminer.c b/cgminer.c index c7a6d39d..86ddfb08 100644 --- a/cgminer.c +++ b/cgminer.c @@ -2158,25 +2158,34 @@ static bool workio_get_work(struct workio_cmd *wc) static bool stale_work(struct work *work, bool share) { struct timeval now; + time_t work_expiry; struct pool *pool; + int getwork_delay; if (work->mandatory) return false; + if (share) + work_expiry = opt_expiry; + else if (work->rolls) + work_expiry = work->rolltime; + else + work_expiry = opt_scantime; + pool = work->pool; + /* Factor in the average getwork delay of this pool, rounding it up to + * the nearest second */ + getwork_delay = pool->cgminer_pool_stats.getwork_wait_rolling * 5 + 1; + work_expiry -= getwork_delay; + if (unlikely(work_expiry < 5)) + work_expiry = 5; + gettimeofday(&now, NULL); - if (share) { - if ((now.tv_sec - work->tv_staged.tv_sec) >= opt_expiry) - return true; - } else if (work->rolls) { - if ((now.tv_sec - work->tv_staged.tv_sec) >= work->rolltime) - return true; - } else if ((now.tv_sec - work->tv_staged.tv_sec) >= opt_scantime) + if ((now.tv_sec - work->tv_staged.tv_sec) >= work_expiry) return true; if (work->work_block != work_block) return true; - pool = work->pool; if (opt_fail_only && !share && pool != current_pool() && pool->enabled != POOL_REJECTING) return true; From a8ae1a43ead2952fff664d5ca9678bf9da2cd2ba Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 14:38:31 +1000 Subject: [PATCH 07/47] Rolltime should be used as the cutoff time for primary work as well as the rolled work, if present. --- cgminer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cgminer.c b/cgminer.c index 86ddfb08..1cd9e09d 100644 --- a/cgminer.c +++ b/cgminer.c @@ -2167,7 +2167,7 @@ static bool stale_work(struct work *work, bool share) if (share) work_expiry = opt_expiry; - else if (work->rolls) + else if (work->rolltime) work_expiry = work->rolltime; else work_expiry = opt_scantime; From 195d915a6cf8aa8aad6949fa7e2873285004aa0f Mon Sep 17 00:00:00 2001 From: Kano Date: Sun, 24 Jun 2012 16:31:11 +1000 Subject: [PATCH 08/47] api.c display Pool Av in json --- api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api.c b/api.c index 078c1579..42a38c77 100644 --- a/api.c +++ b/api.c @@ -2113,7 +2113,7 @@ static int itemstats(int i, char *id, struct cgminer_stats *stats, struct cgmine if (pool_stats) { sprintf(buf, isjson - ? ",\"Pool Calls\":%d,\"Pool Attempts\":%d,\"Pool Wait\":%ld.%06ld,\"Pool Max\":%ld.%06ld,\"Pool Min\":%ld.%06ld" + ? ",\"Pool Calls\":%d,\"Pool Attempts\":%d,\"Pool Wait\":%ld.%06ld,\"Pool Max\":%ld.%06ld,\"Pool Min\":%ld.%06ld,\"Pool Av\":%f" : ",Pool Calls=%d,Pool Attempts=%d,Pool Wait=%ld.%06ld,Pool Max=%ld.%06ld,Pool Min=%ld.%06ld,Pool Av=%f", pool_stats->getwork_calls, pool_stats->getwork_attempts, pool_stats->getwork_wait.tv_sec, pool_stats->getwork_wait.tv_usec, From 610302afcb7090f14fb1fe7376c0ba5631d2c7d2 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 18:10:17 +1000 Subject: [PATCH 09/47] Abstract out work cloning and clone $mining_threads copies whenever a rollable work item is found and return a clone instead. 
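clone_work() below only pays off because work can be "rolled". roll_work() itself never appears in this series; as background, a hedged sketch of the idea: advance the big-endian ntime field of the 80-byte block header by one second, so the same getwork result yields a fresh nonce search space. The offset assumes the standard header layout (4-byte version + 32-byte prev hash + 32-byte merkle root puts ntime at byte 68); this is illustrative, not cgminer's actual implementation:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl/htonl */

struct demo_work {		/* illustrative stub, not cgminer's struct work */
	unsigned char data[80];	/* raw block header */
	int rolls;
};

static void roll_demo(struct demo_work *w)
{
	uint32_t be_ntime;

	memcpy(&be_ntime, w->data + 68, sizeof(be_ntime));
	be_ntime = htonl(ntohl(be_ntime) + 1);	/* ntime += 1 second */
	memcpy(w->data + 68, &be_ntime, sizeof(be_ntime));
	w->rolls++;		/* each roll is an independent work unit */
}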
--- cgminer.c | 73 ++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 48 insertions(+), 25 deletions(-) diff --git a/cgminer.c b/cgminer.c index 1cd9e09d..bc2c931f 100644 --- a/cgminer.c +++ b/cgminer.c @@ -3618,6 +3618,48 @@ static bool reuse_work(struct work *work) return false; } +static struct work *make_clone(struct work *work) +{ + struct work *work_clone = make_work(); + + memcpy(work_clone, work, sizeof(struct work)); + work_clone->clone = true; + work_clone->longpoll = false; + + return work_clone; +} + +/* Clones work by rolling it if possible, and returning a clone instead of the + * original work item which gets staged again to possibly be rolled again in + * the future */ +static struct work *clone_work(struct work *work) +{ + struct work *work_clone; + bool cloned = false; + int rolled = 0; + + work_clone = make_clone(work); + while (rolled++ < mining_threads && can_roll(work) && should_roll(work)) { + applog(LOG_DEBUG, "Pushing rolled converted work to stage thread"); + if (unlikely(!stage_work(work_clone))) { + cloned = false; + break; + } + roll_work(work); + work_clone = make_clone(work); + cloned = true; + } + + if (cloned) { + stage_work(work); + return work_clone; + } + + free_work(work_clone); + + return work; +} + static bool get_work(struct work *work, bool requested, struct thr_info *thr, const int thr_id) { @@ -3702,18 +3744,11 @@ retry: pool_resus(pool); } - memcpy(work, work_heap, sizeof(*work)); - - /* Hand out a clone if we can roll this work item */ - if (reuse_work(work_heap)) { - applog(LOG_DEBUG, "Pushing divided work to get queue head"); - - stage_work(work_heap); - work->clone = true; - } else { + work_heap = clone_work(work_heap); + memcpy(work, work_heap, sizeof(struct work)); + free_work(work_heap); + if (!work->clone) dec_queued(); - free_work(work_heap); - } ret = true; out: @@ -4039,8 +4074,7 @@ enum { /* Stage another work item from the work returned in a longpoll */ static void convert_to_work(json_t *val, int rolltime, struct pool *pool) { - struct work *work, *work_clone; - int rolled = 0; + struct work *work; bool rc; work = make_work(); @@ -4073,18 +4107,7 @@ static void convert_to_work(json_t *val, int rolltime, struct pool *pool) return; } - work_clone = make_work(); - memcpy(work_clone, work, sizeof(struct work)); - while (reuse_work(work) && rolled++ < mining_threads) { - work_clone->clone = true; - work_clone->longpoll = false; - applog(LOG_DEBUG, "Pushing rolled converted work to stage thread"); - if (unlikely(!stage_work(work_clone))) - break; - work_clone = make_work(); - memcpy(work_clone, work, sizeof(struct work)); - } - free_work(work_clone); + work = clone_work(work); applog(LOG_DEBUG, "Pushing converted work to stage thread"); From 0c970bbd1cb3277044df0c7966634c21fe8ec32f Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 18:22:20 +1000 Subject: [PATCH 10/47] Roll work again after duplicating it to prevent duplicates on return to the clone function. 
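The reasoning behind this fix: make_clone() leaves master and clone with identical ntime, so if the loop exits and clone_work() hands the master out directly, two threads end up searching the same space. A toy trace of the sequence, with plain integers standing in for ntime values:

#include <assert.h>

int main(void)
{
	int master, clone1, clone2;

	master = 100;		/* master work at ntime T */
	clone1 = master;	/* first clone staged at T */
	master++;		/* roll before cloning again */
	clone2 = master;	/* second clone staged at T+1 */
	master++;		/* the extra roll this patch adds */

	/* the master now differs from every staged clone, so handing it
	 * out directly cannot duplicate staged work */
	assert(master != clone1 && master != clone2);
	return 0;
}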
--- cgminer.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cgminer.c b/cgminer.c index bc2c931f..d9c09e1f 100644 --- a/cgminer.c +++ b/cgminer.c @@ -3647,6 +3647,9 @@ static struct work *clone_work(struct work *work) } roll_work(work); work_clone = make_clone(work); + /* Roll it again to prevent duplicates should this be used + * directly later on */ + roll_work(work); cloned = true; } From 359635a8cf2d960cbcf8ba1a9f14cc2c484f0e7f Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 18:44:09 +1000 Subject: [PATCH 11/47] Only roll enough work to have one staged work for each mining thread. --- cgminer.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cgminer.c b/cgminer.c index d9c09e1f..43fc2060 100644 --- a/cgminer.c +++ b/cgminer.c @@ -3636,10 +3636,9 @@ static struct work *clone_work(struct work *work) { struct work *work_clone; bool cloned = false; work_clone = make_clone(work); - while (rolled++ < mining_threads && can_roll(work) && should_roll(work)) { + while (requests_staged() < mining_threads && can_roll(work) && should_roll(work)) { applog(LOG_DEBUG, "Pushing rolled converted work to stage thread"); if (unlikely(!stage_work(work_clone))) { cloned = false; From 411784a99df46c74e4fdc58e6894a3321bc2440f Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 19:53:31 +1000 Subject: [PATCH 12/47] As work is sorted by age, we can discard the oldest work at regular intervals to keep only 1 of the newest work items per mining thread. --- cgminer.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/cgminer.c b/cgminer.c index 43fc2060..17b2db65 100644 --- a/cgminer.c +++ b/cgminer.c @@ -3625,6 +3625,9 @@ static struct work *make_clone(struct work *work) memcpy(work_clone, work, sizeof(struct work)); work_clone->clone = true; work_clone->longpoll = false; + /* Make cloned work appear slightly older to bias towards keeping the + * master work item which can be further rolled */ + work_clone->tv_staged.tv_sec -= 1; return work_clone; } @@ -4312,6 +4315,23 @@ static void *watchpool_thread(void __maybe_unused *userdata) return NULL; } +/* Work is sorted according to age, so discard the oldest work items, leaving + * only 1 staged work item per mining thread */ +static void age_work(void) +{ + int discarded = 0; + + while (requests_staged() > mining_threads) { + struct work *work = hash_pop(NULL); + + if (unlikely(!work)) + break; + discard_work(work); + discarded++; + } + if (discarded) + applog(LOG_DEBUG, "Aged %d work items", discarded); +} /* Makes sure the hashmeter keeps going even if mining threads stall, updates * the screen at regular intervals, and restarts threads if they appear to have * died. */ @@ -4334,6 +4354,8 @@ static void *watchdog_thread(void __maybe_unused *userdata) if (requests_queued() < opt_queue) queue_request(NULL, false); + age_work(); + hashmeter(-1, &zero_tv, 0); #ifdef HAVE_CURSES From 9f811c528f6eefbca5f16c92181783f756e3a68f Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 20:38:40 +1000 Subject: [PATCH 13/47] Simplify the total_queued count to those staged not cloned and remove the locking since it's no longer a critical value. Clone only the anticipated difference since there will be a lag from the value returned by requests_staged(). Keep 1/3 buffer of extra work items when ageing them.
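Dropping qd_lock is safe here only because total_queued is advisory: a racing increment or decrement can momentarily skew the count, and the worst outcome is one extra or one deferred getwork request. For comparison, a sketch of the same counter using C11 atomics, which keeps updates exact without reintroducing a mutex (newer than the toolchains cgminer targeted at the time; illustrative only):

#include <stdatomic.h>

static atomic_int total_queued;

static inline void inc_queued(void)
{
	atomic_fetch_add(&total_queued, 1);
}

static inline void dec_queued(void)
{
	int old = atomic_load(&total_queued);

	/* clamp at zero, as the original does */
	while (old > 0 &&
	       !atomic_compare_exchange_weak(&total_queued, &old, old - 1))
		;	/* a failed CAS reloads old and retries */
}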
--- cgminer.c | 54 +++++++++++++++++------------------------------------- 1 file changed, 17 insertions(+), 37 deletions(-) diff --git a/cgminer.c b/cgminer.c index 17b2db65..2d789d85 100644 --- a/cgminer.c +++ b/cgminer.c @@ -161,7 +161,6 @@ static int total_threads; struct work_restart *work_restart = NULL; static pthread_mutex_t hash_lock; -static pthread_mutex_t qd_lock; static pthread_mutex_t *stgd_lock; #ifdef HAVE_CURSES static pthread_mutex_t curses_lock; @@ -2364,11 +2363,6 @@ void switch_pools(struct pool *selected) if (pool != last_pool) applog(LOG_WARNING, "Switching to %s", pool->rpc_url); - /* Reset the queued amount to allow more to be queued for the new pool */ - mutex_lock(&qd_lock); - total_queued = 0; - mutex_unlock(&qd_lock); - mutex_lock(&lp_lock); pthread_cond_broadcast(&lp_cond); mutex_unlock(&lp_lock); @@ -2386,31 +2380,21 @@ static void discard_work(struct work *work) free_work(work); } -/* This is overkill, but at least we'll know accurately how much work is - * queued to prevent ever being left without work */ -static void inc_queued(void) +/* Done lockless since this is not a critical value */ +static inline void inc_queued(void) { - mutex_lock(&qd_lock); total_queued++; - mutex_unlock(&qd_lock); } -static void dec_queued(void) +static inline void dec_queued(void) { - mutex_lock(&qd_lock); - if (total_queued > 0) + if (likely(total_queued > 0)) total_queued--; - mutex_unlock(&qd_lock); } static int requests_queued(void) { - int ret; - - mutex_lock(&qd_lock); - ret = total_queued; - mutex_unlock(&qd_lock); - return ret; + return requests_staged() - staged_extras; } static int discard_stale(void) @@ -3509,20 +3493,12 @@ static void pool_resus(struct pool *pool) switch_pools(NULL); } -static long requested_tv_sec; - static bool queue_request(struct thr_info *thr, bool needed) { - int rq = requests_queued(); + int rs = requests_staged(), rq = requests_queued(); struct workio_cmd *wc; - struct timeval now; - - gettimeofday(&now, NULL); - /* Space out retrieval of extra work according to the number of mining - * threads */ - if (rq >= mining_threads + staged_extras && - (now.tv_sec - requested_tv_sec) < opt_scantime / (mining_threads + 1)) + if (rq >= mining_threads || (rq >= opt_queue && rs >= mining_threads)) return true; /* fill out work request message */ @@ -3553,7 +3529,6 @@ static bool queue_request(struct thr_info *thr, bool needed) return false; } - requested_tv_sec = now.tv_sec; inc_queued(); return true; } @@ -3637,11 +3612,17 @@ static struct work *make_clone(struct work *work) * the future */ static struct work *clone_work(struct work *work) { + int mrs = mining_threads - requests_staged(); struct work *work_clone; - bool cloned = false; + bool cloned; + + if (mrs < 1) + return work; + + cloned = false; work_clone = make_clone(work); - while (requests_staged() < mining_threads && can_roll(work) && should_roll(work)) { + while (mrs-- > 0 && can_roll(work) && should_roll(work)) { applog(LOG_DEBUG, "Pushing rolled converted work to stage thread"); if (unlikely(!stage_work(work_clone))) { cloned = false; @@ -3699,7 +3680,7 @@ retry: goto out; } - if (!pool->lagging && requested && !newreq && !requests_staged() && requests_queued() >= mining_threads) { + if (!pool->lagging && requested && !newreq && !requests_staged()) { struct cgpu_info *cgpu = thr->cgpu; bool stalled = true; int i; @@ -4321,7 +4302,7 @@ static void age_work(void) { int discarded = 0; - while (requests_staged() > mining_threads) { + while (requests_staged() > mining_threads * 4 / 3) { struct 
work *work = hash_pop(NULL); if (unlikely(!work)) @@ -4905,7 +4886,6 @@ int main(int argc, char *argv[]) #endif mutex_init(&hash_lock); - mutex_init(&qd_lock); #ifdef HAVE_CURSES mutex_init(&curses_lock); #endif From b5757d124b804689ec7d0536d261ab2b7f0bc375 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 20:45:47 +1000 Subject: [PATCH 14/47] Don't count getwork delay when determining if shares are stale. --- cgminer.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/cgminer.c b/cgminer.c index 2d789d85..2c94f067 100644 --- a/cgminer.c +++ b/cgminer.c @@ -2159,7 +2159,6 @@ static bool stale_work(struct work *work, bool share) struct timeval now; time_t work_expiry; struct pool *pool; - int getwork_delay; if (work->mandatory) return false; @@ -2173,10 +2172,13 @@ static bool stale_work(struct work *work, bool share) pool = work->pool; /* Factor in the average getwork delay of this pool, rounding it up to * the nearest second */ - getwork_delay = pool->cgminer_pool_stats.getwork_wait_rolling * 5 + 1; - work_expiry -= getwork_delay; - if (unlikely(work_expiry < 5)) - work_expiry = 5; + if (!share) { + int getwork_delay = pool->cgminer_pool_stats.getwork_wait_rolling * 5 + 1; + + work_expiry -= getwork_delay; + if (unlikely(work_expiry < 5)) + work_expiry = 5; + } gettimeofday(&now, NULL); if ((now.tv_sec - work->tv_staged.tv_sec) >= work_expiry) From ded16838db358d2947067d238dfe770ea823a944 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 20:48:02 +1000 Subject: [PATCH 15/47] Add the getwork delay time instead of subtracting it when determining if a share is stale. --- cgminer.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cgminer.c b/cgminer.c index 2c94f067..67691eb4 100644 --- a/cgminer.c +++ b/cgminer.c @@ -2159,6 +2159,7 @@ static bool stale_work(struct work *work, bool share) struct timeval now; time_t work_expiry; struct pool *pool; + int getwork_delay; if (work->mandatory) return false; @@ -2172,13 +2173,13 @@ static bool stale_work(struct work *work, bool share) pool = work->pool; /* Factor in the average getwork delay of this pool, rounding it up to * the nearest second */ + getwork_delay = pool->cgminer_pool_stats.getwork_wait_rolling * 5 + 1; if (!share) { - int getwork_delay = pool->cgminer_pool_stats.getwork_wait_rolling * 5 + 1; - work_expiry -= getwork_delay; if (unlikely(work_expiry < 5)) work_expiry = 5; - } + } else + work_expiry += getwork_delay; gettimeofday(&now, NULL); if ((now.tv_sec - work->tv_staged.tv_sec) >= work_expiry) From b20089fdb70a52ec029375beecebfd47efaee218 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 20:59:55 +1000 Subject: [PATCH 16/47] Take into account total_queued as well when deciding whether to queue a fresh request or not. 
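Restated, the new gate skips a fresh request only when both measures agree there is already enough work in flight: the request counts and the lockless total_queued counter. A direct transcription of the condition into a standalone predicate (names mirror the patch):

#include <stdbool.h>

static bool have_enough_work(int rq, int rs, int total_queued,
			     int mining_threads, int opt_queue)
{
	bool counts_ok = rq >= mining_threads ||
			 (rq >= opt_queue && rs >= mining_threads);

	return counts_ok && total_queued >= opt_queue;
}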
--- cgminer.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cgminer.c b/cgminer.c index 67691eb4..0381b04c 100644 --- a/cgminer.c +++ b/cgminer.c @@ -3501,8 +3501,9 @@ static bool queue_request(struct thr_info *thr, bool needed) int rs = requests_staged(), rq = requests_queued(); struct workio_cmd *wc; - if (rq >= mining_threads || (rq >= opt_queue && rs >= mining_threads)) - return true; + if ((rq >= mining_threads || (rq >= opt_queue && rs >= mining_threads)) && + total_queued >= opt_queue) + return true; /* fill out work request message */ wc = calloc(1, sizeof(*wc)); @@ -4335,7 +4336,7 @@ static void *watchdog_thread(void __maybe_unused *userdata) struct timeval now; sleep(interval); - if (requests_queued() < opt_queue) + if (requests_queued() < opt_queue || total_queued < opt_queue) queue_request(NULL, false); age_work(); From 790acad9f9223e4d532d8d38e00737c79b8e40fb Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 21:42:34 +1000 Subject: [PATCH 17/47] Further simplify the total_queued counting mechanism and do all dec_queued from the one location. --- cgminer.c | 48 +++++++++++++++++++----------------------------- 1 file changed, 19 insertions(+), 29 deletions(-) diff --git a/cgminer.c b/cgminer.c index 0381b04c..483331af 100644 --- a/cgminer.c +++ b/cgminer.c @@ -2371,18 +2371,6 @@ void switch_pools(struct pool *selected) mutex_unlock(&lp_lock); } -static void discard_work(struct work *work) -{ - if (!work->clone && !work->rolls && !work->mined) { - if (work->pool) - work->pool->discarded_work++; - total_discarded++; - applog(LOG_DEBUG, "Discarded work"); - } else - applog(LOG_DEBUG, "Discarded cloned or rolled work"); - free_work(work); -} - /* Done lockless since this is not a critical value */ static inline void inc_queued(void) { @@ -2395,15 +2383,25 @@ static inline void dec_queued(void) total_queued--; } -static int requests_queued(void) +static void discard_work(struct work *work) { - return requests_staged() - staged_extras; + if (!work->clone) + dec_queued(); + + if (!work->clone && !work->rolls && !work->mined) { + if (work->pool) + work->pool->discarded_work++; + total_discarded++; + applog(LOG_DEBUG, "Discarded work"); + } else + applog(LOG_DEBUG, "Discarded cloned or rolled work"); + free_work(work); } static int discard_stale(void) { struct work *work, *tmp; - int i, stale = 0; + int stale = 0; mutex_lock(stgd_lock); HASH_ITER(hh, staged_work, work, tmp) { @@ -2419,10 +2417,6 @@ static int discard_stale(void) applog(LOG_DEBUG, "Discarded %d stales that didn't match current hash", stale); - /* Dec queued outside the loop to not have recursive locks */ - for (i = 0; i < stale; i++) - dec_queued(); - return stale; } @@ -3498,11 +3492,11 @@ static void pool_resus(struct pool *pool) static bool queue_request(struct thr_info *thr, bool needed) { - int rs = requests_staged(), rq = requests_queued(); + int rs = requests_staged(); struct workio_cmd *wc; - if ((rq >= mining_threads || (rq >= opt_queue && rs >= mining_threads)) && - total_queued >= opt_queue) + if ((total_queued >= opt_queue && rs >= mining_threads) || + total_queued >= mining_threads) return true; /* fill out work request message */ @@ -3521,7 +3515,7 @@ static bool queue_request(struct thr_info *thr, bool needed) /* If we're queueing work faster than we can stage it, consider the * system lagging and allow work to be gathered from another pool if * possible */ - if (rq && needed && !requests_staged() && !opt_fail_only) + if (total_queued && needed && !rs && 
!opt_fail_only) wc->lagging = true; applog(LOG_DEBUG, "Queueing getwork request to work thread"); @@ -3671,7 +3665,7 @@ static bool get_work(struct work *work, bool requested, struct thr_info *thr, } retry: pool = current_pool(); - if (!requested || requests_queued() < opt_queue) { + if (!requested || total_queued < opt_queue) { if (unlikely(!queue_request(thr, true))) { applog(LOG_WARNING, "Failed to queue_request in get_work"); goto out; @@ -3721,7 +3715,6 @@ retry: } if (stale_work(work_heap, false)) { - dec_queued(); discard_work(work_heap); goto retry; } @@ -3737,8 +3730,6 @@ retry: work_heap = clone_work(work_heap); memcpy(work, work_heap, sizeof(struct work)); free_work(work_heap); - if (!work->clone) - dec_queued(); ret = true; out: @@ -4336,8 +4327,7 @@ static void *watchdog_thread(void __maybe_unused *userdata) struct timeval now; sleep(interval); - if (requests_queued() < opt_queue || total_queued < opt_queue) - queue_request(NULL, false); + queue_request(NULL, false); age_work(); From c8601722752bcc6d3db7efd0063f7f2d7f2f7d2a Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 21:52:07 +1000 Subject: [PATCH 18/47] Make sure to have at least one staged work item when deciding whether to queue another request or not and dec queued in free work not discard work. --- cgminer.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/cgminer.c b/cgminer.c index 483331af..1e854376 100644 --- a/cgminer.c +++ b/cgminer.c @@ -1892,8 +1892,17 @@ static struct work *make_work(void) return work; } +static inline void dec_queued(void) +{ + if (likely(total_queued > 0)) + total_queued--; +} + static void free_work(struct work *work) { + if (!work->clone) + dec_queued(); + free(work); } @@ -2377,17 +2386,8 @@ static inline void inc_queued(void) total_queued++; } -static inline void dec_queued(void) -{ - if (likely(total_queued > 0)) - total_queued--; -} - static void discard_work(struct work *work) { - if (!work->clone) - dec_queued(); - if (!work->clone && !work->rolls && !work->mined) { if (work->pool) work->pool->discarded_work++; @@ -3496,7 +3496,7 @@ static bool queue_request(struct thr_info *thr, bool needed) struct workio_cmd *wc; if ((total_queued >= opt_queue && rs >= mining_threads) || - total_queued >= mining_threads) + (total_queued >= mining_threads && rs)) return true; /* fill out work request message */ From d2c1a6bd6bf4c18aea08228a8c461ec330a57c17 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 21:56:36 +1000 Subject: [PATCH 19/47] Revert "Make sure to have at least one staged work item when deciding whether to queue another request or not and dec queued in free work not discard work." This reverts commit c8601722752bcc6d3db7efd0063f7f2d7f2f7d2a. 
--- cgminer.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/cgminer.c b/cgminer.c index 1e854376..483331af 100644 --- a/cgminer.c +++ b/cgminer.c @@ -1892,17 +1892,8 @@ static struct work *make_work(void) return work; } -static inline void dec_queued(void) -{ - if (likely(total_queued > 0)) - total_queued--; -} - static void free_work(struct work *work) { - if (!work->clone) - dec_queued(); - free(work); } @@ -2386,8 +2377,17 @@ static inline void inc_queued(void) total_queued++; } +static inline void dec_queued(void) +{ + if (likely(total_queued > 0)) + total_queued--; +} + static void discard_work(struct work *work) { + if (!work->clone) + dec_queued(); + if (!work->clone && !work->rolls && !work->mined) { if (work->pool) work->pool->discarded_work++; @@ -3496,7 +3496,7 @@ static bool queue_request(struct thr_info *thr, bool needed) struct workio_cmd *wc; if ((total_queued >= opt_queue && rs >= mining_threads) || - (total_queued >= mining_threads && rs)) + total_queued >= mining_threads) return true; /* fill out work request message */ From 750474bcfb38c58206b870bf665ba888e1ef059b Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 21:56:53 +1000 Subject: [PATCH 20/47] Revert "Further simplify the total_queued counting mechanism and do all dec_queued from the one location." This reverts commit 790acad9f9223e4d532d8d38e00737c79b8e40fb. --- cgminer.c | 48 +++++++++++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 19 deletions(-) diff --git a/cgminer.c b/cgminer.c index 483331af..0381b04c 100644 --- a/cgminer.c +++ b/cgminer.c @@ -2371,6 +2371,18 @@ void switch_pools(struct pool *selected) mutex_unlock(&lp_lock); } +static void discard_work(struct work *work) +{ + if (!work->clone && !work->rolls && !work->mined) { + if (work->pool) + work->pool->discarded_work++; + total_discarded++; + applog(LOG_DEBUG, "Discarded work"); + } else + applog(LOG_DEBUG, "Discarded cloned or rolled work"); + free_work(work); +} + /* Done lockless since this is not a critical value */ static inline void inc_queued(void) { @@ -2383,25 +2395,15 @@ static inline void dec_queued(void) total_queued--; } -static void discard_work(struct work *work) +static int requests_queued(void) { - if (!work->clone) - dec_queued(); - - if (!work->clone && !work->rolls && !work->mined) { - if (work->pool) - work->pool->discarded_work++; - total_discarded++; - applog(LOG_DEBUG, "Discarded work"); - } else - applog(LOG_DEBUG, "Discarded cloned or rolled work"); - free_work(work); + return requests_staged() - staged_extras; } static int discard_stale(void) { struct work *work, *tmp; - int stale = 0; + int i, stale = 0; mutex_lock(stgd_lock); HASH_ITER(hh, staged_work, work, tmp) { @@ -2417,6 +2419,10 @@ static int discard_stale(void) applog(LOG_DEBUG, "Discarded %d stales that didn't match current hash", stale); + /* Dec queued outside the loop to not have recursive locks */ + for (i = 0; i < stale; i++) + dec_queued(); + return stale; } @@ -3492,11 +3498,11 @@ static void pool_resus(struct pool *pool) static bool queue_request(struct thr_info *thr, bool needed) { - int rs = requests_staged(); + int rs = requests_staged(), rq = requests_queued(); struct workio_cmd *wc; - if ((total_queued >= opt_queue && rs >= mining_threads) || - total_queued >= mining_threads) + if ((rq >= mining_threads || (rq >= opt_queue && rs >= mining_threads)) && + total_queued >= opt_queue) return true; /* fill out work request message */ @@ -3515,7 +3521,7 @@ static bool 
queue_request(struct thr_info *thr, bool needed) /* If we're queueing work faster than we can stage it, consider the * system lagging and allow work to be gathered from another pool if * possible */ - if (total_queued && needed && !rs && !opt_fail_only) + if (rq && needed && !requests_staged() && !opt_fail_only) wc->lagging = true; applog(LOG_DEBUG, "Queueing getwork request to work thread"); @@ -3665,7 +3671,7 @@ static bool get_work(struct work *work, bool requested, struct thr_info *thr, } retry: pool = current_pool(); - if (!requested || total_queued < opt_queue) { + if (!requested || requests_queued() < opt_queue) { if (unlikely(!queue_request(thr, true))) { applog(LOG_WARNING, "Failed to queue_request in get_work"); goto out; @@ -3715,6 +3721,7 @@ retry: } if (stale_work(work_heap, false)) { + dec_queued(); discard_work(work_heap); goto retry; } @@ -3730,6 +3737,8 @@ retry: work_heap = clone_work(work_heap); memcpy(work, work_heap, sizeof(struct work)); free_work(work_heap); + if (!work->clone) + dec_queued(); ret = true; out: @@ -4327,7 +4336,8 @@ static void *watchdog_thread(void __maybe_unused *userdata) struct timeval now; sleep(interval); - queue_request(NULL, false); + if (requests_queued() < opt_queue || total_queued < opt_queue) + queue_request(NULL, false); age_work(); From a05c8e3fd9c2db76f2ef33d45713eb4393046977 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 21:57:18 +1000 Subject: [PATCH 21/47] Revert "Take into account total_queued as well when deciding whether to queue a fresh request or not." This reverts commit b20089fdb70a52ec029375beecebfd47efaee218. --- cgminer.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/cgminer.c b/cgminer.c index 0381b04c..67691eb4 100644 --- a/cgminer.c +++ b/cgminer.c @@ -3501,9 +3501,8 @@ static bool queue_request(struct thr_info *thr, bool needed) int rs = requests_staged(), rq = requests_queued(); struct workio_cmd *wc; - if ((rq >= mining_threads || (rq >= opt_queue && rs >= mining_threads)) && - total_queued >= opt_queue) - return true; + if (rq >= mining_threads || (rq >= opt_queue && rs >= mining_threads)) + return true; /* fill out work request message */ wc = calloc(1, sizeof(*wc)); @@ -4336,7 +4335,7 @@ static void *watchdog_thread(void __maybe_unused *userdata) struct timeval now; sleep(interval); - if (requests_queued() < opt_queue || total_queued < opt_queue) + if (requests_queued() < opt_queue) queue_request(NULL, false); age_work(); From 53269a97f30c363ef92a522fe1cb7d8f7bd61a9a Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 21:57:49 +1000 Subject: [PATCH 22/47] Revert "Simplify the total_queued count to those staged not cloned and remove the locking since it's no longer a critical value." This reverts commit 9f811c528f6eefbca5f16c92181783f756e3a68f. 
--- cgminer.c | 54 +++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 37 insertions(+), 17 deletions(-) diff --git a/cgminer.c b/cgminer.c index 67691eb4..7475364a 100644 --- a/cgminer.c +++ b/cgminer.c @@ -161,6 +161,7 @@ static int total_threads; struct work_restart *work_restart = NULL; static pthread_mutex_t hash_lock; +static pthread_mutex_t qd_lock; static pthread_mutex_t *stgd_lock; #ifdef HAVE_CURSES static pthread_mutex_t curses_lock; @@ -2366,6 +2367,11 @@ void switch_pools(struct pool *selected) if (pool != last_pool) applog(LOG_WARNING, "Switching to %s", pool->rpc_url); + /* Reset the queued amount to allow more to be queued for the new pool */ + mutex_lock(&qd_lock); + total_queued = 0; + mutex_unlock(&qd_lock); + mutex_lock(&lp_lock); pthread_cond_broadcast(&lp_cond); mutex_unlock(&lp_lock); @@ -2383,21 +2389,31 @@ static void discard_work(struct work *work) free_work(work); } -/* Done lockless since this is not a critical value */ -static inline void inc_queued(void) +/* This is overkill, but at least we'll know accurately how much work is + * queued to prevent ever being left without work */ +static void inc_queued(void) { + mutex_lock(&qd_lock); total_queued++; + mutex_unlock(&qd_lock); } -static inline void dec_queued(void) +static void dec_queued(void) { - if (likely(total_queued > 0)) + mutex_lock(&qd_lock); + if (total_queued > 0) total_queued--; + mutex_unlock(&qd_lock); } static int requests_queued(void) { - return requests_staged() - staged_extras; + int ret; + + mutex_lock(&qd_lock); + ret = total_queued; + mutex_unlock(&qd_lock); + return ret; } static int discard_stale(void) @@ -3496,12 +3512,20 @@ static void pool_resus(struct pool *pool) switch_pools(NULL); } +static long requested_tv_sec; + static bool queue_request(struct thr_info *thr, bool needed) { - int rs = requests_staged(), rq = requests_queued(); + int rq = requests_queued(); struct workio_cmd *wc; + struct timeval now; + + gettimeofday(&now, NULL); - if (rq >= mining_threads || (rq >= opt_queue && rs >= mining_threads)) + /* Space out retrieval of extra work according to the number of mining + * threads */ + if (rq >= mining_threads + staged_extras && + (now.tv_sec - requested_tv_sec) < opt_scantime / (mining_threads + 1)) return true; /* fill out work request message */ @@ -3532,6 +3556,7 @@ static bool queue_request(struct thr_info *thr, bool needed) return false; } + requested_tv_sec = now.tv_sec; inc_queued(); return true; } @@ -3615,17 +3640,11 @@ static struct work *make_clone(struct work *work) * the future */ static struct work *clone_work(struct work *work) { - int mrs = mining_threads - requests_staged(); struct work *work_clone; - bool cloned; - - if (mrs < 1) - return work; - - cloned = false; + bool cloned = false; work_clone = make_clone(work); - while (mrs-- > 0 && can_roll(work) && should_roll(work)) { + while (requests_staged() < mining_threads && can_roll(work) && should_roll(work)) { applog(LOG_DEBUG, "Pushing rolled converted work to stage thread"); if (unlikely(!stage_work(work_clone))) { cloned = false; @@ -3683,7 +3702,7 @@ retry: goto out; } - if (!pool->lagging && requested && !newreq && !requests_staged()) { + if (!pool->lagging && requested && !newreq && !requests_staged() && requests_queued() >= mining_threads) { struct cgpu_info *cgpu = thr->cgpu; bool stalled = true; int i; @@ -4305,7 +4324,7 @@ static void age_work(void) { int discarded = 0; - while (requests_staged() > mining_threads * 4 / 3) { + while (requests_staged() > mining_threads) { struct 
work *work = hash_pop(NULL); if (unlikely(!work)) @@ -4889,6 +4908,7 @@ int main(int argc, char *argv[]) #endif mutex_init(&hash_lock); + mutex_init(&qd_lock); #ifdef HAVE_CURSES mutex_init(&curses_lock); #endif From 7b57df1171d9e8f0b449d48c80cf52ee1ce74ae7 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 21:58:52 +1000 Subject: [PATCH 23/47] Allow 1/3 extra buffer of staged work when ageing it. --- cgminer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cgminer.c b/cgminer.c index 7475364a..0b0f457c 100644 --- a/cgminer.c +++ b/cgminer.c @@ -4324,7 +4324,7 @@ static void age_work(void) { int discarded = 0; - while (requests_staged() > mining_threads) { + while (requests_staged() > mining_threads * 4 / 3) { struct work *work = hash_pop(NULL); if (unlikely(!work)) From 74cd6548a939c681502c61ec87677d6435a3c5df Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 22:00:37 +1000 Subject: [PATCH 24/47] Use a static base measurement difference of how many items to clone since requests_staged may not climb while rolling. --- cgminer.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/cgminer.c b/cgminer.c index 0b0f457c..7d7af4d9 100644 --- a/cgminer.c +++ b/cgminer.c @@ -3640,11 +3640,16 @@ static struct work *make_clone(struct work *work) * the future */ static struct work *clone_work(struct work *work) { + int mrs = mining_threads - requests_staged(); struct work *work_clone; - bool cloned = false; + bool cloned; + if (mrs < 1) + return work; + + cloned = false; work_clone = make_clone(work); - while (requests_staged() < mining_threads && can_roll(work) && should_roll(work)) { + while (mrs-- > 0 && can_roll(work) && should_roll(work)) { applog(LOG_DEBUG, "Pushing rolled converted work to stage thread"); if (unlikely(!stage_work(work_clone))) { cloned = false; From ebaa615f6dee436197fb78822ec82b1be415f9ed Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 22:16:04 +1000 Subject: [PATCH 25/47] Count extra cloned work in the total queued count. --- cgminer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cgminer.c b/cgminer.c index 7d7af4d9..a749e2a5 100644 --- a/cgminer.c +++ b/cgminer.c @@ -3655,6 +3655,7 @@ static struct work *clone_work(struct work *work) cloned = false; break; } + inc_queued(); roll_work(work); work_clone = make_clone(work); /* Roll it again to prevent duplicates should this be used @@ -3760,8 +3761,7 @@ retry: work_heap = clone_work(work_heap); memcpy(work, work_heap, sizeof(struct work)); free_work(work_heap); - if (!work->clone) - dec_queued(); + dec_queued(); ret = true; out: From 1bbc860a157cb2dd65141f5aedc0bb4664ac5773 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 22:47:51 +1000 Subject: [PATCH 26/47] Don't count longpoll work as a staged extra work. 
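staged_extras is only meaningful if every path adjusts it with the same predicate: if the staging push counts an item as "extra" but a removal path uses a different test, the counter drifts and the queueing heuristics built on it misfire. Patch 26 narrows the predicate in the push and pop paths from (clone || longpoll) to clone alone; patch 33 below applies the same change to the stale-discard path, which until then could decrement staged_extras for longpoll work it had never counted. A sketch of the invariant (stub struct, not cgminer's struct work):

#include <stdbool.h>

struct work_flags {
	bool clone;
	bool longpoll;
};

static int staged_extras;

/* One shared predicate, used by every path that touches the counter */
static bool counts_as_extra(const struct work_flags *w)
{
	return w->clone;
}

static void on_stage(const struct work_flags *w)
{
	if (counts_as_extra(w))
		staged_extras++;
}

static void on_remove(const struct work_flags *w)	/* pop and discard alike */
{
	if (counts_as_extra(w))
		staged_extras--;
}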
--- cgminer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cgminer.c b/cgminer.c index a749e2a5..5b686586 100644 --- a/cgminer.c +++ b/cgminer.c @@ -2575,7 +2575,7 @@ static bool hash_push(struct work *work) if (likely(!getq->frozen)) { HASH_ADD_INT(staged_work, id, work); HASH_SORT(staged_work, tv_sort); - if (work->clone || work->longpoll) + if (work->clone) ++staged_extras; } else rc = false; @@ -3573,7 +3573,7 @@ static struct work *hash_pop(const struct timespec *abstime) if (HASH_COUNT(staged_work)) { work = staged_work; HASH_DEL(staged_work, work); - if (work->clone || work->longpoll) + if (work->clone) --staged_extras; } mutex_unlock(stgd_lock); From efa9569b664f67a3736089ceb85dfd039597a90a Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 22:59:56 +1000 Subject: [PATCH 27/47] Test we have enough work queued for pools with and without rolltime capability. --- cgminer.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/cgminer.c b/cgminer.c index 5b686586..aff264bd 100644 --- a/cgminer.c +++ b/cgminer.c @@ -3512,20 +3512,27 @@ static void pool_resus(struct pool *pool) switch_pools(NULL); } -static long requested_tv_sec; +static time_t requested_tv_sec; static bool queue_request(struct thr_info *thr, bool needed) { int rq = requests_queued(); struct workio_cmd *wc; struct timeval now; + time_t scan_post; + + /* Grab more work every 2/3 of the scan time to avoid all work expiring + * at the same time */ + scan_post = opt_scantime * 2 / 3; + if (scan_post < 5) + scan_post = 5; gettimeofday(&now, NULL); - /* Space out retrieval of extra work according to the number of mining - * threads */ - if (rq >= mining_threads + staged_extras && - (now.tv_sec - requested_tv_sec) < opt_scantime / (mining_threads + 1)) + /* Test to make sure we have enough work for pools without rolltime + * and enough original work for pools with rolltime */ + if (rq >= mining_threads && rq > staged_extras + opt_queue && + now.tv_sec - requested_tv_sec < scan_post) return true; /* fill out work request message */ @@ -4329,7 +4336,7 @@ static void age_work(void) { int discarded = 0; - while (requests_staged() > mining_threads * 4 / 3) { + while (requests_staged() > mining_threads * 4 / 3 + opt_queue) { struct work *work = hash_pop(NULL); if (unlikely(!work)) From 47f66405c07ff3040a58bee90018010c8c2ab583 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 23:10:02 +1000 Subject: [PATCH 28/47] Alternatively check staged work count for rolltime capable pools when deciding to queue requests. 
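Combined with patch 27, the request gate now suppresses a fetch only while all three conditions hold, and the staged count becomes an alternative way to satisfy the first one, since rolltime pools stage many clones while queueing few originals. A restatement of the post-patch condition as a standalone predicate (names mirror the patches):

#include <stdbool.h>
#include <time.h>

static bool suppress_request(int rq, int rs, int staged_extras,
			     int mining_threads, int opt_queue,
			     time_t now_sec, time_t requested_sec,
			     time_t scan_post)
{
	/* 1. either queued requests or staged work covers every thread;
	 * 2. queued requests exceed the clone count plus the reserve,
	 *    so real original work is in flight;
	 * 3. the last fetch was within 2/3 of scantime, spacing requests
	 *    so work does not all expire at once. */
	return (rq >= mining_threads || rs >= mining_threads) &&
	       rq > staged_extras + opt_queue &&
	       now_sec - requested_sec < scan_post;
}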
--- cgminer.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cgminer.c b/cgminer.c index aff264bd..4f88f9fd 100644 --- a/cgminer.c +++ b/cgminer.c @@ -3516,7 +3516,7 @@ static time_t requested_tv_sec; static bool queue_request(struct thr_info *thr, bool needed) { - int rq = requests_queued(); + int rq = requests_queued(), rs = requests_staged(); struct workio_cmd *wc; struct timeval now; time_t scan_post; @@ -3531,7 +3531,8 @@ static bool queue_request(struct thr_info *thr, bool needed) /* Test to make sure we have enough work for pools without rolltime * and enough original work for pools with rolltime */ - if (rq >= mining_threads && rq > staged_extras + opt_queue && + if ((rq >= mining_threads || rs >= mining_threads) && + rq > staged_extras + opt_queue && now.tv_sec - requested_tv_sec < scan_post) return true; @@ -3551,7 +3552,7 @@ static bool queue_request(struct thr_info *thr, bool needed) /* If we're queueing work faster than we can stage it, consider the * system lagging and allow work to be gathered from another pool if * possible */ - if (rq && needed && !requests_staged() && !opt_fail_only) + if (rq && needed && !rs && !opt_fail_only) wc->lagging = true; applog(LOG_DEBUG, "Queueing getwork request to work thread"); From 5d90c50fc08644c9b0c3fb7d508b2bc84e9a4163 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Sun, 24 Jun 2012 23:38:24 +1000 Subject: [PATCH 29/47] With better bounds on the amount of work cloned, there is no need to age work and ageing it was picking off master work items that could be further rolled. --- cgminer.c | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/cgminer.c b/cgminer.c index 4f88f9fd..92f9ad8d 100644 --- a/cgminer.c +++ b/cgminer.c @@ -4331,24 +4331,6 @@ static void *watchpool_thread(void __maybe_unused *userdata) return NULL; } -/* Work is sorted according to age, so discard the oldest work items, leaving - * only 1 staged work item per mining thread */ -static void age_work(void) -{ - int discarded = 0; - - while (requests_staged() > mining_threads * 4 / 3 + opt_queue) { - struct work *work = hash_pop(NULL); - - if (unlikely(!work)) - break; - discard_work(work); - discarded++; - } - if (discarded) - applog(LOG_DEBUG, "Aged %d work items", discarded); -} - /* Makes sure the hashmeter keeps going even if mining threads stall, updates * the screen at regular intervals, and restarts threads if they appear to have * died. */ @@ -4370,8 +4352,6 @@ static void *watchdog_thread(void __maybe_unused *userdata) if (requests_queued() < opt_queue) queue_request(NULL, false); - age_work(); - hashmeter(-1, &zero_tv, 0); #ifdef HAVE_CURSES From 32f52721230471d78fe8075fb40e00b44975afb5 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Mon, 25 Jun 2012 00:03:37 +1000 Subject: [PATCH 30/47] Revert "With better bounds on the amount of work cloned, there is no need to age work and ageing it was picking off master work items that could be further rolled." This reverts commit 5d90c50fc08644c9b0c3fb7d508b2bc84e9a4163. 
--- cgminer.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/cgminer.c b/cgminer.c index 92f9ad8d..4f88f9fd 100644 --- a/cgminer.c +++ b/cgminer.c @@ -4331,6 +4331,24 @@ static void *watchpool_thread(void __maybe_unused *userdata) return NULL; } +/* Work is sorted according to age, so discard the oldest work items, leaving + * only 1 staged work item per mining thread */ +static void age_work(void) +{ + int discarded = 0; + + while (requests_staged() > mining_threads * 4 / 3 + opt_queue) { + struct work *work = hash_pop(NULL); + + if (unlikely(!work)) + break; + discard_work(work); + discarded++; + } + if (discarded) + applog(LOG_DEBUG, "Aged %d work items", discarded); +} + /* Makes sure the hashmeter keeps going even if mining threads stall, updates * the screen at regular intervals, and restarts threads if they appear to have * died. */ @@ -4352,6 +4370,8 @@ static void *watchdog_thread(void __maybe_unused *userdata) if (requests_queued() < opt_queue) queue_request(NULL, false); + age_work(); + hashmeter(-1, &zero_tv, 0); #ifdef HAVE_CURSES From 05bc638d9705401e5dfe17e614524c92c7f0ec4d Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Mon, 25 Jun 2012 00:08:50 +1000 Subject: [PATCH 31/47] Increase queued count before pushing message. --- cgminer.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cgminer.c b/cgminer.c index 4f88f9fd..56aaeedf 100644 --- a/cgminer.c +++ b/cgminer.c @@ -3536,10 +3536,13 @@ static bool queue_request(struct thr_info *thr, bool needed) now.tv_sec - requested_tv_sec < scan_post) return true; + inc_queued(); + /* fill out work request message */ wc = calloc(1, sizeof(*wc)); if (unlikely(!wc)) { applog(LOG_ERR, "Failed to calloc wc in queue_request"); + dec_queued(); return false; } @@ -3561,11 +3564,11 @@ static bool queue_request(struct thr_info *thr, bool needed) if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) { applog(LOG_ERR, "Failed to tq_push in queue_request"); workio_cmd_free(wc); + dec_queued(); return false; } requested_tv_sec = now.tv_sec; - inc_queued(); return true; } From d93e5f710d69b44e2d23931ffc363c22dd362545 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Mon, 25 Jun 2012 00:23:58 +1000 Subject: [PATCH 32/47] Count longpoll's contribution to the queue. --- cgminer.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cgminer.c b/cgminer.c index 56aaeedf..e54916f6 100644 --- a/cgminer.c +++ b/cgminer.c @@ -4137,8 +4137,10 @@ static void convert_to_work(json_t *val, int rolltime, struct pool *pool) if (unlikely(!stage_work(work))) free_work(work); - else + else { + inc_queued(); applog(LOG_DEBUG, "Converted longpoll data to work"); + } } /* If we want longpoll, enable it for the chosen default pool, or, if From 49dd8fb548f56cbd15859b592d074fa2a3c9f33e Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Mon, 25 Jun 2012 00:25:38 +1000 Subject: [PATCH 33/47] Don't decrement staged extras count from longpoll work. 
--- cgminer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cgminer.c b/cgminer.c index e54916f6..45a9303a 100644 --- a/cgminer.c +++ b/cgminer.c @@ -2425,7 +2425,7 @@ static int discard_stale(void) HASH_ITER(hh, staged_work, work, tmp) { if (stale_work(work, false)) { HASH_DEL(staged_work, work); - if (work->clone || work->longpoll) + if (work->clone) --staged_extras; discard_work(work); stale++; From 757922e4ce88af4665ad8819a8e470941512d811 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Mon, 25 Jun 2012 00:33:47 +1000 Subject: [PATCH 34/47] Use the work clone flag to determine if we should subtract it from the total queued variable and provide a subtract queued function to prevent looping over locked code. --- cgminer.c | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/cgminer.c b/cgminer.c index 45a9303a..1d68349a 100644 --- a/cgminer.c +++ b/cgminer.c @@ -2398,8 +2398,11 @@ static void inc_queued(void) mutex_unlock(&qd_lock); } -static void dec_queued(void) +static void dec_queued(struct work *work) { + if (work->clone) + return; + mutex_lock(&qd_lock); if (total_queued > 0) total_queued--; @@ -2416,10 +2419,19 @@ static int requests_queued(void) return ret; } +static void subtract_queued(int work_units) +{ + mutex_lock(&qd_lock); + total_queued -= work_units; + if (total_queued < 0) + total_queued = 0; + mutex_unlock(&qd_lock); +} + static int discard_stale(void) { struct work *work, *tmp; - int i, stale = 0; + int stale = 0, nonclone = 0; mutex_lock(stgd_lock); HASH_ITER(hh, staged_work, work, tmp) { @@ -2427,6 +2439,8 @@ static int discard_stale(void) HASH_DEL(staged_work, work); if (work->clone) --staged_extras; + else + nonclone++; discard_work(work); stale++; } @@ -2436,8 +2450,7 @@ static int discard_stale(void) applog(LOG_DEBUG, "Discarded %d stales that didn't match current hash", stale); /* Dec queued outside the loop to not have recursive locks */ - for (i = 0; i < stale; i++) - dec_queued(); + subtract_queued(nonclone); return stale; } @@ -3542,7 +3555,6 @@ static bool queue_request(struct thr_info *thr, bool needed) wc = calloc(1, sizeof(*wc)); if (unlikely(!wc)) { applog(LOG_ERR, "Failed to calloc wc in queue_request"); - dec_queued(); return false; } @@ -3564,7 +3576,6 @@ static bool queue_request(struct thr_info *thr, bool needed) if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) { applog(LOG_ERR, "Failed to tq_push in queue_request"); workio_cmd_free(wc); - dec_queued(); return false; } @@ -3666,7 +3677,6 @@ static struct work *clone_work(struct work *work) cloned = false; break; } - inc_queued(); roll_work(work); work_clone = make_clone(work); /* Roll it again to prevent duplicates should this be used @@ -3756,7 +3766,7 @@ retry: } if (stale_work(work_heap, false)) { - dec_queued(); + dec_queued(work_heap); discard_work(work_heap); goto retry; } @@ -3771,8 +3781,8 @@ retry: work_heap = clone_work(work_heap); memcpy(work, work_heap, sizeof(struct work)); + dec_queued(work_heap); free_work(work_heap); - dec_queued(); ret = true; out: From 63dd598e2a55d7755d4e8daf1ec1d24007b099fe Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Mon, 25 Jun 2012 00:42:51 +1000 Subject: [PATCH 35/47] Queue multiple requests at once when levels are low. 
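Worth noting: as committed, the batch size came out with the wrong sign. The patch computes toq = rq - mining_threads (or rs - mining_threads), which is negative exactly when more work is needed, so the do/while issued a single request in the starved case and several in the saturated one. Patch 37 abandons the batching for precisely that reason. One plausible reading of the intent, sign corrected (hypothetical helper, not what was committed):

static int requests_to_queue(int rq, int rs, int mining_threads)
{
	int level = rq > rs ? rq : rs;	/* most optimistic supply measure */
	int deficit = mining_threads - level;

	return deficit > 0 ? deficit : 1;	/* always ask for at least one */
}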
--- cgminer.c | 61 +++++++++++++++++++++++++++++++------------------------ 1 file changed, 35 insertions(+), 26 deletions(-) diff --git a/cgminer.c b/cgminer.c index 1d68349a..a3d22b05 100644 --- a/cgminer.c +++ b/cgminer.c @@ -3529,7 +3529,7 @@ static time_t requested_tv_sec; static bool queue_request(struct thr_info *thr, bool needed) { - int rq = requests_queued(), rs = requests_staged(); + int toq, rq = requests_queued(), rs = requests_staged(); struct workio_cmd *wc; struct timeval now; time_t scan_post; @@ -3549,37 +3549,46 @@ static bool queue_request(struct thr_info *thr, bool needed) now.tv_sec - requested_tv_sec < scan_post) return true; - inc_queued(); - - /* fill out work request message */ - wc = calloc(1, sizeof(*wc)); - if (unlikely(!wc)) { - applog(LOG_ERR, "Failed to calloc wc in queue_request"); - return false; - } + requested_tv_sec = now.tv_sec; - wc->cmd = WC_GET_WORK; - if (thr) - wc->thr = thr; + if (rq > rs) + toq = rq - mining_threads; else - wc->thr = NULL; + toq = rs - mining_threads; - /* If we're queueing work faster than we can stage it, consider the - * system lagging and allow work to be gathered from another pool if - * possible */ - if (rq && needed && !rs && !opt_fail_only) - wc->lagging = true; + do { + inc_queued(); - applog(LOG_DEBUG, "Queueing getwork request to work thread"); + /* fill out work request message */ + wc = calloc(1, sizeof(*wc)); + if (unlikely(!wc)) { + applog(LOG_ERR, "Failed to calloc wc in queue_request"); + return false; + } - /* send work request to workio thread */ - if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) { - applog(LOG_ERR, "Failed to tq_push in queue_request"); - workio_cmd_free(wc); - return false; - } + wc->cmd = WC_GET_WORK; + if (thr) + wc->thr = thr; + else + wc->thr = NULL; + + /* If we're queueing work faster than we can stage it, consider the + * system lagging and allow work to be gathered from another pool if + * possible */ + if (rq && needed && !rs && !opt_fail_only) + wc->lagging = true; + + applog(LOG_DEBUG, "Queueing getwork request to work thread"); + + /* send work request to workio thread */ + if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) { + applog(LOG_ERR, "Failed to tq_push in queue_request"); + workio_cmd_free(wc); + return false; + } + + } while (--toq > 0); - requested_tv_sec = now.tv_sec; return true; } From 42ea29ca4eb0bda94e18460ed00295a17c8dcc8b Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Mon, 25 Jun 2012 00:58:18 +1000 Subject: [PATCH 36/47] Use a queueing bool set under control_lock to prevent multiple calls to queue_request racing. 
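control_tset() is a test-and-set built on control_lock: it returns the flag's previous value and leaves it set, so only the first of several racing callers proceeds, and control_tclear() releases the guard. The usage pattern, in outline (illustrative sketch):

	static bool queueing;

	static bool guarded_queue_request(void)
	{
		if (control_tset(&queueing))
			return true;	/* another caller is already queueing */

		/* ... build and push the getwork request(s) ... */

		control_tclear(&queueing);	/* must run on every exit path */
		return true;
	}

A mutex-protected flag keeps the guard portable; the toolchains cgminer targeted at the time generally predate usable C11 atomics.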
--- cgminer.c | 60 ++++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 44 insertions(+), 16 deletions(-) diff --git a/cgminer.c b/cgminer.c index a3d22b05..a186ff14 100644 --- a/cgminer.c +++ b/cgminer.c @@ -2428,7 +2428,7 @@ static void subtract_queued(int work_units) mutex_unlock(&qd_lock); } -static int discard_stale(void) +static void discard_stale(void) { struct work *work, *tmp; int stale = 0, nonclone = 0; @@ -2451,21 +2451,18 @@ static int discard_stale(void) /* Dec queued outside the loop to not have recursive locks */ subtract_queued(nonclone); - - return stale; } static bool queue_request(struct thr_info *thr, bool needed); static void restart_threads(void) { - int i, stale; + int i; /* Discard staged work that is now stale */ - stale = discard_stale(); + discard_stale(); - for (i = 0; i < stale; i++) - queue_request(NULL, true); + queue_request(NULL, true); for (i = 0; i < mining_threads; i++) work_restart[i].restart = 1; @@ -3527,12 +3524,41 @@ static void pool_resus(struct pool *pool) static time_t requested_tv_sec; +static bool control_tset(bool *var) +{ + bool ret; + + mutex_lock(&control_lock); + ret = *var; + *var = true; + mutex_unlock(&control_lock); + + return ret; +} + +static void control_tclear(bool *var) +{ + mutex_lock(&control_lock); + *var = false; + mutex_unlock(&control_lock); +} + +static bool queueing; + static bool queue_request(struct thr_info *thr, bool needed) { - int toq, rq = requests_queued(), rs = requests_staged(); struct workio_cmd *wc; struct timeval now; time_t scan_post; + int toq, rq, rs; + bool ret = true; + + /* Prevent multiple requests being executed at once */ + if (control_tset(&queueing)) + return ret; + + rq = requests_queued(); + rs = requests_staged(); /* Grab more work every 2/3 of the scan time to avoid all work expiring * at the same time */ @@ -3547,7 +3573,7 @@ static bool queue_request(struct thr_info *thr, bool needed) if ((rq >= mining_threads || rs >= mining_threads) && rq > staged_extras + opt_queue && now.tv_sec - requested_tv_sec < scan_post) - return true; + goto out; requested_tv_sec = now.tv_sec; @@ -3563,14 +3589,12 @@ static bool queue_request(struct thr_info *thr, bool needed) wc = calloc(1, sizeof(*wc)); if (unlikely(!wc)) { applog(LOG_ERR, "Failed to calloc wc in queue_request"); - return false; + ret = false; + break; } wc->cmd = WC_GET_WORK; - if (thr) - wc->thr = thr; - else - wc->thr = NULL; + wc->thr = thr; /* If we're queueing work faster than we can stage it, consider the * system lagging and allow work to be gathered from another pool if @@ -3584,12 +3608,16 @@ static bool queue_request(struct thr_info *thr, bool needed) if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) { applog(LOG_ERR, "Failed to tq_push in queue_request"); workio_cmd_free(wc); - return false; + ret = false; + break; } } while (--toq > 0); - return true; +out: + control_tclear(&queueing); + + return ret; } static struct work *hash_pop(const struct timespec *abstime) From 17ba2dca63fdef062c63b8532b9ec7fdedd57caa Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Mon, 25 Jun 2012 10:51:45 +1000 Subject: [PATCH 37/47] Logic fail on queueing multiple requests at once. Just queue one at a time. 
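The arithmetic in the previous patch inverted the intent: toq = rq - mining_threads (or rs - mining_threads) is negative precisely when work is scarce, so the do/while body ran only once in the case it was meant to help, and queued extra requests when levels were already high. For example, with mining_threads = 8:

	rq = 3:   toq = 3 - 8  = -5  ->  loop body runs once    (5 more were needed)
	rq = 12:  toq = 12 - 8 =  4  ->  loop body runs 4 times (none were needed)

Hence fall back to queueing a single request per call.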
--- cgminer.c | 55 +++++++++++++++++++++++-------------------------------- 1 file changed, 23 insertions(+), 32 deletions(-) diff --git a/cgminer.c b/cgminer.c index a186ff14..a6232a46 100644 --- a/cgminer.c +++ b/cgminer.c @@ -3550,7 +3550,7 @@ static bool queue_request(struct thr_info *thr, bool needed) struct workio_cmd *wc; struct timeval now; time_t scan_post; - int toq, rq, rs; + int rq, rs; bool ret = true; /* Prevent multiple requests being executed at once */ @@ -3577,42 +3577,33 @@ static bool queue_request(struct thr_info *thr, bool needed) requested_tv_sec = now.tv_sec; - if (rq > rs) - toq = rq - mining_threads; - else - toq = rs - mining_threads; + inc_queued(); - do { - inc_queued(); - - /* fill out work request message */ - wc = calloc(1, sizeof(*wc)); - if (unlikely(!wc)) { - applog(LOG_ERR, "Failed to calloc wc in queue_request"); - ret = false; - break; - } - - wc->cmd = WC_GET_WORK; - wc->thr = thr; + /* fill out work request message */ + wc = calloc(1, sizeof(*wc)); + if (unlikely(!wc)) { + applog(LOG_ERR, "Failed to calloc wc in queue_request"); + ret = false; + goto out; + } - /* If we're queueing work faster than we can stage it, consider the - * system lagging and allow work to be gathered from another pool if - * possible */ - if (rq && needed && !rs && !opt_fail_only) - wc->lagging = true; + wc->cmd = WC_GET_WORK; + wc->thr = thr; - applog(LOG_DEBUG, "Queueing getwork request to work thread"); + /* If we're queueing work faster than we can stage it, consider the + * system lagging and allow work to be gathered from another pool if + * possible */ + if (rq && needed && !rs && !opt_fail_only) + wc->lagging = true; - /* send work request to workio thread */ - if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) { - applog(LOG_ERR, "Failed to tq_push in queue_request"); - workio_cmd_free(wc); - ret = false; - break; - } + applog(LOG_DEBUG, "Queueing getwork request to work thread"); - } while (--toq > 0); + /* send work request to workio thread */ + if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) { + applog(LOG_ERR, "Failed to tq_push in queue_request"); + workio_cmd_free(wc); + ret = false; + } out: control_tclear(&queueing); From eaf1505381ff72f02a49d268a35656d9db88d2e2 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Mon, 25 Jun 2012 13:31:52 +1000 Subject: [PATCH 38/47] Dynamic intensity for GPUs should be calculated on a per device basis. Clean up the code to only calculate it if required as well. 
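Previously gpu_ms_average was a function local re-initialised to 7 on every call, so the decay never accumulated any history between calls. Moving tv_gpustart, tv_gpuend and gpu_ms_average into struct cgpu_info gives each device its own persistent estimate, and the elapsed time is only computed when dynamic mode is actually enabled. The smoothing is an exponential moving average giving the newest sample a weight of 0.63/1.63, about 39% (illustrative sketch of the same arithmetic):

	/* per-device EMA: the new sample gets ~39% of the weight,
	 * the running average keeps the remaining ~61% */
	static void update_gpu_ms(double *avg, double sample_ms)
	{
		*avg = (*avg + sample_ms * 0.63) / 1.63;
	}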
--- driver-opencl.c | 26 ++++++++++++-------------- miner.h | 4 ++++ 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/driver-opencl.c b/driver-opencl.c index 9dbaef1a..2d6f9759 100644 --- a/driver-opencl.c +++ b/driver-opencl.c @@ -1350,34 +1350,32 @@ static uint64_t opencl_scanhash(struct thr_info *thr, struct work *work, _clState *clState = clStates[thr_id]; const cl_kernel *kernel = &clState->kernel; - double gpu_ms_average = 7; cl_int status; - size_t globalThreads[1]; size_t localThreads[1] = { clState->wsize }; unsigned int threads; unsigned int hashes; - - struct timeval tv_gpustart, tv_gpuend, diff; - suseconds_t gpu_us; - - gettimeofday(&tv_gpustart, NULL); - timeval_subtract(&diff, &tv_gpustart, &tv_gpuend); + gettimeofday(&gpu->tv_gpustart, NULL); /* This finish flushes the readbuffer set with CL_FALSE later */ clFinish(clState->commandQueue); - gettimeofday(&tv_gpuend, NULL); - timeval_subtract(&diff, &tv_gpuend, &tv_gpustart); - gpu_us = diff.tv_sec * 1000000 + diff.tv_usec; - decay_time(&gpu_ms_average, gpu_us / 1000); + gettimeofday(&gpu->tv_gpuend, NULL); + if (gpu->dynamic) { + struct timeval diff; + suseconds_t gpu_ms; + + timersub(&gpu->tv_gpuend, &gpu->tv_gpustart, &diff); + gpu_ms = diff.tv_sec * 1000 + diff.tv_usec / 1000; + gpu->gpu_ms_average = (gpu->gpu_ms_average + gpu_ms * 0.63) / 1.63; + /* Try to not let the GPU be out for longer than 6ms, but * increase intensity when the system is idle, unless * dynamic is disabled. */ - if (gpu_ms_average > opt_dynamic_interval) { + if (gpu->gpu_ms_average > opt_dynamic_interval) { if (gpu->intensity > MIN_INTENSITY) --gpu->intensity; - } else if (gpu_ms_average < ((opt_dynamic_interval / 2) ? : 1)) { + } else if (gpu->gpu_ms_average < ((opt_dynamic_interval / 2) ? : 1)) { if (gpu->intensity < MAX_INTENSITY) ++gpu->intensity; } diff --git a/miner.h b/miner.h index 9eb1b6d8..78d3b80f 100644 --- a/miner.h +++ b/miner.h @@ -342,6 +342,10 @@ struct cgpu_info { cl_uint vwidth; size_t work_size; enum cl_kernels kernel; + + struct timeval tv_gpustart; + struct timeval tv_gpuend; + double gpu_ms_average; #endif float temp; From 5cf4b7c4324364f5c66c15a0802acb26ae1a2679 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Mon, 25 Jun 2012 16:59:29 +1000 Subject: [PATCH 39/47] Make the devices array a dynamically allocated array of pointers to allow unlimited devices.
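Growing the array with realloc() on every add_cgpu() removes the hard MAX_DEVICES limit; the +2 presumably leaves a spare NULL slot past the new entry. One caveat with the bare devices = realloc(devices, ...) form is that the old pointer is lost if realloc() fails. A more defensive sketch (illustrative, not the committed hunk):

	struct cgpu_info **tmp;

	tmp = realloc(devices, sizeof(*devices) * (total_devices + 2));
	if (unlikely(!tmp))
		quit(1, "Failed to realloc devices in add_cgpu");
	devices = tmp;
	devices[total_devices++] = cgpu;

Since cgminer quits on allocation failure anyway, the practical difference is only that the old array is not leaked on the way down.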
--- cgminer.c | 5 ++--- driver-cpu.c | 2 -- driver-icarus.c | 3 ++- driver-opencl.c | 3 --- driver-ztex.c | 2 -- fpgautils.c | 21 ++------------------- miner.h | 3 +-- 7 files changed, 7 insertions(+), 32 deletions(-) diff --git a/cgminer.c b/cgminer.c index a6232a46..3dc506b5 100644 --- a/cgminer.c +++ b/cgminer.c @@ -116,7 +116,7 @@ struct list_head scan_devices; static signed int devices_enabled; static bool opt_removedisabled; int total_devices; -struct cgpu_info *devices[MAX_DEVICES]; +struct cgpu_info **devices; bool have_opencl; int opt_n_threads = -1; int mining_threads; @@ -4936,6 +4936,7 @@ bool add_cgpu(struct cgpu_info*cgpu) cgpu->device_id = d->lastid = 0; HASH_ADD_STR(devids, name, d); } + devices = realloc(devices, sizeof(struct cgpu_info *) * (total_devices + 2)); devices[total_devices++] = cgpu; return true; } @@ -5025,8 +5026,6 @@ int main(int argc, char *argv[]) gpus[i].dynamic = true; #endif - memset(devices, 0, sizeof(devices)); - /* parse command line */ opt_register_table(opt_config_table, "Options for both config file and command line"); diff --git a/driver-cpu.c b/driver-cpu.c index d0a25160..1f8ac892 100644 --- a/driver-cpu.c +++ b/driver-cpu.c @@ -731,8 +731,6 @@ static void cpu_detect() if (num_processors < 1) return; - if (total_devices + opt_n_threads > MAX_DEVICES) - opt_n_threads = MAX_DEVICES - total_devices; cpus = calloc(opt_n_threads, sizeof(struct cgpu_info)); if (unlikely(!cpus)) quit(1, "Failed to calloc cpus"); diff --git a/driver-icarus.c b/driver-icarus.c index a463c281..e27a1621 100644 --- a/driver-icarus.c +++ b/driver-icarus.c @@ -179,7 +179,7 @@ struct ICARUS_INFO { }; // One for each possible device -static struct ICARUS_INFO *icarus_info[MAX_DEVICES]; +static struct ICARUS_INFO **icarus_info; struct device_api icarus_api; @@ -421,6 +421,7 @@ static bool icarus_detect_one(const char *devpath) icarus->device_path = strdup(devpath); icarus->threads = 1; add_cgpu(icarus); + icarus_info = realloc(icarus_info, sizeof(struct ICARUS_INFO *) * (total_devices + 2)); applog(LOG_INFO, "Found Icarus at %s, mark as %d", devpath, icarus->device_id); diff --git a/driver-opencl.c b/driver-opencl.c index 2d6f9759..17be40ae 100644 --- a/driver-opencl.c +++ b/driver-opencl.c @@ -1126,9 +1126,6 @@ static void opencl_detect() nDevs = 0; } - if (MAX_DEVICES - total_devices < nDevs) - nDevs = MAX_DEVICES - total_devices; - if (!nDevs) return; diff --git a/driver-ztex.c b/driver-ztex.c index c881cd7d..e38be748 100644 --- a/driver-ztex.c +++ b/driver-ztex.c @@ -66,8 +66,6 @@ static void ztex_detect(void) applog(LOG_WARNING, "Found %d ztex board(s)", cnt); for (i = 0; i < cnt; i++) { - if (total_devices == MAX_DEVICES) - break; ztex = calloc(1, sizeof(struct cgpu_info)); ztex->api = &ztex_api; ztex->device_ztex = ztex_devices[i]->dev; diff --git a/fpgautils.c b/fpgautils.c index 70387c69..14c1c79d 100644 --- a/fpgautils.c +++ b/fpgautils.c @@ -40,9 +40,6 @@ char serial_autodetect_udev(detectone_func_t detectone, const char*prodname) { - if (total_devices == MAX_DEVICES) - return 0; - struct udev *udev = udev_new(); struct udev_enumerate *enumerate = udev_enumerate_new(udev); struct udev_list_entry *list_entry; @@ -64,9 +61,6 @@ serial_autodetect_udev(detectone_func_t detectone, const char*prodname) ++found; udev_device_unref(device); - - if (total_devices == MAX_DEVICES) - break; } udev_enumerate_unref(enumerate); udev_unref(udev); @@ -85,9 +79,6 @@ char serial_autodetect_devserial(detectone_func_t detectone, const char*prodname) { #ifndef WIN32 - if (total_devices == 
MAX_DEVICES) - return 0; - DIR *D; struct dirent *de; const char udevdir[] = "/dev/serial/by-id"; @@ -104,11 +95,8 @@ serial_autodetect_devserial(detectone_func_t detectone, const char*prodname) if (!strstr(de->d_name, prodname)) continue; strcpy(devfile, de->d_name); - if (detectone(devpath)) { + if (detectone(devpath)) ++found; - if (total_devices == MAX_DEVICES) - break; - } } closedir(D); @@ -121,9 +109,6 @@ serial_autodetect_devserial(detectone_func_t detectone, const char*prodname) char _serial_detect(const char*dname, detectone_func_t detectone, autoscan_func_t autoscan, bool forceauto) { - if (total_devices == MAX_DEVICES) - return 0; - struct string_elist *iter, *tmp; const char*s, *p; bool inhibitauto = false; @@ -148,12 +133,10 @@ _serial_detect(const char*dname, detectone_func_t detectone, autoscan_func_t aut string_elist_del(iter); inhibitauto = true; ++found; - if (total_devices == MAX_DEVICES) - break; } } - if ((forceauto || !inhibitauto) && autoscan && total_devices < MAX_DEVICES) + if ((forceauto || !inhibitauto) && autoscan) found += autoscan(); return found; diff --git a/miner.h b/miner.h index 78d3b80f..66d75717 100644 --- a/miner.h +++ b/miner.h @@ -585,7 +585,6 @@ extern int add_pool_details(bool live, char *url, char *user, char *pass); #define ADD_POOL_OK 0 #define MAX_GPUDEVICES 16 -#define MAX_DEVICES 64 #define MAX_POOLS (32) #define MIN_INTENSITY -10 @@ -607,7 +606,7 @@ extern double total_secs; extern int mining_threads; extern struct cgpu_info *cpus; extern int total_devices; -extern struct cgpu_info *devices[]; +extern struct cgpu_info **devices; extern int total_pools; extern struct pool *pools[MAX_POOLS]; extern const char *algo_names[]; From c027492fa4b7e11b1b23f81b044149bf0fe56f59 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Mon, 25 Jun 2012 17:06:26 +1000 Subject: [PATCH 40/47] Make the pools array a dynamically allocated array to allow unlimited pools to be added. 
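The same realloc() growth pattern used for the devices array applies to pools, so the MAX_POOLS cap and the ADD_POOL_MAXIMUM/ADD_POOL_OK return codes can go: add_pool_details() becomes void and its callers drop their capacity handling. From the api.c hunk below:

	/* before: the fixed pools array could be full */
	if (add_pool_details(true, url, user, pass) == ADD_POOL_MAXIMUM) {
		strcpy(io_buffer, message(MSG_TOOMANYP, MAX_POOLS, NULL, isjson));
		return;
	}

	/* after: the call cannot fail for capacity, so it is unconditional */
	add_pool_details(true, url, user, pass);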
--- api.c | 5 +---- cgminer.c | 17 +++++------------ miner.h | 8 ++------ 3 files changed, 8 insertions(+), 22 deletions(-) diff --git a/api.c b/api.c index 42a38c77..ff26e54f 100644 --- a/api.c +++ b/api.c @@ -1636,10 +1636,7 @@ static void addpool(__maybe_unused SOCKETTYPE c, char *param, bool isjson) return; } - if (add_pool_details(true, url, user, pass) == ADD_POOL_MAXIMUM) { - strcpy(io_buffer, message(MSG_TOOMANYP, MAX_POOLS, NULL, isjson)); - return; - } + add_pool_details(true, url, user, pass); ptr = escape_string(url, isjson); strcpy(io_buffer, message(MSG_ADDPOOL, 0, ptr, isjson)); diff --git a/cgminer.c b/cgminer.c index 3dc506b5..774c5f68 100644 --- a/cgminer.c +++ b/cgminer.c @@ -190,7 +190,7 @@ unsigned int found_blocks; unsigned int local_work; unsigned int total_go, total_ro; -struct pool *pools[MAX_POOLS]; +struct pool **pools; static struct pool *currentpool = NULL; int total_pools; @@ -395,6 +395,7 @@ static struct pool *add_pool(void) if (!pool) quit(1, "Failed to malloc pool in add_pool"); pool->pool_no = pool->prio = total_pools; + pools = realloc(pools, sizeof(struct pool *) * (total_pools + 2)); pools[total_pools++] = pool; if (unlikely(pthread_mutex_init(&pool->pool_lock, NULL))) quit(1, "Failed to pthread_mutex_init in add_pool"); @@ -4702,13 +4703,10 @@ char *curses_input(const char *query) } #endif -int add_pool_details(bool live, char *url, char *user, char *pass) +void add_pool_details(bool live, char *url, char *user, char *pass) { struct pool *pool; - if (total_pools == MAX_POOLS) - return ADD_POOL_MAXIMUM; - pool = add_pool(); pool->rpc_url = url; @@ -4724,8 +4722,6 @@ int add_pool_details(bool live, char *url, char *user, char *pass) pool->enabled = POOL_ENABLED; if (live && !pool_active(pool, false)) pool->idle = true; - - return ADD_POOL_OK; } #ifdef HAVE_CURSES @@ -4735,10 +4731,6 @@ static bool input_pool(bool live) bool ret = false; immedok(logwin, true); - if (total_pools == MAX_POOLS) { - wlogprint("Reached maximum number of pools.\n"); - goto out; - } wlogprint("Input server details.\n"); url = curses_input("URL"); @@ -4766,7 +4758,8 @@ static bool input_pool(bool live) if (!pass) goto out; - ret = (add_pool_details(live, url, user, pass) == ADD_POOL_OK); + add_pool_details(live, url, user, pass); + ret = true; out: immedok(logwin, false); diff --git a/miner.h b/miner.h index 66d75717..024bce68 100644 --- a/miner.h +++ b/miner.h @@ -579,13 +579,9 @@ extern void api(int thr_id); extern struct pool *current_pool(void); extern int active_pools(void); -extern int add_pool_details(bool live, char *url, char *user, char *pass); - -#define ADD_POOL_MAXIMUM 1 -#define ADD_POOL_OK 0 +extern void add_pool_details(bool live, char *url, char *user, char *pass); #define MAX_GPUDEVICES 16 -#define MAX_POOLS (32) #define MIN_INTENSITY -10 #define _MIN_INTENSITY_STR "-10" @@ -608,7 +604,7 @@ extern struct cgpu_info *cpus; extern int total_devices; extern struct cgpu_info **devices; extern int total_pools; -extern struct pool *pools[MAX_POOLS]; +extern struct pool **pools; extern const char *algo_names[]; extern enum sha256_algos opt_algo; extern struct strategies strategies[]; From cffd5aee30315db5e0b47b872370a97c04b75a00 Mon Sep 17 00:00:00 2001 From: Kano Date: Mon, 25 Jun 2012 18:01:49 +1000 Subject: [PATCH 41/47] miner.php make fontname/size configurable with myminer.php --- miner.php | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/miner.php b/miner.php index 728a4ebc..cdcfbac0 100644 --- a/miner.php +++ 
b/miner.php @@ -60,6 +60,11 @@ $warnfont = ''; $warnoff = ''; $dfmt = 'H:i:s j-M-Y \U\T\CP'; # +global $miner_font_family, $miner_font_size; +# +$miner_font_family = 'verdana,arial,sans'; +$miner_font_size = '13pt'; +# # This below allows you to put your own settings into a seperate file # so you don't need to update miner.php with your preferred settings # every time a new version is released @@ -78,6 +83,7 @@ $rigerror = array(); # function htmlhead($checkapi) { + global $miner_font_family, $miner_font_size; global $error, $readonly, $here; if ($readonly === false && $checkapi === true) { @@ -87,21 +93,22 @@ function htmlhead($checkapi) || $access['STATUS']['STATUS'] != 'S') $readonly = true; } -?> -Mine + $miner_font = "font-family:$miner_font_family; font-size:$miner_font_size;"; + + echo "Mine @@ -785,7 +810,7 @@ function doforeach($cmd, $des, $sum, $head, $datetime) foreach ($dthead as $name => $x) { if ($item == 'STATUS' && $name == '') - echo ""; + echo ""; else { if (isset($row[$name])) @@ -868,7 +893,7 @@ function doforeach($cmd, $des, $sum, $head, $datetime) if ($rig === 'total') echo "Total:"; else - echo ""; + echo ""; } else { @@ -891,17 +916,30 @@ function doforeach($cmd, $des, $sum, $head, $datetime) } } # +function refreshbuttons() +{ + global $readonly; + global $ignorerefresh, $changerefresh, $autorefresh; + + if ($ignorerefresh == false && $changerefresh == true) + { + echo '    '; + echo ""; + echo ""; + echo ""; + } +} +# function doOne($rig, $preprocess) { - global $error, $readonly, $notify; - global $rigs; + global $error, $readonly, $notify, $rigs; - htmlhead(true); + htmlhead(true, $rig); $error = null; echo ""; + echo ""; if (count($rigs) > 1) echo ""; echo "
"; - echo " "; @@ -913,6 +951,7 @@ function doOne($rig, $preprocess) echo ""; echo " "; } + refreshbuttons(); echo "
"; if ($preprocess != null) @@ -938,6 +977,14 @@ function display() global $tablebegin, $tableend; global $miner, $port; global $error, $readonly, $notify, $rigs; + global $ignorerefresh, $autorefresh; + + if ($ignorerefresh == false) + { + $ref = trim(getparam('ref', true)); + if ($ref != null && $ref != '') + $autorefresh = intval($ref); + } $rig = trim(getparam('rig', true)); @@ -1006,10 +1053,12 @@ function display() return; } - htmlhead(false); + htmlhead(false, null); echo "
"; echo ""; + echo " "; + refreshbuttons(); echo "
"; if ($preprocess != null) From 5ef85284706d1fed3b1d254ca54f7a348e63107e Mon Sep 17 00:00:00 2001 From: Kano Date: Mon, 25 Jun 2012 20:02:40 +1000 Subject: [PATCH 44/47] miner.php remove unneeded '.'s --- miner.php | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/miner.php b/miner.php index 3b03a0a5..7c4d8891 100644 --- a/miner.php +++ b/miner.php @@ -98,7 +98,7 @@ function htmlhead($checkapi, $rig) $paramrig = ''; if ($rig != null && $rig != '') - $paramrig .= "&rig=$rig"; + $paramrig = "&rig=$rig"; if ($ignorerefresh == true || $autorefresh == 0) $refreshmeta = ''; @@ -947,7 +947,7 @@ function doOne($rig, $preprocess) { $rg = ''; if (count($rigs) > 1) - $rg .= " Rig $rig"; + $rg = " Rig $rig"; echo ""; echo " "; } From e53c033aa877361a3b29f0b0da1b380d04edfe09 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Mon, 25 Jun 2012 20:12:55 +1000 Subject: [PATCH 45/47] Icarus needs not +2 for realloc, just +1. --- driver-icarus.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/driver-icarus.c b/driver-icarus.c index e27a1621..4d8ac532 100644 --- a/driver-icarus.c +++ b/driver-icarus.c @@ -421,7 +421,7 @@ static bool icarus_detect_one(const char *devpath) icarus->device_path = strdup(devpath); icarus->threads = 1; add_cgpu(icarus); - icarus_info = realloc(icarus_info, sizeof(struct ICARUS_INFO *) * (total_devices + 2)); + icarus_info = realloc(icarus_info, sizeof(struct ICARUS_INFO *) * (total_devices + 1)); applog(LOG_INFO, "Found Icarus at %s, mark as %d", devpath, icarus->device_id); From cb1634debfa1f0af578a7116cc9eec36fd5d3c1a Mon Sep 17 00:00:00 2001 From: Kano Date: Mon, 25 Jun 2012 22:35:16 +1000 Subject: [PATCH 46/47] fpgautils.c - linux ordering back to the correct way --- fpgautils.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/fpgautils.c b/fpgautils.c index 14c1c79d..59eb7bcd 100644 --- a/fpgautils.c +++ b/fpgautils.c @@ -181,28 +181,33 @@ serial_open(const char*devpath, unsigned long baud, signed short timeout, bool p if (unlikely(fdDev == -1)) return -1; - struct termios pattr; - tcgetattr(fdDev, &pattr); - pattr.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON); - pattr.c_oflag &= ~OPOST; - pattr.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN); - pattr.c_cflag &= ~(CSIZE | PARENB); - pattr.c_cflag |= CS8; + struct termios my_termios; + + tcgetattr(fdDev, &my_termios); switch (baud) { case 0: break; - case 115200: pattr.c_cflag = B115200; break; + case 115200: my_termios.c_cflag = B115200; break; default: applog(LOG_WARNING, "Unrecognized baud rate: %lu", baud); } - pattr.c_cflag |= CREAD | CLOCAL; + + my_termios.c_cflag |= CS8; + my_termios.c_cflag |= CREAD; + my_termios.c_cflag |= CLOCAL; + my_termios.c_cflag &= ~(CSIZE | PARENB); + + my_termios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | + ISTRIP | INLCR | IGNCR | ICRNL | IXON); + my_termios.c_oflag &= ~OPOST; + my_termios.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN); if (timeout >= 0) { - pattr.c_cc[VTIME] = (cc_t)timeout; - pattr.c_cc[VMIN] = 0; + my_termios.c_cc[VTIME] = (cc_t)timeout; + my_termios.c_cc[VMIN] = 0; } - tcsetattr(fdDev, TCSANOW, &pattr); + tcsetattr(fdDev, TCSANOW, &my_termios); if (purge) tcflush(fdDev, TCIOFLUSH); return fdDev; From 3763cb0badead6d6dd6f473e94e36b0895d405a3 Mon Sep 17 00:00:00 2001 From: Kano Date: Mon, 25 Jun 2012 23:16:07 +1000 Subject: [PATCH 47/47] icarus - must allows allocate memory for icarus_info --- driver-icarus.c | 9 ++++----- 1 file changed, 4 
insertions(+), 5 deletions(-) diff --git a/driver-icarus.c b/driver-icarus.c index 4d8ac532..442eb9d1 100644 --- a/driver-icarus.c +++ b/driver-icarus.c @@ -426,11 +426,10 @@ static bool icarus_detect_one(const char *devpath) applog(LOG_INFO, "Found Icarus at %s, mark as %d", devpath, icarus->device_id); - if (icarus_info[icarus->device_id] == NULL) { - icarus_info[icarus->device_id] = (struct ICARUS_INFO *)malloc(sizeof(struct ICARUS_INFO)); - if (unlikely(!(icarus_info[icarus->device_id]))) - quit(1, "Failed to malloc ICARUS_INFO"); - } + // Since we are adding a new device on the end it needs to always be allocated + icarus_info[icarus->device_id] = (struct ICARUS_INFO *)malloc(sizeof(struct ICARUS_INFO)); + if (unlikely(!(icarus_info[icarus->device_id]))) + quit(1, "Failed to malloc ICARUS_INFO"); info = icarus_info[icarus->device_id];