From 750474bcfb38c58206b870bf665ba888e1ef059b Mon Sep 17 00:00:00 2001
From: Con Kolivas
Date: Sun, 24 Jun 2012 21:56:53 +1000
Subject: [PATCH] Revert "Further simplify the total_queued counting mechanism
 and do all dec_queued from the one location."

This reverts commit 790acad9f9223e4d532d8d38e00737c79b8e40fb.
---
 cgminer.c | 48 +++++++++++++++++++++++++++++-------------------
 1 file changed, 29 insertions(+), 19 deletions(-)

diff --git a/cgminer.c b/cgminer.c
index 483331af..0381b04c 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -2371,6 +2371,18 @@ void switch_pools(struct pool *selected)
 	mutex_unlock(&lp_lock);
 }
 
+static void discard_work(struct work *work)
+{
+	if (!work->clone && !work->rolls && !work->mined) {
+		if (work->pool)
+			work->pool->discarded_work++;
+		total_discarded++;
+		applog(LOG_DEBUG, "Discarded work");
+	} else
+		applog(LOG_DEBUG, "Discarded cloned or rolled work");
+	free_work(work);
+}
+
 /* Done lockless since this is not a critical value */
 static inline void inc_queued(void)
 {
@@ -2383,25 +2395,15 @@ static inline void dec_queued(void)
 	total_queued--;
 }
 
-static void discard_work(struct work *work)
+static int requests_queued(void)
 {
-	if (!work->clone)
-		dec_queued();
-
-	if (!work->clone && !work->rolls && !work->mined) {
-		if (work->pool)
-			work->pool->discarded_work++;
-		total_discarded++;
-		applog(LOG_DEBUG, "Discarded work");
-	} else
-		applog(LOG_DEBUG, "Discarded cloned or rolled work");
-	free_work(work);
+	return requests_staged() - staged_extras;
 }
 
 static int discard_stale(void)
 {
 	struct work *work, *tmp;
-	int stale = 0;
+	int i, stale = 0;
 
 	mutex_lock(stgd_lock);
 	HASH_ITER(hh, staged_work, work, tmp) {
@@ -2417,6 +2419,10 @@ static int discard_stale(void)
 
 	applog(LOG_DEBUG, "Discarded %d stales that didn't match current hash", stale);
 
+	/* Dec queued outside the loop to not have recursive locks */
+	for (i = 0; i < stale; i++)
+		dec_queued();
+
 	return stale;
 }
 
@@ -3492,11 +3498,11 @@ static void pool_resus(struct pool *pool)
 
 static bool queue_request(struct thr_info *thr, bool needed)
 {
-	int rs = requests_staged();
+	int rs = requests_staged(), rq = requests_queued();
 	struct workio_cmd *wc;
 
-	if ((total_queued >= opt_queue && rs >= mining_threads) ||
-	    total_queued >= mining_threads)
+	if ((rq >= mining_threads || (rq >= opt_queue && rs >= mining_threads)) &&
+	    total_queued >= opt_queue)
 		return true;
 
 	/* fill out work request message */
@@ -3515,7 +3521,7 @@ static bool queue_request(struct thr_info *thr, bool needed)
 	/* If we're queueing work faster than we can stage it, consider the
 	 * system lagging and allow work to be gathered from another pool if
 	 * possible */
-	if (total_queued && needed && !rs && !opt_fail_only)
+	if (rq && needed && !requests_staged() && !opt_fail_only)
 		wc->lagging = true;
 
 	applog(LOG_DEBUG, "Queueing getwork request to work thread");
@@ -3665,7 +3671,7 @@ static bool get_work(struct work *work, bool requested, struct thr_info *thr,
 	}
 retry:
 	pool = current_pool();
-	if (!requested || total_queued < opt_queue) {
+	if (!requested || requests_queued() < opt_queue) {
 		if (unlikely(!queue_request(thr, true))) {
 			applog(LOG_WARNING, "Failed to queue_request in get_work");
 			goto out;
@@ -3715,6 +3721,7 @@ retry:
 	}
 
 	if (stale_work(work_heap, false)) {
+		dec_queued();
 		discard_work(work_heap);
 		goto retry;
 	}
@@ -3730,6 +3737,8 @@ retry:
 	work_heap = clone_work(work_heap);
 	memcpy(work, work_heap, sizeof(struct work));
 	free_work(work_heap);
+	if (!work->clone)
+		dec_queued();
 
 	ret = true;
 out:
@@ -4327,7 +4336,8 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 		struct timeval now;
 
 		sleep(interval);
-		queue_request(NULL, false);
+		if (requests_queued() < opt_queue || total_queued < opt_queue)
+			queue_request(NULL, false);
 
 		age_work();
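
The counting logic this revert brings back can be exercised in isolation. The following is a minimal standalone sketch, not cgminer code: the globals (staged_works and friends) and their sample values are invented stand-ins for cgminer's real state, and have_enough_work() simply restates the condition queue_request() checks in the restored hunk above.

/*
 * Standalone sketch -- NOT part of the patch above.  It mimics the
 * queue-accounting decision restored in queue_request().  All globals
 * and sample values are stand-ins chosen only for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

static int total_queued   = 5;	/* getworks requested but not yet consumed */
static int staged_works   = 6;	/* works sitting in the staged hash */
static int staged_extras  = 2;	/* cloned/longpoll works among the staged */
static int mining_threads = 4;
static int opt_queue      = 1;

static int requests_staged(void)
{
	return staged_works;
}

/* Helper restored by the revert: staged requests net of cloned extras. */
static int requests_queued(void)
{
	return requests_staged() - staged_extras;
}

/* Restored gating test: true means "enough work queued, skip a getwork". */
static bool have_enough_work(void)
{
	int rs = requests_staged(), rq = requests_queued();

	return (rq >= mining_threads || (rq >= opt_queue && rs >= mining_threads)) &&
	       total_queued >= opt_queue;
}

int main(void)
{
	printf("requests_queued() = %d\n", requests_queued());
	printf("skip new getwork: %s\n", have_enough_work() ? "yes" : "no");
	return 0;
}

With the sample values above, rq is 4 and total_queued is 5, so the sketch reports that a new getwork would be skipped; lowering staged_works or total_queued flips the decision, which is the behaviour the revert reinstates.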