@@ -2371,6 +2371,18 @@ void switch_pools(struct pool *selected)
 	mutex_unlock(&lp_lock);
 }
 
+static void discard_work(struct work *work)
+{
+	if (!work->clone && !work->rolls && !work->mined) {
+		if (work->pool)
+			work->pool->discarded_work++;
+		total_discarded++;
+		applog(LOG_DEBUG, "Discarded work");
+	} else
+		applog(LOG_DEBUG, "Discarded cloned or rolled work");
+	free_work(work);
+}
+
 /* Done lockless since this is not a critical value */
 static inline void inc_queued(void)
 {
@@ -2383,25 +2395,15 @@ static inline void dec_queued(void)
 	total_queued--;
 }
 
-static void discard_work(struct work *work)
+static int requests_queued(void)
 {
-	if (!work->clone)
-		dec_queued();
-
-	if (!work->clone && !work->rolls && !work->mined) {
-		if (work->pool)
-			work->pool->discarded_work++;
-		total_discarded++;
-		applog(LOG_DEBUG, "Discarded work");
-	} else
-		applog(LOG_DEBUG, "Discarded cloned or rolled work");
-	free_work(work);
+	return requests_staged() - staged_extras;
 }
 
 static int discard_stale(void)
 {
 	struct work *work, *tmp;
-	int stale = 0;
+	int i, stale = 0;
 
 	mutex_lock(stgd_lock);
 	HASH_ITER(hh, staged_work, work, tmp) {
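
Note: the new requests_queued() helper nets out locally manufactured work, since staged_extras counts staged items that came from cloning or rolling rather than from a getwork round trip. A standalone sketch of that accounting, with invented values (only requests_staged() and staged_extras are real cgminer names):

#include <stdio.h>

/* Toy model of the patch's bookkeeping; counter names mirror the
 * cgminer globals, but the values and this program are made up. */
int main(void)
{
	int staged = 8;		/* items in the staged_work hash */
	int staged_extras = 5;	/* of those, clones/rolled work */

	/* requests_queued(): staged work that real getwork
	 * requests actually produced */
	int rq = staged - staged_extras;

	printf("real fetched work staged: %d\n", rq);
	return 0;
}
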
@@ -2417,6 +2419,10 @@ static int discard_stale(void)
 
 	applog(LOG_DEBUG, "Discarded %d stales that didn't match current hash", stale);
 
+	/* Dec queued outside the loop to not have recursive locks */
+	for (i = 0; i < stale; i++)
+		dec_queued();
+
 	return stale;
 }
 
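
Note: the added comment suggests that dec_queued(), or something reachable from it, can take a lock that would already be held inside the HASH_ITER loop, so the decrements are deferred until after the loop. For illustration only, this is what re-taking a default (non-recursive) pthread mutex looks like; an error-checking mutex makes the demo report the failure instead of hanging:

#include <pthread.h>
#include <stdio.h>

/* Standalone illustration, not cgminer code: relocking a mutex the
 * same thread already holds.  With PTHREAD_MUTEX_ERRORCHECK the
 * second lock fails with EDEADLK rather than deadlocking. */
int main(void)
{
	pthread_mutex_t lock;
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&lock, &attr);

	pthread_mutex_lock(&lock);
	if (pthread_mutex_lock(&lock) != 0)
		printf("recursive lock refused, as expected\n");
	pthread_mutex_unlock(&lock);
	return 0;
}
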
@@ -3492,11 +3498,11 @@ static void pool_resus(struct pool *pool)
 
 static bool queue_request(struct thr_info *thr, bool needed)
 {
-	int rs = requests_staged();
+	int rs = requests_staged(), rq = requests_queued();
 	struct workio_cmd *wc;
 
-	if ((total_queued >= opt_queue && rs >= mining_threads) ||
-	    total_queued >= mining_threads)
+	if ((rq >= mining_threads || (rq >= opt_queue && rs >= mining_threads)) &&
+	    total_queued >= opt_queue)
 		return true;
 
 	/* fill out work request message */
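
Note: restating the rewritten early-return test (my paraphrase, not from the patch): we already have enough work if real queued requests cover every mining thread, or cover --queue while clones keep each thread supplied, and in either case total_queued must also reach opt_queue. As a standalone predicate:

#include <stdio.h>

/* have_enough_work() is a hypothetical restatement of the new test
 * in queue_request(); parameter names mirror the cgminer globals. */
static int have_enough_work(int rq, int rs, int total_queued,
			    int mining_threads, int opt_queue)
{
	return (rq >= mining_threads ||
		(rq >= opt_queue && rs >= mining_threads)) &&
	       total_queued >= opt_queue;
}

int main(void)
{
	/* e.g. 4 threads, --queue 2: plentiful clones (rs = 6) no
	 * longer satisfy the test until rq also reaches opt_queue */
	printf("%d\n", have_enough_work(1, 6, 6, 4, 2));	/* 0 */
	printf("%d\n", have_enough_work(2, 6, 6, 4, 2));	/* 1 */
	return 0;
}
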
@@ -3515,7 +3521,7 @@ static bool queue_request(struct thr_info *thr, bool needed)
 	/* If we're queueing work faster than we can stage it, consider the
 	 * system lagging and allow work to be gathered from another pool if
 	 * possible */
-	if (total_queued && needed && !rs && !opt_fail_only)
+	if (rq && needed && !requests_staged() && !opt_fail_only)
 		wc->lagging = true;
 
 	applog(LOG_DEBUG, "Queueing getwork request to work thread");
@@ -3665,7 +3671,7 @@ static bool get_work(struct work *work, bool requested, struct thr_info *thr,
 	}
 
retry:
 	pool = current_pool();
-	if (!requested || total_queued < opt_queue) {
+	if (!requested || requests_queued() < opt_queue) {
 		if (unlikely(!queue_request(thr, true))) {
 			applog(LOG_WARNING, "Failed to queue_request in get_work");
 			goto out;
@@ -3715,6 +3721,7 @@ retry:
 	}
 
 	if (stale_work(work_heap, false)) {
+		dec_queued();
 		discard_work(work_heap);
 		goto retry;
 	}
@@ -3730,6 +3737,8 @@ retry:
 	work_heap = clone_work(work_heap);
 	memcpy(work, work_heap, sizeof(struct work));
 	free_work(work_heap);
+	if (!work->clone)
+		dec_queued();
 
 	ret = true;
 out:
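
Note: with dec_queued() stripped out of discard_work() (second hunk), the decrement now happens at the two consumption points in get_work(): before discarding stale work, and when handing out work that is not a local clone. A toy model of that flow (everything below is invented for illustration):

#include <stdio.h>

/* Hypothetical model of where this patch now calls dec_queued().
 * discard_work() no longer decrements, so the caller does it: once
 * on the stale-retry path, once when real fetched work is served. */
static int total_queued;

static void take_work(int stale, int clone)
{
	if (stale) {
		total_queued--;	/* dec_queued() before discard_work() */
		printf("stale, retry: queued=%d\n", total_queued);
		return;
	}
	if (!clone)
		total_queued--;	/* dec_queued() for non-clone work */
	printf("served %s: queued=%d\n", clone ? "clone" : "fetched",
	       total_queued);
}

int main(void)
{
	total_queued = 2;
	take_work(1, 0);	/* stale path */
	take_work(0, 1);	/* clone: queued count untouched */
	take_work(0, 0);	/* real work consumed */
	return 0;
}
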
@@ -4327,7 +4336,8 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 		struct timeval now;
 
 		sleep(interval);
-		queue_request(NULL, false);
+		if (requests_queued() < opt_queue || total_queued < opt_queue)
+			queue_request(NULL, false);
 
 		age_work();
 
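
Note: the watchdog now tops up only when a counter has actually dipped below --queue rather than queueing a request every interval. The two counters can diverge, which is presumably why both are tested; an invented scenario:

#include <stdio.h>

/* Made-up numbers showing the two counters drifting apart:
 * total_queued tracks outstanding getwork requests, while
 * requests_queued() (staged minus clones) tracks what those
 * requests have actually delivered. */
int main(void)
{
	int opt_queue = 2;
	int total_queued = 3;		/* requests in flight */
	int staged = 4, staged_extras = 3;
	int rq = staged - staged_extras;	/* only 1 real item staged */

	if (rq < opt_queue || total_queued < opt_queue)
		printf("watchdog tops up the queue (rq=%d)\n", rq);
	return 0;
}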