Mirror of https://github.com/GOSTSec/sgminer (synced 2025-01-25 22:14:36 +00:00)
Queue requests for getwork regardless and test whether we should send for a getwork from the getwork thread itself.
commit fd0be1bb51
parent 7d77c01619
cgminer.c: 71 changed lines
--- a/cgminer.c
+++ b/cgminer.c
@@ -70,7 +70,7 @@ struct workio_cmd {
     enum workio_commands cmd;
     struct thr_info *thr;
     struct work *work;
-    bool lagging;
+    bool needed;
 };
 
 struct strategies strategies[] = {
@@ -1933,7 +1933,7 @@ static inline struct pool *select_pool(bool lagging)
 
     cp = current_pool();
 
-    if (pool_strategy != POOL_LOADBALANCE && !lagging)
+    if (pool_strategy != POOL_LOADBALANCE && (!lagging || opt_fail_only))
         pool = cp;
     else
         pool = NULL;
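Note: with this change, --failover-only (opt_fail_only) pins work fetching to the current pool even when the local queue is lagging. A condensed sketch of the resulting decision, under the assumption that a NULL result still sends the real function on to rotate through the pool list by strategy (sketch_select_pool is an illustrative name, not the code's):

/* Condensed view of select_pool() after this change.  When NULL
 * results, the real function goes on to rotate through the pool
 * list according to the active strategy (not shown here). */
static struct pool *sketch_select_pool(bool lagging)
{
    struct pool *cp = current_pool();

    /* --failover-only pins fetching to the current pool, even
     * when the local queue is lagging. */
    if (pool_strategy != POOL_LOADBALANCE && (!lagging || opt_fail_only))
        return cp;
    return NULL;
}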
@@ -1997,6 +1997,8 @@ retry:
     if (!rc && retries < 3)
         goto retry;
 
+    pool->currently_rolling = !!work->rolltime;
+
     gettimeofday(&tv_end, NULL);
     timersub(&tv_end, &tv_start, &tv_elapsed);
     pool_stats->getwork_wait_rolling += ((double)tv_elapsed.tv_sec + ((double)tv_elapsed.tv_usec / 1000000)) * 0.63;
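The new currently_rolling field records only whether the pool's most recent work can be rolled at all: `!!work->rolltime` collapses the rolltime (seconds the work may be reused) into a strict 0/1 boolean. A minimal standalone illustration of the idiom:

#include <stdbool.h>
#include <stdio.h>

/* The !! idiom collapses any non-zero value into a strict 0/1,
 * so the flag stores only "can this work be rolled at all". */
int main(void)
{
    int rolltime = 120;                   /* work reusable for 120s */
    bool currently_rolling = !!rolltime;  /* true */

    rolltime = 0;
    currently_rolling = !!rolltime;       /* false: cannot be rolled */
    printf("%d\n", currently_rolling);
    return 0;
}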
@@ -2285,10 +2287,19 @@ static int global_queued(void)
     return ret;
 }
 
-static bool enough_work(void)
+static void *get_work_thread(void *userdata)
 {
+    struct workio_cmd *wc = (struct workio_cmd *)userdata;
     int cq, cs, ts, tq, maxq = opt_queue + mining_threads;
     struct pool *pool = current_pool();
+    struct curl_ent *ce = NULL;
+    struct work *ret_work;
+    bool lagging = false;
+    int failures = 0;
+
+    pthread_detach(pthread_self());
+
+    applog(LOG_DEBUG, "Creating extra get work thread");
 
     mutex_lock(&qd_lock);
     cq = __pool_queued(pool);
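get_work_thread() now receives the raw workio_cmd and immediately detaches itself, so the thread that spawned it never has to join it. A self-contained sketch of that pattern (the payload and function names here are illustrative, not the real ones):

#include <pthread.h>
#include <stdlib.h>

/* Illustrative thread body following the same pattern as
 * get_work_thread() above: detach first, then do the work and
 * free the request; no pthread_join() is ever required. */
struct request { int id; };

static void *worker(void *userdata)
{
    struct request *req = userdata;

    pthread_detach(pthread_self());
    /* ... fetch and stage work for req->id ... */
    free(req);
    return NULL;
}

/* Spawner side: fire and forget. */
static int spawn_worker(struct request *req)
{
    pthread_t t;

    return pthread_create(&t, NULL, worker, req);
}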
@@ -2300,27 +2311,9 @@ static bool enough_work(void)
     ts = __total_staged();
     mutex_unlock(stgd_lock);
 
-    if (((cs || cq >= opt_queue) && ts >= maxq) ||
-        ((cs || cq) && tq >= maxq))
-        return true;
-    return false;
-}
-
-/* ce and pool may appear uninitialised at push_curl_entry, but they're always
- * set when we don't have opt_benchmark enabled */
-static void *get_work_thread(void *userdata)
-{
-    struct workio_cmd *wc = (struct workio_cmd *)userdata;
-    struct pool * uninitialised_var(pool);
-    struct curl_ent *ce = NULL;
-    struct work *ret_work;
-    int failures = 0;
-
-    pthread_detach(pthread_self());
-
-    applog(LOG_DEBUG, "Creating extra get work thread");
-
-    if (!wc->lagging && enough_work())
+    if (!ts)
+        lagging = true;
+    else if (((cs || cq >= opt_queue) && ts >= maxq) || ((cs || cq) && tq >= maxq))
         goto out;
 
     ret_work = make_work();
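The enough-work test now runs inside the getwork thread itself: an empty staged queue (ts == 0) flags the system as lagging so select_pool() may divert to another pool, while sufficiently full queues skip the fetch entirely via goto out. Here cq/cs are the current pool's queued and staged counts, tq/ts the global totals. A restatement of the decision as a hypothetical helper (decide_fetch is my name, not the code's; the globals are passed in to keep it self-contained):

/* Hypothetical helper restating the decision above.  cq/cs:
 * current pool's queued/staged counts; tq/ts: global totals;
 * maxq = opt_queue + mining_threads.
 * Returns -1 when lagging (fetch, and let select_pool() divert),
 *          0 when enough work is already queued/staged (skip fetch),
 *          1 for a normal fetch from the current pool. */
static int decide_fetch(int cq, int cs, int tq, int ts,
                        int opt_queue, int maxq)
{
    if (!ts)
        return -1;      /* nothing staged at all: we are lagging */
    if (((cs || cq >= opt_queue) && ts >= maxq) ||
        ((cs || cq) && tq >= maxq))
        return 0;       /* queues already hold enough work */
    return 1;
}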
@@ -2332,7 +2325,7 @@ static void *get_work_thread(void *userdata)
     if (opt_benchmark)
         get_benchmark_work(ret_work);
     else {
-        pool = ret_work->pool = select_pool(wc->lagging);
+        pool = ret_work->pool = select_pool(lagging);
         inc_queued(pool);
 
         ce = pop_curl_entry(pool);
@ -3820,33 +3813,7 @@ static void pool_resus(struct pool *pool)
|
|||||||
|
|
||||||
bool queue_request(struct thr_info *thr, bool needed)
|
bool queue_request(struct thr_info *thr, bool needed)
|
||||||
{
|
{
|
||||||
int cq, cs, ts, tq, maxq = opt_queue + mining_threads;
|
|
||||||
struct pool *pool = current_pool();
|
|
||||||
struct workio_cmd *wc;
|
struct workio_cmd *wc;
|
||||||
bool lag = false;
|
|
||||||
|
|
||||||
mutex_lock(&qd_lock);
|
|
||||||
cq = __pool_queued(pool);
|
|
||||||
tq = __global_queued();
|
|
||||||
mutex_unlock(&qd_lock);
|
|
||||||
|
|
||||||
mutex_lock(stgd_lock);
|
|
||||||
cs = __pool_staged(pool);
|
|
||||||
ts = __total_staged();
|
|
||||||
mutex_unlock(stgd_lock);
|
|
||||||
|
|
||||||
if (needed && cq >= maxq && !ts && !opt_fail_only) {
|
|
||||||
/* If we're queueing work faster than we can stage it, consider
|
|
||||||
* the system lagging and allow work to be gathered from
|
|
||||||
* another pool if possible */
|
|
||||||
lag = true;
|
|
||||||
} else {
|
|
||||||
/* Test to make sure we have enough work for pools without rolltime
|
|
||||||
* and enough original work for pools with rolltime */
|
|
||||||
if (((cs || cq >= opt_queue) && ts >= maxq) ||
|
|
||||||
((cs || cq) && tq >= maxq))
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* fill out work request message */
|
/* fill out work request message */
|
||||||
wc = calloc(1, sizeof(*wc));
|
wc = calloc(1, sizeof(*wc));
|
||||||
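queue_request() is thus reduced to building and pushing the message: it no longer touches qd_lock or stgd_lock, so callers on the mining hot path avoid that lock traffic, and every request reaches the getwork thread, which makes the fetch-or-skip decision. The call shape for callers stays the same; something like (illustrative, assuming a thr_info pointer named mythr):

/* Typical call site (illustrative): ask for work, flagging whether
 * the caller actually needs it now.  Throttling happens later, in
 * get_work_thread(), not here. */
if (!queue_request(mythr, true))
    applog(LOG_ERR, "Failed to queue_request");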
@@ -3857,7 +3824,7 @@ bool queue_request(struct thr_info *thr, bool needed)
 
     wc->cmd = WC_GET_WORK;
     wc->thr = thr;
-    wc->lagging = lag;
+    wc->needed = needed;
 
     applog(LOG_DEBUG, "Queueing getwork request to work thread");
 
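Taken together, the getwork path after this commit looks roughly as follows. This is a condensed, comment-only view assuming the usual cgminer arrangement where the workio thread services WC_GET_WORK by spawning the getwork thread (per the "Creating extra get work thread" log above), not a verbatim excerpt:

/*
 * miner thread:   queue_request(thr, needed)
 *                     -> always queues a WC_GET_WORK message
 * workio thread:  pops WC_GET_WORK
 *                     -> spawns a detached get_work_thread()
 * getwork thread: samples queued/staged counts under the locks
 *                     -> enough work staged?   exit without fetching
 *                     -> nothing staged (lag)? select_pool(true) may
 *                        divert to another pool (unless --failover-only)
 *                     -> otherwise fetch from the current pool
 */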