Mirror of https://github.com/GOSTSec/sgminer (synced 2025-01-12 07:48:22 +00:00)
Detect when the primary pool is lagging and start queueing requests on backup pools if possible before needing to roll work.
This commit is contained in:
parent afcff07012
commit 508c5c4b12

main.c (29 changed lines)
@@ -87,6 +87,7 @@ struct workio_cmd {
 	union {
 		struct work *work;
 	} u;
+	bool lagging;
 };
 
 enum sha256_algos {
@@ -930,14 +931,14 @@ static const char *rpc_req =
 	"{\"method\": \"getwork\", \"params\": [], \"id\":0}\r\n";
 
 /* Select any active pool in a rotating fashion when loadbalance is chosen */
-static inline struct pool *select_pool(void)
+static inline struct pool *select_pool(bool lagging)
 {
-	static int rotating_pool;
+	static int rotating_pool = 0;
 	struct pool *pool, *cp;
 
 	cp = current_pool();
 
-	if (pool_strategy != POOL_LOADBALANCE)
+	if (pool_strategy != POOL_LOADBALANCE && !lagging)
 		pool = cp;
 	else
 		pool = NULL;
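Note: the effect of the new lagging parameter is that a request flagged as lagging now falls through to the same rotating fallback that load-balance uses, instead of staying pinned to the current pool. The rotation loop itself sits just after this hunk and is not shown here; the toy below is a minimal sketch of that decision with made-up pool data, assuming a cgminer-style pool list with idle/enabled flags (illustrative names, not the real globals in main.c).

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins; the real struct pool and globals live in main.c. */
struct pool { const char *name; bool idle; bool enabled; };

enum pool_strategy { POOL_FAILOVER, POOL_LOADBALANCE };

/* Toy version of the select_pool() decision after this change: stay on the
 * current pool unless load-balancing OR the caller reported lag, in which
 * case rotate through the pool list looking for a usable pool. */
static struct pool *select_pool_sketch(struct pool *pools, int total_pools,
				       int *rotating, struct pool *cp,
				       enum pool_strategy strategy, bool lagging)
{
	struct pool *pool;

	if (strategy != POOL_LOADBALANCE && !lagging)
		pool = cp;
	else
		pool = NULL;

	while (!pool) {
		if (++(*rotating) >= total_pools)
			*rotating = 0;
		pool = &pools[*rotating];
		/* Skip idle or disabled pools unless we are back at the
		 * current pool (keeps the loop finite). */
		if ((pool->idle || !pool->enabled) && pool != cp)
			pool = NULL;
	}
	return pool;
}

int main(void)
{
	struct pool pools[] = {
		{ "primary", false, true },
		{ "backup",  false, true },
	};
	int rotating = 0;

	/* Failover strategy, no lag: stick with the primary. */
	printf("%s\n", select_pool_sketch(pools, 2, &rotating, &pools[0],
					  POOL_FAILOVER, false)->name);
	/* Same strategy, but the request was flagged as lagging: rotate. */
	printf("%s\n", select_pool_sketch(pools, 2, &rotating, &pools[0],
					  POOL_FAILOVER, true)->name);
	return 0;
}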
@@ -954,18 +955,20 @@ static inline struct pool *select_pool(void)
 	return pool;
 }
 
-static bool get_upstream_work(struct work *work)
+static bool get_upstream_work(struct work *work, bool lagging)
 {
-	struct pool *pool = select_pool();
+	struct pool *pool;
 	json_t *val;
 	bool rc = false;
-	CURL *curl = curl_easy_init();
+	CURL *curl;
 
+	curl = curl_easy_init();
 	if (unlikely(!curl)) {
 		applog(LOG_ERR, "CURL initialisation failed");
 		return rc;
 	}
 
+	pool = select_pool(lagging);
 	val = json_rpc_call(curl, pool->rpc_url, pool->rpc_userpass, rpc_req,
 			    want_longpoll, false, pool);
 	if (unlikely(!val)) {
@@ -1099,7 +1102,7 @@ static void *get_work_thread(void *userdata)
 	}
 
 	/* obtain new work from bitcoin via JSON-RPC */
-	while (!get_upstream_work(ret_work)) {
+	while (!get_upstream_work(ret_work, wc->lagging)) {
 		if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) {
 			applog(LOG_ERR, "json_rpc_call failed, terminating workio thread");
 			free(ret_work);
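Note: between this hunk and the queue_request() changes below, the flag simply rides inside the workio_cmd: queue_request() records the decision before tq_push() hands the command to the workio thread, and get_work_thread() forwards wc->lagging into get_upstream_work(). A compressed, runnable sketch of that hand-off, with the real queue and RPC call replaced by stubs (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct work { int id; };                  /* stand-in for the real struct */

struct workio_cmd {                       /* carries the flag added above */
	struct work *work;
	bool lagging;
};

/* Stub for get_upstream_work(work, lagging): when lagging is set, pool
 * selection (see select_pool above) is allowed to pick a backup pool. */
static bool get_upstream_work_stub(struct work *w, bool lagging)
{
	printf("fetching work %d from %s pool\n", w->id,
	       lagging ? "a backup" : "the primary");
	return true;
}

int main(void)
{
	struct work w = { 1 };
	struct workio_cmd wc = { &w, false };

	/* queue_request() decides this (see the next hunks) before pushing
	 * the command onto the workio thread's queue. */
	wc.lagging = true;

	/* get_work_thread() then simply forwards the recorded decision. */
	get_upstream_work_stub(wc.work, wc.lagging);
	return 0;
}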
@@ -1987,10 +1990,14 @@ static bool queue_request(void)
 {
 	int maxq = opt_queue + mining_threads;
 	struct workio_cmd *wc;
+	int rq, rs;
+
+	rq = requests_queued();
+	rs = real_staged();
 
 	/* If we've been generating lots of local work we may already have
 	 * enough in the queue */
-	if (requests_queued() >= maxq || real_staged() >= maxq)
+	if (rq >= maxq || rs >= maxq)
 		return true;
 
 	/* fill out work request message */
@@ -2004,6 +2011,12 @@ static bool queue_request(void)
 	/* The get work does not belong to any thread */
 	wc->thr = NULL;
 
+	/* If we've queued more than 2/3 of the maximum and still have no
+	 * staged work, consider the system lagging and allow work to be
+	 * gathered from another pool if possible */
+	if (rq > (maxq * 2 / 3) && !rs)
+		wc->lagging = true;
+
 	/* send work request to workio thread */
 	if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
 		applog(LOG_ERR, "Failed to tq_push in queue_request");
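Note: the trigger is the small heuristic added in this hunk: if more than two thirds of the maximum queue depth is already outstanding and nothing has been staged, the request is marked lagging so the fetch may go to a backup pool. A self-contained illustration of the threshold, with opt_queue and mining_threads values made up for the example:

#include <stdbool.h>
#include <stdio.h>

/* Same test as in queue_request(): more than 2/3 of the allowed queue is
 * outstanding and no work has been staged yet -> treat the pool as lagging. */
static bool request_is_lagging(int rq, int rs, int maxq)
{
	return rq > (maxq * 2 / 3) && !rs;
}

int main(void)
{
	int opt_queue = 1, mining_threads = 8;   /* example values only */
	int maxq = opt_queue + mining_threads;   /* 9, as in queue_request() */

	/* 7 requests queued, nothing staged: 7 > 6, so flag as lagging. */
	printf("%d\n", request_is_lagging(7, 0, maxq));
	/* 4 requests queued, nothing staged: 4 <= 6, not lagging yet. */
	printf("%d\n", request_is_lagging(4, 0, maxq));
	/* Plenty queued but work already staged: not lagging. */
	printf("%d\n", request_is_lagging(8, 3, maxq));
	return 0;
}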