Mirror of https://github.com/GOSTSec/sgminer, synced 2025-09-03 01:32:33 +00:00

Use a queueing bool set under control_lock to prevent multiple calls to queue_request racing.

parent 63dd598e2a
commit 42ea29ca4e

cgminer.c: 60 changed lines (44 insertions, 16 deletions)

--- a/cgminer.c
+++ b/cgminer.c
@@ -2428,7 +2428,7 @@ static void subtract_queued(int work_units)
 	mutex_unlock(&qd_lock);
 }
 
-static int discard_stale(void)
+static void discard_stale(void)
 {
 	struct work *work, *tmp;
 	int stale = 0, nonclone = 0;
@@ -2451,21 +2451,18 @@ static int discard_stale(void)
 
 	/* Dec queued outside the loop to not have recursive locks */
 	subtract_queued(nonclone);
-
-	return stale;
 }
 
 static bool queue_request(struct thr_info *thr, bool needed);
 
 static void restart_threads(void)
 {
-	int i, stale;
+	int i;
 
 	/* Discard staged work that is now stale */
-	stale = discard_stale();
+	discard_stale();
 
-	for (i = 0; i < stale; i++)
-		queue_request(NULL, true);
+	queue_request(NULL, true);
 
 	for (i = 0; i < mining_threads; i++)
 		work_restart[i].restart = 1;
@@ -3527,12 +3524,41 @@ static void pool_resus(struct pool *pool)
 
 static time_t requested_tv_sec;
 
+static bool control_tset(bool *var)
+{
+	bool ret;
+
+	mutex_lock(&control_lock);
+	ret = *var;
+	*var = true;
+	mutex_unlock(&control_lock);
+
+	return ret;
+}
+
+static void control_tclear(bool *var)
+{
+	mutex_lock(&control_lock);
+	*var = false;
+	mutex_unlock(&control_lock);
+}
+
+static bool queueing;
+
 static bool queue_request(struct thr_info *thr, bool needed)
 {
-	int toq, rq = requests_queued(), rs = requests_staged();
 	struct workio_cmd *wc;
 	struct timeval now;
 	time_t scan_post;
+	int toq, rq, rs;
+	bool ret = true;
+
+	/* Prevent multiple requests being executed at once */
+	if (control_tset(&queueing))
+		return ret;
+
+	rq = requests_queued();
+	rs = requests_staged();
 
 	/* Grab more work every 2/3 of the scan time to avoid all work expiring
 	 * at the same time */
@@ -3547,7 +3573,7 @@ static bool queue_request(struct thr_info *thr, bool needed)
 	if ((rq >= mining_threads || rs >= mining_threads) &&
 	    rq > staged_extras + opt_queue &&
 	    now.tv_sec - requested_tv_sec < scan_post)
-		return true;
+		goto out;
 
 	requested_tv_sec = now.tv_sec;
 
@@ -3563,14 +3589,12 @@ static bool queue_request(struct thr_info *thr, bool needed)
 		wc = calloc(1, sizeof(*wc));
 		if (unlikely(!wc)) {
 			applog(LOG_ERR, "Failed to calloc wc in queue_request");
-			return false;
+			ret = false;
+			break;
 		}
 
 		wc->cmd = WC_GET_WORK;
-		if (thr)
-			wc->thr = thr;
-		else
-			wc->thr = NULL;
+		wc->thr = thr;
 
 		/* If we're queueing work faster than we can stage it, consider the
 		 * system lagging and allow work to be gathered from another pool if
@@ -3584,12 +3608,16 @@ static bool queue_request(struct thr_info *thr, bool needed)
 		if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
 			applog(LOG_ERR, "Failed to tq_push in queue_request");
 			workio_cmd_free(wc);
-			return false;
+			ret = false;
+			break;
 		}
 
 	} while (--toq > 0);
 
-	return true;
+out:
+	control_tclear(&queueing);
+
+	return ret;
 }
 
 static struct work *hash_pop(const struct timespec *abstime)
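
For context on the pattern: control_tset() atomically reads and sets the queueing flag while holding control_lock, so only the first concurrent caller of queue_request() sees false and proceeds; later callers return at once, and every exit path funnels through out:, where control_tclear() releases the flag. Below is a minimal, self-contained sketch of that test-and-set guard, assuming plain POSIX threads; guard_lock, fill_queue(), worker() and the main() driver are hypothetical names used only for illustration, while control_tset()/control_tclear() mirror the helpers added in this commit (the real code uses cgminer's control_lock and its mutex_lock()/mutex_unlock() wrappers).

/*
 * Minimal sketch of the test-and-set guard, assuming POSIX threads.
 * Hypothetical names: guard_lock, fill_queue(), worker(), main().
 * control_tset()/control_tclear() mirror the helpers added above,
 * with pthread_mutex_* standing in for cgminer's mutex wrappers.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t guard_lock = PTHREAD_MUTEX_INITIALIZER;
static bool queueing;

/* Atomically read the flag and set it; returns the previous value. */
static bool control_tset(bool *var)
{
	bool ret;

	pthread_mutex_lock(&guard_lock);
	ret = *var;
	*var = true;
	pthread_mutex_unlock(&guard_lock);

	return ret;
}

/* Clear the flag so the next caller may enter the guarded section. */
static void control_tclear(bool *var)
{
	pthread_mutex_lock(&guard_lock);
	*var = false;
	pthread_mutex_unlock(&guard_lock);
}

/* Stand-in for queue_request(): only one thread at a time runs the body;
 * the others notice the flag is already set and return immediately. */
static void fill_queue(int id)
{
	if (control_tset(&queueing)) {
		printf("thread %d: queueing already in progress, skipping\n", id);
		return;
	}

	printf("thread %d: queueing work\n", id);
	usleep(100000);	/* pretend to build and push work items */

	control_tclear(&queueing);
}

static void *worker(void *arg)
{
	fill_queue((int)(long)arg);
	return NULL;
}

int main(void)
{
	pthread_t threads[4];
	long i;

	for (i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, worker, (void *)i);
	for (i = 0; i < 4; i++)
		pthread_join(threads[i], NULL);

	return 0;
}

Compile with cc -pthread; typically only one of the four threads reports that it is queueing work while the rest skip. Because latecomers skip instead of blocking, restart_threads() can now issue a single queue_request(NULL, true) rather than one call per discarded stale work item, and the failure paths in the do/while loop set ret and break, so control_tclear() still runs and the flag is never left stuck.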