mirror of https://github.com/GOSTSec/sgminer (synced 2025-01-30 16:34:23 +00:00)
mutex all access to mining_thr
parent 212f32e5e0
commit 61ec1fe91f
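The commit serializes every read of the shared mining_thr array behind a single new mutex, mining_thr_lock: the lock is defined in cgminer.c, exported through miner.h, initialized in main(), and then taken around each mining_thr[] dereference in api.c, cgminer.c, and the GPU management code. Below is a minimal standalone sketch of that pattern using plain pthread calls rather than cgminer's mutex_init()/mutex_lock() wrappers; the struct layout and the get_thread() helper name are illustrative, not taken from the source:

#include <pthread.h>

struct thr_info {
    int id;
    /* ... per-thread state ... */
};

/* Shared array of per-thread state, read from many threads. */
static struct thr_info **mining_thr;

/* One mutex guards every dereference of the mining_thr array. */
static pthread_mutex_t mining_thr_lock = PTHREAD_MUTEX_INITIALIZER;

/* Copy the entry out under the lock, then use it after unlocking,
 * keeping the critical section to a single assignment. */
static struct thr_info *get_thread(int thr_id)
{
    struct thr_info *thr;

    pthread_mutex_lock(&mining_thr_lock);
    thr = mining_thr[thr_id];
    pthread_mutex_unlock(&mining_thr_lock);

    return thr;
}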
api.c (4 changes)
@@ -1800,7 +1800,9 @@ static void pgaenable(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char
 #endif
 
     for (i = 0; i < mining_threads; i++) {
+        mutex_lock(&mining_thr_lock);
         thr = mining_thr[i];
+        mutex_unlock(&mining_thr_lock);
         pga = thr->cgpu->cgminer_id;
         if (pga == dev) {
             cgpu->deven = DEV_ENABLED;
@@ -2105,7 +2107,9 @@ static void gpuenable(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char
     }
 
     for (i = 0; i < gpu_threads; i++) {
+        mutex_lock(&mining_thr_lock);
         thr = mining_thr[i];
+        mutex_unlock(&mining_thr_lock);
         gpu = thr->cgpu->device_id;
         if (gpu == id) {
             if (thr->cgpu->status != LIFE_WELL) {
cgminer.c (66 changes)
@@ -172,6 +172,7 @@ static pthread_rwlock_t blk_lock;
 static pthread_mutex_t sshare_lock;
 
 pthread_rwlock_t netacc_lock;
+pthread_mutex_t mining_thr_lock;
 
 static pthread_mutex_t lp_lock;
 static pthread_cond_t lp_cond;
@@ -378,7 +379,9 @@ static void sharelog(const char*disposition, const struct work*work)
         return;
 
     thr_id = work->thr_id;
+    mutex_lock(&mining_thr_lock);
     cgpu = mining_thr[thr_id]->cgpu;
+    mutex_unlock(&mining_thr_lock);
     pool = work->pool;
     t = (unsigned long int)(work->tv_work_found.tv_sec);
     target = bin2hex(work->target, sizeof(work->target));
@@ -1723,7 +1726,13 @@ out:
 
 int dev_from_id(int thr_id)
 {
-    return mining_thr[thr_id]->cgpu->device_id;
+    struct cgpu_info *cgpu;
+
+    mutex_lock(&mining_thr_lock);
+    cgpu = mining_thr[thr_id]->cgpu;
+    mutex_unlock(&mining_thr_lock);
+
+    return cgpu->device_id;
 }
 
 /* Make the change in the recent value adjust dynamically when the difference
@@ -1895,9 +1904,13 @@ static void get_statline(char *buf, struct cgpu_info *cgpu)
 
 static void text_print_status(int thr_id)
 {
-    struct cgpu_info *cgpu = mining_thr[thr_id]->cgpu;
+    struct cgpu_info *cgpu;
     char logline[256];
 
+    mutex_lock(&mining_thr_lock);
+    cgpu = mining_thr[thr_id]->cgpu;
+    mutex_unlock(&mining_thr_lock);
+
     if (cgpu) {
         get_statline(logline, cgpu);
         printf("%s\n", logline);
@@ -1961,11 +1974,15 @@ static int dev_width;
 static void curses_print_devstatus(int thr_id)
 {
     static int awidth = 1, rwidth = 1, hwwidth = 1, uwidth = 1;
-    struct cgpu_info *cgpu = mining_thr[thr_id]->cgpu;
+    struct cgpu_info *cgpu;
     char logline[256];
     char displayed_hashes[16], displayed_rolling[16];
     uint64_t dh64, dr64;
 
+    mutex_lock(&mining_thr_lock);
+    cgpu = mining_thr[thr_id]->cgpu;
+    mutex_unlock(&mining_thr_lock);
+
     if (devcursor + cgpu->cgminer_id > LINES - 2 || opt_compact)
         return;
 
@@ -2213,7 +2230,11 @@ share_result(json_t *val, json_t *res, json_t *err, const struct work *work,
          char *hashshow, bool resubmit, char *worktime)
 {
     struct pool *pool = work->pool;
-    struct cgpu_info *cgpu = mining_thr[work->thr_id]->cgpu;
+    struct cgpu_info *cgpu;
 
+    mutex_lock(&mining_thr_lock);
+    cgpu = mining_thr[work->thr_id]->cgpu;
+    mutex_unlock(&mining_thr_lock);
+
     if (json_is_true(res) || (work->gbt && json_is_null(res))) {
         mutex_lock(&stats_lock);
@@ -2363,13 +2384,17 @@ static bool submit_upstream_work(struct work *work, CURL *curl, bool resubmit)
     char *s;
     bool rc = false;
     int thr_id = work->thr_id;
-    struct cgpu_info *cgpu = mining_thr[thr_id]->cgpu;
+    struct cgpu_info *cgpu;
     struct pool *pool = work->pool;
     int rolltime;
     struct timeval tv_submit, tv_submit_reply;
     char hashshow[64 + 4] = "";
     char worktime[200] = "";
 
+    mutex_lock(&mining_thr_lock);
+    cgpu = mining_thr[thr_id]->cgpu;
+    mutex_unlock(&mining_thr_lock);
+
 #ifdef __BIG_ENDIAN__
     int swapcounter = 0;
     for (swapcounter = 0; swapcounter < 32; swapcounter++)
@@ -2756,7 +2781,9 @@ static void __kill_work(void)
     applog(LOG_DEBUG, "Stopping mining threads");
     /* Stop the mining threads*/
     for (i = 0; i < mining_threads; i++) {
+        mutex_lock(&mining_thr_lock);
         thr = mining_thr[i];
+        mutex_unlock(&mining_thr_lock);
         thr_info_freeze(thr);
         thr->pause = true;
     }
@@ -2766,7 +2793,9 @@ static void __kill_work(void)
     applog(LOG_DEBUG, "Killing off mining threads");
     /* Kill the mining threads*/
     for (i = 0; i < mining_threads; i++) {
+        mutex_lock(&mining_thr_lock);
         thr = mining_thr[i];
+        mutex_unlock(&mining_thr_lock);
         thr_info_cancel(thr);
     }
 
@@ -3369,8 +3398,10 @@ static void restart_threads(void)
     /* Discard staged work that is now stale */
     discard_stale();
 
+    mutex_lock(&mining_thr_lock);
     for (i = 0; i < mining_threads; i++)
         mining_thr[i]->work_restart = true;
+    mutex_unlock(&mining_thr_lock);
 
     mutex_lock(&restart_lock);
     pthread_cond_broadcast(&restart_cond);
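Where an entire loop only touches mining_thr[], as in restart_threads() above (and the watchdog pause loop later in the diff), the commit hoists the lock outside the loop rather than re-taking it per element. A sketch of that shape, again with plain pthreads and illustrative types (the restart_all_threads() name and struct layout are stand-ins):

#include <pthread.h>
#include <stdbool.h>

struct thr_info {
    bool work_restart;
    /* ... */
};

static struct thr_info **mining_thr;
static int mining_threads;
static pthread_mutex_t mining_thr_lock = PTHREAD_MUTEX_INITIALIZER;

/* One lock/unlock pair around the whole sweep instead of one per entry. */
static void restart_all_threads(void)
{
    int i;

    pthread_mutex_lock(&mining_thr_lock);
    for (i = 0; i < mining_threads; i++)
        mining_thr[i]->work_restart = true;
    pthread_mutex_unlock(&mining_thr_lock);
}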
@@ -4406,19 +4437,22 @@ static void hashmeter(int thr_id, struct timeval *diff,
     bool showlog = false;
     char displayed_hashes[16], displayed_rolling[16];
     uint64_t dh64, dr64;
+    struct thr_info *thr;
 
     local_mhashes = (double)hashes_done / 1000000.0;
     /* Update the last time this thread reported in */
     if (thr_id >= 0) {
-        gettimeofday(&(mining_thr[thr_id]->last), NULL);
-        mining_thr[thr_id]->cgpu->device_last_well = time(NULL);
+        mutex_lock(&mining_thr_lock);
+        thr = mining_thr[thr_id];
+        mutex_unlock(&mining_thr_lock);
+        gettimeofday(&(thr->last), NULL);
+        thr->cgpu->device_last_well = time(NULL);
     }
 
     secs = (double)diff->tv_sec + ((double)diff->tv_usec / 1000000.0);
 
     /* So we can call hashmeter from a non worker thread */
     if (thr_id >= 0) {
-        struct thr_info *thr = mining_thr[thr_id];
         struct cgpu_info *cgpu = thr->cgpu;
         double thread_rolling = 0.0;
         int i;
@@ -5857,12 +5891,10 @@ static void *watchdog_thread(void __maybe_unused *userdata)
             applog(LOG_WARNING, "Will restart execution as scheduled at %02d:%02d",
                    schedstart.tm.tm_hour, schedstart.tm.tm_min);
             sched_paused = true;
-            for (i = 0; i < mining_threads; i++) {
-                struct thr_info *thr;
-                thr = mining_thr[i];
-
-                thr->pause = true;
-            }
+            mutex_lock(&mining_thr_lock);
+            for (i = 0; i < mining_threads; i++)
+                mining_thr[i]->pause = true;
+            mutex_unlock(&mining_thr_lock);
         } else if (sched_paused && should_run()) {
             applog(LOG_WARNING, "Restarting execution as per start time %02d:%02d scheduled",
                    schedstart.tm.tm_hour, schedstart.tm.tm_min);
@@ -5873,7 +5905,10 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 
         for (i = 0; i < mining_threads; i++) {
             struct thr_info *thr;
+
+            mutex_lock(&mining_thr_lock);
             thr = mining_thr[i];
+            mutex_unlock(&mining_thr_lock);
 
             /* Don't touch disabled devices */
             if (thr->cgpu->deven == DEV_DISABLED)
@@ -6426,6 +6461,7 @@ int main(int argc, char *argv[])
     mutex_init(&sshare_lock);
     rwlock_init(&blk_lock);
     rwlock_init(&netacc_lock);
+    mutex_init(&mining_thr_lock);
 
     mutex_init(&lp_lock);
     if (unlikely(pthread_cond_init(&lp_cond, NULL)))
@@ -6818,7 +6854,9 @@ begin_bench:
         cgpu->status = LIFE_INIT;
 
         for (j = 0; j < cgpu->threads; ++j, ++k) {
+            mutex_lock(&mining_thr_lock);
             thr = mining_thr[k];
+            mutex_unlock(&mining_thr_lock);
             thr->id = k;
             thr->cgpu = cgpu;
             thr->device_thread = j;
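(The hunks below belong to a fourth changed file whose header was lost when the page was mirrored; pause_dynamic_threads(), the manage_gpu() retry loops, and the select_cgpu: reinit path are defined in driver-opencl.c, so these changes most likely come from that file.)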
@@ -616,7 +616,11 @@ void pause_dynamic_threads(int gpu)
     int i;
 
     for (i = 1; i < cgpu->threads; i++) {
-        struct thr_info *thr = mining_thr[i];
+        struct thr_info *thr;
+
+        mutex_lock(&mining_thr_lock);
+        thr = mining_thr[i];
+        mutex_unlock(&mining_thr_lock);
 
         if (!thr->pause && cgpu->dynamic) {
             applog(LOG_WARNING, "Disabling extra threads due to dynamic mode.");
@@ -705,7 +709,9 @@ retry:
     else
         wlog("%d\n", gpus[gpu].intensity);
     for (i = 0; i < mining_threads; i++) {
+        mutex_lock(&mining_thr_lock);
         thr = mining_thr[i];
+        mutex_unlock(&mining_thr_lock);
         if (thr->cgpu != cgpu)
             continue;
         get_datestamp(checkin, &thr->last);
@@ -760,7 +766,9 @@ retry:
         }
         gpus[selected].deven = DEV_ENABLED;
         for (i = 0; i < mining_threads; ++i) {
+            mutex_lock(&mining_thr_lock);
             thr = mining_thr[i];
+            mutex_unlock(&mining_thr_lock);
             cgpu = thr->cgpu;
             if (cgpu->drv->drv != DRIVER_OPENCL)
                 continue;
@@ -1147,14 +1155,18 @@ select_cgpu:
     gpu = cgpu->device_id;
 
     for (thr_id = 0; thr_id < mining_threads; ++thr_id) {
+        mutex_lock(&mining_thr_lock);
         thr = mining_thr[thr_id];
+        mutex_unlock(&mining_thr_lock);
         cgpu = thr->cgpu;
         if (cgpu->drv->drv != DRIVER_OPENCL)
             continue;
         if (dev_from_id(thr_id) != gpu)
             continue;
 
+        mutex_lock(&mining_thr_lock);
         thr = mining_thr[thr_id];
+        mutex_unlock(&mining_thr_lock);
         if (!thr) {
             applog(LOG_WARNING, "No reference to thread %d exists", thr_id);
             continue;
@@ -1172,7 +1184,9 @@ select_cgpu:
     for (thr_id = 0; thr_id < mining_threads; ++thr_id) {
         int virtual_gpu;
 
+        mutex_lock(&mining_thr_lock);
         thr = mining_thr[thr_id];
+        mutex_unlock(&mining_thr_lock);
         cgpu = thr->cgpu;
         if (cgpu->drv->drv != DRIVER_OPENCL)
             continue;
@@ -1209,7 +1223,9 @@ select_cgpu:
     get_datestamp(cgpu->init, &now);
 
     for (thr_id = 0; thr_id < mining_threads; ++thr_id) {
+        mutex_lock(&mining_thr_lock);
         thr = mining_thr[thr_id];
+        mutex_unlock(&mining_thr_lock);
         cgpu = thr->cgpu;
         if (cgpu->drv->drv != DRIVER_OPENCL)
             continue;
miner.h (1 change)
@@ -737,6 +737,7 @@ extern pthread_mutex_t cgusb_lock;
 extern pthread_mutex_t hash_lock;
 extern pthread_mutex_t console_lock;
 extern pthread_mutex_t ch_lock;
+extern pthread_mutex_t mining_thr_lock;
 
 extern pthread_mutex_t restart_lock;
 extern pthread_cond_t restart_cond;