Browse Source

Since the mining_threads count can change at runtime, we need to lock mining_thr_lock more carefully around each traversal, and the get_thread() helper (which took the lock per lookup) is no longer useful.

djm34
Jan Berdajs 11 years ago
parent
commit
9fdd23e6ff
  1. 4
      api.c
  2. 32
      driver-opencl.c
  3. 1
      miner.h
  4. 53
      sgminer.c

4
api.c

@@ -1972,8 +1972,9 @@ static void gpuenable(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char
return;
}
rd_lock(&mining_thr_lock);
for (i = 0; i < mining_threads; i++) {
thr = get_thread(i);
thr = mining_thr[i];
gpu = thr->cgpu->device_id;
if (gpu == id) {
if (thr->cgpu->status != LIFE_WELL) {
@@ -1985,6 +1986,7 @@ static void gpuenable(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char
cgsem_post(&thr->sem);
}
}
rd_unlock(&mining_thr_lock);
message(io_data, MSG_GPUREN, id, NULL, isjson);
}

32
driver-opencl.c

@@ -679,10 +679,11 @@ void pause_dynamic_threads(int gpu)
struct cgpu_info *cgpu = &gpus[gpu];
int i;
rd_lock(&mining_thr_lock);
for (i = 1; i < cgpu->threads; i++) {
struct thr_info *thr;
thr = get_thread(i);
thr = mining_thr[i];
if (!thr->pause && cgpu->dynamic) {
applog(LOG_WARNING, "Disabling extra threads due to dynamic mode.");
applog(LOG_WARNING, "Tune dynamic intensity with --gpu-dyninterval");
@@ -692,6 +693,7 @@ void pause_dynamic_threads(int gpu)
if (!cgpu->dynamic && cgpu->deven != DEV_DISABLED)
cgsem_post(&thr->sem);
}
rd_unlock(&mining_thr_lock);
}
#if defined(HAVE_CURSES)
@@ -764,8 +766,10 @@ retry: // TODO: refactor
}
#endif
wlog("Last initialised: %s\n", cgpu->init);
rd_lock(&mining_thr_lock);
for (i = 0; i < mining_threads; i++) {
thr = get_thread(i);
thr = mining_thr[i];
if (thr->cgpu != cgpu)
continue;
get_datestamp(checkin, sizeof(checkin), &thr->last);
@@ -793,6 +797,8 @@ retry: // TODO: refactor
wlog(" paused");
wlog("\n");
}
rd_unlock(&mining_thr_lock);
wlog("\n");
}
@@ -821,8 +827,9 @@ retry: // TODO: refactor
goto retry;
}
gpus[selected].deven = DEV_ENABLED;
rd_lock(&mining_thr_lock);
for (i = 0; i < mining_threads; ++i) {
thr = get_thread(i);
thr = mining_thr[i];
cgpu = thr->cgpu;
if (cgpu->drv->drv_id != DRIVER_opencl)
continue;
@@ -836,6 +843,7 @@ retry: // TODO: refactor
cgsem_post(&thr->sem);
}
rd_unlock(&mining_thr_lock);
goto retry;
} else if (!strncasecmp(&input, "d", 1)) {
if (selected)
@@ -1036,20 +1044,15 @@ select_cgpu:
gpu = cgpu->device_id;
rd_lock(&mining_thr_lock);
for (thr_id = 0; thr_id < mining_threads; ++thr_id) {
thr = get_thread(thr_id);
thr = mining_thr[thr_id];
cgpu = thr->cgpu;
if (cgpu->drv->drv_id != DRIVER_opencl)
continue;
if (dev_from_id(thr_id) != gpu)
continue;
thr = get_thread(thr_id);
if (!thr) {
applog(LOG_WARNING, "No reference to thread %d exists", thr_id);
continue;
}
thr->rolling = thr->cgpu->rolling = 0;
/* Reports the last time we tried to revive a sick GPU */
cgtime(&thr->sick);
@@ -1060,11 +1063,13 @@ select_cgpu:
} else
applog(LOG_WARNING, "Thread %d no longer exists", thr_id);
}
rd_unlock(&mining_thr_lock);
rd_lock(&mining_thr_lock);
for (thr_id = 0; thr_id < mining_threads; ++thr_id) {
int virtual_gpu;
thr = get_thread(thr_id);
thr = mining_thr[thr_id];
cgpu = thr->cgpu;
if (cgpu->drv->drv_id != DRIVER_opencl)
continue;
@@ -1096,12 +1101,14 @@ select_cgpu:
}
applog(LOG_WARNING, "Thread %d restarted", thr_id);
}
rd_unlock(&mining_thr_lock);
cgtime(&now);
get_datestamp(cgpu->init, sizeof(cgpu->init), &now);
rd_lock(&mining_thr_lock);
for (thr_id = 0; thr_id < mining_threads; ++thr_id) {
thr = get_thread(thr_id);
thr = mining_thr[thr_id];
cgpu = thr->cgpu;
if (cgpu->drv->drv_id != DRIVER_opencl)
continue;
@@ -1110,6 +1117,7 @@ select_cgpu:
cgsem_post(&thr->sem);
}
rd_unlock(&mining_thr_lock);
goto select_cgpu;
out:

1
miner.h

@@ -1396,7 +1396,6 @@ extern void clean_work(struct work *work);
extern void free_work(struct work *work);
extern struct work *copy_work_noffset(struct work *base_work, int noffset);
#define copy_work(work_in) copy_work_noffset(work_in, 0)
extern struct thr_info *get_thread(int thr_id);
extern struct cgpu_info *get_devices(int id);
enum api_data_type {

53
sgminer.c

@@ -403,27 +403,15 @@ static void applog_and_exit(const char *fmt, ...)
static pthread_mutex_t sharelog_lock;
static FILE *sharelog_file = NULL;
static struct thr_info *__get_thread(int thr_id)
{
return mining_thr[thr_id];
}
struct thr_info *get_thread(int thr_id)
static struct cgpu_info *get_thr_cgpu(int thr_id)
{
struct thr_info *thr;
rd_lock(&mining_thr_lock);
thr = __get_thread(thr_id);
if (thr_id < mining_threads)
thr = mining_thr[thr_id];
rd_unlock(&mining_thr_lock);
return thr;
}
static struct cgpu_info *get_thr_cgpu(int thr_id)
{
struct thr_info *thr = get_thread(thr_id);
return thr->cgpu;
return thr ? thr->cgpu : NULL;
}
struct cgpu_info *get_devices(int id)
@@ -3342,10 +3330,11 @@ static void kill_mining(void)
forcelog(LOG_DEBUG, "Killing off mining threads");
/* Kill the mining threads*/
rd_lock(&mining_thr_lock);
for (i = 0; i < mining_threads; i++) {
pthread_t *pth = NULL;
thr = get_thread(i);
thr = mining_thr[i];
if (thr && PTH(thr) != 0L)
pth = &thr->pth;
thr_info_cancel(thr);
@@ -3357,6 +3346,7 @@ static void kill_mining(void)
pthread_join(*pth, NULL);
#endif
}
rd_unlock(&mining_thr_lock);
}
static void __kill_work(void)
@@ -3380,10 +3370,11 @@ static void __kill_work(void)
kill_timeout(thr);
forcelog(LOG_DEBUG, "Shutting down mining threads");
rd_lock(&mining_thr_lock);
for (i = 0; i < mining_threads; i++) {
struct cgpu_info *cgpu;
thr = get_thread(i);
thr = mining_thr[i];
if (!thr)
continue;
cgpu = thr->cgpu;
@@ -3392,6 +3383,7 @@ static void __kill_work(void)
cgpu->shutdown = true;
}
rd_unlock(&mining_thr_lock);
sleep(1);
@@ -4049,10 +4041,7 @@ static void *restart_thread(void __maybe_unused *arg)
discard_stale();
rd_lock(&mining_thr_lock);
mt = mining_threads;
rd_unlock(&mining_thr_lock);
for (i = 0; i < mt; i++) {
for (i = 0; i < mining_threads; i++) {
cgpu = mining_thr[i]->cgpu;
if (unlikely(!cgpu))
continue;
@@ -4061,6 +4050,7 @@ static void *restart_thread(void __maybe_unused *arg)
mining_thr[i]->work_restart = true;
cgpu->drv->flush_work(cgpu);
}
rd_unlock(&mining_thr_lock);
mutex_lock(&restart_lock);
pthread_cond_broadcast(&restart_cond);
@@ -5179,20 +5169,22 @@ static void hashmeter(int thr_id, struct timeval *diff,
bool showlog = false;
char displayed_hashes[16], displayed_rolling[16];
uint64_t dh64, dr64;
struct thr_info *thr;
struct thr_info *thr = NULL;
local_mhashes = (double)hashes_done / 1000000.0;
/* Update the last time this thread reported in */
if (thr_id >= 0) {
thr = get_thread(thr_id);
rd_lock(&mining_thr_lock);
if (thr_id >= 0 && thr_id < mining_threads) {
thr = mining_thr[thr_id];
cgtime(&thr->last);
thr->cgpu->device_last_well = time(NULL);
}
rd_unlock(&mining_thr_lock);
secs = (double)diff->tv_sec + ((double)diff->tv_usec / 1000000.0);
/* So we can call hashmeter from a non worker thread */
if (thr_id >= 0) {
if (thr) {
struct cgpu_info *cgpu = thr->cgpu;
double thread_rolling = 0.0;
int i;
@@ -7087,7 +7079,7 @@ static void *watchdog_thread(void __maybe_unused *userdata)
for (i = 0; i < mining_threads; i++) {
struct thr_info *thr;
thr = get_thread(i);
thr = mining_thr[i];
/* Don't touch disabled devices */
if (thr->cgpu->deven == DEV_DISABLED)
@@ -7733,15 +7725,20 @@ static void restart_mining_threads(unsigned int new_n_threads)
// Stop and free threads
if (mining_thr) {
rd_lock(&mining_thr_lock);
for (i = 0; i < mining_threads; i++) {
mining_thr[i]->cgpu->shutdown = true;
}
rd_unlock(&mining_thr_lock);
// kill_mining will rd lock mining_thr_lock
kill_mining();
rd_lock(&mining_thr_lock);
for (i = 0; i < mining_threads; i++) {
thr = mining_thr[i];
thr->cgpu->drv->thread_shutdown(thr);
thr->cgpu->shutdown = false;
}
rd_unlock(&mining_thr_lock);
}
wr_lock(&mining_thr_lock);
@@ -7779,7 +7776,7 @@ static void restart_mining_threads(unsigned int new_n_threads)
cgpu->status = LIFE_INIT;
for (j = 0; j < cgpu->threads; ++j, ++k) {
thr = get_thread(k);
thr = mining_thr[k];
thr->id = k;
thr->cgpu = cgpu;
thr->device_thread = j;

Loading…
Cancel
Save