since the mining_threads count can change at runtime, we need to lock more carefully, and the get_thread() function is no longer helpful

djm34
Jan Berdajs authored 10 years ago
commit 9fdd23e6ff
Changed files (lines changed):
  1. api.c (4)
  2. driver-opencl.c (32)
  3. miner.h (1)
  4. sgminer.c (53)
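
Every hunk below applies the same pattern: instead of fetching one thread at a time through get_thread(), which took and released mining_thr_lock for each lookup, callers now hold the read lock across the whole loop, so neither mining_threads nor the mining_thr array can change mid-iteration. A minimal sketch of the new discipline (the struct, the stub globals and the loop body are simplified stand-ins; rd_lock/rd_unlock are assumed to wrap a pthread read-write lock as in the sgminer source):

    #include <pthread.h>
    #include <stdio.h>

    struct thr_info { int id; };            /* stand-in for sgminer's thr_info */

    static struct thr_info **mining_thr;    /* reallocated when the thread count changes */
    static int mining_threads;
    static pthread_rwlock_t mining_thr_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* assumed equivalents of sgminer's rd_lock()/rd_unlock() wrappers */
    #define rd_lock(l)   pthread_rwlock_rdlock(l)
    #define rd_unlock(l) pthread_rwlock_unlock(l)

    static void visit_threads(void)
    {
        int i;

        /* Hold the read lock for the entire walk: mining_threads and the
         * mining_thr array stay consistent until rd_unlock(). The removed
         * get_thread(i) helper locked and unlocked per lookup, so the
         * thread count could change between two iterations of the loop. */
        rd_lock(&mining_thr_lock);
        for (i = 0; i < mining_threads; i++) {
            struct thr_info *thr = mining_thr[i];
            printf("thread %d\n", thr->id);
        }
        rd_unlock(&mining_thr_lock);
    }

Writers, such as restart_mining_threads() when it resizes the thread list, take wr_lock() on the same lock, which is what makes a read lock held across the loop sufficient for every reader touched here.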

api.c (4 lines changed)

@@ -1972,8 +1972,9 @@ static void gpuenable(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char
         return;
     }
+    rd_lock(&mining_thr_lock);
     for (i = 0; i < mining_threads; i++) {
-        thr = get_thread(i);
+        thr = mining_thr[i];
         gpu = thr->cgpu->device_id;
         if (gpu == id) {
             if (thr->cgpu->status != LIFE_WELL) {
@@ -1985,6 +1986,7 @@ static void gpuenable(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char
             cgsem_post(&thr->sem);
         }
     }
+    rd_unlock(&mining_thr_lock);
     message(io_data, MSG_GPUREN, id, NULL, isjson);
 }

driver-opencl.c (32 lines changed)

@@ -679,10 +679,11 @@ void pause_dynamic_threads(int gpu)
     struct cgpu_info *cgpu = &gpus[gpu];
     int i;
 
+    rd_lock(&mining_thr_lock);
     for (i = 1; i < cgpu->threads; i++) {
         struct thr_info *thr;
-        thr = get_thread(i);
+        thr = mining_thr[i];
         if (!thr->pause && cgpu->dynamic) {
             applog(LOG_WARNING, "Disabling extra threads due to dynamic mode.");
             applog(LOG_WARNING, "Tune dynamic intensity with --gpu-dyninterval");
@@ -692,6 +693,7 @@ void pause_dynamic_threads(int gpu)
         if (!cgpu->dynamic && cgpu->deven != DEV_DISABLED)
             cgsem_post(&thr->sem);
     }
+    rd_unlock(&mining_thr_lock);
 }
 
 #if defined(HAVE_CURSES)
@@ -764,8 +766,10 @@ retry: // TODO: refactor
         }
 #endif
         wlog("Last initialised: %s\n", cgpu->init);
+
+        rd_lock(&mining_thr_lock);
         for (i = 0; i < mining_threads; i++) {
-            thr = get_thread(i);
+            thr = mining_thr[i];
             if (thr->cgpu != cgpu)
                 continue;
             get_datestamp(checkin, sizeof(checkin), &thr->last);
@@ -793,6 +797,8 @@ retry: // TODO: refactor
                 wlog(" paused");
             wlog("\n");
         }
+        rd_unlock(&mining_thr_lock);
+
         wlog("\n");
     }
@@ -821,8 +827,9 @@ retry: // TODO: refactor
             goto retry;
         }
         gpus[selected].deven = DEV_ENABLED;
+        rd_lock(&mining_thr_lock);
         for (i = 0; i < mining_threads; ++i) {
-            thr = get_thread(i);
+            thr = mining_thr[i];
             cgpu = thr->cgpu;
             if (cgpu->drv->drv_id != DRIVER_opencl)
                 continue;
@@ -836,6 +843,7 @@ retry: // TODO: refactor
                 cgsem_post(&thr->sem);
         }
+        rd_unlock(&mining_thr_lock);
         goto retry;
     } else if (!strncasecmp(&input, "d", 1)) {
         if (selected)
@@ -1036,20 +1044,15 @@ select_cgpu:
     gpu = cgpu->device_id;
 
+    rd_lock(&mining_thr_lock);
     for (thr_id = 0; thr_id < mining_threads; ++thr_id) {
-        thr = get_thread(thr_id);
+        thr = mining_thr[thr_id];
         cgpu = thr->cgpu;
         if (cgpu->drv->drv_id != DRIVER_opencl)
             continue;
         if (dev_from_id(thr_id) != gpu)
             continue;
-        thr = get_thread(thr_id);
-        if (!thr) {
-            applog(LOG_WARNING, "No reference to thread %d exists", thr_id);
-            continue;
-        }
         thr->rolling = thr->cgpu->rolling = 0;
         /* Reports the last time we tried to revive a sick GPU */
         cgtime(&thr->sick);
@@ -1060,11 +1063,13 @@ select_cgpu:
         } else
             applog(LOG_WARNING, "Thread %d no longer exists", thr_id);
     }
+    rd_unlock(&mining_thr_lock);
 
+    rd_lock(&mining_thr_lock);
     for (thr_id = 0; thr_id < mining_threads; ++thr_id) {
         int virtual_gpu;
 
-        thr = get_thread(thr_id);
+        thr = mining_thr[thr_id];
         cgpu = thr->cgpu;
         if (cgpu->drv->drv_id != DRIVER_opencl)
             continue;
@@ -1096,12 +1101,14 @@ select_cgpu:
         }
         applog(LOG_WARNING, "Thread %d restarted", thr_id);
     }
+    rd_unlock(&mining_thr_lock);
 
     cgtime(&now);
     get_datestamp(cgpu->init, sizeof(cgpu->init), &now);
 
+    rd_lock(&mining_thr_lock);
     for (thr_id = 0; thr_id < mining_threads; ++thr_id) {
-        thr = get_thread(thr_id);
+        thr = mining_thr[thr_id];
         cgpu = thr->cgpu;
         if (cgpu->drv->drv_id != DRIVER_opencl)
             continue;
@@ -1110,6 +1117,7 @@ select_cgpu:
             cgsem_post(&thr->sem);
     }
+    rd_unlock(&mining_thr_lock);
     goto select_cgpu;
 out:

miner.h (1 line changed)

@@ -1396,7 +1396,6 @@ extern void clean_work(struct work *work);
 extern void free_work(struct work *work);
 extern struct work *copy_work_noffset(struct work *base_work, int noffset);
 #define copy_work(work_in) copy_work_noffset(work_in, 0)
-extern struct thr_info *get_thread(int thr_id);
 extern struct cgpu_info *get_devices(int id);
 
 enum api_data_type {

sgminer.c (53 lines changed)

@@ -403,27 +403,15 @@ static void applog_and_exit(const char *fmt, ...)
 static pthread_mutex_t sharelog_lock;
 static FILE *sharelog_file = NULL;
 
-static struct thr_info *__get_thread(int thr_id)
-{
-    return mining_thr[thr_id];
-}
-
-struct thr_info *get_thread(int thr_id)
+static struct cgpu_info *get_thr_cgpu(int thr_id)
 {
     struct thr_info *thr;
 
     rd_lock(&mining_thr_lock);
-    thr = __get_thread(thr_id);
+    if (thr_id < mining_threads)
+        thr = mining_thr[thr_id];
     rd_unlock(&mining_thr_lock);
 
-    return thr;
-}
-
-static struct cgpu_info *get_thr_cgpu(int thr_id)
-{
-    struct thr_info *thr = get_thread(thr_id);
-
-    return thr->cgpu;
+    return thr ? thr->cgpu : NULL;
 }
 
 struct cgpu_info *get_devices(int id)
@@ -3342,10 +3330,11 @@ static void kill_mining(void)
     forcelog(LOG_DEBUG, "Killing off mining threads");
     /* Kill the mining threads*/
+    rd_lock(&mining_thr_lock);
     for (i = 0; i < mining_threads; i++) {
         pthread_t *pth = NULL;
 
-        thr = get_thread(i);
+        thr = mining_thr[i];
         if (thr && PTH(thr) != 0L)
             pth = &thr->pth;
         thr_info_cancel(thr);
@@ -3357,6 +3346,7 @@ static void kill_mining(void)
             pthread_join(*pth, NULL);
 #endif
     }
+    rd_unlock(&mining_thr_lock);
 }
 
 static void __kill_work(void)
@@ -3380,10 +3370,11 @@ static void __kill_work(void)
     kill_timeout(thr);
 
     forcelog(LOG_DEBUG, "Shutting down mining threads");
+    rd_lock(&mining_thr_lock);
     for (i = 0; i < mining_threads; i++) {
         struct cgpu_info *cgpu;
 
-        thr = get_thread(i);
+        thr = mining_thr[i];
         if (!thr)
             continue;
         cgpu = thr->cgpu;
@@ -3392,6 +3383,7 @@ static void __kill_work(void)
         cgpu->shutdown = true;
     }
+    rd_unlock(&mining_thr_lock);
 
     sleep(1);
@@ -4049,10 +4041,7 @@ static void *restart_thread(void __maybe_unused *arg)
     discard_stale();
 
     rd_lock(&mining_thr_lock);
-    mt = mining_threads;
-    rd_unlock(&mining_thr_lock);
-
-    for (i = 0; i < mt; i++) {
+    for (i = 0; i < mining_threads; i++) {
         cgpu = mining_thr[i]->cgpu;
         if (unlikely(!cgpu))
             continue;
@@ -4061,6 +4050,7 @@ static void *restart_thread(void __maybe_unused *arg)
         mining_thr[i]->work_restart = true;
         cgpu->drv->flush_work(cgpu);
     }
+    rd_unlock(&mining_thr_lock);
 
     mutex_lock(&restart_lock);
     pthread_cond_broadcast(&restart_cond);
@@ -5179,20 +5169,22 @@ static void hashmeter(int thr_id, struct timeval *diff,
     bool showlog = false;
     char displayed_hashes[16], displayed_rolling[16];
     uint64_t dh64, dr64;
-    struct thr_info *thr;
+    struct thr_info *thr = NULL;
 
     local_mhashes = (double)hashes_done / 1000000.0;
 
     /* Update the last time this thread reported in */
-    if (thr_id >= 0) {
-        thr = get_thread(thr_id);
+    rd_lock(&mining_thr_lock);
+    if (thr_id >= 0 && thr_id < mining_threads) {
+        thr = mining_thr[thr_id];
         cgtime(&thr->last);
         thr->cgpu->device_last_well = time(NULL);
     }
+    rd_unlock(&mining_thr_lock);
 
     secs = (double)diff->tv_sec + ((double)diff->tv_usec / 1000000.0);
 
     /* So we can call hashmeter from a non worker thread */
-    if (thr_id >= 0) {
+    if (thr) {
         struct cgpu_info *cgpu = thr->cgpu;
         double thread_rolling = 0.0;
         int i;
@@ -7087,7 +7079,7 @@ static void *watchdog_thread(void __maybe_unused *userdata)
         for (i = 0; i < mining_threads; i++) {
             struct thr_info *thr;
-            thr = get_thread(i);
+            thr = mining_thr[i];
 
             /* Don't touch disabled devices */
             if (thr->cgpu->deven == DEV_DISABLED)
@@ -7733,15 +7725,20 @@ static void restart_mining_threads(unsigned int new_n_threads)
     // Stop and free threads
     if (mining_thr) {
+        rd_lock(&mining_thr_lock);
         for (i = 0; i < mining_threads; i++) {
             mining_thr[i]->cgpu->shutdown = true;
         }
+        rd_unlock(&mining_thr_lock);
 
+        // kill_mining will rd lock mining_thr_lock
         kill_mining();
+        rd_lock(&mining_thr_lock);
         for (i = 0; i < mining_threads; i++) {
             thr = mining_thr[i];
             thr->cgpu->drv->thread_shutdown(thr);
             thr->cgpu->shutdown = false;
         }
+        rd_unlock(&mining_thr_lock);
     }
 
     wr_lock(&mining_thr_lock);
@@ -7779,7 +7776,7 @@ static void restart_mining_threads(unsigned int new_n_threads)
         cgpu->status = LIFE_INIT;
 
         for (j = 0; j < cgpu->threads; ++j, ++k) {
-            thr = get_thread(k);
+            thr = mining_thr[k];
             thr->id = k;
             thr->cgpu = cgpu;
             thr->device_thread = j;
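
One behavioral consequence of the sgminer.c rewrite above: get_thr_cgpu() now bounds-checks thr_id and returns NULL for an out-of-range id, so callers must handle that. A hypothetical caller (not part of this commit) might look like:

    /* Hypothetical caller, not part of this commit: shows the NULL return
     * introduced by the bounds check in get_thr_cgpu(). */
    static const char *thr_device_name(int thr_id)
    {
        struct cgpu_info *cgpu = get_thr_cgpu(thr_id);

        if (!cgpu)
            return "unknown";   /* thr_id >= mining_threads */
        return cgpu->drv->name; /* assumes device_drv's name field, as in sgminer */
    }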
