
make rw locks: mining_thr_lock and devices_lock

nfactor-troky
Kano 12 years ago
commit 120e9a072d
1. api.c (10 changed lines)
2. cgminer.c (48 changed lines)
3. miner.h (4 changed lines)
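
The calls in the diff go through cgminer's lock wrappers rather than the raw pthread API: rd_lock()/rd_unlock() take the lock shared, wr_lock()/wr_unlock() take it exclusive, and rwlock_init() replaces mutex_init() for these two locks. The wrappers themselves are defined elsewhere in the tree and are not touched by this commit; as a rough sketch (error handling simplified, not the actual cgminer definitions), they behave like thin wrappers over pthread_rwlock_t:

/* Sketch only: illustrates the assumed semantics of cgminer's rd_lock/wr_lock
 * helpers as thin pthread_rwlock_t wrappers; the real definitions are not
 * part of this commit. */
#include <pthread.h>
#include <stdlib.h>

static inline void rwlock_init(pthread_rwlock_t *lock)
{
	if (pthread_rwlock_init(lock, NULL))	/* default attributes */
		abort();
}

static inline void rd_lock(pthread_rwlock_t *lock)
{
	if (pthread_rwlock_rdlock(lock))	/* shared: readers may overlap */
		abort();
}

static inline void wr_lock(pthread_rwlock_t *lock)
{
	if (pthread_rwlock_wrlock(lock))	/* exclusive: blocks all readers */
		abort();
}

static inline void rd_unlock(pthread_rwlock_t *lock)
{
	if (pthread_rwlock_unlock(lock))
		abort();
}

static inline void wr_unlock(pthread_rwlock_t *lock)
{
	if (pthread_rwlock_unlock(lock))
		abort();
}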

api.c (10 changed lines)

@@ -1161,7 +1161,7 @@ static int numpgas()
 	int count = 0;
 	int i;
 
-	mutex_lock(&devices_lock);
+	rd_lock(&devices_lock);
 	for (i = 0; i < total_devices; i++) {
 #ifdef USE_BITFORCE
 		if (devices[i]->drv->drv_id == DRIVER_BITFORCE)
@@ -1180,7 +1180,7 @@ static int numpgas()
 			count++;
 #endif
 	}
-	mutex_unlock(&devices_lock);
+	rd_unlock(&devices_lock);
 	return count;
 }
@@ -1189,7 +1189,7 @@ static int pgadevice(int pgaid)
 	int count = 0;
 	int i;
 
-	mutex_lock(&devices_lock);
+	rd_lock(&devices_lock);
 	for (i = 0; i < total_devices; i++) {
 #ifdef USE_BITFORCE
 		if (devices[i]->drv->drv_id == DRIVER_BITFORCE)
@@ -1211,12 +1211,12 @@ static int pgadevice(int pgaid)
 			goto foundit;
 	}
-	mutex_unlock(&devices_lock);
+	rd_unlock(&devices_lock);
 	return -1;
 
 foundit:
-	mutex_unlock(&devices_lock);
+	rd_unlock(&devices_lock);
 	return i;
 }
 #endif

cgminer.c (48 changed lines)

@@ -180,8 +180,8 @@ static pthread_rwlock_t blk_lock;
 static pthread_mutex_t sshare_lock;
 
 pthread_rwlock_t netacc_lock;
-pthread_mutex_t mining_thr_lock;
-pthread_mutex_t devices_lock;
+pthread_rwlock_t mining_thr_lock;
+pthread_rwlock_t devices_lock;
 
 static pthread_mutex_t lp_lock;
 static pthread_cond_t lp_cond;
@@ -379,9 +379,9 @@ struct thr_info *get_thread(int thr_id)
 {
 	struct thr_info *thr;
 
-	mutex_lock(&mining_thr_lock);
+	rd_lock(&mining_thr_lock);
 	thr = mining_thr[thr_id];
-	mutex_unlock(&mining_thr_lock);
+	rd_unlock(&mining_thr_lock);
 
 	return thr;
 }
@@ -396,9 +396,9 @@ struct cgpu_info *get_devices(int id)
 {
 	struct cgpu_info *cgpu;
 
-	mutex_lock(&devices_lock);
+	rd_lock(&devices_lock);
 	cgpu = devices[id];
-	mutex_unlock(&devices_lock);
+	rd_unlock(&devices_lock);
 
 	return cgpu;
 }
@@ -762,24 +762,24 @@ static void load_temp_cutoffs()
 			if (val < 0 || val > 200)
 				quit(1, "Invalid value passed to set temp cutoff");
 
-			mutex_lock(&devices_lock);
+			rd_lock(&devices_lock);
 			devices[device]->cutofftemp = val;
-			mutex_unlock(&devices_lock);
+			rd_unlock(&devices_lock);
 		}
 	} else {
-		mutex_lock(&devices_lock);
+		rd_lock(&devices_lock);
 		for (i = device; i < total_devices; ++i) {
 			if (!devices[i]->cutofftemp)
 				devices[i]->cutofftemp = opt_cutofftemp;
 		}
-		mutex_unlock(&devices_lock);
+		rd_unlock(&devices_lock);
 		return;
 	}
 
 	if (device <= 1) {
-		mutex_lock(&devices_lock);
+		rd_lock(&devices_lock);
 		for (i = device; i < total_devices; ++i)
 			devices[i]->cutofftemp = val;
-		mutex_unlock(&devices_lock);
+		rd_unlock(&devices_lock);
 	}
 }
@@ -3496,10 +3496,10 @@ static void restart_threads(void)
 	/* Discard staged work that is now stale */
 	discard_stale();
 
-	mutex_lock(&mining_thr_lock);
+	rd_lock(&mining_thr_lock);
 	for (i = 0; i < mining_threads; i++)
 		mining_thr[i]->work_restart = true;
-	mutex_unlock(&mining_thr_lock);
+	rd_unlock(&mining_thr_lock);
 
 	mutex_lock(&restart_lock);
 	pthread_cond_broadcast(&restart_cond);
@@ -6152,10 +6152,10 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 			applog(LOG_WARNING, "Will restart execution as scheduled at %02d:%02d",
 			       schedstart.tm.tm_hour, schedstart.tm.tm_min);
 			sched_paused = true;
-			mutex_lock(&mining_thr_lock);
+			rd_lock(&mining_thr_lock);
 			for (i = 0; i < mining_threads; i++)
 				mining_thr[i]->pause = true;
-			mutex_unlock(&mining_thr_lock);
+			rd_unlock(&mining_thr_lock);
 		} else if (sched_paused && should_run()) {
 			applog(LOG_WARNING, "Restarting execution as per start time %02d:%02d scheduled",
 			       schedstart.tm.tm_hour, schedstart.tm.tm_min);
@@ -6719,9 +6719,9 @@ void fill_device_drv(struct cgpu_info *cgpu)
 void enable_device(struct cgpu_info *cgpu)
 {
 	cgpu->deven = DEV_ENABLED;
-	mutex_lock(&devices_lock);
+	wr_lock(&devices_lock);
 	devices[cgpu->cgminer_id = cgminer_id_count++] = cgpu;
-	mutex_unlock(&devices_lock);
+	wr_unlock(&devices_lock);
 	if (hotplug_mode) {
 		new_threads += cgpu->threads;
 #ifdef HAVE_CURSES
@@ -6764,9 +6764,9 @@ bool add_cgpu(struct cgpu_info*cgpu)
 		cgpu->device_id = d->lastid = 0;
 		HASH_ADD_STR(devids, name, d);
 	}
-	mutex_lock(&devices_lock);
+	wr_lock(&devices_lock);
 	devices = realloc(devices, sizeof(struct cgpu_info *) * (total_devices + new_devices + 2));
-	mutex_unlock(&devices_lock);
+	wr_unlock(&devices_lock);
 	if (hotplug_mode)
 		devices[total_devices + new_devices++] = cgpu;
 	else
@@ -6802,9 +6802,9 @@ static void hotplug_process()
 		cgpu->rolling = cgpu->total_mhashes = 0;
 	}
 
-	mutex_lock(&mining_thr_lock);
+	wr_lock(&mining_thr_lock);
 	mining_thr = realloc(mining_thr, sizeof(thr) * (mining_threads + new_threads + 1));
-	mutex_unlock(&mining_thr_lock);
+	wr_unlock(&mining_thr_lock);
 	if (!mining_thr)
 		quit(1, "Failed to hotplug realloc mining_thr");
 	for (i = 0; i < new_threads; i++) {
@@ -6933,8 +6933,8 @@ int main(int argc, char *argv[])
 	mutex_init(&sshare_lock);
 	rwlock_init(&blk_lock);
 	rwlock_init(&netacc_lock);
-	mutex_init(&mining_thr_lock);
-	mutex_init(&devices_lock);
+	rwlock_init(&mining_thr_lock);
+	rwlock_init(&devices_lock);
 
 	mutex_init(&lp_lock);
 	if (unlikely(pthread_cond_init(&lp_cond, NULL)))

miner.h (4 changed lines)

@@ -757,8 +757,8 @@ extern pthread_mutex_t cgusb_lock;
 extern pthread_mutex_t hash_lock;
 extern pthread_mutex_t console_lock;
 extern pthread_mutex_t ch_lock;
-extern pthread_mutex_t mining_thr_lock;
-extern pthread_mutex_t devices_lock;
+extern pthread_rwlock_t mining_thr_lock;
+extern pthread_rwlock_t devices_lock;
 
 extern pthread_mutex_t restart_lock;
 extern pthread_cond_t restart_cond;
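
Taken together, the split after this commit is: code that only walks devices[] or mining_thr[] (the API device counts, get_thread(), get_devices(), restart_threads(), the watchdog pause loop) takes the shared read lock, while the few paths that grow the arrays (enable_device(), add_cgpu(), hotplug_process()) take the exclusive write lock, so concurrent readers no longer serialise on a mutex. A minimal, self-contained sketch of that pattern using the raw pthread API; the names device_t, add_device and count_devices are invented for the example and are not cgminer code:

/* Illustration only: shared lock for traversal, exclusive lock for growth. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int id; } device_t;

static pthread_rwlock_t devices_lock = PTHREAD_RWLOCK_INITIALIZER;
static device_t **devices;
static int total_devices;

/* Writer: growing the array needs the lock exclusively (wr_lock in cgminer). */
static void add_device(device_t *dev)
{
	pthread_rwlock_wrlock(&devices_lock);
	devices = realloc(devices, sizeof(*devices) * (total_devices + 1));
	if (!devices)
		abort();
	devices[total_devices++] = dev;
	pthread_rwlock_unlock(&devices_lock);
}

/* Reader: traversal only needs the shared lock (rd_lock in cgminer), so many
 * readers can run concurrently instead of queueing on one mutex. */
static int count_devices(void)
{
	int i, count = 0;

	pthread_rwlock_rdlock(&devices_lock);
	for (i = 0; i < total_devices; i++)
		if (devices[i])
			count++;
	pthread_rwlock_unlock(&devices_lock);
	return count;
}

int main(void)
{
	device_t a = { 0 }, b = { 1 };

	add_device(&a);
	add_device(&b);
	printf("%d devices\n", count_devices());
	return 0;
}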
