
Make algorithm switching more robust and faster; the previous approach was buggy.

refactor
Jan Berdajs committed 11 years ago
commit cdb817bcbd
1 changed file: sgminer.c (90 lines changed)
@@ -199,7 +199,9 @@ static pthread_mutex_t lp_lock;
 static pthread_cond_t lp_cond;
 
 static pthread_mutex_t algo_switch_lock;
-static pthread_t algo_switch_thr = 0;
+static int algo_switch_n = 0;
+static pthread_mutex_t algo_switch_wait_lock;
+static pthread_cond_t algo_switch_wait_cond;
 
 pthread_mutex_t restart_lock;
 pthread_cond_t restart_cond;
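For orientation: the handle to the old detached switcher thread is gone, and the shared state now amounts to an arrival counter guarded by algo_switch_lock plus a separate mutex/condition-variable pair that threads park on. The standalone sketch below shows that state and an error-checked setup for it; the identifiers (switch_waiting, init_switch_state) and the raw pthread_* init calls are illustrative stand-ins, not sgminer's own mutex_init()/quit() helpers.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the new globals in the hunk above. */
static pthread_mutex_t switch_lock;      /* guards the arrival counter      */
static int             switch_waiting;   /* threads waiting for the switch  */
static pthread_mutex_t switch_wait_lock; /* protects the condition below    */
static pthread_cond_t  switch_wait_cond; /* signalled when the switch ends  */

/* Set up the synchronization state, bailing out on failure in the same
 * spirit as the quit() calls added to main() in the last hunk. */
static void init_switch_state(void)
{
	if (pthread_mutex_init(&switch_lock, NULL) ||
	    pthread_mutex_init(&switch_wait_lock, NULL) ||
	    pthread_cond_init(&switch_wait_cond, NULL)) {
		fprintf(stderr, "failed to init algorithm-switch state\n");
		exit(1);
	}
	switch_waiting = 0;
}

int main(void)
{
	init_switch_state();
	return 0;
}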
@@ -6023,67 +6025,51 @@ static void gen_stratum_work(struct pool *pool, struct work *work)
 	cgtime(&work->tv_staged);
 }
 
-static void *switch_algo_thread(void *arg)
-{
-	algorithm_t *new_algo = (algorithm_t *) arg;
-	int i;
-
-	pthread_detach(pthread_self());
-
-	applog(LOG_WARNING, "Switching algorithm to %s (%d)",
-		new_algo->name, new_algo->nfactor);
-
-	// TODO: When threads are canceled, they may leak memory, for example
-	// the "work" variable in get_work.
-	rd_lock(&devices_lock);
-	for (i = 0; i < total_devices; i++) {
-		devices[i]->algorithm = *new_algo;
-		reinit_device(devices[i]);
-	}
-	rd_unlock(&devices_lock);
-
-	// Wait for reinit_gpu to finish
-	while (42) {
-		struct thread_q *tq = control_thr[gpur_thr_id].q;
-		bool stop = false;
-		mutex_lock(&tq->mutex);
-		stop = list_empty(&tq->q);
-		mutex_unlock(&tq->mutex);
-		if (stop) break;
-		usleep(50000);
-	}
-
-	mutex_lock(&algo_switch_lock);
-	algo_switch_thr = 0;
-	mutex_unlock(&algo_switch_lock);
-	return NULL;
-}
-
-static void wait_to_die(void)
-{
-	mutex_unlock(&algo_switch_lock);
-	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
-	sleep(60);
-	applog(LOG_ERR, "Thread not canceled within 60 seconds");
-}
-
 static void get_work_prepare_thread(struct thr_info *mythr, struct work *work)
 {
-	struct cgpu_info *cgpu = mythr->cgpu;
+	int i;
 
+	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
 	mutex_lock(&algo_switch_lock);
-	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
-	if (!cmp_algorithm(&work->pool->algorithm, &cgpu->algorithm)) {
-		// stage work back to queue, we cannot process it yet
-		stage_work(work);
-		if (algo_switch_thr == 0)
-			pthread_create(&algo_switch_thr, NULL, &switch_algo_thread, &work->pool->algorithm);
-		wait_to_die();
+
+	if (cmp_algorithm(&work->pool->algorithm, &mythr->cgpu->algorithm) && (algo_switch_n == 0)) {
+		mutex_unlock(&algo_switch_lock);
+		pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
+		return;
+	}
+
+	algo_switch_n++;
+
+	// If all threads are waiting now
+	if (algo_switch_n >= mining_threads) {
+		rd_lock(&mining_thr_lock);
+		// Shutdown all threads first (necessary)
+		for (i = 0; i < mining_threads; i++) {
+			struct thr_info *thr = mining_thr[i];
+			thr->cgpu->drv->thread_shutdown(thr);
+		}
+		// Change algorithm for each thread (thread_prepare calls initCl)
+		for (i = 0; i < mining_threads; i++) {
+			struct thr_info *thr = mining_thr[i];
+			thr->cgpu->algorithm = work->pool->algorithm;
+			thr->cgpu->drv->thread_prepare(thr);
+		}
+		rd_unlock(&mining_thr_lock);
+		algo_switch_n = 0;
+		mutex_unlock(&algo_switch_lock);
+		// Signal other threads to start working now
+		mutex_lock(&algo_switch_wait_lock);
+		pthread_cond_broadcast(&algo_switch_wait_cond);
+		mutex_unlock(&algo_switch_wait_lock);
+	// Not all threads are waiting, join the waiting list
 	} else {
 		mutex_unlock(&algo_switch_lock);
-		pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
+		// Wait for signal to start working again
+		mutex_lock(&algo_switch_wait_lock);
+		pthread_cond_wait(&algo_switch_wait_cond, &algo_switch_wait_lock);
+		mutex_unlock(&algo_switch_wait_lock);
 	}
+	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
 }
 
 struct work *get_work(struct thr_info *thr, const int thr_id)
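Taken together, the new get_work_prepare_thread behaves like a reusable barrier: each mining thread that sees a pool whose algorithm differs from its device's increments algo_switch_n, the last thread to arrive shuts down and re-prepares every mining thread and then broadcasts, and all earlier arrivals sleep on algo_switch_wait_cond until that broadcast. The sketch below illustrates only that rendezvous pattern and is not sgminer code: it folds the two locks into a single mutex and adds a generation counter (the textbook condition-variable idiom, which also absorbs spurious wakeups), and names such as do_switch, rendezvous and NUM_THREADS are invented for the example.

#include <pthread.h>
#include <stdio.h>

#define NUM_THREADS 4

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_cond = PTHREAD_COND_INITIALIZER;
static int waiting_n  = 0;  /* threads that have arrived at the rendezvous */
static int generation = 0;  /* bumped once per completed switch            */

/* Stand-in for the expensive single-threaded work (in the commit:
 * thread_shutdown() followed by thread_prepare() on every mining thread). */
static void do_switch(void)
{
	printf("last thread in: performing the algorithm switch\n");
}

/* Called by every worker; returns once the switch has been performed. */
static void rendezvous(void)
{
	pthread_mutex_lock(&wait_lock);
	int my_gen = generation;

	if (++waiting_n >= NUM_THREADS) {
		/* Last thread to arrive does the switch for everyone,
		 * resets the counter and wakes the parked threads. */
		do_switch();
		waiting_n = 0;
		generation++;
		pthread_cond_broadcast(&wait_cond);
	} else {
		/* Park until the switching thread bumps the generation. */
		while (my_gen == generation)
			pthread_cond_wait(&wait_cond, &wait_lock);
	}
	pthread_mutex_unlock(&wait_lock);
}

static void *worker(void *arg)
{
	long id = (long)arg;

	rendezvous();
	printf("thread %ld resumes mining\n", id);
	return NULL;
}

int main(void)
{
	pthread_t threads[NUM_THREADS];
	long i;

	for (i = 0; i < NUM_THREADS; i++)
		pthread_create(&threads[i], NULL, worker, (void *)i);
	for (i = 0; i < NUM_THREADS; i++)
		pthread_join(threads[i], NULL);
	return 0;
}

The generation check is what lets the sketch tolerate a broadcast that fires before a late arrival has actually reached pthread_cond_wait, a window that any counter-plus-condvar design has to be careful about; checking a predicate inside the wait loop is the standard way to make that wait robust.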
@@ -7948,6 +7934,10 @@ int main(int argc, char *argv[])
 	if (unlikely(pthread_cond_init(&restart_cond, NULL)))
 		quit(1, "Failed to pthread_cond_init restart_cond");
 
+	mutex_init(&algo_switch_wait_lock);
+	if (unlikely(pthread_cond_init(&algo_switch_wait_cond, NULL)))
+		quit(1, "Failed to pthread_cond_init algo_switch_wait_cond");
+
 	if (unlikely(pthread_cond_init(&gws_cond, NULL)))
 		quit(1, "Failed to pthread_cond_init gws_cond");
