
Make main() the getwork scheduler once everything is set up, so that all app exits use the kill_work and quit paths.

Branch: nfactor-troky
Con Kolivas, 12 years ago
commit 01b9c6dfbf

1 changed file: cgminer.c (258 lines changed)
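
In outline, the diff below deletes the dedicated getwork scheduler thread and runs the same while (42) loop directly from main(), so the initial thread no longer parks on a kill condition variable and every exit funnels through the common kill_work/quit paths. The following is a minimal, self-contained sketch of that pattern only, not cgminer's actual code; all names (miner_thread, the simplified quit()) are illustrative:

    /* Sketch only: after setup, main() itself becomes the scheduler
     * loop, and every exit goes through one common quit() path. */
    #include <pthread.h>
    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static void quit(int status, const char *fmt, ...)
    {
            va_list ap;

            /* The single common exit path: log a reason, then exit. */
            va_start(ap, fmt);
            vfprintf(stderr, fmt, ap);
            va_end(ap);
            fputc('\n', stderr);
            exit(status);
    }

    static void *miner_thread(void *arg)
    {
            (void)arg;
            for (;;)
                    sleep(1); /* stand-in for real mining work */
            return NULL;
    }

    int main(void)
    {
            pthread_t thr;

            /* Set everything up first: spawn the worker threads... */
            if (pthread_create(&thr, NULL, miner_thread, NULL))
                    quit(1, "miner thread create failed");
            pthread_detach(thr);

            /* ...then main() becomes the scheduler. The loop never
             * returns; any shutdown happens inside it via quit(). */
            while (42)
                    sleep(1); /* schedule and stage work here */

            return 0; /* unreachable */
    }

Since main() never returns normally, the old exit-time cleanup (the kill broadcast, the staged_work and blocks hash teardown, the forkpid kill) becomes dead code and is removed along with kill_lock and kill_cond.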

@@ -178,9 +178,6 @@ static pthread_cond_t lp_cond;
 pthread_mutex_t restart_lock;
 pthread_cond_t restart_cond;
 
-pthread_mutex_t kill_lock;
-pthread_cond_t kill_cond;
-
 pthread_cond_t gws_cond;
 
 double total_mhashes_done;
@@ -2806,11 +2803,6 @@ static void __kill_work(void)
 	applog(LOG_DEBUG, "Killing off API thread");
 	thr = &thr_info[api_thr_id];
 	thr_info_cancel(thr);
-
-	applog(LOG_DEBUG, "Sending kill broadcast");
-	mutex_lock(&kill_lock);
-	pthread_cond_signal(&kill_cond);
-	mutex_unlock(&kill_lock);
 }
 
 /* This should be the common exit path */
@@ -3043,126 +3035,6 @@ static void pool_died(struct pool *pool)
 	}
 }
 
-static void gen_stratum_work(struct pool *pool, struct work *work);
-static void pool_resus(struct pool *pool);
-
-static void *getwork_thread(void __maybe_unused *userdata)
-{
-	pthread_detach(pthread_self());
-
-	RenameThread("getwork_sched");
-
-	while (42) {
-		int ts, max_staged = opt_queue;
-		struct pool *pool, *cp;
-		bool lagging = false;
-		struct curl_ent *ce;
-		struct work *work;
-
-		cp = current_pool();
-
-		/* If the primary pool is a getwork pool and cannot roll work,
-		 * try to stage one extra work per mining thread */
-		if (!cp->has_stratum && !cp->has_gbt && !staged_rollable)
-			max_staged += mining_threads;
-
-		mutex_lock(stgd_lock);
-		ts = __total_staged();
-
-		if (!cp->has_stratum && !cp->has_gbt && !ts && !opt_fail_only)
-			lagging = true;
-
-		/* Wait until hash_pop tells us we need to create more work */
-		if (ts > max_staged) {
-			pthread_cond_wait(&gws_cond, stgd_lock);
-			ts = __total_staged();
-		}
-		mutex_unlock(stgd_lock);
-
-		if (ts > max_staged)
-			continue;
-
-		work = make_work();
-
-		if (lagging && !pool_tset(cp, &cp->lagging)) {
-			applog(LOG_WARNING, "Pool %d not providing work fast enough", cp->pool_no);
-			cp->getfail_occasions++;
-			total_go++;
-		}
-		pool = select_pool(lagging);
-retry:
-		if (pool->has_stratum) {
-			while (!pool->stratum_active) {
-				struct pool *altpool = select_pool(true);
-
-				sleep(5);
-				if (altpool != pool) {
-					pool = altpool;
-					goto retry;
-				}
-			}
-			gen_stratum_work(pool, work);
-			applog(LOG_DEBUG, "Generated stratum work");
-			stage_work(work);
-			continue;
-		}
-
-		if (pool->has_gbt) {
-			while (pool->idle) {
-				struct pool *altpool = select_pool(true);
-
-				sleep(5);
-				if (altpool != pool) {
-					pool = altpool;
-					goto retry;
-				}
-			}
-			gen_gbt_work(pool, work);
-			applog(LOG_DEBUG, "Generated GBT work");
-			stage_work(work);
-			continue;
-		}
-
-		if (clone_available()) {
-			applog(LOG_DEBUG, "Cloned getwork work");
-			free_work(work);
-			continue;
-		}
-
-		if (opt_benchmark) {
-			get_benchmark_work(work);
-			applog(LOG_DEBUG, "Generated benchmark work");
-			stage_work(work);
-			continue;
-		}
-
-		work->pool = pool;
-		ce = pop_curl_entry(pool);
-		/* obtain new work from bitcoin via JSON-RPC */
-		if (!get_upstream_work(work, ce->curl)) {
-			applog(LOG_DEBUG, "Pool %d json_rpc_call failed on get work, retrying in 5s", pool->pool_no);
-			/* Make sure the pool just hasn't stopped serving
-			 * requests but is up as we'll keep hammering it */
-			if (++pool->seq_getfails > mining_threads + opt_queue)
-				pool_died(pool);
-			sleep(5);
-			push_curl_entry(ce, pool);
-			pool = select_pool(true);
-			goto retry;
-		}
-		pool_tclear(pool, &pool->lagging);
-		if (pool_tclear(pool, &pool->idle))
-			pool_resus(pool);
-
-		applog(LOG_DEBUG, "Generated getwork work");
-		stage_work(work);
-		push_curl_entry(ce, pool);
-	}
-
-	return NULL;
-}
-
 static bool stale_work(struct work *work, bool share)
 {
 	struct timeval now;
@@ -4651,6 +4523,8 @@ static bool cnx_needed(struct pool *pool)
 }
 
 static void wait_lpcurrent(struct pool *pool);
+static void pool_resus(struct pool *pool);
+static void gen_stratum_work(struct pool *pool, struct work *work);
 
 /* One stratum thread per pool that has stratum waits on the socket checking
  * for new messages and for the integrity of the socket connection. We reset
@@ -6368,14 +6242,13 @@ bool add_cgpu(struct cgpu_info*cgpu)
 int main(int argc, char *argv[])
 {
-	struct block *block, *tmpblock;
-	struct work *work, *tmpwork;
 	bool pools_active = false;
 	struct sigaction handler;
 	struct thr_info *thr;
-	char *s;
+	struct block *block;
 	unsigned int k;
 	int i, j;
+	char *s;
 
 	/* This dangerous functions tramples random dynamically allocated
 	 * variables so do it before anything at all */
@@ -6416,10 +6289,6 @@ int main(int argc, char *argv[])
 	if (unlikely(pthread_cond_init(&restart_cond, NULL)))
 		quit(1, "Failed to pthread_cond_init restart_cond");
 
-	mutex_init(&kill_lock);
-	if (unlikely(pthread_cond_init(&kill_cond, NULL)))
-		quit(1, "Failed to pthread_cond_init kill_cond");
-
 	if (unlikely(pthread_cond_init(&gws_cond, NULL)))
 		quit(1, "Failed to pthread_cond_init gws_cond");
@@ -6879,36 +6748,113 @@ begin_bench:
 	pthread_detach(thr->pth);
 #endif
 
-	thr = &thr_info[gwsched_thr_id];
-	thr->id = gwsched_thr_id;
-
-	/* start getwork scheduler thread */
-	if (thr_info_create(thr, NULL, getwork_thread, thr))
-		quit(1, "getwork_thread create failed");
-
-	/* Wait till we receive the conditional telling us to die */
-	mutex_lock(&kill_lock);
-	pthread_cond_wait(&kill_cond, &kill_lock);
-	mutex_unlock(&kill_lock);
-
-	applog(LOG_INFO, "Given kill message, exiting.");
-
-	/* Not really necessary, but let's clean this up too anyway */
-	HASH_ITER(hh, staged_work, work, tmpwork) {
-		HASH_DEL(staged_work, work);
-		free_work(work);
-	}
-	HASH_ITER(hh, blocks, block, tmpblock) {
-		HASH_DEL(blocks, block);
-		free(block);
-	}
-
-#if defined(unix)
-	if (forkpid > 0) {
-		kill(forkpid, SIGTERM);
-		forkpid = 0;
-	}
-#endif
-
+	/* Once everything is set up, main() becomes the getwork scheduler */
+	while (42) {
+		int ts, max_staged = opt_queue;
+		struct pool *pool, *cp;
+		bool lagging = false;
+		struct curl_ent *ce;
+		struct work *work;
+
+		cp = current_pool();
+
+		/* If the primary pool is a getwork pool and cannot roll work,
+		 * try to stage one extra work per mining thread */
+		if (!cp->has_stratum && !cp->has_gbt && !staged_rollable)
+			max_staged += mining_threads;
+
+		mutex_lock(stgd_lock);
+		ts = __total_staged();
+
+		if (!cp->has_stratum && !cp->has_gbt && !ts && !opt_fail_only)
+			lagging = true;
+
+		/* Wait until hash_pop tells us we need to create more work */
+		if (ts > max_staged) {
+			pthread_cond_wait(&gws_cond, stgd_lock);
+			ts = __total_staged();
+		}
+		mutex_unlock(stgd_lock);
+
+		if (ts > max_staged)
+			continue;
+
+		work = make_work();
+
+		if (lagging && !pool_tset(cp, &cp->lagging)) {
+			applog(LOG_WARNING, "Pool %d not providing work fast enough", cp->pool_no);
+			cp->getfail_occasions++;
+			total_go++;
+		}
+		pool = select_pool(lagging);
+retry:
+		if (pool->has_stratum) {
+			while (!pool->stratum_active) {
+				struct pool *altpool = select_pool(true);
+
+				sleep(5);
+				if (altpool != pool) {
+					pool = altpool;
+					goto retry;
+				}
+			}
+			gen_stratum_work(pool, work);
+			applog(LOG_DEBUG, "Generated stratum work");
+			stage_work(work);
+			continue;
+		}
+
+		if (pool->has_gbt) {
+			while (pool->idle) {
+				struct pool *altpool = select_pool(true);
+
+				sleep(5);
+				if (altpool != pool) {
+					pool = altpool;
+					goto retry;
+				}
+			}
+			gen_gbt_work(pool, work);
+			applog(LOG_DEBUG, "Generated GBT work");
+			stage_work(work);
+			continue;
+		}
+
+		if (clone_available()) {
+			applog(LOG_DEBUG, "Cloned getwork work");
+			free_work(work);
+			continue;
+		}
+
+		if (opt_benchmark) {
+			get_benchmark_work(work);
+			applog(LOG_DEBUG, "Generated benchmark work");
+			stage_work(work);
+			continue;
+		}
+
+		work->pool = pool;
+		ce = pop_curl_entry(pool);
+		/* obtain new work from bitcoin via JSON-RPC */
+		if (!get_upstream_work(work, ce->curl)) {
+			applog(LOG_DEBUG, "Pool %d json_rpc_call failed on get work, retrying in 5s", pool->pool_no);
+			/* Make sure the pool just hasn't stopped serving
+			 * requests but is up as we'll keep hammering it */
+			if (++pool->seq_getfails > mining_threads + opt_queue)
+				pool_died(pool);
+			sleep(5);
+			push_curl_entry(ce, pool);
+			pool = select_pool(true);
+			goto retry;
+		}
+		pool_tclear(pool, &pool->lagging);
+		if (pool_tclear(pool, &pool->idle))
+			pool_resus(pool);
+
+		applog(LOG_DEBUG, "Generated getwork work");
+		stage_work(work);
+		push_curl_entry(ce, pool);
+	}
+
 	return 0;
 }
