@@ -398,6 +398,7 @@ struct thr_info *get_thread(int thr_id)
 	rd_lock(&mining_thr_lock);
 	thr = mining_thr[thr_id];
 	rd_unlock(&mining_thr_lock);
+
 	return thr;
 }
 
@@ -415,6 +416,7 @@ struct cgpu_info *get_devices(int id)
 	rd_lock(&devices_lock);
 	cgpu = devices[id];
 	rd_unlock(&devices_lock);
+
 	return cgpu;
 }
 
@@ -455,6 +457,7 @@ static void sharelog(const char*disposition, const struct work*work)
 	ret = fwrite(s, rv, 1, sharelog_file);
 	fflush(sharelog_file);
 	mutex_unlock(&sharelog_lock);
+
 	if (ret != 1)
 		applog(LOG_ERR, "sharelog fwrite error");
 }
@@ -500,6 +503,7 @@ static bool pool_tset(struct pool *pool, bool *var)
 	ret = *var;
 	*var = true;
 	mutex_unlock(&pool->pool_lock);
+
 	return ret;
 }
 
@@ -511,6 +515,7 @@ bool pool_tclear(struct pool *pool, bool *var)
 	ret = *var;
 	*var = false;
 	mutex_unlock(&pool->pool_lock);
+
 	return ret;
 }
 
@@ -521,6 +526,7 @@ struct pool *current_pool(void)
 	cg_rlock(&control_lock);
 	pool = currentpool;
 	cg_runlock(&control_lock);
+
 	return pool;
 }
 
@@ -790,6 +796,7 @@ static void load_temp_cutoffs()
 				devices[i]->cutofftemp = opt_cutofftemp;
 		}
 		rd_unlock(&devices_lock);
+
 		return;
 	}
 	if (device <= 1) {
@@ -1491,9 +1498,11 @@ static struct work *make_work(void)
 
 	if (unlikely(!work))
 		quit(1, "Failed to calloc work in make_work");
+
 	cg_wlock(&control_lock);
 	work->id = total_work++;
 	cg_wunlock(&control_lock);
+
 	return work;
 }
 
@@ -1886,6 +1895,7 @@ static int total_staged(void)
 	mutex_lock(stgd_lock);
 	ret = __total_staged();
 	mutex_unlock(stgd_lock);
+
 	return ret;
 }
 
@@ -3158,10 +3168,12 @@ static bool stale_work(struct work *work, bool share)
 		}
 
 		same_job = true;
+
 		cg_rlock(&pool->data_lock);
 		if (strcmp(work->job_id, pool->swork.job_id))
 			same_job = false;
 		cg_runlock(&pool->data_lock);
+
 		if (!same_job) {
 			applog(LOG_DEBUG, "Work stale due to stratum job_id mismatch");
 			return true;
@@ -3205,6 +3217,7 @@ static uint64_t share_diff(const struct work *work)
 	if (unlikely(!d64))
 		d64 = 1;
 	ret = diffone / d64;
+
 	cg_wlock(&control_lock);
 	if (ret > best_diff) {
 		best_diff = ret;
@@ -3213,6 +3226,7 @@ static uint64_t share_diff(const struct work *work)
 	if (ret > work->pool->best_diff)
 		work->pool->best_diff = ret;
 	cg_wunlock(&control_lock);
+
 	return ret;
 }
 
@@ -3277,6 +3291,7 @@ static void *submit_work_thread(void *userdata)
 			total_diff_stale += work->work_difficulty;
 			pool->diff_stale += work->work_difficulty;
 			mutex_unlock(&stats_lock);
+
 			goto out;
 		}
 		work->stale = true;
@@ -3316,10 +3331,12 @@ static void *submit_work_thread(void *userdata)
 		if (likely(stratum_send(pool, s, strlen(s)))) {
 			if (pool_tclear(pool, &pool->submit_fail))
 				applog(LOG_WARNING, "Pool %d communication resumed, submitting work", pool->pool_no);
+
 			mutex_lock(&sshare_lock);
 			HASH_ADD_INT(stratum_shares, id, sshare);
 			pool->sshares++;
 			mutex_unlock(&sshare_lock);
+
 			applog(LOG_DEBUG, "Successfully submitted, adding to stratum_shares db");
 			submitted = true;
 			break;
@@ -3365,6 +3382,7 @@ static void *submit_work_thread(void *userdata)
 			total_diff_stale += work->work_difficulty;
 			pool->diff_stale += work->work_difficulty;
 			mutex_unlock(&stats_lock);
+
 			break;
 		}
 
@@ -3605,6 +3623,7 @@ static bool block_exists(char *hexstr)
 	rd_lock(&blk_lock);
 	HASH_FIND_STR(blocks, hexstr, s);
 	rd_unlock(&blk_lock);
+
 	if (s)
 		return true;
 	return false;
@@ -3693,6 +3712,7 @@ static bool test_work_current(struct work *work)
 			quit (1, "test_work_current OOM");
 		strcpy(s->hash, hexstr);
 		s->block_no = new_blocks++;
+
 		wr_lock(&blk_lock);
 		/* Only keep the last hour's worth of blocks in memory since
 		 * work from blocks before this is virtually impossible and we
@@ -3709,6 +3729,7 @@ static bool test_work_current(struct work *work)
 		HASH_ADD_STR(blocks, hash, s);
 		set_blockdiff(work);
 		wr_unlock(&blk_lock);
+
 		if (deleted_block)
 			applog(LOG_DEBUG, "Deleted block %d from database", deleted_block);
 		set_curblock(hexstr, work->data);
@@ -4701,6 +4722,7 @@ static void hashmeter(int thr_id, struct timeval *diff,
 	local_mhashes_done = 0;
 out_unlock:
 	mutex_unlock(&hash_lock);
+
 	if (showlog) {
 		if (!curses_active) {
 			printf("%s \r", statusline);
@@ -4763,6 +4785,7 @@ static bool parse_stratum_response(struct pool *pool, char *s)
 	}
 
 	id = json_integer_value(id_val);
+
 	mutex_lock(&sshare_lock);
 	HASH_FIND_INT(stratum_shares, &id, sshare);
 	if (sshare) {
@@ -4770,6 +4793,7 @@ static bool parse_stratum_response(struct pool *pool, char *s)
 		pool->sshares--;
 	}
 	mutex_unlock(&sshare_lock);
+
 	if (!sshare) {
 		if (json_is_true(res_val))
 			applog(LOG_NOTICE, "Accepted untracked stratum share from pool %d", pool->pool_no);
@@ -4840,6 +4864,7 @@ static int cp_prio(void)
 	cg_rlock(&control_lock);
 	prio = currentpool->prio;
 	cg_runlock(&control_lock);
+
 	return prio;
 }
 
@@ -4901,6 +4926,7 @@ static bool supports_resume(struct pool *pool)
 	cg_rlock(&pool->data_lock);
 	ret = (pool->sessionid != NULL);
 	cg_runlock(&pool->data_lock);
+
 	return ret;
 }
 
@@ -5819,6 +5845,7 @@ void work_completed(struct cgpu_info *cgpu, struct work *work)
 	cgpu->queued_count--;
 	HASH_DEL(cgpu->queued_work, work);
 	wr_unlock(&cgpu->qlock);
+
 	free_work(work);
 }
 
@@ -6159,6 +6186,7 @@ static void reap_curl(struct pool *pool)
 	int reaped = 0;
 
 	gettimeofday(&now, NULL);
+
 	mutex_lock(&pool->pool_lock);
 	list_for_each_entry_safe(ent, iter, &pool->curlring, node) {
 		if (pool->curls < 2)
@@ -6172,6 +6200,7 @@ static void reap_curl(struct pool *pool)
 		}
 	}
 	mutex_unlock(&pool->pool_lock);
+
 	if (reaped)
 		applog(LOG_DEBUG, "Reaped %d curl%s from pool %d", reaped, reaped > 1 ? "s" : "", pool->pool_no);
 }
@@ -6294,6 +6323,7 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 			applog(LOG_WARNING, "Will restart execution as scheduled at %02d:%02d",
 			       schedstart.tm.tm_hour, schedstart.tm.tm_min);
 			sched_paused = true;
+
 			rd_lock(&mining_thr_lock);
 			for (i = 0; i < mining_threads; i++)
 				mining_thr[i]->pause = true;
@@ -6577,6 +6607,7 @@ static void *test_pool_thread(void *arg)
 			pools_active = true;
 		}
 		cg_wunlock(&control_lock);
+
 		pool_resus(pool);
 	} else
 		pool_died(pool);
@@ -6890,9 +6921,11 @@ void fill_device_drv(struct cgpu_info *cgpu)
 void enable_device(struct cgpu_info *cgpu)
 {
 	cgpu->deven = DEV_ENABLED;
+
 	wr_lock(&devices_lock);
 	devices[cgpu->cgminer_id = cgminer_id_count++] = cgpu;
 	wr_unlock(&devices_lock);
+
 	if (hotplug_mode) {
 		new_threads += cgpu->threads;
 #ifdef HAVE_CURSES
@@ -6935,9 +6968,11 @@ bool add_cgpu(struct cgpu_info*cgpu)
 		cgpu->device_id = d->lastid = 0;
 		HASH_ADD_STR(devids, name, d);
 	}
+
 	wr_lock(&devices_lock);
 	devices = realloc(devices, sizeof(struct cgpu_info *) * (total_devices + new_devices + 2));
 	wr_unlock(&devices_lock);
+
 	if (hotplug_mode)
 		devices[total_devices + new_devices++] = cgpu;
 	else
@@ -6976,6 +7011,7 @@ static void hotplug_process()
 	wr_lock(&mining_thr_lock);
 	mining_thr = realloc(mining_thr, sizeof(thr) * (mining_threads + new_threads + 1));
 	wr_unlock(&mining_thr_lock);
+
 	if (!mining_thr)
 		quit(1, "Failed to hotplug realloc mining_thr");
 	for (i = 0; i < new_threads; i++) {