@@ -174,6 +174,9 @@ pthread_rwlock_t netacc_lock;
 static pthread_mutex_t lp_lock;
 static pthread_cond_t lp_cond;
 
+pthread_mutex_t restart_lock;
+pthread_cond_t restart_cond;
+
 double total_mhashes_done;
 static struct timeval total_tv_start, total_tv_end;
 
@@ -336,25 +339,35 @@ static FILE *sharelog_file = NULL;
 
 static void sharelog(const char*disposition, const struct work*work)
 {
+	char *target, *hash, *data;
+	struct cgpu_info *cgpu;
+	unsigned long int t;
+	struct pool *pool;
+	int thr_id, rv;
+	char s[1024];
+	size_t ret;
+
 	if (!sharelog_file)
 		return;
 
-	int thr_id = work->thr_id;
-	struct cgpu_info *cgpu = thr_info[thr_id].cgpu;
-	struct pool *pool = work->pool;
-	unsigned long int t = (unsigned long int)work->share_found_time;
-	char *target = bin2hex(work->target, sizeof(work->target));
+	thr_id = work->thr_id;
+	cgpu = thr_info[thr_id].cgpu;
+	pool = work->pool;
+	t = (unsigned long int)work->share_found_time;
+	target = bin2hex(work->target, sizeof(work->target));
 	if (unlikely(!target)) {
 		applog(LOG_ERR, "sharelog target OOM");
 		return;
 	}
-	char *hash = bin2hex(work->hash, sizeof(work->hash));
+
+	hash = bin2hex(work->hash, sizeof(work->hash));
 	if (unlikely(!hash)) {
 		free(target);
 		applog(LOG_ERR, "sharelog hash OOM");
 		return;
 	}
-	char *data = bin2hex(work->data, sizeof(work->data));
+
+	data = bin2hex(work->data, sizeof(work->data));
 	if (unlikely(!data)) {
 		free(target);
 		free(hash);
@@ -363,26 +376,22 @@ static void sharelog(const char*disposition, const struct work*work)
 	}
 
 	// timestamp,disposition,target,pool,dev,thr,sharehash,sharedata
-	char s[1024];
-	int rv;
 	rv = snprintf(s, sizeof(s), "%lu,%s,%s,%s,%s%u,%u,%s,%s\n", t, disposition, target, pool->rpc_url, cgpu->api->name, cgpu->device_id, thr_id, hash, data);
 	free(target);
 	free(hash);
 	free(data);
 	if (rv >= (int)(sizeof(s)))
 		s[sizeof(s) - 1] = '\0';
-	else
-	if (rv < 0) {
+	else if (rv < 0) {
 		applog(LOG_ERR, "sharelog printf error");
 		return;
 	}
 
-	size_t ret;
 	mutex_lock(&sharelog_lock);
 	ret = fwrite(s, rv, 1, sharelog_file);
 	fflush(sharelog_file);
 	mutex_unlock(&sharelog_lock);
-	if (1 != ret)
+	if (ret != 1)
 		applog(LOG_ERR, "sharelog fwrite error");
 }
 
@@ -445,6 +454,7 @@ struct pool *current_pool(void)
 char *set_int_range(const char *arg, int *i, int min, int max)
 {
 	char *err = opt_set_intval(arg, i);
+
 	if (err)
 		return err;
 
@@ -594,7 +604,7 @@ static char *set_userpass(const char *arg)
 static char *enable_debug(bool *flag)
 {
 	*flag = true;
-	/* Turn out verbose output, too. */
+	/* Turn on verbose output, too. */
 	opt_log_output = true;
 	return NULL;
 }
@@ -609,8 +619,8 @@ static char *set_schedtime(const char *arg, struct schedtime *st)
 	return NULL;
 }
 
-static char*
-set_sharelog(char *arg) {
+static char* set_sharelog(char *arg)
+{
 	char *r = "";
 	long int i = strtol(arg, &r, 10);
 
@@ -662,11 +672,11 @@ static void load_temp_cutoffs()
 			devices[device]->cutofftemp = val;
 		}
-	}
-	else {
-		for (i = device; i < total_devices; ++i)
+	} else {
+		for (i = device; i < total_devices; ++i) {
 			if (!devices[i]->cutofftemp)
 				devices[i]->cutofftemp = opt_cutofftemp;
+		}
 		return;
 	}
 	if (device <= 1) {
@@ -1009,8 +1019,8 @@ static int fileconf_load;
 static char *parse_config(json_t *config, bool fileconf)
 {
 	static char err_buf[200];
-	json_t *val;
 	struct opt_table *opt;
+	json_t *val;
 
 	if (fileconf && !fileconf_load)
 		fileconf_load = 1;
@@ -1025,6 +1035,7 @@ static char *parse_config(json_t *config, bool fileconf)
 		name = strdup(opt->names);
 		for (p = strtok(name, "|"); p; p = strtok(NULL, "|")) {
 			char *err = NULL;
+
 			/* Ignore short options. */
 			if (p[1] != '-')
 				continue;
@@ -1117,8 +1128,7 @@ static void load_default_config(void)
 	if (getenv("HOME") && *getenv("HOME")) {
 		strcpy(cnfbuf, getenv("HOME"));
 		strcat(cnfbuf, "/");
-	}
-	else
+	} else
 		strcpy(cnfbuf, "");
 	strcat(cnfbuf, ".cgminer/");
 #else
@@ -2471,7 +2481,28 @@ static void discard_stale(void)
 	subtract_queued(nonclone);
 }
 
-static bool queue_request(struct thr_info *thr, bool needed);
+bool queue_request(struct thr_info *thr, bool needed);
 
+/* A generic wait function for threads that poll that will wait a specified
+ * time tdiff waiting on the pthread conditional that is broadcast when a
+ * work restart is required. Returns the value of pthread_cond_timedwait
+ * which is zero if the condition was met or ETIMEDOUT if not.
+ */
+int restart_wait(struct timeval *tdiff)
+{
+	struct timeval now, then;
+	struct timespec abstime;
+	int rc;
+
+	gettimeofday(&now, NULL);
+	timeradd(&now, tdiff, &then);
+	abstime.tv_sec = then.tv_sec;
+	abstime.tv_nsec = then.tv_usec * 1000;
+	mutex_lock(&restart_lock);
+	rc = pthread_cond_timedwait(&restart_cond, &restart_lock, &abstime);
+	mutex_unlock(&restart_lock);
+	return rc;
+}
+
 static void restart_threads(void)
 {
@@ -2484,6 +2515,10 @@ static void restart_threads(void)
 
 	for (i = 0; i < mining_threads; i++)
 		work_restart[i].restart = 1;
+
+	mutex_lock(&restart_lock);
+	pthread_cond_broadcast(&restart_cond);
+	mutex_unlock(&restart_lock);
 }
 
 static void set_curblock(char *hexstr, unsigned char *hash)
@@ -3364,8 +3399,9 @@ static void hashmeter(int thr_id, struct timeval *diff,
 	if (want_per_device_stats) {
 		struct timeval now;
 		struct timeval elapsed;
+
 		gettimeofday(&now, NULL);
-		timeval_subtract(&elapsed, &now, &thr->cgpu->last_message_tv);
+		timersub(&now, &thr->cgpu->last_message_tv, &elapsed);
 		if (opt_log_interval <= elapsed.tv_sec) {
 			struct cgpu_info *cgpu = thr->cgpu;
 			char logline[255];
@@ -3385,7 +3421,7 @@ static void hashmeter(int thr_id, struct timeval *diff,
 	/* Totals are updated by all threads so can race without locking */
 	mutex_lock(&hash_lock);
 	gettimeofday(&temp_tv_end, NULL);
-	timeval_subtract(&total_diff, &temp_tv_end, &total_tv_end);
+	timersub(&temp_tv_end, &total_tv_end, &total_diff);
 
 	total_mhashes_done += local_mhashes;
 	local_mhashes_done += local_mhashes;
@@ -3399,7 +3435,7 @@ static void hashmeter(int thr_id, struct timeval *diff,
 		decay_time(&rolling, local_mhashes_done / local_secs);
 		global_hashrate = roundl(rolling) * 1000000;
 
-	timeval_subtract(&total_diff, &total_tv_end, &total_tv_start);
+	timersub(&total_tv_end, &total_tv_start, &total_diff);
 	total_secs = (double)total_diff.tv_sec +
 		((double)total_diff.tv_usec / 1000000.0);
 
@@ -3562,7 +3598,7 @@ static void control_tclear(bool *var)
 
 static bool queueing;
 
-static bool queue_request(struct thr_info *thr, bool needed)
+bool queue_request(struct thr_info *thr, bool needed)
 {
 	struct workio_cmd *wc;
 	struct timeval now;
@@ -3953,7 +3989,7 @@ void *miner_thread(void *userdata)
 	/* Try to cycle approximately 5 times before each log update */
 	const long cycle = opt_log_interval / 5 ? : 1;
 	struct timeval tv_start, tv_end, tv_workstart, tv_lastupdate;
-	struct timeval diff, sdiff, wdiff;
+	struct timeval diff, sdiff, wdiff = {0, 0};
 	uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
 	unsigned long long hashes_done = 0;
 	unsigned long long hashes;
@@ -4070,7 +4106,7 @@ void *miner_thread(void *userdata)
 		cgpu->max_hashes = hashes;
 
 		gettimeofday(&tv_end, NULL);
-		timeval_subtract(&diff, &tv_end, &tv_start);
+		timersub(&tv_end, &tv_start, &diff);
 		sdiff.tv_sec += diff.tv_sec;
 		sdiff.tv_usec += diff.tv_usec;
 		if (sdiff.tv_usec > 1000000) {
@@ -4078,7 +4114,7 @@ void *miner_thread(void *userdata)
 			sdiff.tv_usec -= 1000000;
 		}
 
-		timeval_subtract(&wdiff, &tv_end, &tv_workstart);
+		timersub(&tv_end, &tv_workstart, &wdiff);
 		if (!requested) {
 			if (wdiff.tv_sec > request_interval || work->blk.nonce > request_nonce) {
 				thread_reportout(mythr);
@@ -4114,7 +4150,7 @@ void *miner_thread(void *userdata)
 			max_nonce = max_nonce * 0x400 / (((cycle * 1000000) + sdiff.tv_usec) / (cycle * 1000000 / 0x400));
 		}
 
-		timeval_subtract(&diff, &tv_end, &tv_lastupdate);
+		timersub(&tv_end, &tv_lastupdate, &diff);
 		if (diff.tv_sec >= opt_log_interval) {
 			hashmeter(thr_id, &diff, hashes_done);
 			hashes_done = 0;
@@ -4534,19 +4570,15 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 		else
 			cgpu->low_count = 0;
 
-		uint64_t hashtime = now.tv_sec - thr->last.tv_sec;
-		bool dev_time_well = hashtime < WATCHDOG_SICK_TIME;
-		bool dev_time_sick = hashtime > WATCHDOG_SICK_TIME;
-		bool dev_time_dead = hashtime > WATCHDOG_DEAD_TIME;
-		bool dev_count_well = cgpu->low_count < WATCHDOG_SICK_COUNT;
-		bool dev_count_sick = cgpu->low_count > WATCHDOG_SICK_COUNT;
-		bool dev_count_dead = cgpu->low_count > WATCHDOG_DEAD_COUNT;
+		bool dev_count_well = (cgpu->low_count < WATCHDOG_SICK_COUNT);
+		bool dev_count_sick = (cgpu->low_count > WATCHDOG_SICK_COUNT);
+		bool dev_count_dead = (cgpu->low_count > WATCHDOG_DEAD_COUNT);
 
-		if (cgpu->status != LIFE_WELL && dev_time_well && dev_count_well) {
+		if (gpus[gpu].status != LIFE_WELL && (now.tv_sec - thr->last.tv_sec < WATCHDOG_SICK_TIME) && dev_count_well) {
 			applog(LOG_ERR, "%s: Recovered, declaring WELL!", dev_str);
 			cgpu->status = LIFE_WELL;
 			cgpu->device_last_well = time(NULL);
-		} else if (cgpu->status == LIFE_WELL && (dev_time_sick || dev_count_sick)) {
+		} else if (cgpu->status == LIFE_WELL && ((now.tv_sec - thr->last.tv_sec > WATCHDOG_SICK_TIME) || dev_count_sick)) {
 			thr->rolling = cgpu->rolling = 0;
 			cgpu->status = LIFE_SICK;
 			applog(LOG_ERR, "%s: Idle for more than 60 seconds, declaring SICK!", dev_str);
@@ -4565,7 +4597,7 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 				applog(LOG_ERR, "%s: Attempting to restart", dev_str);
 				reinit_device(cgpu);
 			}
-		} else if (cgpu->status == LIFE_SICK && (dev_time_dead || dev_count_dead)) {
+		} else if (cgpu->status == LIFE_SICK && ((now.tv_sec - thr->last.tv_sec > WATCHDOG_DEAD_TIME) || dev_count_dead)) {
 			cgpu->status = LIFE_DEAD;
 			applog(LOG_ERR, "%s: Not responded for more than 10 minutes, declaring DEAD!", dev_str);
 			gettimeofday(&thr->sick, NULL);
@@ -4605,7 +4637,7 @@ static void print_summary(void)
 	int hours, mins, secs, i;
 	double utility, efficiency = 0.0;
 
-	timeval_subtract(&diff, &total_tv_end, &total_tv_start);
+	timersub(&total_tv_end, &total_tv_start, &diff);
 	hours = diff.tv_sec / 3600;
 	mins = (diff.tv_sec % 3600) / 60;
 	secs = diff.tv_sec % 60;
@@ -4816,71 +4848,72 @@ out:
 #endif
 
 #if defined(unix)
 static void fork_monitor()
 {
 	// Make a pipe: [readFD, writeFD]
 	int pfd[2];
 	int r = pipe(pfd);
-	if (r<0) {
+
+	if (r < 0) {
 		perror("pipe - failed to create pipe for --monitor");
 		exit(1);
 	}
 
 	// Make stderr write end of pipe
 	fflush(stderr);
 	r = dup2(pfd[1], 2);
-	if (r<0) {
+	if (r < 0) {
 		perror("dup2 - failed to alias stderr to write end of pipe for --monitor");
 		exit(1);
 	}
 	r = close(pfd[1]);
-	if (r<0) {
+	if (r < 0) {
 		perror("close - failed to close write end of pipe for --monitor");
 		exit(1);
 	}
 
 	// Don't allow a dying monitor to kill the main process
 	sighandler_t sr0 = signal(SIGPIPE, SIG_IGN);
 	sighandler_t sr1 = signal(SIGPIPE, SIG_IGN);
-	if (SIG_ERR==sr0 || SIG_ERR==sr1) {
+	if (SIG_ERR == sr0 || SIG_ERR == sr1) {
 		perror("signal - failed to edit signal mask for --monitor");
 		exit(1);
 	}
 
 	// Fork a child process
 	forkpid = fork();
-	if (forkpid<0) {
+	if (forkpid < 0) {
 		perror("fork - failed to fork child process for --monitor");
 		exit(1);
 	}
 
 	// Child: launch monitor command
-	if (0==forkpid) {
+	if (0 == forkpid) {
 		// Make stdin read end of pipe
 		r = dup2(pfd[0], 0);
-		if (r<0) {
+		if (r < 0) {
 			perror("dup2 - in child, failed to alias read end of pipe to stdin for --monitor");
 			exit(1);
 		}
 		close(pfd[0]);
-		if (r<0) {
+		if (r < 0) {
 			perror("close - in child, failed to close read end of pipe for --monitor");
 			exit(1);
 		}
 
 		// Launch user specified command
 		execl("/bin/bash", "/bin/bash", "-c", opt_stderr_cmd, (char*)NULL);
 		perror("execl - in child failed to exec user specified command for --monitor");
 		exit(1);
 	}
 
 	// Parent: clean up unused fds and bail
 	r = close(pfd[0]);
-	if (r<0) {
+	if (r < 0) {
 		perror("close - failed to close read end of pipe for --monitor");
 		exit(1);
 	}
 }
 #endif // defined(unix)
 
 #ifdef HAVE_CURSES
@@ -4962,8 +4995,7 @@ bool add_cgpu(struct cgpu_info*cgpu)
 	HASH_FIND_STR(devids, cgpu->api->name, d);
 	if (d)
 		cgpu->device_id = ++d->lastid;
-	else
-	{
+	else {
 		d = malloc(sizeof(*d));
 		memcpy(d->name, cgpu->api->name, sizeof(d->name));
 		cgpu->device_id = d->lastid = 0;
@@ -5012,6 +5044,10 @@ int main(int argc, char *argv[])
 	if (unlikely(pthread_cond_init(&lp_cond, NULL)))
 		quit(1, "Failed to pthread_cond_init lp_cond");
 
+	mutex_init(&restart_lock);
+	if (unlikely(pthread_cond_init(&restart_cond, NULL)))
+		quit(1, "Failed to pthread_cond_init restart_cond");
+
 	sprintf(packagename, "%s %s", PACKAGE, VERSION);
 
 #ifdef WANT_CPUMINE
@@ -5122,14 +5158,16 @@ int main(int argc, char *argv[])
 		opt_log_output = true;
 
 #ifdef WANT_CPUMINE
-	if (0<=opt_bench_algo) {
+	if (0 <= opt_bench_algo) {
 		double rate = bench_algo_stage3(opt_bench_algo);
-		if (!skip_to_bench) {
+
+		if (!skip_to_bench)
 			printf("%.5f (%s)\n", rate, algo_names[opt_bench_algo]);
-		} else {
+		else {
 			// Write result to shared memory for parent
 #if defined(WIN32)
 			char unique_name[64];
+
 			if (GetEnvironmentVariable("CGMINER_SHARED_MEM", unique_name, 32)) {
 				HANDLE map_handle = CreateFileMapping(
 					INVALID_HANDLE_VALUE, // use paging file
@@ -5139,7 +5177,7 @@ int main(int argc, char *argv[])
 					4096, // size: low 32-bits
 					unique_name // name of map object
 				);
-				if (NULL!=map_handle) {
+				if (NULL != map_handle) {
 					void *shared_mem = MapViewOfFile(
 						map_handle, // object to map view of
 						FILE_MAP_WRITE, // read/write access
@@ -5147,13 +5185,13 @@ int main(int argc, char *argv[])
 						0, // low offset: beginning
 						0 // default: map entire file
 					);
-					if (NULL!=shared_mem)
+					if (NULL != shared_mem)
 						CopyMemory(shared_mem, &rate, sizeof(rate));
 					(void)UnmapViewOfFile(shared_mem);
 				}
 				(void)CloseHandle(map_handle);
 			}
 #endif
 		}
 		exit(0);
 	}
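
Not part of the patch: a minimal standalone sketch of the timed-wait/broadcast pattern that the new restart_wait()/restart_threads() code above relies on, assuming only POSIX threads and the glibc/BSD timeradd() macro from <sys/time.h>. The poller thread, main() scaffolding, file name and the plain pthread_mutex_* calls (used here in place of cgminer's mutex_lock()/mutex_unlock() wrappers) are illustrative inventions, not cgminer code.

/* restart_demo.c (hypothetical name) - build with: cc -pthread restart_demo.c
 * A polling thread sleeps on a condition variable with
 * pthread_cond_timedwait() so a "work restart" broadcast can wake it
 * early instead of letting it sit out the full interval. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t restart_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t restart_cond = PTHREAD_COND_INITIALIZER;

/* Mirrors the patch's restart_wait(): wait up to tdiff for a restart
 * broadcast, returning 0 if woken early or ETIMEDOUT if the interval
 * elapsed. */
static int restart_wait(struct timeval *tdiff)
{
	struct timeval now, then;
	struct timespec abstime;
	int rc;

	gettimeofday(&now, NULL);
	timeradd(&now, tdiff, &then);
	abstime.tv_sec = then.tv_sec;
	abstime.tv_nsec = then.tv_usec * 1000;

	pthread_mutex_lock(&restart_lock);
	rc = pthread_cond_timedwait(&restart_cond, &restart_lock, &abstime);
	pthread_mutex_unlock(&restart_lock);

	return rc;
}

/* Stand-in for a miner/watch thread that would otherwise sleep blindly. */
static void *poller(void *arg)
{
	struct timeval tdiff = { .tv_sec = 5, .tv_usec = 0 };

	(void)arg;
	if (restart_wait(&tdiff) == ETIMEDOUT)
		puts("poller: timed out, no restart requested");
	else
		puts("poller: woken early by restart broadcast");
	return NULL;
}

int main(void)
{
	pthread_t thr;

	pthread_create(&thr, NULL, poller, NULL);
	sleep(1);

	/* What restart_threads() does after flagging work_restart[]. */
	pthread_mutex_lock(&restart_lock);
	pthread_cond_broadcast(&restart_cond);
	pthread_mutex_unlock(&restart_lock);

	pthread_join(thr, NULL);
	return 0;
}

With the broadcast issued after about one second, the poller returns well before its five-second timeout, which is the behaviour the patch wants from longpoll/restart handling.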