Mirror of https://github.com/GOSTSec/sgminer, synced 2025-08-26 05:41:55 +00:00

Commit 75519cb04b: Merge branch 'master' of git@github.com:pshep/cgminer.git

NEWS: 81 lines changed

@@ -1,3 +1,84 @@
+Version 2.4.4 - July 1, 2012
+
+- Fix builds on non gnu platforms.
+- api.c ensure old mode is always available when not using --api-groups + quit()
+on param errors
+- Implement rudimentary X-Mining-Hashrate support.
+- Detect large swings in temperature when below the target temperature range and
+change fan by amounts dependant on the value of tdiff.
+- Adjust the fanspeed by the magnitude of the temperature difference when in the
+optimal range.
+- Revert "Restarting cgminer from within after ADL has been corrupted only leads
+to a crash. Display a warning only and disable fanspeed monitoring."
+- api.c fix json already closed
+- implement and document API option --api-groups
+- Put upper bounds to under 2 hours that work can be rolled into the future for
+bitcoind will deem it invalid beyond that.
+- define API option --api-groups
+- api.c allow unwell devices to be enabled so they can be cured
+- miner.php - fix/enable autorefresh for custom pages
+- miner.php allow custom summary pages - new 'Mobile' summary
+- Work around pools that advertise very low expire= time inappropriately as this
+leads to many false positives for stale shares detected.
+- Only show ztex board count if any exist.
+- There is no need for work to be a union in struct workio_cmd
+- fpgautils.c include a debug message for all unknown open errors
+- Don't keep rolling work right up to the expire= cut off. Use 2/3 of the time
+between the scantime and the expiry as cutoff for reusing work.
+- Log a specific error when serial opens fail due to lack of user permissions
+- Increase GPU timing resolution to microsecond and add sanity check to ensure
+times are positive.
+- Opencl code may start executing before the clfinish order is given to it so
+get the start timing used for dynamic intensity from before the kernel is
+queued.
+- fpgautils.c - set BAUD rate according to termio spec
+- fpgautils.c - linux ordering back to the correct way
+- miner.php remove unneeded '.'s
+- miner.php add auto refresh options
+- miner.php add 'restart' next to 'quit'
+- miner.php make fontname/size configurable with myminer.php
+- Make the pools array a dynamically allocated array to allow unlimited pools to
+be added.
+- Make the devices array a dynamically allocated array of pointers to allow
+unlimited devices.
+- Dynamic intensity for GPUs should be calculated on a per device basis. Clean
+up the code to only calculate it if required as well.
+- Use a queueing bool set under control_lock to prevent multiple calls to
+queue_request racing.
+- Use the work clone flag to determine if we should subtract it from the total
+queued variable and provide a subtract queued function to prevent looping over
+locked code.
+- Don't decrement staged extras count from longpoll work.
+- Count longpoll's contribution to the queue.
+- Increase queued count before pushing message.
+- Test we have enough work queued for pools with and without rolltime
+capability.
+- As work is sorted by age, we can discard the oldest work at regular intervals
+to keep only 1 of the newest work items per mining thread.
+- Roll work again after duplicating it to prevent duplicates on return to the
+clone function.
+- Abstract out work cloning and clone $mining_threads copies whenever a rollable
+work item is found and return a clone instead.
+- api.c display Pool Av in json
+- Take into account average getwork delay as a marker of pool communications
+when considering work stale.
+- Work out a rolling average getwork delay stored in pool_stats.
+- Getwork delay in stats should include retries for each getwork call.
+- Walk through the thread list instead of searching for them when disabling
+threads for dynamic mode.
+- Extend nrolltime to support the expiry= parameter. Do this by turning the
+rolltime bool into an integer set to the expiry time. If the pool supports
+rolltime but not expiry= then set the expiry time to the standard scantime.
+- When disabling fanspeed monitoring on adl failure, remove any twin GPU
+association. This could have been leading to hangs on machines with dual GPU
+cards when ADL failed.
+- modminer: Don't delay 2nd+ FPGAs during work restart
+- Disable OpenCL code when not available.
+- Fix openwrt crashing on regeneratehash() by making check_solve a noop.
+- FPGA - allow device detect override without an open failure
+- Fix sign warning.
+
+
 Version 2.4.3 - June 14, 2012
 
 - can_roll and should_roll should have no bearing on the cycle period within the
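
Two of the entries above, making the pools array and the devices array dynamically allocated, describe the same technique: keep a heap-allocated array of pointers and grow it with realloc() as entries are added. The sketch below is an illustration of that pattern only, not code from this commit; the struct fields and the add_pool_entry() helper are hypothetical.

    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in for the real struct pool; illustration only. */
    struct pool { char *rpc_url; };

    static struct pool **pools;   /* dynamically allocated array of pointers */
    static int total_pools;

    /* Grow the array by one slot and append a new pool.
     * Returns the new entry, or NULL on allocation failure. */
    static struct pool *add_pool_entry(const char *rpc_url)
    {
        struct pool **tmp;
        struct pool *p = calloc(1, sizeof(*p));

        if (!p)
            return NULL;
        tmp = realloc(pools, sizeof(*pools) * (total_pools + 1));
        if (!tmp) {
            free(p);
            return NULL;
        }
        pools = tmp;
        p->rpc_url = strdup(rpc_url);
        pools[total_pools++] = p;
        return p;
    }

The devices array mentioned in the neighbouring entry follows the same pointer-array shape, which is what removes the old compile-time limits.
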
cgminer.c: 266 lines changed

@@ -174,6 +174,9 @@ pthread_rwlock_t netacc_lock;
 static pthread_mutex_t lp_lock;
 static pthread_cond_t lp_cond;

+pthread_mutex_t restart_lock;
+pthread_cond_t restart_cond;
+
 double total_mhashes_done;
 static struct timeval total_tv_start, total_tv_end;

@@ -336,25 +339,35 @@ static FILE *sharelog_file = NULL;

 static void sharelog(const char*disposition, const struct work*work)
 {
+	char *target, *hash, *data;
+	struct cgpu_info *cgpu;
+	unsigned long int t;
+	struct pool *pool;
+	int thr_id, rv;
+	char s[1024];
+	size_t ret;
+
 	if (!sharelog_file)
 		return;

-	int thr_id = work->thr_id;
-	struct cgpu_info *cgpu = thr_info[thr_id].cgpu;
-	struct pool *pool = work->pool;
-	unsigned long int t = (unsigned long int)work->share_found_time;
-	char *target = bin2hex(work->target, sizeof(work->target));
+	thr_id = work->thr_id;
+	cgpu = thr_info[thr_id].cgpu;
+	pool = work->pool;
+	t = (unsigned long int)work->share_found_time;
+	target = bin2hex(work->target, sizeof(work->target));
 	if (unlikely(!target)) {
 		applog(LOG_ERR, "sharelog target OOM");
 		return;
 	}
-	char *hash = bin2hex(work->hash, sizeof(work->hash));
+
+	hash = bin2hex(work->hash, sizeof(work->hash));
 	if (unlikely(!hash)) {
 		free(target);
 		applog(LOG_ERR, "sharelog hash OOM");
 		return;
 	}
-	char *data = bin2hex(work->data, sizeof(work->data));
+
+	data = bin2hex(work->data, sizeof(work->data));
 	if (unlikely(!data)) {
 		free(target);
 		free(hash);
@@ -363,26 +376,22 @@ static void sharelog(const char*disposition, const struct work*work)
 	}

 	// timestamp,disposition,target,pool,dev,thr,sharehash,sharedata
-	char s[1024];
-	int rv;
 	rv = snprintf(s, sizeof(s), "%lu,%s,%s,%s,%s%u,%u,%s,%s\n", t, disposition, target, pool->rpc_url, cgpu->api->name, cgpu->device_id, thr_id, hash, data);
 	free(target);
 	free(hash);
 	free(data);
 	if (rv >= (int)(sizeof(s)))
 		s[sizeof(s) - 1] = '\0';
-	else
-	if (rv < 0) {
+	else if (rv < 0) {
 		applog(LOG_ERR, "sharelog printf error");
 		return;
 	}

-	size_t ret;
 	mutex_lock(&sharelog_lock);
 	ret = fwrite(s, rv, 1, sharelog_file);
 	fflush(sharelog_file);
 	mutex_unlock(&sharelog_lock);
-	if (1 != ret)
+	if (ret != 1)
 		applog(LOG_ERR, "sharelog fwrite error");
 }

@@ -445,6 +454,7 @@ struct pool *current_pool(void)
 char *set_int_range(const char *arg, int *i, int min, int max)
 {
 	char *err = opt_set_intval(arg, i);
+
 	if (err)
 		return err;

@@ -594,7 +604,7 @@ static char *set_userpass(const char *arg)
 static char *enable_debug(bool *flag)
 {
 	*flag = true;
-	/* Turn out verbose output, too. */
+	/* Turn on verbose output, too. */
 	opt_log_output = true;
 	return NULL;
 }
@@ -609,8 +619,8 @@ static char *set_schedtime(const char *arg, struct schedtime *st)
 	return NULL;
 }

-static char*
-set_sharelog(char *arg) {
+static char* set_sharelog(char *arg)
+{
 	char *r = "";
 	long int i = strtol(arg, &r, 10);

@@ -662,11 +672,11 @@ static void load_temp_cutoffs()

 			devices[device]->cutofftemp = val;
 		}
-	}
-	else {
-		for (i = device; i < total_devices; ++i)
+	} else {
+		for (i = device; i < total_devices; ++i) {
 			if (!devices[i]->cutofftemp)
 				devices[i]->cutofftemp = opt_cutofftemp;
+		}
 		return;
 	}
 	if (device <= 1) {
@@ -1009,8 +1019,8 @@ static int fileconf_load;
 static char *parse_config(json_t *config, bool fileconf)
 {
 	static char err_buf[200];
-	json_t *val;
 	struct opt_table *opt;
+	json_t *val;

 	if (fileconf && !fileconf_load)
 		fileconf_load = 1;
@@ -1025,6 +1035,7 @@ static char *parse_config(json_t *config, bool fileconf)
 		name = strdup(opt->names);
 		for (p = strtok(name, "|"); p; p = strtok(NULL, "|")) {
 			char *err = NULL;
+
 			/* Ignore short options. */
 			if (p[1] != '-')
 				continue;
@@ -1117,8 +1128,7 @@ static void load_default_config(void)
 	if (getenv("HOME") && *getenv("HOME")) {
 		strcpy(cnfbuf, getenv("HOME"));
 		strcat(cnfbuf, "/");
-	}
-	else
+	} else
 		strcpy(cnfbuf, "");
 	strcat(cnfbuf, ".cgminer/");
 #else
@@ -2471,8 +2481,29 @@ static void discard_stale(void)
 	subtract_queued(nonclone);
 }

-static bool queue_request(struct thr_info *thr, bool needed);
+bool queue_request(struct thr_info *thr, bool needed);

+/* A generic wait function for threads that poll that will wait a specified
+ * time tdiff waiting on the pthread conditional that is broadcast when a
+ * work restart is required. Returns the value of pthread_cond_timedwait
+ * which is zero if the condition was met or ETIMEDOUT if not.
+ */
+int restart_wait(struct timeval *tdiff)
+{
+	struct timeval now, then;
+	struct timespec abstime;
+	int rc;
+
+	gettimeofday(&now, NULL);
+	timeradd(&now, tdiff, &then);
+	abstime.tv_sec = then.tv_sec;
+	abstime.tv_nsec = then.tv_usec * 1000;
+	mutex_lock(&restart_lock);
+	rc = pthread_cond_timedwait(&restart_cond, &restart_lock, &abstime);
+	mutex_unlock(&restart_lock);
+	return rc;
+}
+
 static void restart_threads(void)
 {
 	int i;
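
The restart_wait() helper added above is the waiting half of the broadcast that restart_threads() gains a few hunks below: it sleeps on restart_cond for at most tdiff and returns 0 if a work restart was signalled, or ETIMEDOUT if the interval simply elapsed. As a hedged usage sketch (poll_device() and the 100 ms interval are made up for illustration, not part of this commit), a driver that used to usleep() a fixed interval between polls can wait on the condition instead and wake immediately when work is restarted:

    #include "miner.h"  /* for struct thr_info and restart_wait() */

    /* Sketch only: poll_device() is a hypothetical driver poll routine. */
    static void poll_loop(struct thr_info *thr)
    {
        struct timeval interval = { .tv_sec = 0, .tv_usec = 100 * 1000 };

        for (;;) {
            if (!restart_wait(&interval))
                break;          /* restart broadcast: stop polling this work */
            poll_device(thr);   /* timed out: do a normal poll pass */
        }
    }
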
@@ -2484,6 +2515,10 @@ static void restart_threads(void)

 	for (i = 0; i < mining_threads; i++)
 		work_restart[i].restart = 1;
+
+	mutex_lock(&restart_lock);
+	pthread_cond_broadcast(&restart_cond);
+	mutex_unlock(&restart_lock);
 }

 static void set_curblock(char *hexstr, unsigned char *hash)
@@ -3364,8 +3399,9 @@ static void hashmeter(int thr_id, struct timeval *diff,
 	if (want_per_device_stats) {
 		struct timeval now;
 		struct timeval elapsed;
+
 		gettimeofday(&now, NULL);
-		timeval_subtract(&elapsed, &now, &thr->cgpu->last_message_tv);
+		timersub(&now, &thr->cgpu->last_message_tv, &elapsed);
 		if (opt_log_interval <= elapsed.tv_sec) {
 			struct cgpu_info *cgpu = thr->cgpu;
 			char logline[255];
@@ -3385,7 +3421,7 @@ static void hashmeter(int thr_id, struct timeval *diff,
 	/* Totals are updated by all threads so can race without locking */
 	mutex_lock(&hash_lock);
 	gettimeofday(&temp_tv_end, NULL);
-	timeval_subtract(&total_diff, &temp_tv_end, &total_tv_end);
+	timersub(&temp_tv_end, &total_tv_end, &total_diff);

 	total_mhashes_done += local_mhashes;
 	local_mhashes_done += local_mhashes;
@@ -3399,7 +3435,7 @@ static void hashmeter(int thr_id, struct timeval *diff,
 	decay_time(&rolling, local_mhashes_done / local_secs);
 	global_hashrate = roundl(rolling) * 1000000;

-	timeval_subtract(&total_diff, &total_tv_end, &total_tv_start);
+	timersub(&total_tv_end, &total_tv_start, &total_diff);
 	total_secs = (double)total_diff.tv_sec +
 		((double)total_diff.tv_usec / 1000000.0);

@@ -3562,7 +3598,7 @@ static void control_tclear(bool *var)

 static bool queueing;

-static bool queue_request(struct thr_info *thr, bool needed)
+bool queue_request(struct thr_info *thr, bool needed)
 {
 	struct workio_cmd *wc;
 	struct timeval now;
@@ -3953,7 +3989,7 @@ void *miner_thread(void *userdata)
 	/* Try to cycle approximately 5 times before each log update */
 	const long cycle = opt_log_interval / 5 ? : 1;
 	struct timeval tv_start, tv_end, tv_workstart, tv_lastupdate;
-	struct timeval diff, sdiff, wdiff;
+	struct timeval diff, sdiff, wdiff = {0, 0};
 	uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
 	unsigned long long hashes_done = 0;
 	unsigned long long hashes;
@@ -4070,7 +4106,7 @@ void *miner_thread(void *userdata)
 			cgpu->max_hashes = hashes;

 		gettimeofday(&tv_end, NULL);
-		timeval_subtract(&diff, &tv_end, &tv_start);
+		timersub(&tv_end, &tv_start, &diff);
 		sdiff.tv_sec += diff.tv_sec;
 		sdiff.tv_usec += diff.tv_usec;
 		if (sdiff.tv_usec > 1000000) {
@@ -4078,7 +4114,7 @@ void *miner_thread(void *userdata)
 			sdiff.tv_usec -= 1000000;
 		}

-		timeval_subtract(&wdiff, &tv_end, &tv_workstart);
+		timersub(&tv_end, &tv_workstart, &wdiff);
 		if (!requested) {
 			if (wdiff.tv_sec > request_interval || work->blk.nonce > request_nonce) {
 				thread_reportout(mythr);
@@ -4114,7 +4150,7 @@ void *miner_thread(void *userdata)
 			max_nonce = max_nonce * 0x400 / (((cycle * 1000000) + sdiff.tv_usec) / (cycle * 1000000 / 0x400));
 		}

-		timeval_subtract(&diff, &tv_end, &tv_lastupdate);
+		timersub(&tv_end, &tv_lastupdate, &diff);
 		if (diff.tv_sec >= opt_log_interval) {
 			hashmeter(thr_id, &diff, hashes_done);
 			hashes_done = 0;
@@ -4534,19 +4570,15 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 		else
 			cgpu->low_count = 0;

-		uint64_t hashtime = now.tv_sec - thr->last.tv_sec;
-		bool dev_time_well = hashtime < WATCHDOG_SICK_TIME;
-		bool dev_time_sick = hashtime > WATCHDOG_SICK_TIME;
-		bool dev_time_dead = hashtime > WATCHDOG_DEAD_TIME;
-		bool dev_count_well = cgpu->low_count < WATCHDOG_SICK_COUNT;
-		bool dev_count_sick = cgpu->low_count > WATCHDOG_SICK_COUNT;
-		bool dev_count_dead = cgpu->low_count > WATCHDOG_DEAD_COUNT;
+		bool dev_count_well = (cgpu->low_count < WATCHDOG_SICK_COUNT);
+		bool dev_count_sick = (cgpu->low_count > WATCHDOG_SICK_COUNT);
+		bool dev_count_dead = (cgpu->low_count > WATCHDOG_DEAD_COUNT);

-		if (cgpu->status != LIFE_WELL && dev_time_well && dev_count_well) {
+		if (gpus[gpu].status != LIFE_WELL && (now.tv_sec - thr->last.tv_sec < WATCHDOG_SICK_TIME) && dev_count_well) {
 			applog(LOG_ERR, "%s: Recovered, declaring WELL!", dev_str);
 			cgpu->status = LIFE_WELL;
 			cgpu->device_last_well = time(NULL);
-		} else if (cgpu->status == LIFE_WELL && (dev_time_sick || dev_count_sick)) {
+		} else if (cgpu->status == LIFE_WELL && ((now.tv_sec - thr->last.tv_sec > WATCHDOG_SICK_TIME) || dev_count_sick)) {
 			thr->rolling = cgpu->rolling = 0;
 			cgpu->status = LIFE_SICK;
 			applog(LOG_ERR, "%s: Idle for more than 60 seconds, declaring SICK!", dev_str);
@@ -4565,7 +4597,7 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 				applog(LOG_ERR, "%s: Attempting to restart", dev_str);
 				reinit_device(cgpu);
 			}
-		} else if (cgpu->status == LIFE_SICK && (dev_time_dead || dev_count_dead)) {
+		} else if (cgpu->status == LIFE_SICK && ((now.tv_sec - thr->last.tv_sec > WATCHDOG_DEAD_TIME) || dev_count_dead)) {
 			cgpu->status = LIFE_DEAD;
 			applog(LOG_ERR, "%s: Not responded for more than 10 minutes, declaring DEAD!", dev_str);
 			gettimeofday(&thr->sick, NULL);
@@ -4605,7 +4637,7 @@ static void print_summary(void)
 	int hours, mins, secs, i;
 	double utility, efficiency = 0.0;

-	timeval_subtract(&diff, &total_tv_end, &total_tv_start);
+	timersub(&total_tv_end, &total_tv_start, &diff);
 	hours = diff.tv_sec / 3600;
 	mins = (diff.tv_sec % 3600) / 60;
 	secs = diff.tv_sec % 60;
@@ -4816,71 +4848,72 @@ out:
 #endif

 #if defined(unix)
 static void fork_monitor()
 {
 	// Make a pipe: [readFD, writeFD]
 	int pfd[2];
 	int r = pipe(pfd);
-	if (r<0) {
-		perror("pipe - failed to create pipe for --monitor");
-		exit(1);
-	}
-
-	// Make stderr write end of pipe
-	fflush(stderr);
-	r = dup2(pfd[1], 2);
-	if (r<0) {
-		perror("dup2 - failed to alias stderr to write end of pipe for --monitor");
-		exit(1);
-	}
-	r = close(pfd[1]);
-	if (r<0) {
-		perror("close - failed to close write end of pipe for --monitor");
-		exit(1);
-	}
-
-	// Don't allow a dying monitor to kill the main process
-	sighandler_t sr0 = signal(SIGPIPE, SIG_IGN);
-	sighandler_t sr1 = signal(SIGPIPE, SIG_IGN);
-	if (SIG_ERR==sr0 || SIG_ERR==sr1) {
-		perror("signal - failed to edit signal mask for --monitor");
-		exit(1);
-	}
-
-	// Fork a child process
-	forkpid = fork();
-	if (forkpid<0) {
-		perror("fork - failed to fork child process for --monitor");
-		exit(1);
-	}
-
-	// Child: launch monitor command
-	if (0==forkpid) {
-		// Make stdin read end of pipe
-		r = dup2(pfd[0], 0);
-		if (r<0) {
-			perror("dup2 - in child, failed to alias read end of pipe to stdin for --monitor");
-			exit(1);
-		}
-		close(pfd[0]);
-		if (r<0) {
-			perror("close - in child, failed to close read end of pipe for --monitor");
-			exit(1);
-		}
-
-		// Launch user specified command
-		execl("/bin/bash", "/bin/bash", "-c", opt_stderr_cmd, (char*)NULL);
-		perror("execl - in child failed to exec user specified command for --monitor");
-		exit(1);
-	}
-
-	// Parent: clean up unused fds and bail
-	r = close(pfd[0]);
-	if (r<0) {
-		perror("close - failed to close read end of pipe for --monitor");
-		exit(1);
+	if (r < 0) {
+		perror("pipe - failed to create pipe for --monitor");
+		exit(1);
 	}
+
+	// Make stderr write end of pipe
+	fflush(stderr);
+	r = dup2(pfd[1], 2);
+	if (r < 0) {
+		perror("dup2 - failed to alias stderr to write end of pipe for --monitor");
+		exit(1);
+	}
+	r = close(pfd[1]);
+	if (r < 0) {
+		perror("close - failed to close write end of pipe for --monitor");
+		exit(1);
+	}
+
+	// Don't allow a dying monitor to kill the main process
+	sighandler_t sr0 = signal(SIGPIPE, SIG_IGN);
+	sighandler_t sr1 = signal(SIGPIPE, SIG_IGN);
+	if (SIG_ERR == sr0 || SIG_ERR == sr1) {
+		perror("signal - failed to edit signal mask for --monitor");
+		exit(1);
+	}
+
+	// Fork a child process
+	forkpid = fork();
+	if (forkpid < 0) {
+		perror("fork - failed to fork child process for --monitor");
+		exit(1);
+	}
+
+	// Child: launch monitor command
+	if (0 == forkpid) {
+		// Make stdin read end of pipe
+		r = dup2(pfd[0], 0);
+		if (r < 0) {
+			perror("dup2 - in child, failed to alias read end of pipe to stdin for --monitor");
+			exit(1);
+		}
+		close(pfd[0]);
+		if (r < 0) {
+			perror("close - in child, failed to close read end of pipe for --monitor");
+			exit(1);
+		}
+
+		// Launch user specified command
+		execl("/bin/bash", "/bin/bash", "-c", opt_stderr_cmd, (char*)NULL);
+		perror("execl - in child failed to exec user specified command for --monitor");
+		exit(1);
+	}
+
+	// Parent: clean up unused fds and bail
+	r = close(pfd[0]);
+	if (r < 0) {
+		perror("close - failed to close read end of pipe for --monitor");
+		exit(1);
+	}
+}
 #endif // defined(unix)

 #ifdef HAVE_CURSES
@@ -4962,8 +4995,7 @@ bool add_cgpu(struct cgpu_info*cgpu)
 	HASH_FIND_STR(devids, cgpu->api->name, d);
 	if (d)
 		cgpu->device_id = ++d->lastid;
-	else
-	{
+	else {
 		d = malloc(sizeof(*d));
 		memcpy(d->name, cgpu->api->name, sizeof(d->name));
 		cgpu->device_id = d->lastid = 0;
@@ -5012,6 +5044,10 @@ int main(int argc, char *argv[])
 	if (unlikely(pthread_cond_init(&lp_cond, NULL)))
 		quit(1, "Failed to pthread_cond_init lp_cond");

+	mutex_init(&restart_lock);
+	if (unlikely(pthread_cond_init(&restart_cond, NULL)))
+		quit(1, "Failed to pthread_cond_init restart_cond");
+
 	sprintf(packagename, "%s %s", PACKAGE, VERSION);

 #ifdef WANT_CPUMINE
@@ -5122,14 +5158,16 @@ int main(int argc, char *argv[])
 		opt_log_output = true;

 #ifdef WANT_CPUMINE
-	if (0<=opt_bench_algo) {
+	if (0 <= opt_bench_algo) {
 		double rate = bench_algo_stage3(opt_bench_algo);
-		if (!skip_to_bench) {
+
+		if (!skip_to_bench)
 			printf("%.5f (%s)\n", rate, algo_names[opt_bench_algo]);
-		} else {
+		else {
 			// Write result to shared memory for parent
 #if defined(WIN32)
 			char unique_name[64];
+
 			if (GetEnvironmentVariable("CGMINER_SHARED_MEM", unique_name, 32)) {
 				HANDLE map_handle = CreateFileMapping(
 					INVALID_HANDLE_VALUE, // use paging file
@@ -5139,7 +5177,7 @@ int main(int argc, char *argv[])
 					4096, // size: low 32-bits
 					unique_name // name of map object
 				);
-				if (NULL!=map_handle) {
+				if (NULL != map_handle) {
 					void *shared_mem = MapViewOfFile(
 						map_handle, // object to map view of
 						FILE_MAP_WRITE, // read/write access
@@ -5147,13 +5185,13 @@ int main(int argc, char *argv[])
 						0, // low offset: beginning
 						0 // default: map entire file
 					);
-					if (NULL!=shared_mem)
+					if (NULL != shared_mem)
 						CopyMemory(shared_mem, &rate, sizeof(rate));
 					(void)UnmapViewOfFile(shared_mem);
 				}
 				(void)CloseHandle(map_handle);
 			}
#endif
 		}
 		exit(0);
 	}
@@ -2,7 +2,7 @@
 ##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##
 m4_define([v_maj], [2])
 m4_define([v_min], [4])
-m4_define([v_mic], [3])
+m4_define([v_mic], [4])
 ##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##
 m4_define([v_ver], [v_maj.v_min.v_mic])
 m4_define([lt_rev], m4_eval(v_maj + v_min))
@@ -20,7 +20,7 @@
 #include "fpgautils.h"
 #include "miner.h"

-#define BITFORCE_SLEEP_MS 2000
+#define BITFORCE_SLEEP_MS 3000
 #define BITFORCE_TIMEOUT_MS 10000
 #define BITFORCE_CHECK_INTERVAL_MS 10
 #define WORK_CHECK_INTERVAL_MS 50
@@ -214,7 +214,7 @@ static bool bitforce_get_temp(struct cgpu_info *bitforce)
 	mutex_unlock(&bitforce->device_mutex);

 	if (unlikely(!pdevbuf[0])) {
-		applog(LOG_ERR, "BFL%i: Error reading (ZLX)", bitforce->device_id);
+		applog(LOG_ERR, "BFL%i: Error: Get temp returned empty string", bitforce->device_id);
 		bitforce->temp = 0;
 		return false;
 	}
@@ -245,22 +245,17 @@ static bool bitforce_send_work(struct thr_info *thr, struct work *work)

 	if (!fdDev)
 		return false;
+re_send:
 	mutex_lock(&bitforce->device_mutex);
 	BFwrite(fdDev, "ZDX", 3);
 	BFgets(pdevbuf, sizeof(pdevbuf), fdDev);
-	if (unlikely(!pdevbuf[0])) {
-		applog(LOG_ERR, "BFL%i: Error reading (ZDX)", bitforce->device_id);
+	if (!pdevbuf[0] || (pdevbuf[0] == 'B')) {
 		mutex_unlock(&bitforce->device_mutex);
-		return false;
-	}
-	if (pdevbuf[0] == 'B'){
-		applog(LOG_WARNING, "BFL%i: Throttling", bitforce->device_id);
-		mutex_unlock(&bitforce->device_mutex);
-		return true;
-	}
-	else if (unlikely(pdevbuf[0] != 'O' || pdevbuf[1] != 'K')) {
-		applog(LOG_ERR, "BFL%i: ZDX reports: %s", bitforce->device_id, pdevbuf);
+		bitforce->wait_ms += WORK_CHECK_INTERVAL_MS;
+		usleep(WORK_CHECK_INTERVAL_MS*1000);
+		goto re_send;
+	} else if (unlikely(pdevbuf[0] != 'O' || pdevbuf[1] != 'K')) {
+		applog(LOG_ERR, "BFL%i: Error: Send work reports: %s", bitforce->device_id, pdevbuf);
 		mutex_unlock(&bitforce->device_mutex);
 		return false;
 	}
@@ -276,11 +271,11 @@ static bool bitforce_send_work(struct thr_info *thr, struct work *work)
 	BFgets(pdevbuf, sizeof(pdevbuf), fdDev);
 	mutex_unlock(&bitforce->device_mutex);
 	if (unlikely(!pdevbuf[0])) {
-		applog(LOG_ERR, "BFL%i: Error reading (block data)", bitforce->device_id);
+		applog(LOG_ERR, "BFL%i: Error: Send block data returned empty string", bitforce->device_id);
 		return false;
 	}
 	if (unlikely(pdevbuf[0] != 'O' || pdevbuf[1] != 'K')) {
-		applog(LOG_ERR, "BFL%i: block data reports: %s", bitforce->device_id, pdevbuf);
+		applog(LOG_ERR, "BFL%i: Error: Send block data reports: %s", bitforce->device_id, pdevbuf);
 		return false;
 	}
 	return true;
@@ -294,23 +289,24 @@ static uint64_t bitforce_get_result(struct thr_info *thr, struct work *work)
 	char pdevbuf[0x100];
 	char *pnoncebuf;
 	uint32_t nonce;
+	unsigned int delay_time_ms = BITFORCE_CHECK_INTERVAL_MS;

 	if (!fdDev)
 		return 0;

 	while (bitforce->wait_ms < BITFORCE_TIMEOUT_MS) {
+		if (unlikely(work_restart[thr->id].restart))
+			return 1;
 		mutex_lock(&bitforce->device_mutex);
 		BFwrite(fdDev, "ZFX", 3);
 		BFgets(pdevbuf, sizeof(pdevbuf), fdDev);
 		mutex_unlock(&bitforce->device_mutex);
-		if (unlikely(!pdevbuf[0])) {
-			applog(LOG_ERR, "BFL%i: Error reading (ZFX)", bitforce->device_id);
-			return 0;
-		}
-		if (pdevbuf[0] != 'B')
+		if (pdevbuf[0] && pdevbuf[0] != 'B') /* BFL does not respond during throttling */
 			break;
-		usleep(BITFORCE_CHECK_INTERVAL_MS*1000);
-		bitforce->wait_ms += BITFORCE_CHECK_INTERVAL_MS;
+		/* if BFL is throttling, no point checking so quickly */
+		delay_time_ms = (pdevbuf[0] ? BITFORCE_CHECK_INTERVAL_MS : 2*WORK_CHECK_INTERVAL_MS);
+		usleep(delay_time_ms*1000);
+		bitforce->wait_ms += delay_time_ms;
 	}
 	if (bitforce->wait_ms >= BITFORCE_TIMEOUT_MS) {
 		applog(LOG_ERR, "BFL%i: took longer than 10s", bitforce->device_id);
@@ -333,7 +329,7 @@ static uint64_t bitforce_get_result(struct thr_info *thr, struct work *work)
 	else if (pdevbuf[0] == 'I')
 		return 1; /* Device idle */
 	else if (strncasecmp(pdevbuf, "NONCE-FOUND", 11)) {
-		applog(LOG_WARNING, "BFL%i: result reports: %s", bitforce->device_id, pdevbuf);
+		applog(LOG_WARNING, "BFL%i: Error: Get result reports: %s", bitforce->device_id, pdevbuf);
 		return 1;
 	}

@@ -369,16 +365,34 @@ static void biforce_thread_enable(struct thr_info *thr)
 	bitforce_init(bitforce);
 }


 static uint64_t bitforce_scanhash(struct thr_info *thr, struct work *work, uint64_t __maybe_unused max_nonce)
 {
 	struct cgpu_info *bitforce = thr->cgpu;
-	bitforce->wait_ms = 0;
 	uint64_t ret;
+	struct timeval tdiff;
+	unsigned int sleep_time;

+	bitforce->wait_ms = 0;
 	ret = bitforce_send_work(thr, work);

+	/* Initially wait 2/3 of the average cycle time so we can request more
+	 work before full scan is up */
+	sleep_time = (2*bitforce->sleep_ms) / 3;
+	tdiff.tv_sec = sleep_time/1000;
+	tdiff.tv_usec = sleep_time*1000 - (tdiff.tv_sec * 1000000);
+	if (!restart_wait(&tdiff))
+		return 1;
+	bitforce->wait_ms += sleep_time;
+	queue_request(thr, false);
+
+	/* Now wait athe final 1/3rd; no bitforce should be finished by now */
+	sleep_time = bitforce->sleep_ms - sleep_time;
+	tdiff.tv_sec = sleep_time/1000;
+	tdiff.tv_usec = sleep_time*1000 - (tdiff.tv_sec * 1000000);
+	if (!restart_wait(&tdiff))
+		return 1;
+	bitforce->wait_ms += sleep_time;
+	/*
 	while (bitforce->wait_ms < bitforce->sleep_ms) {
 		usleep(WORK_CHECK_INTERVAL_MS*1000);
 		bitforce->wait_ms += WORK_CHECK_INTERVAL_MS;
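
In the rewritten bitforce_scanhash() above, each restart_wait() call needs its delay expressed as a struct timeval. The commit computes tv_usec as sleep_time*1000 - (tdiff.tv_sec * 1000000), which is the same as (ms % 1000) * 1000. A minimal sketch of that conversion (the ms_to_timeval() helper name is hypothetical, not part of the commit):

    #include <sys/time.h>

    /* Sketch: turn a delay in milliseconds into the struct timeval
     * that restart_wait() takes. Helper name is illustrative only. */
    static void ms_to_timeval(unsigned int ms, struct timeval *tv)
    {
        tv->tv_sec = ms / 1000;
        tv->tv_usec = (ms % 1000) * 1000;
    }

    /* With the new BITFORCE_SLEEP_MS of 3000, the first wait covers
     * (2 * 3000) / 3 = 2000 ms and the second the remaining 1000 ms. */
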
@@ -387,7 +401,7 @@ static uint64_t bitforce_scanhash(struct thr_info *thr, struct work *work, uint6
 			return 1; //we have discarded all work; equivilent to 0 hashes done.
 		}
 	}
+	*/
 	if (ret)
 		ret = bitforce_get_result(thr, work);

@@ -466,7 +466,7 @@ modminer_process_results(struct thr_info*thr)

 	struct timeval tv_workend, elapsed;
 	gettimeofday(&tv_workend, NULL);
-	timeval_subtract(&elapsed, &tv_workend, &state->tv_workstart);
+	timersub(&tv_workend, &state->tv_workstart, &elapsed);

 	uint64_t hashes = (uint64_t)state->clock * (((uint64_t)elapsed.tv_sec * 1000000) + elapsed.tv_usec);
 	if (hashes > 0xffffffff)

miner.h: 8 lines changed

@@ -561,9 +561,6 @@ typedef bool (*sha256_func)(int thr_id, const unsigned char *pmidstate,
 			      uint32_t *last_nonce,
 			      uint32_t nonce);

-extern int
-timeval_subtract (struct timeval *result, struct timeval *x, struct timeval *y);
-
 extern bool fulltest(const unsigned char *hash, const unsigned char *target);

 extern int opt_scantime;
@@ -573,7 +570,12 @@ struct work_restart {
 	char padding[128 - sizeof(unsigned long)];
 };

+extern pthread_mutex_t restart_lock;
+extern pthread_cond_t restart_cond;
+
 extern void thread_reportin(struct thr_info *thr);
+extern bool queue_request(struct thr_info *thr, bool needed);
+extern int restart_wait(struct timeval *tdiff);

 extern void kill_work(void);


util.c: 59 lines changed

@@ -147,9 +147,9 @@ static size_t resp_hdr_cb(void *ptr, size_t size, size_t nmemb, void *user_data)

 	memcpy(val, rem, remlen); /* store value, trim trailing ws */
 	val[remlen] = 0;
-	while ((*val) && (isspace(val[strlen(val) - 1]))) {
+	while ((*val) && (isspace(val[strlen(val) - 1])))
 		val[strlen(val) - 1] = 0;
-	}
+
 	if (!*val) /* skip blank value */
 		goto out;

@@ -157,9 +157,9 @@ static size_t resp_hdr_cb(void *ptr, size_t size, size_t nmemb, void *user_data)
 		applog(LOG_DEBUG, "HTTP hdr(%s): %s", key, val);

 	if (!strcasecmp("X-Roll-Ntime", key)) {
-		if (!strncasecmp("N", val, 1)) {
+		if (!strncasecmp("N", val, 1))
 			applog(LOG_DEBUG, "X-Roll-Ntime: N found");
-		} else {
+		else {
 			/* Check to see if expire= is supported and if not, set
 			 * the rolltime to the default scantime */
 			if (strlen(val) > 7 && !strncasecmp("expire=", val, 7))
@@ -256,10 +256,10 @@ json_t *json_rpc_call(CURL *curl, const char *url,
 		      bool probe, bool longpoll, int *rolltime,
 		      struct pool *pool, bool share)
 {
-	char len_hdr[64], user_agent_hdr[128], *ghashrate;
 	long timeout = longpoll ? (60 * 60) : 60;
 	struct data_buffer all_data = {NULL, 0};
 	struct header_info hi = {NULL, 0, NULL};
+	char len_hdr[64], user_agent_hdr[128];
 	char curl_err_str[CURL_ERROR_SIZE];
 	struct curl_slist *headers = NULL;
 	struct upload_buffer upload_data;
@@ -327,9 +327,10 @@ json_t *json_rpc_call(CURL *curl, const char *url,
 		"X-Mining-Extensions: longpoll midstate rollntime submitold");

 	if (likely(global_hashrate)) {
-		asprintf(&ghashrate, "X-Mining-Hashrate: %llu", global_hashrate);
+		char ghashrate[255];
+
+		sprintf(ghashrate, "X-Mining-Hashrate: %llu", global_hashrate);
 		headers = curl_slist_append(headers, ghashrate);
-		free(ghashrate);
 	}

 	headers = curl_slist_append(headers, len_hdr);
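
Dropping asprintf() here lines up with the "Fix builds on non gnu platforms" entry in the NEWS hunk at the top of this commit: asprintf() is a GNU/BSD extension, while writing into a local buffer with a *printf call is plain portable C, and curl_slist_append() copies the string anyway. A hedged sketch of the same pattern with an explicit bound (the committed code uses sprintf() into a char[255]; snprintf() below is my substitution, not the commit's):

    #include <stdio.h>
    #include <curl/curl.h>

    /* Sketch only: build the X-Mining-Hashrate header without asprintf(). */
    static struct curl_slist *append_hashrate_hdr(struct curl_slist *headers,
                                                  unsigned long long hashrate)
    {
        char ghashrate[255];

        snprintf(ghashrate, sizeof(ghashrate),
                 "X-Mining-Hashrate: %llu", hashrate);
        /* curl_slist_append() duplicates the string, so a stack buffer is fine. */
        return curl_slist_append(headers, ghashrate);
    }
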
@@ -379,9 +380,8 @@ json_t *json_rpc_call(CURL *curl, const char *url,
 		if (pool->hdr_path != NULL)
 			free(pool->hdr_path);
 		pool->hdr_path = hi.lp_path;
-	} else {
+	} else
 		pool->hdr_path = NULL;
-	}
 	} else if (hi.lp_path) {
 		free(hi.lp_path);
 		hi.lp_path = NULL;
@@ -401,6 +401,7 @@ json_t *json_rpc_call(CURL *curl, const char *url,

 	if (opt_protocol) {
 		char *s = json_dumps(val, JSON_INDENT(3));
+
 		applog(LOG_DEBUG, "JSON protocol response:\n%s", s);
 		free(s);
 	}
@@ -450,8 +451,8 @@ err_out:

 char *bin2hex(const unsigned char *p, size_t len)
 {
-	unsigned int i;
 	char *s = malloc((len * 2) + 1);
+	unsigned int i;

 	if (!s)
 		return NULL;
@@ -492,43 +493,14 @@ bool hex2bin(unsigned char *p, const char *hexstr, size_t len)
 	return (len == 0 && *hexstr == 0) ? true : false;
 }

-/* Subtract the `struct timeval' values X and Y,
-   storing the result in RESULT.
-   Return 1 if the difference is negative, otherwise 0. */
-
-int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
-{
-	/* Perform the carry for the later subtraction by updating Y. */
-	if (x->tv_usec < y->tv_usec) {
-		int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
-
-		y->tv_usec -= 1000000 * nsec;
-		y->tv_sec += nsec;
-	}
-	if (x->tv_usec - y->tv_usec > 1000000) {
-		int nsec = (x->tv_usec - y->tv_usec) / 1000000;
-
-		y->tv_usec += 1000000 * nsec;
-		y->tv_sec -= nsec;
-	}
-
-	/* Compute the time remaining to wait.
-	 * `tv_usec' is certainly positive. */
-	result->tv_sec = x->tv_sec - y->tv_sec;
-	result->tv_usec = x->tv_usec - y->tv_usec;
-
-	/* Return 1 if result is negative. */
-	return x->tv_sec < y->tv_sec;
-}
-
 bool fulltest(const unsigned char *hash, const unsigned char *target)
 {
 	unsigned char hash_swap[32], target_swap[32];
 	uint32_t *hash32 = (uint32_t *) hash_swap;
 	uint32_t *target32 = (uint32_t *) target_swap;
-	int i;
-	bool rc = true;
 	char *hash_str, *target_str;
+	bool rc = true;
+	int i;

 	swap256(hash_swap, hash);
 	swap256(target_swap, target);
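
The hand-rolled timeval_subtract() deleted above (together with its extern declaration in miner.h) is what every timersub() call earlier in this commit replaces, with timeradd() doing the reverse job inside restart_wait(). The argument order is the main thing to watch: the old helper took the result first, the BSD macros take it last. A minimal usage sketch, assuming a platform whose <sys/time.h> exposes the BSD timeval macros (glibc and the BSDs do):

    #include <stdio.h>
    #include <sys/time.h>

    int main(void)
    {
        struct timeval start, end, elapsed;

        gettimeofday(&start, NULL);
        /* ... do some work ... */
        gettimeofday(&end, NULL);

        /* timersub(minuend, subtrahend, result): elapsed = end - start */
        timersub(&end, &start, &elapsed);
        printf("elapsed: %ld.%06ld s\n",
               (long)elapsed.tv_sec, (long)elapsed.tv_usec);
        return 0;
    }
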
@@ -681,10 +653,7 @@ out:

 int thr_info_create(struct thr_info *thr, pthread_attr_t *attr, void *(*start) (void *), void *arg)
 {
-	int ret;
-
-	ret = pthread_create(&thr->pth, attr, start, arg);
-	return ret;
+	return pthread_create(&thr->pth, attr, start, arg);
 }

 void thr_info_freeze(struct thr_info *thr)