diff --git a/adl.c b/adl.c
index 473cba0d..c7393f48 100644
--- a/adl.c
+++ b/adl.c
@@ -695,6 +695,11 @@ int gpu_fanpercent(int gpu)
applog(LOG_WARNING, "You will need to start cgminer from scratch to correct this");
applog(LOG_WARNING, "Disabling fanspeed monitoring on this device");
ga->has_fanspeed = false;
+ if (ga->twin) {
+ applog(LOG_WARNING, "Disabling fanspeed linking on GPU twins");
+ ga->twin->twin = NULL;
+ ga->twin = NULL;
+ }
}
return ret;
}
diff --git a/api.c b/api.c
index 7eb684dc..ff26e54f 100644
--- a/api.c
+++ b/api.c
@@ -1636,10 +1636,7 @@ static void addpool(__maybe_unused SOCKETTYPE c, char *param, bool isjson)
return;
}
- if (add_pool_details(true, url, user, pass) == ADD_POOL_MAXIMUM) {
- strcpy(io_buffer, message(MSG_TOOMANYP, MAX_POOLS, NULL, isjson));
- return;
- }
+ add_pool_details(true, url, user, pass);
ptr = escape_string(url, isjson);
strcpy(io_buffer, message(MSG_ADDPOOL, 0, ptr, isjson));
@@ -2113,12 +2110,13 @@ static int itemstats(int i, char *id, struct cgminer_stats *stats, struct cgmine
if (pool_stats) {
sprintf(buf, isjson
- ? ",\"Pool Calls\":%d,\"Pool Attempts\":%d,\"Pool Wait\":%ld.%06ld,\"Pool Max\":%ld.%06ld,\"Pool Min\":%ld.%06ld"
- : ",Pool Calls=%d,Pool Attempts=%d,Pool Wait=%ld.%06ld,Pool Max=%ld.%06ld,Pool Min=%ld.%06ld",
+ ? ",\"Pool Calls\":%d,\"Pool Attempts\":%d,\"Pool Wait\":%ld.%06ld,\"Pool Max\":%ld.%06ld,\"Pool Min\":%ld.%06ld,\"Pool Av\":%f"
+ : ",Pool Calls=%d,Pool Attempts=%d,Pool Wait=%ld.%06ld,Pool Max=%ld.%06ld,Pool Min=%ld.%06ld,Pool Av=%f",
pool_stats->getwork_calls, pool_stats->getwork_attempts,
pool_stats->getwork_wait.tv_sec, pool_stats->getwork_wait.tv_usec,
pool_stats->getwork_wait_max.tv_sec, pool_stats->getwork_wait_max.tv_usec,
- pool_stats->getwork_wait_min.tv_sec, pool_stats->getwork_wait_min.tv_usec);
+ pool_stats->getwork_wait_min.tv_sec, pool_stats->getwork_wait_min.tv_usec,
+ pool_stats->getwork_wait_rolling);
strcat(io_buffer, buf);
}
diff --git a/cgminer.c b/cgminer.c
index ec062789..75a28244 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -116,7 +116,7 @@ struct list_head scan_devices;
static signed int devices_enabled;
static bool opt_removedisabled;
int total_devices;
-struct cgpu_info *devices[MAX_DEVICES];
+struct cgpu_info **devices;
bool have_opencl;
int opt_n_threads = -1;
int mining_threads;
@@ -190,7 +190,7 @@ unsigned int found_blocks;
unsigned int local_work;
unsigned int total_go, total_ro;
-struct pool *pools[MAX_POOLS];
+struct pool **pools;
static struct pool *currentpool = NULL;
int total_pools;
@@ -395,6 +395,7 @@ static struct pool *add_pool(void)
if (!pool)
quit(1, "Failed to malloc pool in add_pool");
pool->pool_no = pool->prio = total_pools;
+ pools = realloc(pools, sizeof(struct pool *) * (total_pools + 2));
pools[total_pools++] = pool;
if (unlikely(pthread_mutex_init(&pool->pool_lock, NULL)))
quit(1, "Failed to pthread_mutex_init in add_pool");
@@ -1622,7 +1623,7 @@ static bool submit_upstream_work(const struct work *work, CURL *curl)
int thr_id = work->thr_id;
struct cgpu_info *cgpu = thr_info[thr_id].cgpu;
struct pool *pool = work->pool;
- bool rolltime;
+ int rolltime;
uint32_t *hash32;
char hashshow[64+1] = "";
@@ -1837,16 +1838,15 @@ static bool get_upstream_work(struct work *work, CURL *curl)
url = pool->rpc_url;
+ gettimeofday(&tv_start, NULL);
retry:
/* A single failure response here might be reported as a dead pool and
* there may be temporary denied messages etc. falsely reporting
* failure so retry a few times before giving up */
while (!val && retries++ < 3) {
pool_stats->getwork_attempts++;
- gettimeofday(&tv_start, NULL);
val = json_rpc_call(curl, url, pool->rpc_userpass, rpc_req,
false, false, &work->rolltime, pool, false);
- gettimeofday(&tv_end, NULL);
}
if (unlikely(!val)) {
applog(LOG_DEBUG, "Failed json_rpc_call in get_upstream_work");
@@ -1856,12 +1856,12 @@ retry:
rc = work_decode(json_object_get(val, "result"), work);
if (!rc && retries < 3)
goto retry;
- work->pool = pool;
- work->longpoll = false;
- total_getworks++;
- pool->getwork_requested++;
+ gettimeofday(&tv_end, NULL);
timersub(&tv_end, &tv_start, &tv_elapsed);
+ pool_stats->getwork_wait_rolling += ((double)tv_elapsed.tv_sec + ((double)tv_elapsed.tv_usec / 1000000)) * 0.63;
+ pool_stats->getwork_wait_rolling /= 1.63;
+
timeradd(&tv_elapsed, &(pool_stats->getwork_wait), &(pool_stats->getwork_wait));
if (timercmp(&tv_elapsed, &(pool_stats->getwork_wait_max), >)) {
pool_stats->getwork_wait_max.tv_sec = tv_elapsed.tv_sec;
@@ -1873,6 +1873,11 @@ retry:
}
pool_stats->getwork_calls++;
+ work->pool = pool;
+ work->longpoll = false;
+ total_getworks++;
+ pool->getwork_requested++;
+
json_decref(val);
out:
@@ -2154,22 +2159,37 @@ static bool workio_get_work(struct workio_cmd *wc)
static bool stale_work(struct work *work, bool share)
{
struct timeval now;
+ time_t work_expiry;
struct pool *pool;
+ int getwork_delay;
if (work->mandatory)
return false;
+ if (share)
+ work_expiry = opt_expiry;
+ else if (work->rolltime)
+ work_expiry = work->rolltime;
+ else
+ work_expiry = opt_scantime;
+ pool = work->pool;
+ /* Factor in the average getwork delay of this pool, rounding it up to
+ * the nearest second */
+ getwork_delay = pool->cgminer_pool_stats.getwork_wait_rolling * 5 + 1;
+ if (!share) {
+ work_expiry -= getwork_delay;
+ if (unlikely(work_expiry < 5))
+ work_expiry = 5;
+ } else
+ work_expiry += getwork_delay;
+
gettimeofday(&now, NULL);
- if (share) {
- if ((now.tv_sec - work->tv_staged.tv_sec) >= opt_expiry)
- return true;
- } else if ((now.tv_sec - work->tv_staged.tv_sec) >= opt_scantime)
+ if ((now.tv_sec - work->tv_staged.tv_sec) >= work_expiry)
return true;
if (work->work_block != work_block)
return true;
- pool = work->pool;
if (opt_fail_only && !share && pool != current_pool() && pool->enabled != POOL_REJECTING)
return true;
@@ -2379,8 +2399,11 @@ static void inc_queued(void)
mutex_unlock(&qd_lock);
}
-static void dec_queued(void)
+static void dec_queued(struct work *work)
{
+ if (work->clone)
+ return;
+
mutex_lock(&qd_lock);
if (total_queued > 0)
total_queued--;
@@ -2397,17 +2420,28 @@ static int requests_queued(void)
return ret;
}
-static int discard_stale(void)
+static void subtract_queued(int work_units)
+{
+ mutex_lock(&qd_lock);
+ total_queued -= work_units;
+ if (total_queued < 0)
+ total_queued = 0;
+ mutex_unlock(&qd_lock);
+}
+
+static void discard_stale(void)
{
struct work *work, *tmp;
- int i, stale = 0;
+ int stale = 0, nonclone = 0;
mutex_lock(stgd_lock);
HASH_ITER(hh, staged_work, work, tmp) {
if (stale_work(work, false)) {
HASH_DEL(staged_work, work);
- if (work->clone || work->longpoll)
+ if (work->clone)
--staged_extras;
+ else
+ nonclone++;
discard_work(work);
stale++;
}
@@ -2417,23 +2451,19 @@ static int discard_stale(void)
applog(LOG_DEBUG, "Discarded %d stales that didn't match current hash", stale);
/* Dec queued outside the loop to not have recursive locks */
- for (i = 0; i < stale; i++)
- dec_queued();
-
- return stale;
+ subtract_queued(nonclone);
}
static bool queue_request(struct thr_info *thr, bool needed);
static void restart_threads(void)
{
- int i, stale;
+ int i;
/* Discard staged work that is now stale */
- stale = discard_stale();
+ discard_stale();
- for (i = 0; i < stale; i++)
- queue_request(NULL, true);
+ queue_request(NULL, true);
for (i = 0; i < mining_threads; i++)
work_restart[i].restart = 1;
@@ -2556,7 +2586,7 @@ static bool hash_push(struct work *work)
if (likely(!getq->frozen)) {
HASH_ADD_INT(staged_work, id, work);
HASH_SORT(staged_work, tv_sort);
- if (work->clone || work->longpoll)
+ if (work->clone)
++staged_extras;
} else
rc = false;
@@ -3385,7 +3415,7 @@ static bool pool_active(struct pool *pool, bool pinging)
bool ret = false;
json_t *val;
CURL *curl;
- bool rolltime;
+ int rolltime;
curl = curl_easy_init();
if (unlikely(!curl)) {
@@ -3493,39 +3523,78 @@ static void pool_resus(struct pool *pool)
switch_pools(NULL);
}
-static long requested_tv_sec;
+static time_t requested_tv_sec;
+
+static bool control_tset(bool *var)
+{
+ bool ret;
+
+ mutex_lock(&control_lock);
+ ret = *var;
+ *var = true;
+ mutex_unlock(&control_lock);
+
+ return ret;
+}
+
+static void control_tclear(bool *var)
+{
+ mutex_lock(&control_lock);
+ *var = false;
+ mutex_unlock(&control_lock);
+}
+
+static bool queueing;
static bool queue_request(struct thr_info *thr, bool needed)
{
- int rq = requests_queued();
struct workio_cmd *wc;
struct timeval now;
+ time_t scan_post;
+ int rq, rs;
+ bool ret = true;
+
+ /* Prevent multiple requests being executed at once */
+ if (control_tset(&queueing))
+ return ret;
+
+ rq = requests_queued();
+ rs = requests_staged();
+
+ /* Grab more work every 2/3 of the scan time to avoid all work expiring
+ * at the same time */
+ scan_post = opt_scantime * 2 / 3;
+ if (scan_post < 5)
+ scan_post = 5;
gettimeofday(&now, NULL);
- /* Space out retrieval of extra work according to the number of mining
- * threads */
- if (rq >= mining_threads + staged_extras &&
- (now.tv_sec - requested_tv_sec) < opt_scantime / (mining_threads + 1))
- return true;
+ /* Test to make sure we have enough work for pools without rolltime
+ * and enough original work for pools with rolltime */
+ if ((rq >= mining_threads || rs >= mining_threads) &&
+ rq > staged_extras + opt_queue &&
+ now.tv_sec - requested_tv_sec < scan_post)
+ goto out;
+
+ requested_tv_sec = now.tv_sec;
+
+ inc_queued();
/* fill out work request message */
wc = calloc(1, sizeof(*wc));
if (unlikely(!wc)) {
applog(LOG_ERR, "Failed to calloc wc in queue_request");
- return false;
+ ret = false;
+ goto out;
}
wc->cmd = WC_GET_WORK;
- if (thr)
- wc->thr = thr;
- else
- wc->thr = NULL;
+ wc->thr = thr;
/* If we're queueing work faster than we can stage it, consider the
* system lagging and allow work to be gathered from another pool if
* possible */
- if (rq && needed && !requests_staged() && !opt_fail_only)
+ if (rq && needed && !rs && !opt_fail_only)
wc->lagging = true;
applog(LOG_DEBUG, "Queueing getwork request to work thread");
@@ -3534,12 +3603,13 @@ static bool queue_request(struct thr_info *thr, bool needed)
if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
applog(LOG_ERR, "Failed to tq_push in queue_request");
workio_cmd_free(wc);
- return false;
+ ret = false;
}
- requested_tv_sec = now.tv_sec;
- inc_queued();
- return true;
+out:
+ control_tclear(&queueing);
+
+ return ret;
}
static struct work *hash_pop(const struct timespec *abstime)
@@ -3554,7 +3624,7 @@ static struct work *hash_pop(const struct timespec *abstime)
if (HASH_COUNT(staged_work)) {
work = staged_work;
HASH_DEL(staged_work, work);
- if (work->clone || work->longpoll)
+ if (work->clone)
--staged_extras;
}
mutex_unlock(stgd_lock);
@@ -3571,8 +3641,7 @@ static inline bool should_roll(struct work *work)
static inline bool can_roll(struct work *work)
{
- return (work->pool && !stale_work(work, false) && work->rolltime &&
- work->rolls < 11 && !work->clone);
+ return (work->pool && !stale_work(work, false) && work->rolltime && !work->clone);
}
static void roll_work(struct work *work)
@@ -3603,6 +3672,58 @@ static bool reuse_work(struct work *work)
return false;
}
+static struct work *make_clone(struct work *work)
+{
+ struct work *work_clone = make_work();
+
+ memcpy(work_clone, work, sizeof(struct work));
+ work_clone->clone = true;
+ work_clone->longpoll = false;
+ /* Make cloned work appear slightly older to bias towards keeping the
+ * master work item which can be further rolled */
+ work_clone->tv_staged.tv_sec -= 1;
+
+ return work_clone;
+}
+
+/* Clones work by rolling it if possible, and returning a clone instead of the
+ * original work item which gets staged again to possibly be rolled again in
+ * the future */
+static struct work *clone_work(struct work *work)
+{
+ int mrs = mining_threads - requests_staged();
+ struct work *work_clone;
+ bool cloned;
+
+ if (mrs < 1)
+ return work;
+
+ cloned = false;
+ work_clone = make_clone(work);
+ while (mrs-- > 0 && can_roll(work) && should_roll(work)) {
+ applog(LOG_DEBUG, "Pushing rolled converted work to stage thread");
+ if (unlikely(!stage_work(work_clone))) {
+ cloned = false;
+ break;
+ }
+ roll_work(work);
+ work_clone = make_clone(work);
+ /* Roll it again to prevent duplicates should this be used
+ * directly later on */
+ roll_work(work);
+ cloned = true;
+ }
+
+ if (cloned) {
+ stage_work(work);
+ return work_clone;
+ }
+
+ free_work(work_clone);
+
+ return work;
+}
+
static bool get_work(struct work *work, bool requested, struct thr_info *thr,
const int thr_id)
{
@@ -3674,7 +3795,7 @@ retry:
}
if (stale_work(work_heap, false)) {
- dec_queued();
+ dec_queued(work_heap);
discard_work(work_heap);
goto retry;
}
@@ -3687,18 +3808,10 @@ retry:
pool_resus(pool);
}
- memcpy(work, work_heap, sizeof(*work));
-
- /* Hand out a clone if we can roll this work item */
- if (reuse_work(work_heap)) {
- applog(LOG_DEBUG, "Pushing divided work to get queue head");
-
- stage_work(work_heap);
- work->clone = true;
- } else {
- dec_queued();
- free_work(work_heap);
- }
+ work_heap = clone_work(work_heap);
+ memcpy(work, work_heap, sizeof(struct work));
+ dec_queued(work_heap);
+ free_work(work_heap);
ret = true;
out:
@@ -4023,9 +4136,9 @@ enum {
};
/* Stage another work item from the work returned in a longpoll */
-static void convert_to_work(json_t *val, bool rolltime, struct pool *pool)
+static void convert_to_work(json_t *val, int rolltime, struct pool *pool)
{
- struct work *work, *work_clone;
+ struct work *work;
bool rc;
work = make_work();
@@ -4058,25 +4171,16 @@ static void convert_to_work(json_t *val, bool rolltime, struct pool *pool)
return;
}
- work_clone = make_work();
- memcpy(work_clone, work, sizeof(struct work));
- while (reuse_work(work)) {
- work_clone->clone = true;
- work_clone->longpoll = false;
- applog(LOG_DEBUG, "Pushing rolled converted work to stage thread");
- if (unlikely(!stage_work(work_clone)))
- break;
- work_clone = make_work();
- memcpy(work_clone, work, sizeof(struct work));
- }
- free_work(work_clone);
+ work = clone_work(work);
applog(LOG_DEBUG, "Pushing converted work to stage thread");
if (unlikely(!stage_work(work)))
free_work(work);
- else
+ else {
+ inc_queued();
applog(LOG_DEBUG, "Converted longpoll data to work");
+ }
}
/* If we want longpoll, enable it for the chosen default pool, or, if
@@ -4121,7 +4225,7 @@ static void *longpoll_thread(void *userdata)
struct timeval start, end;
CURL *curl = NULL;
int failures = 0;
- bool rolltime;
+ int rolltime;
curl = curl_easy_init();
if (unlikely(!curl)) {
@@ -4272,6 +4376,23 @@ static void *watchpool_thread(void __maybe_unused *userdata)
return NULL;
}
+/* Work is sorted according to age, so discard the oldest work items, leaving
+ * only 1 staged work item per mining thread */
+static void age_work(void)
+{
+ int discarded = 0;
+
+ while (requests_staged() > mining_threads * 4 / 3 + opt_queue) {
+ struct work *work = hash_pop(NULL);
+
+ if (unlikely(!work))
+ break;
+ discard_work(work);
+ discarded++;
+ }
+ if (discarded)
+ applog(LOG_DEBUG, "Aged %d work items", discarded);
+}
/* Makes sure the hashmeter keeps going even if mining threads stall, updates
* the screen at regular intervals, and restarts threads if they appear to have
@@ -4294,6 +4415,8 @@ static void *watchdog_thread(void __maybe_unused *userdata)
if (requests_queued() < opt_queue)
queue_request(NULL, false);
+ age_work();
+
hashmeter(-1, &zero_tv, 0);
#ifdef HAVE_CURSES
@@ -4581,13 +4704,10 @@ char *curses_input(const char *query)
}
#endif
-int add_pool_details(bool live, char *url, char *user, char *pass)
+void add_pool_details(bool live, char *url, char *user, char *pass)
{
struct pool *pool;
- if (total_pools == MAX_POOLS)
- return ADD_POOL_MAXIMUM;
-
pool = add_pool();
pool->rpc_url = url;
@@ -4603,8 +4723,6 @@ int add_pool_details(bool live, char *url, char *user, char *pass)
pool->enabled = POOL_ENABLED;
if (live && !pool_active(pool, false))
pool->idle = true;
-
- return ADD_POOL_OK;
}
#ifdef HAVE_CURSES
@@ -4614,10 +4732,6 @@ static bool input_pool(bool live)
bool ret = false;
immedok(logwin, true);
- if (total_pools == MAX_POOLS) {
- wlogprint("Reached maximum number of pools.\n");
- goto out;
- }
wlogprint("Input server details.\n");
url = curses_input("URL");
@@ -4645,7 +4759,8 @@ static bool input_pool(bool live)
if (!pass)
goto out;
- ret = (add_pool_details(live, url, user, pass) == ADD_POOL_OK);
+ add_pool_details(live, url, user, pass);
+ ret = true;
out:
immedok(logwin, false);
@@ -4815,6 +4930,7 @@ bool add_cgpu(struct cgpu_info*cgpu)
cgpu->device_id = d->lastid = 0;
HASH_ADD_STR(devids, name, d);
}
+ devices = realloc(devices, sizeof(struct cgpu_info *) * (total_devices + 2));
devices[total_devices++] = cgpu;
return true;
}
@@ -4904,8 +5020,6 @@ int main(int argc, char *argv[])
gpus[i].dynamic = true;
#endif
- memset(devices, 0, sizeof(devices));
-
/* parse command line */
opt_register_table(opt_config_table,
"Options for both config file and command line");
diff --git a/driver-cpu.c b/driver-cpu.c
index d0a25160..1f8ac892 100644
--- a/driver-cpu.c
+++ b/driver-cpu.c
@@ -731,8 +731,6 @@ static void cpu_detect()
if (num_processors < 1)
return;
- if (total_devices + opt_n_threads > MAX_DEVICES)
- opt_n_threads = MAX_DEVICES - total_devices;
cpus = calloc(opt_n_threads, sizeof(struct cgpu_info));
if (unlikely(!cpus))
quit(1, "Failed to calloc cpus");
diff --git a/driver-icarus.c b/driver-icarus.c
index a463c281..442eb9d1 100644
--- a/driver-icarus.c
+++ b/driver-icarus.c
@@ -179,7 +179,7 @@ struct ICARUS_INFO {
};
// One for each possible device
-static struct ICARUS_INFO *icarus_info[MAX_DEVICES];
+static struct ICARUS_INFO **icarus_info;
struct device_api icarus_api;
@@ -421,15 +421,15 @@ static bool icarus_detect_one(const char *devpath)
icarus->device_path = strdup(devpath);
icarus->threads = 1;
add_cgpu(icarus);
+ icarus_info = realloc(icarus_info, sizeof(struct ICARUS_INFO *) * (total_devices + 1));
applog(LOG_INFO, "Found Icarus at %s, mark as %d",
devpath, icarus->device_id);
- if (icarus_info[icarus->device_id] == NULL) {
- icarus_info[icarus->device_id] = (struct ICARUS_INFO *)malloc(sizeof(struct ICARUS_INFO));
- if (unlikely(!(icarus_info[icarus->device_id])))
- quit(1, "Failed to malloc ICARUS_INFO");
- }
+ // Since we are adding a new device on the end it needs to always be allocated
+ icarus_info[icarus->device_id] = (struct ICARUS_INFO *)malloc(sizeof(struct ICARUS_INFO));
+ if (unlikely(!(icarus_info[icarus->device_id])))
+ quit(1, "Failed to malloc ICARUS_INFO");
info = icarus_info[icarus->device_id];
diff --git a/driver-opencl.c b/driver-opencl.c
index 521a0635..17be40ae 100644
--- a/driver-opencl.c
+++ b/driver-opencl.c
@@ -538,15 +538,11 @@ struct cgpu_info *cpus;
void pause_dynamic_threads(int gpu)
{
struct cgpu_info *cgpu = &gpus[gpu];
- int i, thread_no = 0;
+ int i;
- for (i = 0; i < mining_threads; i++) {
+ for (i = 1; i < cgpu->threads; i++) {
struct thr_info *thr = &thr_info[i];
- if (thr->cgpu != cgpu)
- continue;
- if (!thread_no++)
- continue;
if (!thr->pause && cgpu->dynamic) {
applog(LOG_WARNING, "Disabling extra threads due to dynamic mode.");
applog(LOG_WARNING, "Tune dynamic intensity with --gpu-dyninterval");
@@ -1130,9 +1126,6 @@ static void opencl_detect()
nDevs = 0;
}
- if (MAX_DEVICES - total_devices < nDevs)
- nDevs = MAX_DEVICES - total_devices;
-
if (!nDevs)
return;
@@ -1354,34 +1347,32 @@ static uint64_t opencl_scanhash(struct thr_info *thr, struct work *work,
_clState *clState = clStates[thr_id];
const cl_kernel *kernel = &clState->kernel;
- double gpu_ms_average = 7;
cl_int status;
-
size_t globalThreads[1];
size_t localThreads[1] = { clState->wsize };
unsigned int threads;
unsigned int hashes;
-
- struct timeval tv_gpustart, tv_gpuend, diff;
- suseconds_t gpu_us;
-
- gettimeofday(&tv_gpustart, NULL);
- timeval_subtract(&diff, &tv_gpustart, &tv_gpuend);
+ gettimeofday(&gpu->tv_gpustart, NULL);
/* This finish flushes the readbuffer set with CL_FALSE later */
clFinish(clState->commandQueue);
- gettimeofday(&tv_gpuend, NULL);
- timeval_subtract(&diff, &tv_gpuend, &tv_gpustart);
- gpu_us = diff.tv_sec * 1000000 + diff.tv_usec;
- decay_time(&gpu_ms_average, gpu_us / 1000);
+ gettimeofday(&gpu->tv_gpuend, NULL);
+
if (gpu->dynamic) {
+ struct timeval diff;
+ suseconds_t gpu_ms;
+
+ timersub(&gpu->tv_gpuend, &gpu->tv_gpustart, &diff);
+ gpu_ms = diff.tv_sec * 1000 + diff.tv_usec / 1000;
+ gpu->gpu_ms_average = (gpu->gpu_ms_average + gpu_ms * 0.63) / 1.63;
+
/* Try to not let the GPU be out for longer than 6ms, but
* increase intensity when the system is idle, unless
* dynamic is disabled. */
- if (gpu_ms_average > opt_dynamic_interval) {
+ if (gpu->gpu_ms_average > opt_dynamic_interval) {
if (gpu->intensity > MIN_INTENSITY)
--gpu->intensity;
- } else if (gpu_ms_average < ((opt_dynamic_interval / 2) ? : 1)) {
+ } else if (gpu->gpu_ms_average < ((opt_dynamic_interval / 2) ? : 1)) {
if (gpu->intensity < MAX_INTENSITY)
++gpu->intensity;
}
diff --git a/driver-ztex.c b/driver-ztex.c
index c881cd7d..e38be748 100644
--- a/driver-ztex.c
+++ b/driver-ztex.c
@@ -66,8 +66,6 @@ static void ztex_detect(void)
applog(LOG_WARNING, "Found %d ztex board(s)", cnt);
for (i = 0; i < cnt; i++) {
- if (total_devices == MAX_DEVICES)
- break;
ztex = calloc(1, sizeof(struct cgpu_info));
ztex->api = &ztex_api;
ztex->device_ztex = ztex_devices[i]->dev;
diff --git a/fpgautils.c b/fpgautils.c
index 70387c69..59eb7bcd 100644
--- a/fpgautils.c
+++ b/fpgautils.c
@@ -40,9 +40,6 @@
char
serial_autodetect_udev(detectone_func_t detectone, const char*prodname)
{
- if (total_devices == MAX_DEVICES)
- return 0;
-
struct udev *udev = udev_new();
struct udev_enumerate *enumerate = udev_enumerate_new(udev);
struct udev_list_entry *list_entry;
@@ -64,9 +61,6 @@ serial_autodetect_udev(detectone_func_t detectone, const char*prodname)
++found;
udev_device_unref(device);
-
- if (total_devices == MAX_DEVICES)
- break;
}
udev_enumerate_unref(enumerate);
udev_unref(udev);
@@ -85,9 +79,6 @@ char
serial_autodetect_devserial(detectone_func_t detectone, const char*prodname)
{
#ifndef WIN32
- if (total_devices == MAX_DEVICES)
- return 0;
-
DIR *D;
struct dirent *de;
const char udevdir[] = "/dev/serial/by-id";
@@ -104,11 +95,8 @@ serial_autodetect_devserial(detectone_func_t detectone, const char*prodname)
if (!strstr(de->d_name, prodname))
continue;
strcpy(devfile, de->d_name);
- if (detectone(devpath)) {
+ if (detectone(devpath))
++found;
- if (total_devices == MAX_DEVICES)
- break;
- }
}
closedir(D);
@@ -121,9 +109,6 @@ serial_autodetect_devserial(detectone_func_t detectone, const char*prodname)
char
_serial_detect(const char*dname, detectone_func_t detectone, autoscan_func_t autoscan, bool forceauto)
{
- if (total_devices == MAX_DEVICES)
- return 0;
-
struct string_elist *iter, *tmp;
const char*s, *p;
bool inhibitauto = false;
@@ -148,12 +133,10 @@ _serial_detect(const char*dname, detectone_func_t detectone, autoscan_func_t aut
string_elist_del(iter);
inhibitauto = true;
++found;
- if (total_devices == MAX_DEVICES)
- break;
}
}
- if ((forceauto || !inhibitauto) && autoscan && total_devices < MAX_DEVICES)
+ if ((forceauto || !inhibitauto) && autoscan)
found += autoscan();
return found;
@@ -198,28 +181,33 @@ serial_open(const char*devpath, unsigned long baud, signed short timeout, bool p
if (unlikely(fdDev == -1))
return -1;
- struct termios pattr;
- tcgetattr(fdDev, &pattr);
- pattr.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON);
- pattr.c_oflag &= ~OPOST;
- pattr.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
- pattr.c_cflag &= ~(CSIZE | PARENB);
- pattr.c_cflag |= CS8;
+ struct termios my_termios;
+
+ tcgetattr(fdDev, &my_termios);
switch (baud) {
case 0: break;
- case 115200: pattr.c_cflag = B115200; break;
+ case 115200: my_termios.c_cflag = B115200; break;
default:
applog(LOG_WARNING, "Unrecognized baud rate: %lu", baud);
}
- pattr.c_cflag |= CREAD | CLOCAL;
+
+ my_termios.c_cflag |= CS8;
+ my_termios.c_cflag |= CREAD;
+ my_termios.c_cflag |= CLOCAL;
+ my_termios.c_cflag &= ~(CSIZE | PARENB);
+
+ my_termios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK |
+ ISTRIP | INLCR | IGNCR | ICRNL | IXON);
+ my_termios.c_oflag &= ~OPOST;
+ my_termios.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
if (timeout >= 0) {
- pattr.c_cc[VTIME] = (cc_t)timeout;
- pattr.c_cc[VMIN] = 0;
+ my_termios.c_cc[VTIME] = (cc_t)timeout;
+ my_termios.c_cc[VMIN] = 0;
}
- tcsetattr(fdDev, TCSANOW, &pattr);
+ tcsetattr(fdDev, TCSANOW, &my_termios);
if (purge)
tcflush(fdDev, TCIOFLUSH);
return fdDev;
diff --git a/miner.h b/miner.h
index 5259d358..42ded100 100644
--- a/miner.h
+++ b/miner.h
@@ -300,6 +300,7 @@ struct cgminer_pool_stats {
struct timeval getwork_wait;
struct timeval getwork_wait_max;
struct timeval getwork_wait_min;
+ double getwork_wait_rolling;
};
struct cgpu_info {
@@ -347,6 +348,10 @@ struct cgpu_info {
cl_uint vwidth;
size_t work_size;
enum cl_kernels kernel;
+
+ struct timeval tv_gpustart;
+ struct timeval tv_gpuend;
+ double gpu_ms_average;
#endif
float temp;
@@ -537,7 +542,7 @@ extern pthread_rwlock_t netacc_lock;
extern const uint32_t sha256_init_state[];
extern json_t *json_rpc_call(CURL *curl, const char *url, const char *userpass,
- const char *rpc_req, bool, bool, bool *,
+ const char *rpc_req, bool, bool, int *,
struct pool *pool, bool);
extern char *bin2hex(const unsigned char *p, size_t len);
extern bool hex2bin(unsigned char *p, const char *hexstr, size_t len);
@@ -580,14 +585,9 @@ extern void api(int thr_id);
extern struct pool *current_pool(void);
extern int active_pools(void);
-extern int add_pool_details(bool live, char *url, char *user, char *pass);
-
-#define ADD_POOL_MAXIMUM 1
-#define ADD_POOL_OK 0
+extern void add_pool_details(bool live, char *url, char *user, char *pass);
#define MAX_GPUDEVICES 16
-#define MAX_DEVICES 64
-#define MAX_POOLS (32)
#define MIN_INTENSITY -10
#define _MIN_INTENSITY_STR "-10"
@@ -608,9 +608,9 @@ extern double total_secs;
extern int mining_threads;
extern struct cgpu_info *cpus;
extern int total_devices;
-extern struct cgpu_info *devices[];
+extern struct cgpu_info **devices;
extern int total_pools;
-extern struct pool *pools[MAX_POOLS];
+extern struct pool **pools;
extern const char *algo_names[];
extern enum sha256_algos opt_algo;
extern struct strategies strategies[];
@@ -739,7 +739,7 @@ struct work {
bool mined;
bool clone;
bool cloned;
- bool rolltime;
+ int rolltime;
bool longpoll;
bool stale;
bool mandatory;
diff --git a/miner.php b/miner.php
index 728a4ebc..7c4d8891 100644
--- a/miner.php
+++ b/miner.php
@@ -3,6 +3,7 @@ session_start();
#
global $miner, $port, $readonly, $notify, $rigs, $socktimeoutsec;
global $checklastshare, $hidefields;
+global $ignorerefresh, $changerefresh, $autorefresh;
#
# Don't touch these 2 - see $rigs below
$miner = null;
@@ -50,6 +51,14 @@ $socktimeoutsec = 10;
#$hidefields = array('POOL.URL' => 1, 'POOL.User' => 1);
$hidefields = array();
#
+# Auto-refresh of the page (in seconds)
+# $ignorerefresh = true/false always ignore refresh parameters
+# $changerefresh = true/false show buttons to change the value
+# $autorefresh = default value, 0 means dont auto-refresh
+$ignorerefresh = false;
+$changerefresh = true;
+$autorefresh = 0;
+#
$here = $_SERVER['PHP_SELF'];
#
global $tablebegin, $tableend, $warnfont, $warnoff, $dfmt;
@@ -60,6 +69,11 @@ $warnfont = '';
$warnoff = '';
$dfmt = 'H:i:s j-M-Y \U\T\CP';
#
+global $miner_font_family, $miner_font_size;
+#
+$miner_font_family = 'verdana,arial,sans';
+$miner_font_size = '13pt';
+#
# This below allows you to put your own settings into a seperate file
# so you don't need to update miner.php with your preferred settings
# every time a new version is released
@@ -76,9 +90,24 @@ $showndate = false;
global $rigerror;
$rigerror = array();
#
-function htmlhead($checkapi)
+function htmlhead($checkapi, $rig)
{
+ global $miner_font_family, $miner_font_size;
global $error, $readonly, $here;
+ global $ignorerefresh, $autorefresh;
+
+ $paramrig = '';
+ if ($rig != null && $rig != '')
+ $paramrig = "&rig=$rig";
+
+ if ($ignorerefresh == true || $autorefresh == 0)
+ $refreshmeta = '';
+ else
+ {
+ $url = "$here?ref=$autorefresh$paramrig";
+ $refreshmeta = "\n";
+ }
+
if ($readonly === false && $checkapi === true)
{
$access = api('privileged');
@@ -87,28 +116,31 @@ function htmlhead($checkapi)
|| $access['STATUS']['STATUS'] != 'S')
$readonly = true;
}
-?>
-
Mine
+ $miner_font = "font-family:$miner_font_family; font-size:$miner_font_size;";
+
+ echo "$refreshmeta
+Mine
@@ -778,7 +810,7 @@ function doforeach($cmd, $des, $sum, $head, $datetime)
foreach ($dthead as $name => $x)
{
if ($item == 'STATUS' && $name == '')
- echo " | ";
+ echo " | ";
else
{
if (isset($row[$name]))
@@ -861,7 +893,7 @@ function doforeach($cmd, $des, $sum, $head, $datetime)
if ($rig === 'total')
echo "Total: | ";
else
- echo " | ";
+ echo " | ";
}
else
{
@@ -884,27 +916,42 @@ function doforeach($cmd, $des, $sum, $head, $datetime)
}
}
#
+function refreshbuttons()
+{
+ global $readonly;
+ global $ignorerefresh, $changerefresh, $autorefresh;
+
+ if ($ignorerefresh == false && $changerefresh == true)
+ {
+ echo ' ';
+ echo "";
+ echo "";
+ echo "";
+ }
+}
+#
function doOne($rig, $preprocess)
{
- global $error, $readonly, $notify;
- global $rigs;
+ global $error, $readonly, $notify, $rigs;
- htmlhead(true);
+ htmlhead(true, $rig);
$error = null;
echo " |
";
if ($preprocess != null)
@@ -930,6 +977,14 @@ function display()
global $tablebegin, $tableend;
global $miner, $port;
global $error, $readonly, $notify, $rigs;
+ global $ignorerefresh, $autorefresh;
+
+ if ($ignorerefresh == false)
+ {
+ $ref = trim(getparam('ref', true));
+ if ($ref != null && $ref != '')
+ $autorefresh = intval($ref);
+ }
$rig = trim(getparam('rig', true));
@@ -998,10 +1053,12 @@ function display()
return;
}
- htmlhead(false);
+ htmlhead(false, null);
echo " |
";
if ($preprocess != null)
diff --git a/util.c b/util.c
index 509d9381..703be545 100644
--- a/util.c
+++ b/util.c
@@ -56,7 +56,7 @@ struct upload_buffer {
struct header_info {
char *lp_path;
- bool has_rolltime;
+ int rolltime;
char *reason;
};
@@ -160,8 +160,13 @@ static size_t resp_hdr_cb(void *ptr, size_t size, size_t nmemb, void *user_data)
if (!strncasecmp("N", val, 1)) {
applog(LOG_DEBUG, "X-Roll-Ntime: N found");
} else {
- applog(LOG_DEBUG, "X-Roll-Ntime found");
- hi->has_rolltime = true;
+ /* Check to see if expire= is supported and if not, set
+ * the rolltime to the default scantime */
+ if (strlen(val) > 7 && !strncasecmp("expire=", val, 7))
+ sscanf(val + 7, "%d", &hi->rolltime);
+ else
+ hi->rolltime = opt_scantime;
+ applog(LOG_DEBUG, "X-Roll-Ntime expiry set to %d", hi->rolltime);
}
}
@@ -248,7 +253,7 @@ static void set_nettime(void)
json_t *json_rpc_call(CURL *curl, const char *url,
const char *userpass, const char *rpc_req,
- bool probe, bool longpoll, bool *rolltime,
+ bool probe, bool longpoll, int *rolltime,
struct pool *pool, bool share)
{
json_t *val, *err_val, *res_val;
@@ -260,7 +265,7 @@ json_t *json_rpc_call(CURL *curl, const char *url,
char len_hdr[64], user_agent_hdr[128];
char curl_err_str[CURL_ERROR_SIZE];
long timeout = longpoll ? (60 * 60) : 60;
- struct header_info hi = {NULL, false, NULL};
+ struct header_info hi = {NULL, 0, NULL};
bool probing = false;
memset(&err, 0, sizeof(err));
@@ -375,7 +380,7 @@ json_t *json_rpc_call(CURL *curl, const char *url,
hi.lp_path = NULL;
}
- *rolltime = hi.has_rolltime;
+ *rolltime = hi.rolltime;
val = JSON_LOADS(all_data.buf, &err);
if (!val) {