Browse Source

Merge branch 'master' of git://github.com/ckolivas/cgminer.git

nfactor-troky
Paul Sheppard 12 years ago
parent
commit
2d1f961046
  1. 5
      adl.c
  2. 52
      cgminer.c
  3. 2
      miner.h

5
adl.c

@@ -1065,6 +1065,11 @@ static bool fan_autotune(int gpu, int temp, int fanpercent, int lasttemp, bool *
if (newpercent != fanpercent) {
applog(LOG_INFO, "Setting GPU %d fan percentage to %d", gpu, newpercent);
set_fanspeed(gpu, newpercent);
/* If the fanspeed is going down and we're below the top speed,
* consider the fan optimal to prevent minute changes in
* fanspeed delaying GPU engine speed changes */
if (newpercent < fanpercent && *fan_window)
return true;
return false;
}
return true;

52
cgminer.c

@@ -1625,7 +1625,6 @@ static bool submit_upstream_work(const struct work *work, CURL *curl)
bool rolltime;
uint32_t *hash32;
char hashshow[64+1] = "";
bool isblock;
#ifdef __BIG_ENDIAN__
int swapcounter = 0;
@@ -1666,17 +1665,9 @@ static bool submit_upstream_work(const struct work *work, CURL *curl)
res = json_object_get(val, "result");
if (!QUIET) {
#ifndef MIPSEB
// This one segfaults on my router for some reason
isblock = regeneratehash(work);
if (unlikely(isblock)) {
pool->solved++;
found_blocks++;
}
hash32 = (uint32_t *)(work->hash);
sprintf(hashshow, "%08lx.%08lx%s", (unsigned long)(hash32[6]), (unsigned long)(hash32[5]),
isblock ? " BLOCK!" : "");
#endif
work->block? " BLOCK!" : "");
}
/* Theoretically threads could race when modifying accepted and
@@ -1829,6 +1820,7 @@ static void get_benchmark_work(struct work *work)
size_t min_size = (work_size < bench_size ? work_size : bench_size);
memset(work, 0, sizeof(work));
memcpy(work, &bench_block, min_size);
work->mandatory = true;
}
static bool get_upstream_work(struct work *work, CURL *curl)
@@ -2164,7 +2156,7 @@ static bool stale_work(struct work *work, bool share)
struct timeval now;
struct pool *pool;
if (opt_benchmark)
if (work->mandatory)
return false;
gettimeofday(&now, NULL);
@@ -2184,6 +2176,16 @@ static bool stale_work(struct work *work, bool share)
return false;
}
/* Check whether a share we are about to submit also solves a block.
 * Sets work->block from regeneratehash(), and on a solve credits the
 * pool's solved counter, bumps the global found_blocks tally, and marks
 * the work mandatory so it is never discarded as stale. */
static void check_solve(struct work *work)
{
	work->block = regeneratehash(work);
	if (unlikely(work->block)) {
		work->pool->solved++;
		found_blocks++;
		work->mandatory = true;
		/* BUG FIX: work->pool is a struct pool *, but %d expects an
		 * int — passing the pointer is undefined behavior and prints
		 * garbage. Log the pool's number instead. */
		applog(LOG_NOTICE, "Found block for pool %d!", work->pool->pool_no);
	}
}
static void *submit_work_thread(void *userdata)
{
@@ -2197,6 +2199,8 @@ static void *submit_work_thread(void *userdata)
applog(LOG_DEBUG, "Creating extra submit work thread");
check_solve(work);
if (stale_work(work, true)) {
if (opt_submit_stale)
applog(LOG_NOTICE, "Stale share detected, submitting as user requested");
@@ -2280,7 +2284,7 @@ static struct pool *priority_pool(int choice)
void switch_pools(struct pool *selected)
{
struct pool *pool, *last_pool;
int i, pool_no;
int i, pool_no, next_pool;
mutex_lock(&control_lock);
last_pool = currentpool;
@@ -2313,13 +2317,22 @@ void switch_pools(struct pool *selected)
/* Both of these simply increment and cycle */
case POOL_ROUNDROBIN:
case POOL_ROTATE:
if (selected) {
if (selected && !selected->idle) {
pool_no = selected->pool_no;
break;
}
pool_no++;
if (pool_no >= total_pools)
pool_no = 0;
next_pool = pool_no;
/* Select the next alive pool */
for (i = 1; i < total_pools; i++) {
next_pool++;
if (next_pool >= total_pools)
next_pool = 0;
pool = pools[next_pool];
if (!pool->idle && pool->enabled == POOL_ENABLED) {
pool_no = next_pool;
break;
}
}
break;
default:
break;
@@ -2479,7 +2492,7 @@ static void test_work_current(struct work *work)
{
char *hexstr;
if (opt_benchmark)
if (work->mandatory)
return;
hexstr = bin2hex(work->data, 18);
@@ -4010,6 +4023,9 @@ static void convert_to_work(json_t *val, bool rolltime, struct pool *pool)
work->rolltime = rolltime;
work->longpoll = true;
if (pool->enabled == POOL_REJECTING)
work->mandatory = true;
/* We'll be checking this work item twice, but we already know it's
* from a new block so explicitly force the new block detection now
* rather than waiting for it to hit the stage thread. This also
@@ -4069,7 +4085,7 @@ static struct pool *select_longpoll_pool(struct pool *cp)
*/
static void wait_lpcurrent(struct pool *pool)
{
if (pool->enabled == POOL_REJECTING)
if (pool->enabled == POOL_REJECTING || pool_strategy == POOL_LOADBALANCE)
return;
while (pool != current_pool()) {

2
miner.h

@@ -741,6 +741,8 @@ struct work {
bool rolltime;
bool longpoll;
bool stale;
bool mandatory;
bool block;
unsigned int work_block;
int id;

Loading…
Cancel
Save