
Merge branch 'master' of git://github.com/ckolivas/cgminer.git

Branch: nfactor-troky
Author: Paul Sheppard, 13 years ago
Commit: 2aa95ab812

Changed files:
  cgminer.c          9 lines changed
  driver-bitforce.c  14 lines changed

cgminer.c  (9 lines changed)

@@ -1415,6 +1415,8 @@ static void adj_width(int var, int *length)
         (*length)++;
 }
 
+static int dev_width;
+
 static void curses_print_devstatus(int thr_id)
 {
     static int awidth = 1, rwidth = 1, hwwidth = 1, uwidth = 1;
@@ -1426,7 +1428,7 @@ static void curses_print_devstatus(int thr_id)
     /* Check this isn't out of the window size */
     if (wmove(statuswin,devcursor + cgpu->cgminer_id, 0) == ERR)
         return;
-    wprintw(statuswin, " %s %d: ", cgpu->api->name, cgpu->device_id);
+    wprintw(statuswin, " %s %*d: ", cgpu->api->name, dev_width, cgpu->device_id);
     if (cgpu->api->get_statline_before) {
         logline[0] = '\0';
         cgpu->api->get_statline_before(logline, cgpu);
@@ -4577,7 +4579,7 @@ static void *watchdog_thread(void __maybe_unused *userdata)
         dev_count_sick = (cgpu->low_count > WATCHDOG_SICK_COUNT);
         dev_count_dead = (cgpu->low_count > WATCHDOG_DEAD_COUNT);
 
-        if (gpus[gpu].status != LIFE_WELL && (now.tv_sec - thr->last.tv_sec < WATCHDOG_SICK_TIME) && dev_count_well) {
+        if (cgpu->status != LIFE_WELL && (now.tv_sec - thr->last.tv_sec < WATCHDOG_SICK_TIME) && dev_count_well) {
             applog(LOG_ERR, "%s: Recovered, declaring WELL!", dev_str);
             cgpu->status = LIFE_WELL;
             cgpu->device_last_well = time(NULL);
@@ -4977,6 +4979,9 @@ void enable_device(struct cgpu_info *cgpu)
     cgpu->deven = DEV_ENABLED;
     devices[cgpu->cgminer_id = cgminer_id_count++] = cgpu;
     mining_threads += cgpu->threads;
+#ifdef HAVE_CURSES
+    adj_width(mining_threads, &dev_width);
+#endif
 #ifdef HAVE_OPENCL
     if (cgpu->api == &opencl_api) {
         gpu_threads += cgpu->threads;
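
Note on the cgminer.c hunks: the device status line now uses "%*d" instead of "%d", so the device id column takes its minimum field width from the new dev_width, which enable_device() widens through adj_width() as mining_threads gains digits. A minimal standalone sketch of that printf behaviour follows; it is not part of the commit, the adj_width() body is a plausible reconstruction from the hunk context, and the "GPU" label and sample ids are made up:

#include <math.h>
#include <stdio.h>

/* Rough sketch of the width helper: grow *length by one whenever `var`
 * needs more digits than the current field width. Compile with -lm. */
static void adj_width(int var, int *length)
{
    if ((int)(log10(var) + 1) > *length)
        (*length)++;
}

int main(void)
{
    int dev_width = 1;

    adj_width(12, &dev_width);              /* 12 has two digits, so dev_width becomes 2 */

    /* "%*d" takes the field width from the argument list, so short ids are
     * right-aligned to the same column as longer ones. */
    printf(" GPU %*d: \n", dev_width, 3);   /* " GPU  3: " */
    printf(" GPU %*d: \n", dev_width, 12);  /* " GPU 12: " */
    return 0;
}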

driver-bitforce.c  (14 lines changed)

@@ -381,6 +381,12 @@ static void biforce_thread_enable(struct thr_info *thr)
     bitforce_init(bitforce);
 }
 
+static void ms_to_timeval(unsigned int mstime, struct timeval *ttime)
+{
+    ttime->tv_sec = mstime / 1000;
+    ttime->tv_usec = mstime * 1000 - (ttime->tv_sec * 1000000);
+}
+
 static uint64_t bitforce_scanhash(struct thr_info *thr, struct work *work, uint64_t __maybe_unused max_nonce)
 {
     struct cgpu_info *bitforce = thr->cgpu;
@@ -394,8 +400,7 @@ static uint64_t bitforce_scanhash(struct thr_info *thr, struct work *work, uint6
     /* Initially wait 2/3 of the average cycle time so we can request more
     work before full scan is up */
     sleep_time = (2 * bitforce->sleep_ms) / 3;
-    tdiff.tv_sec = sleep_time / 1000;
-    tdiff.tv_usec = sleep_time * 1000 - (tdiff.tv_sec * 1000000);
+    ms_to_timeval(sleep_time, &tdiff);
     if (!restart_wait(&tdiff))
         return 1;
@@ -404,8 +409,7 @@ static uint64_t bitforce_scanhash(struct thr_info *thr, struct work *work, uint6
     /* Now wait athe final 1/3rd; no bitforce should be finished by now */
     sleep_time = bitforce->sleep_ms - sleep_time;
-    tdiff.tv_sec = sleep_time / 1000;
-    tdiff.tv_usec = sleep_time * 1000 - (tdiff.tv_sec * 1000000);
+    ms_to_timeval(sleep_time, &tdiff);
     if (!restart_wait(&tdiff))
         return 1;
@@ -453,7 +457,7 @@ static struct api_data *bitforce_api_stats(struct cgpu_info *cgpu)
     // care since hashing performance is way more important than
     // locking access to displaying API debug 'stats'
     // If locking becomes an issue for any of them, use copy_data=true also
-    root = api_add_int(root, "Sleep Time", &(cgpu->sleep_ms), false);
+    root = api_add_uint(root, "Sleep Time", &(cgpu->sleep_ms), false);
 
     return root;
 }
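
Note on the driver-bitforce.c hunks: the two copies of the millisecond-to-struct-timeval arithmetic in bitforce_scanhash() are folded into the new ms_to_timeval() helper, and "Sleep Time" is registered with api_add_uint() rather than api_add_int(), presumably because sleep_ms is unsigned. A standalone sketch of what the helper computes; it is not part of the commit, and the 5000 ms figure is only an illustrative value:

#include <stdio.h>
#include <sys/time.h>

/* Same arithmetic as the ms_to_timeval() added by this commit: split a
 * millisecond count into whole seconds plus the remaining microseconds. */
static void ms_to_timeval(unsigned int mstime, struct timeval *ttime)
{
    ttime->tv_sec = mstime / 1000;
    ttime->tv_usec = mstime * 1000 - (ttime->tv_sec * 1000000);
}

int main(void)
{
    struct timeval tdiff;
    /* e.g. the initial 2/3 wait for a hypothetical sleep_ms of 5000 */
    unsigned int sleep_time = (2 * 5000) / 3;   /* 3333 ms */

    ms_to_timeval(sleep_time, &tdiff);
    printf("%u ms -> %ld s + %ld us\n", sleep_time,
           (long)tdiff.tv_sec, (long)tdiff.tv_usec);   /* 3333 ms -> 3 s + 333000 us */
    return 0;
}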
