
With better bounds on the amount of work cloned, there is no need to age work; ageing was picking off master work items that could still be rolled further.

nfactor-troky
Con Kolivas 13 years ago
commit 5d90c50fc0
1 changed file with 20 deletions: cgminer.c

cgminer.c
@@ -4331,24 +4331,6 @@ static void *watchpool_thread(void __maybe_unused *userdata)
 	return NULL;
 }
 
-/* Work is sorted according to age, so discard the oldest work items, leaving
- * only 1 staged work item per mining thread */
-static void age_work(void)
-{
-	int discarded = 0;
-
-	while (requests_staged() > mining_threads * 4 / 3 + opt_queue) {
-		struct work *work = hash_pop(NULL);
-
-		if (unlikely(!work))
-			break;
-		discard_work(work);
-		discarded++;
-	}
-	if (discarded)
-		applog(LOG_DEBUG, "Aged %d work items", discarded);
-}
-
 /* Makes sure the hashmeter keeps going even if mining threads stall, updates
  * the screen at regular intervals, and restarts threads if they appear to have
  * died. */
@@ -4370,8 +4352,6 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 		if (requests_queued() < opt_queue)
 			queue_request(NULL, false);
 
-		age_work();
-
 		hashmeter(-1, &zero_tv, 0);
 
 #ifdef HAVE_CURSES
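
For context on the commit message: below is a minimal sketch, not cgminer code, of why bounding cloned/staged work at creation time makes a separate ageing pass redundant. The threshold expression mining_threads * 4 / 3 + opt_queue is taken from the removed age_work(); the helpers stage_limit_reached() and stage_one_work() and the counter variables are hypothetical stand-ins for illustration only.

#include <stdbool.h>
#include <stdio.h>

static int staged_count;        /* stand-in for requests_staged() */
static int mining_threads = 4;  /* number of mining threads */
static int opt_queue = 1;       /* extra queue depth requested by the user */

/* Hypothetical helper: true once the staged queue already holds as many
 * items as the threshold the old age_work() trimmed down to. */
static bool stage_limit_reached(void)
{
	return staged_count >= mining_threads * 4 / 3 + opt_queue;
}

/* Hypothetical helper: clone/stage one more work item only while under the
 * cap. Because the bound is enforced here, a later pass that pops and
 * discards the oldest staged items (as age_work() did) never finds the
 * count above the threshold, so it can no longer throw away master work
 * items that were still rollable. */
static bool stage_one_work(void)
{
	if (stage_limit_reached())
		return false;
	staged_count++;
	return true;
}

int main(void)
{
	while (stage_one_work())
		;
	printf("staged %d items (cap = %d)\n", staged_count,
	       mining_threads * 4 / 3 + opt_queue);
	return 0;
}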
