
Get rid of the requirement for a static struct that needs locking to cache work.

Make it possible to use the thread id for getting work again.
Flag the getwork() function when a new block is detected so that it explicitly discards any cached work.
Store the header of each new work item and compare it against the work we're about to submit; if it belongs to an old block it is stale, so don't submit it.
This should significantly decrease the number of rejected blocks.
Branch: nfactor-troky
Con Kolivas, 14 years ago
Parent commit: bed692152f
1 file changed: cpu-miner.c (75 lines)

--- a/cpu-miner.c
+++ b/cpu-miner.c
@@ -146,7 +146,6 @@ int longpoll_thr_id;
 struct work_restart *work_restart = NULL;
 pthread_mutex_t time_lock;
 static pthread_mutex_t hash_lock;
-static pthread_mutex_t get_lock;
 static double total_mhashes_done;
 static struct timeval total_tv_start, total_tv_end;
 static int accepted, rejected;
@@ -457,6 +456,7 @@ struct io_data{
 static pthread_t *get_thread = NULL;
 static pthread_t *submit_thread = NULL;
+static char current_block[36];
 
 static void *get_work_thread(void *userdata)
 {
@@ -537,6 +537,11 @@ static void *submit_work_thread(void *userdata)
 	CURL *curl = io_data->curl;
 	int failures = 0;
 
+	if (unlikely(strncmp((const char *)wc->u.work->data, current_block, 36))) {
+		applog(LOG_INFO, "Stale work detected, discarding");
+		goto out;
+	}
+
 	/* submit solution to bitcoin via JSON-RPC */
 	while (!submit_upstream_work(curl, wc->u.work)) {
 		if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) {
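
The check above works because the first 36 bytes of the getwork data buffer hold the block header's version and previous-block-hash fields, so a mismatch against the saved current_block means the share was built on a block that has since been superseded. Below is a minimal, self-contained sketch of the same idea; remember_block(), work_is_current() and BLOCK_ID_LEN are names invented for this illustration, while cpu-miner.c itself simply calls memcpy() and strncmp() inline as shown in the diff.

#include <stdbool.h>
#include <string.h>

#define BLOCK_ID_LEN 36   /* header version (4 bytes) + previous block hash (32 bytes) */

static char current_block[BLOCK_ID_LEN];

/* Called whenever fresh work arrives: remember which block it builds on. */
static void remember_block(const char *work_data)
{
	memcpy(current_block, work_data, BLOCK_ID_LEN);
}

/* Called just before submitting: true only if the solution still targets
 * the block we most recently fetched work for. */
static bool work_is_current(const char *work_data)
{
	return memcmp(work_data, current_block, BLOCK_ID_LEN) == 0;
}
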
@@ -684,17 +689,15 @@ static void hashmeter(int thr_id, struct timeval *diff,
 	local_mhashes_done = 0;
 }
 
-static struct work *work_heap = NULL;
-
-/* Since we always have one extra work item queued, set the thread id to 0
- * for all the work and just give the work to the first thread that requests
- * work */
 static bool get_work(struct work *work)
 {
-	struct thr_info *thr = &thr_info[0];
+	struct thr_info *thr = &thr_info[work->thr_id];
+	struct work *work_heap;
 	struct workio_cmd *wc;
 	bool ret = false;
+	static bool first_work = true;
 
+get_new:
 	/* fill out work request message */
 	wc = calloc(1, sizeof(*wc));
 	if (unlikely(!wc))
@@ -709,41 +712,41 @@ static bool get_work(struct work *work)
 		goto out;
 	}
 
-	/* work_heap is protected by get_lock */
-	pthread_mutex_lock(&get_lock);
-	if (likely(work_heap)) {
-		memcpy(work, work_heap, sizeof(*work));
-		/* Wait for next response, a unit of work - it should be queued */
-		free(work_heap);
-		work_heap = tq_pop(thr->q, NULL);
-	} else {
-		/* wait for 1st response, or 1st response after failure */
-		work_heap = tq_pop(thr->q, NULL);
-		if (unlikely(!work_heap))
-			goto out_unlock;
+	/* wait for 1st response, or get cached response */
+	work_heap = tq_pop(thr->q, NULL);
+	if (unlikely(!work_heap))
+		goto out;
+
+	if (unlikely(work_restart[opt_n_threads + gpu_threads].restart)) {
+		work_restart[opt_n_threads + gpu_threads].restart = 0;
+		free(work_heap);
+		if (opt_debug)
+			applog(LOG_DEBUG, "New block detected, discarding old work");
+		goto get_new;
+	}
 
+	if (unlikely(first_work)) {
+		first_work = false;
 		/* send for another work request for the next time get_work
 		 * is called. */
 		wc = calloc(1, sizeof(*wc));
-		if (unlikely(!wc)) {
-			free(work_heap);
-			work_heap = NULL;
-			goto out_unlock;
-		}
+		if (unlikely(!wc))
+			goto out_free;
 
 		wc->cmd = WC_GET_WORK;
 		wc->thr = thr;
 
 		if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
 			workio_cmd_free(wc);
-			free(work_heap);
-			work_heap = NULL;
-			goto out_unlock;
+			goto out_free;
 		}
 	}
 
+	memcpy(work, work_heap, sizeof(*work));
+	memcpy(current_block, work->data, 36);
+
 	ret = true;
-out_unlock:
-	pthread_mutex_unlock(&get_lock);
+out_free:
+	free(work_heap);
 out:
 	return ret;
 }
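
The get_new label and the extra work_restart slot give get_work() a way to notice, after it has already popped a queued response, that longpoll flagged a new block; in that case the response is freed and the function loops back to request fresh work instead of handing out something stale. The tiny self-contained program below shows just that discard-and-refetch shape; new_block_flag and fetch_one() are placeholders invented for the example (standing in for the extra work_restart entry and tq_pop()), and the real function additionally queues its look-ahead work request only on the first call via first_work, which the sketch omits.

#include <stdbool.h>
#include <stdio.h>

static bool new_block_flag = true;   /* pretend longpoll just flagged a new block */

static int fetch_one(void)
{
	static int n;
	return ++n;                      /* dummy "work unit" number */
}

static int get_work_sketch(void)
{
	int w;

get_new:
	w = fetch_one();
	if (new_block_flag) {
		/* the queued response predates the new block: discard and refetch */
		new_block_flag = false;
		printf("discarding stale unit %d\n", w);
		goto get_new;
	}
	return w;
}

int main(void)
{
	printf("mining on unit %d\n", get_work_sketch());
	return 0;
}
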
@@ -819,13 +822,13 @@ static void *miner_thread(void *userdata)
 		uint64_t max64;
 		bool rc;
 
+		work.thr_id = thr_id;
 		/* obtain new work from internal workio thread */
 		if (unlikely(!get_work(&work))) {
 			applog(LOG_ERR, "work retrieval failed, exiting "
 				"mining thread %d", mythr->id);
 			goto out;
 		}
-		work.thr_id = thr_id;
 
 		hashes_done = 0;
 		gettimeofday(&tv_start, NULL);
@@ -1027,13 +1030,13 @@ static void *gpuminer_thread(void *userdata)
 		memset(res, 0, BUFFERSIZE);
 		gettimeofday(&tv_workstart, NULL);
 
+		work->thr_id = thr_id;
 		/* obtain new work from internal workio thread */
 		if (unlikely(!get_work(work))) {
 			applog(LOG_ERR, "work retrieval failed, exiting "
 				"gpu mining thread %d", mythr->id);
 			goto out;
 		}
-		work->thr_id = thr_id;
 
 		precalc_hash(&work->blk, (uint32_t *)(work->midstate), (uint32_t *)(work->data + 64));
 		work->blk.nonce = 0;
@@ -1094,16 +1097,8 @@ static void restart_threads(void)
 {
 	int i;
 
-	pthread_mutex_lock(&get_lock);
-	for (i = 0; i < opt_n_threads + gpu_threads; i++)
+	for (i = 0; i < opt_n_threads + gpu_threads + 1; i++)
 		work_restart[i].restart = 1;
-
-	/* If longpoll has detected a new block, we should discard any queued
-	 * blocks in work_heap */
-	if (likely(work_heap)) {
-		free(work_heap);
-		work_heap = NULL;
-	}
-	pthread_mutex_unlock(&get_lock);
 }
 
 static void *longpoll_thread(void *userdata)
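
restart_threads() now iterates one slot past the per-thread entries, and main() allocates that extra element in the final hunk below; the additional slot is the flag that get_work() polls in the hunk further up. The toy program below shows only the indexing convention; the thread counts and the main() driver are made up for the example, and the real array is heap-allocated and sized from the runtime opt_n_threads and gpu_threads values.

#include <stdio.h>

/* Toy dimensions, stand-ins for the runtime opt_n_threads / gpu_threads values */
enum { OPT_N_THREADS = 2, GPU_THREADS = 1 };

struct work_restart { int restart; };

/* one slot per mining thread, plus one extra slot polled by get_work() */
static struct work_restart work_restart[OPT_N_THREADS + GPU_THREADS + 1];

static void restart_threads(void)
{
	int i;

	for (i = 0; i < OPT_N_THREADS + GPU_THREADS + 1; i++)
		work_restart[i].restart = 1;
}

int main(void)
{
	restart_threads();   /* the longpoll thread would call this on a new block */

	/* get_work() checks and clears the extra slot before trusting a queued
	 * response; if it was set, the response is discarded and refetched */
	if (work_restart[OPT_N_THREADS + GPU_THREADS].restart) {
		work_restart[OPT_N_THREADS + GPU_THREADS].restart = 0;
		printf("new block detected, discarding old work\n");
	}
	return 0;
}
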
@@ -1425,8 +1420,6 @@ int main (int argc, char *argv[])
 		return 1;
 	if (unlikely(pthread_mutex_init(&hash_lock, NULL)))
 		return 1;
-	if (unlikely(pthread_mutex_init(&get_lock, NULL)))
-		return 1;
 
 	if (unlikely(curl_global_init(CURL_GLOBAL_ALL)))
 		return 1;
@@ -1435,7 +1428,7 @@ int main (int argc, char *argv[])
 	openlog("cpuminer", LOG_PID, LOG_USER);
 #endif
 
-	work_restart = calloc(opt_n_threads + gpu_threads, sizeof(*work_restart));
+	work_restart = calloc(opt_n_threads + gpu_threads + 1, sizeof(*work_restart));
 	if (!work_restart)
 		return 1;
