bbd2c704f9
commit a9d3c1ffdb71d2a4985749acba3d424161154ab4 Author: Tanguy Pruvot <tanguy.pruvot@gmail.com> Date: Thu May 21 05:39:24 2015 +0200 multipool: last changes before squashed merge and fix net diff on wallets.. was longpoll specific commit a63f0024f3f1fb52d2c4369518bf87c33a9e16ae Author: Tanguy Pruvot <tanguy.pruvot@gmail.com> Date: Thu May 21 05:02:27 2015 +0200 update api sample for the protocol 1.5 commit adda14b22edde6485932be56550166478f6f00dd Author: Tanguy Pruvot <tanguy.pruvot@gmail.com> Date: Thu May 21 04:43:25 2015 +0200 stats: store pool number in scanlog commit e1a0274b01c29409ce16f9096b9985a35cf78ba7 Author: Tanguy Pruvot <tanguy.pruvot@gmail.com> Date: Thu May 21 03:36:15 2015 +0200 api: switchpool and new pool stats variables (API v1.5) add accepted/rej by pool, wait time on conditional, net diff and rate also add scantime per pool config option and do some pool cleanup.. commit 1a30450ad2a5e068983531b9d2a96629b970c1e8 Author: Tanguy Pruvot <tanguy.pruvot@gmail.com> Date: Wed May 20 06:39:09 2015 +0200 prevent concurrent pool switching and limit condtionnal wait messages to the first thread/device commit e3922c7a331a3ad2730bc83082fcd6b2547542f5 Author: Tanguy Pruvot <tanguy.pruvot@gmail.com> Date: Wed May 20 05:39:45 2015 +0200 add some pool rotate options, like pool time-limit update sample pools.conf for time rotation commit 312bd905412d49fd5a9f9e7ff2bc72b23edf38ed Author: Tanguy Pruvot <tanguy.pruvot@gmail.com> Date: Wed May 20 04:31:19 2015 +0200 do not try to restart threads from threads Start inconditionally the stratum and longpoll threads, these threads are just waiting a tq_push() if unused... so add some checks to know if vars are set for the right pool commit d4a9428fefdd9e9d70c3c8231f10961e7cd41760 Author: Tanguy Pruvot <tanguy.pruvot@gmail.com> Date: Wed May 20 01:06:31 2015 +0200 pools: add name and removed attributes also increase max defined pools to 8 to be tested on windows.. 
commit d840d683ecb2cc4767f0a0612b8359c52d4bad29 Author: Tanguy Pruvot <tanguy.pruvot@gmail.com> Date: Tue May 19 22:33:11 2015 +0200 parse json config file pools array commit d6c29b1f7f6b786c56e1f0cb8a90305f06cc7aec Author: Tanguy Pruvot <tanguy.pruvot@gmail.com> Date: Tue May 19 03:29:30 2015 +0200 multi-pools: prepare storage/switch of credentials for the moment: - allow the storage of multiple -o params (and user/pass) - allow a failover pool on connection failed - switch to the next pool with the "seturl" api remote command - longpoll to stratum switch (reverse to check...) todo: mix stratum/getwork, new api commands, json config... commit 2d6b3fddf6631d7df1ac6ca74eee91c33a3c09ee Author: Tanguy Pruvot <tanguy.pruvot@gmail.com> Date: Fri May 22 08:26:40 2015 +0200 multipool: increase stability, but not 100% perfect several problems fixed: - submit to the pool set in work (source pool) - longpoll curl timeout could be too high and could lock the switch - mutexes cant be copied on windows (stratum global var to fully remove later) I linked the -T timeout option to curl and tried to remove all fixed timeout values commit 6fd935c369cf33949dab98c8b09b2ca8cab3e7ea Author: Tanguy Pruvot <tanguy.pruvot@gmail.com> Date: Fri May 22 11:23:07 2015 +0200 stratum: remove last rpc_ vars in stratum thread commit ee9c821525be303282e5dab512ffd2ae81ad524f Author: Tanguy Pruvot <tanguy.pruvot@gmail.com> Date: Sat May 23 03:53:50 2015 +0200 stratum: do not alloc empty merkle tree commit 69852a2874bd18c4ed1daa9180a10d12976424dc Author: Tanguy Pruvot <tanguy.pruvot@gmail.com> Date: Sat May 23 04:25:12 2015 +0200 stratum: properly free jobs on disconnect Signed-off-by: Tanguy Pruvot <tanguy.pruvot@gmail.com>
277 lines
6.6 KiB
C++
277 lines
6.6 KiB
C++
/**
|
|
* Hash log of submitted job nonces
|
|
* Prevent duplicate shares
|
|
*
|
|
* (to be merged later with stats)
|
|
*
|
|
* tpruvot@github 2014
|
|
*/
|
|
#include <stdlib.h>
|
|
#include <memory.h>
|
|
#include <map>
|
|
|
|
#include "miner.h"
|
|
|
|
#define HI_DWORD(u64) ((uint32_t) (u64 >> 32))
|
|
#define LO_DWORD(u64) ((uint32_t) u64)
|
|
#define MK_HI64(u32) (0x100000000ULL * u32)
|
|
|
|
/* from miner.h
|
|
struct hashlog_data {
|
|
uint8_t npool;
|
|
uint8_t pool_type;
|
|
uint32_t height;
|
|
uint32_t njobid;
|
|
uint32_t nonce;
|
|
uint32_t scanned_from;
|
|
uint32_t scanned_to;
|
|
uint32_t last_from;
|
|
uint32_t tm_add;
|
|
uint32_t tm_upd;
|
|
uint32_t tm_sent;
|
|
};
|
|
*/
|
|
|
|
static std::map<uint64_t, hashlog_data> tlastshares;
|
|
|
|
#define LOG_PURGE_TIMEOUT 5*60
|
|
|
|
/**
 * Convert an hex string job id to an integer.
 * @param jobid hex string (as received from the stratum job notify)
 * @return parsed 64-bit value, 0 if the string is not hex
 */
static uint64_t hextouint(char* jobid)
{
	char *ptr;
	// strtoul() only returns 32 bits where long is 32-bit (win32/win64),
	// strtoull() is C99/C++11 and available since VS2013, and keeps all 64 bits
	return (uint64_t) strtoull(jobid, &ptr, 16);
}
|
|
|
|
/**
|
|
* @return time of a job/nonce submission (or last nonce if nonce is 0)
|
|
*/
|
|
uint32_t hashlog_already_submittted(char* jobid, uint32_t nonce)
|
|
{
|
|
uint32_t ret = 0;
|
|
uint64_t njobid = hextouint(jobid);
|
|
uint64_t key = (njobid << 32) + nonce;
|
|
|
|
if (nonce == 0) {
|
|
// search last submitted nonce for job
|
|
ret = hashlog_get_last_sent(jobid);
|
|
} else if (tlastshares.find(key) != tlastshares.end()) {
|
|
hashlog_data data = tlastshares[key];
|
|
ret = data.tm_sent;
|
|
}
|
|
return ret;
|
|
}
|
|
/**
|
|
* Store submitted nonces of a job
|
|
*/
|
|
void hashlog_remember_submit(struct work* work, uint32_t nonce)
|
|
{
|
|
uint64_t njobid = hextouint(work->job_id);
|
|
uint64_t key = (njobid << 32) + nonce;
|
|
hashlog_data data;
|
|
|
|
memset(&data, 0, sizeof(data));
|
|
data.scanned_from = work->scanned_from;
|
|
data.scanned_to = nonce;
|
|
data.height = work->height;
|
|
data.njobid = (uint32_t) njobid;
|
|
data.tm_add = data.tm_upd = data.tm_sent = (uint32_t) time(NULL);
|
|
data.npool = (uint8_t) cur_pooln;
|
|
data.pool_type = pools[cur_pooln].type;
|
|
tlastshares[key] = data;
|
|
}
|
|
|
|
/**
 * Update the global scanned range of a job.
 * The per-job summary record uses the key (njobid << 32) with nonce 0;
 * it aggregates the min/max nonce ranges covered by all threads.
 */
void hashlog_remember_scan_range(struct work* work)
{
	uint64_t njobid = hextouint(work->job_id);
	uint64_t key = (njobid << 32);
	// merged to|from range over all records already sent for this job
	uint64_t range = hashlog_get_scan_range(work->job_id);
	hashlog_data data;

	// global scan range of a job
	// note: operator[] default-inserts a zeroed record if the key is new
	data = tlastshares[key];
	if (range == 0) {
		// no nonce sent yet for this job: start a fresh summary record
		memset(&data, 0, sizeof(data));
		data.njobid = (uint32_t) njobid;
	} else {
		// get min and max from all sent records
		data.scanned_from = LO_DWORD(range);
		data.scanned_to = HI_DWORD(range);
	}

	// keep the original creation time of the summary record
	if (data.tm_add == 0)
		data.tm_add = (uint32_t) time(NULL);

	// remember where the last scan pass started
	data.last_from = work->scanned_from;

	if (work->scanned_from < work->scanned_to) {
		// extend the upper bound only if the new range is contiguous
		// (starts right after the current end) or the record is empty
		if (data.scanned_to == 0 || work->scanned_from == data.scanned_to + 1)
			data.scanned_to = work->scanned_to;
		if (data.scanned_from == 0)
			data.scanned_from = work->scanned_from ? work->scanned_from : 1; // min 1
		// lower the start bound if the new range precedes or abuts it
		else if (work->scanned_from < data.scanned_from || work->scanned_to == (data.scanned_from - 1))
			data.scanned_from = work->scanned_from;
	}

	data.tm_upd = (uint32_t) time(NULL);

	tlastshares[key] = data;
	/* applog(LOG_BLUE, "job %s range : %x %x -> %x %x", jobid,
		scanned_from, scanned_to, data.scanned_from, data.scanned_to); */
}
|
|
|
|
/**
|
|
* Returns the range of a job
|
|
* @return uint64_t to|from
|
|
*/
|
|
uint64_t hashlog_get_scan_range(char* jobid)
|
|
{
|
|
uint64_t ret = 0;
|
|
uint64_t njobid = hextouint(jobid);
|
|
uint64_t keypfx = (njobid << 32);
|
|
uint64_t keymsk = (0xffffffffULL << 32);
|
|
hashlog_data data;
|
|
|
|
data.scanned_from = 0;
|
|
data.scanned_to = 0;
|
|
std::map<uint64_t, hashlog_data>::iterator i = tlastshares.begin();
|
|
while (i != tlastshares.end()) {
|
|
if ((keymsk & i->first) == keypfx && i->second.scanned_to > ret) {
|
|
if (i->second.scanned_to > data.scanned_to)
|
|
data.scanned_to = i->second.scanned_to;
|
|
if (i->second.scanned_from < data.scanned_from || data.scanned_from == 0)
|
|
data.scanned_from = i->second.scanned_from;
|
|
}
|
|
i++;
|
|
}
|
|
ret = data.scanned_from;
|
|
ret += MK_HI64(data.scanned_to);
|
|
return ret;
|
|
}
|
|
|
|
/**
|
|
* Search last submitted nonce for a job
|
|
* @return max nonce
|
|
*/
|
|
uint32_t hashlog_get_last_sent(char* jobid)
|
|
{
|
|
uint32_t nonce = 0;
|
|
uint64_t njobid = hextouint(jobid);
|
|
uint64_t keypfx = (njobid << 32);
|
|
std::map<uint64_t, hashlog_data>::iterator i = tlastshares.begin();
|
|
while (i != tlastshares.end()) {
|
|
if ((keypfx & i->first) == keypfx && i->second.tm_sent > 0) {
|
|
nonce = LO_DWORD(i->first);
|
|
}
|
|
i++;
|
|
}
|
|
return nonce;
|
|
}
|
|
|
|
/**
|
|
* Export data for api calls
|
|
*/
|
|
int hashlog_get_history(struct hashlog_data *data, int max_records)
|
|
{
|
|
int records = 0;
|
|
|
|
std::map<uint64_t, hashlog_data>::reverse_iterator it = tlastshares.rbegin();
|
|
while (it != tlastshares.rend() && records < max_records) {
|
|
memcpy(&data[records], &(it->second), sizeof(struct hashlog_data));
|
|
data[records].nonce = LO_DWORD(it->first);
|
|
data[records].njobid = (uint32_t) HI_DWORD(it->first);
|
|
records++;
|
|
++it;
|
|
}
|
|
return records;
|
|
}
|
|
|
|
/**
|
|
* Remove entries of a job...
|
|
*/
|
|
void hashlog_purge_job(char* jobid)
|
|
{
|
|
int deleted = 0;
|
|
uint64_t njobid = hextouint(jobid);
|
|
uint64_t keypfx = (njobid << 32);
|
|
uint32_t sz = (uint32_t) tlastshares.size();
|
|
std::map<uint64_t, hashlog_data>::iterator i = tlastshares.begin();
|
|
while (i != tlastshares.end()) {
|
|
if ((keypfx & i->first) == keypfx) {
|
|
deleted++;
|
|
tlastshares.erase(i++);
|
|
}
|
|
else ++i;
|
|
}
|
|
if (opt_debug && deleted) {
|
|
applog(LOG_DEBUG, "hashlog: purge job %s, del %d/%d", jobid, deleted, sz);
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Remove old entries to reduce memory usage
|
|
*/
|
|
void hashlog_purge_old(void)
|
|
{
|
|
int deleted = 0;
|
|
uint32_t now = (uint32_t) time(NULL);
|
|
uint32_t sz = (uint32_t) tlastshares.size();
|
|
std::map<uint64_t, hashlog_data>::iterator i = tlastshares.begin();
|
|
while (i != tlastshares.end()) {
|
|
if ((now - i->second.tm_sent) > LOG_PURGE_TIMEOUT) {
|
|
deleted++;
|
|
tlastshares.erase(i++);
|
|
}
|
|
else ++i;
|
|
}
|
|
if (opt_debug && deleted) {
|
|
applog(LOG_DEBUG, "hashlog: %d/%d purged", deleted, sz);
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Reset the submitted nonces cache
|
|
*/
|
|
void hashlog_purge_all(void)
|
|
{
|
|
tlastshares.clear();
|
|
}
|
|
|
|
/**
|
|
* API meminfo
|
|
*/
|
|
void hashlog_getmeminfo(uint64_t *mem, uint32_t *records)
|
|
{
|
|
(*records) = (uint32_t) tlastshares.size();
|
|
(*mem) = (*records) * sizeof(hashlog_data);
|
|
}
|
|
|
|
/**
|
|
* Used to debug ranges...
|
|
*/
|
|
void hashlog_dump_job(char* jobid)
|
|
{
|
|
if (opt_debug) {
|
|
uint64_t njobid = hextouint(jobid);
|
|
uint64_t keypfx = (njobid << 32);
|
|
// uint32_t sz = tlastshares.size();
|
|
std::map<uint64_t, hashlog_data>::iterator i = tlastshares.begin();
|
|
while (i != tlastshares.end()) {
|
|
if ((keypfx & i->first) == keypfx) {
|
|
if (i->first != keypfx)
|
|
applog(LOG_DEBUG, CL_YLW "job %s, found %08x ", jobid, LO_DWORD(i->first));
|
|
else
|
|
applog(LOG_DEBUG, CL_YLW "job %s(%u) range done: %08x-%08x", jobid,
|
|
i->second.height, i->second.scanned_from, i->second.scanned_to);
|
|
}
|
|
i++;
|
|
}
|
|
}
|
|
}
|