Mirror of https://github.com/GOSTSec/sgminer, synced 2025-01-11 07:17:58 +00:00

Merge branch 'master' into error_counters

Commit dad6c70420
Makefile.am

@@ -26,7 +26,7 @@ cgminer_LDADD = $(DLOPEN_FLAGS) @LIBCURL_LIBS@ @JANSSON_LIBS@ @PTHREAD_LIBS@ \
    @OPENCL_LIBS@ @NCURSES_LIBS@ @PDCURSES_LIBS@ @WS2_LIBS@ \
    @UDEV_LIBS@ @USB_LIBS@ \
    @MATH_LIBS@ lib/libgnu.a ccan/libccan.a
cgminer_CPPFLAGS = -I$(top_builddir)/lib -I$(top_srcdir)/lib @OPENCL_FLAGS@
cgminer_CPPFLAGS = -I$(top_builddir)/lib -I$(top_srcdir)/lib @OPENCL_FLAGS@ @LIBCURL_CFLAGS@

# common sources
cgminer_SOURCES := cgminer.c
NEWS (81 changed lines)

@@ -1,3 +1,84 @@
Version 2.9.1 - November 6, 2012

- Reset work flags to prevent GBT shares from being submitted as stratum ones
after switching.


Version 2.9.0 - November 6, 2012

- Add endian swap defines for where missing.
- Only retarget stratum shares to new pool diff if diff has dropped.
- Remove resetting of probed variable when detecting GBT.
- Count lost stratum share submits and increase message priority to warning.
- Only retrieve a new block template for GBT pools that are the current pool.
- Show which pool untracked share messages have come from.
- Add management for dead GBT pools.
- Count lost shares with stratum as submit stale lost.
- Discard record of stratum shares sent and report lost shares on disconnection
since they will never be reported back.
- Swab, don't just swap the bytes in the GBT target.
- Change status window message for GBT connected pools versus LP.
- Generate a gbt work item from longpoll when required to set new block and
message appropriately.
- Use existing pool submit_old bool from gbt data.
- Retrieve a new block template if more than 30 seconds has elapsed since the
last one to keep the data current and test the pool is still alive.
- Update GBT longpollid every time we request a new longpoll.
- Manage appropriate response codes for share submission with GBT.
- Allow the longpoll thread to start with GBT and only set the longpollid once.
- Correct last few components of GBT block generation courtesy of Luke-jr.
- Use correct length for offsetting extra nonce and remaining data.
- Flip all 80 bytes in the flip function which was wrongly named flip256 for its
purpose.
- Calculate midstate for gbt work and remove now unused variable.
- Use a standard function for flipping bytes.
- Insert the extra nonce and remaining data in the correct position in the
coinbase.
- Remove txn size debugging and enlarge gbt block string to prevent overflow.
- Remove varint display debugging.
- Build varint correctly for share submission and sleep 5 seconds before
retrying submit.
- Make gbt_coinbase large enough for submissions, swap bytes correctly to make a
header from GBT and encode the number of transactions in share submission.
- Store the fixed size entries as static variables in GBT in binary form,
byteswapping as is required.
- 32 bit hex encoded variables should be in LE with GBT.
- Target and prevblockhash need to be reversed from GBT variables.
- Construct block for submission when using GBT.
- Use same string for debug as for submission and make string larger to cope
with future GBT messages.
- Skip trying to decipher LP url if we have GBT support.
- Store all the transaction hashes in pool->txn_hashes instead of separating
txn0 and correct generation of merkle root, fixing memory overwrites.
- Hook into various places to generate GBT work where appropriate.
- Create extra work fields when generating GBT work.
- Generate header from correct hashing generation of the merkle root for GBT.
- Generate the merkle root for gbt work generation.
- Create a store of the transactions with GBT in the minimum size form required
to generate work items with a varied coinbase.
- Create a function that generates a GBT coinbase from the existing pool
variables.
- Extract and store the various variables GBT uses when decoding gbt work.
- Check for invalid json result in work_decode.
- Decode work in separate functions for getwork vs gbt.
- Check for the coinbase/append mutable in GBT support to decide whether to use
it or not.
- Add a gbt mutex within the pool struct for protecting the gbt values.
- Convert work decode function to prepare for decoding block templates.
- Check for GBT support on first probing the pool and convert to using the GBT
request as the rpc request for that pool.
- Make the rpc request used with getwork a pool variable to allow it to be
converted to/from gbt requests.
- Changes to build prototypes to support building on FreeBSD 9.1-RC2 amd64
- Free old stratum_work data before replacing it
- There is no need for addrinfo any more.
- server and client sockaddr_in are no longer used in struct pool.
- Merge pull request #322 from luke-jr/bugfix_stratum_tmpwork
- Set sshare id and swork_id within the sshare mutex to avoid multiple share
submits with the same id.
- Initialize temporary stratum work


Version 2.8.7 - October 29, 2012

- Fail on select() failing in stratum thread without needing to attempt
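Many of the 2.9.0 entries above revolve around the BIP 22/23 getblocktemplate fields the new code stores per pool (previousblockhash, target, the coinbasetxn data, longpollid, expires, version, curtime, submitold, bits, and the coinbase/append mutable). For orientation, a hedged sketch of such an exchange follows; the request string matches the gbt_req added in cgminer.c below, while the response values and hex strings are invented placeholders, not data from this commit.

/* Illustrative only: the field values are placeholders, and real templates
 * carry additional members (coinbasevalue, height, a full transaction list, ...). */
static const char *example_gbt_request =
    "{\"id\": 0, \"method\": \"getblocktemplate\", \"params\": "
    "[{\"capabilities\": [\"coinbasetxn\", \"workid\", \"coinbase/append\"]}]}";

static const char *example_gbt_response =
    "{\"id\": 0, \"error\": null, \"result\": {"
    "\"version\": 2, "
    "\"previousblockhash\": \"00000000000000aa...\", "
    "\"target\": \"00000000000001d7...\", "
    "\"curtime\": 1352200000, "
    "\"bits\": \"1a05db8b\", "
    "\"longpollid\": \"some-opaque-id\", "
    "\"expires\": 120, "
    "\"submitold\": true, "
    "\"mutable\": [\"coinbase/append\", \"time\", \"transactions\"], "
    "\"coinbasetxn\": {\"data\": \"01000000...\"}, "
    "\"transactions\": [{\"data\": \"01000000...\"}]}}";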
cgminer.c (542 changed lines)

@@ -405,6 +405,10 @@ static void sharelog(const char*disposition, const struct work*work)
        applog(LOG_ERR, "sharelog fwrite error");
}

static char *getwork_req = "{\"method\": \"getwork\", \"params\": [], \"id\":0}\n";

static char *gbt_req = "{\"id\": 0, \"method\": \"getblocktemplate\", \"params\": [{\"capabilities\": [\"coinbasetxn\", \"workid\", \"coinbase/append\"]}]}\n";

/* Return value is ignored if not called from add_pool_details */
struct pool *add_pool(void)
{
@@ -416,17 +420,17 @@ struct pool *add_pool(void)
    pool->pool_no = pool->prio = total_pools;
    pools = realloc(pools, sizeof(struct pool *) * (total_pools + 2));
    pools[total_pools++] = pool;
    if (unlikely(pthread_mutex_init(&pool->pool_lock, NULL)))
        quit(1, "Failed to pthread_mutex_init in add_pool");
    mutex_init(&pool->pool_lock);
    if (unlikely(pthread_cond_init(&pool->cr_cond, NULL)))
        quit(1, "Failed to pthread_cond_init in add_pool");
    if (unlikely(pthread_mutex_init(&pool->stratum_lock, NULL)))
        quit(1, "Failed to pthread_mutex_init in add_pool");
    mutex_init(&pool->stratum_lock);
    mutex_init(&pool->gbt_lock);
    INIT_LIST_HEAD(&pool->curlring);

    /* Make sure the pool doesn't think we've been idle since time 0 */
    pool->tv_idle.tv_sec = ~0UL;

    pool->rpc_req = getwork_req;
    pool->rpc_proxy = NULL;

    return pool;
@@ -1353,37 +1357,286 @@ static void calc_midstate(struct work *work)
#endif
}

static bool work_decode(const json_t *val, struct work *work)
/* Generate a GBT coinbase from the existing GBT variables stored. Must be
 * entered under gbt_lock */
static void __build_gbt_coinbase(struct pool *pool)
{
    if (unlikely(!jobj_binary(val, "data", work->data, sizeof(work->data), true))) {
        applog(LOG_ERR, "JSON inval data");
        goto err_out;
    int cbt_len, cal_len, orig_len;
    unsigned char *coinbase;
    uint8_t *extra_len;

    cbt_len = strlen(pool->coinbasetxn) / 2;
    pool->coinbase_len = cbt_len + 4;
    /* We add 4 bytes of extra data corresponding to nonce2 of stratum */
    cal_len = pool->coinbase_len + 1;
    if (cal_len % 4)
        cal_len += 4 - (cal_len % 4);
    coinbase = calloc(cal_len, 1);
    hex2bin(coinbase, pool->coinbasetxn, 42);
    extra_len = (uint8_t *)(coinbase + 41);
    orig_len = *extra_len;
    hex2bin(coinbase + 42, pool->coinbasetxn + 84, orig_len);
    memcpy(coinbase + 42 + orig_len, &pool->nonce2, 4);
    *extra_len += 4;
    hex2bin(coinbase + 42 + *extra_len, pool->coinbasetxn + 84 + (orig_len * 2), cbt_len - orig_len - 42);
    pool->nonce2++;
    free(pool->gbt_coinbase);
    pool->gbt_coinbase = coinbase;
}

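The hard-coded 41/42/84 offsets above assume the standard serialization of a coinbase transaction: a fixed 41-byte prefix (4-byte version, 1-byte input count, 32-byte null previous-output hash, 4-byte index), the scriptSig length byte at offset 41, and the script itself from byte 42, which is character 84 of the hex string. A minimal standalone sketch of the same append step on a binary coinbase follows; the helper name and limits are illustrative, not cgminer code.

#include <stdint.h>
#include <string.h>

#define CB_SCRIPTSIG_LEN_OFF 41 /* version(4) + vin count(1) + outpoint(36) */
#define CB_SCRIPT_OFF        42 /* first byte of the scriptSig              */

/* Append `extra_len` bytes (e.g. a 4-byte nonce2) to the coinbase scriptSig.
 * `cb` holds a serialized coinbase of `cb_len` bytes with room for the extra
 * data.  Returns the new length, or 0 if the length byte cannot stay a
 * single-byte compactSize (below 0xfd). */
static size_t coinbase_append(unsigned char *cb, size_t cb_len,
                              const unsigned char *extra, uint8_t extra_len)
{
    uint8_t script_len = cb[CB_SCRIPTSIG_LEN_OFF];
    size_t insert_at = CB_SCRIPT_OFF + script_len;

    if (script_len + extra_len >= 0xfd || insert_at > cb_len)
        return 0;
    /* Shift the rest of the transaction (sequence, outputs, locktime) up to
     * make room, then splice the extra bytes in and bump the script length. */
    memmove(cb + insert_at + extra_len, cb + insert_at, cb_len - insert_at);
    memcpy(cb + insert_at, extra, extra_len);
    cb[CB_SCRIPTSIG_LEN_OFF] = script_len + extra_len;
    return cb_len + extra_len;
}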
static void gen_hash(unsigned char *data, unsigned char *hash, int len);

/* Process transactions with GBT by storing the binary value of the first
 * transaction, and the hashes of the remaining transactions since these
 * remain constant with an altered coinbase when generating work. Must be
 * entered under gbt_lock */
static bool __build_gbt_txns(struct pool *pool, json_t *res_val)
{
    json_t *txn_array;
    bool ret = false;
    int i, cal_len;

    free(pool->txn_hashes);
    pool->txn_hashes = NULL;
    pool->gbt_txns = 0;

    txn_array = json_object_get(res_val, "transactions");
    if (!json_is_array(txn_array))
        goto out;

    ret = true;
    pool->gbt_txns = json_array_size(txn_array);
    if (!pool->gbt_txns)
        goto out;

    pool->txn_hashes = calloc(32 * (pool->gbt_txns + 1), 1);
    if (unlikely(!pool->txn_hashes))
        quit(1, "Failed to calloc txn_hashes in __build_gbt_txns");

    for (i = 0; i < pool->gbt_txns; i++) {
        json_t *txn_val = json_object_get(json_array_get(txn_array, i), "data");
        const char *txn = json_string_value(txn_val);
        int txn_len = strlen(txn);
        unsigned char *txn_bin;

        cal_len = txn_len;
        if (cal_len % 4)
            cal_len += 4 - (cal_len % 4);
        txn_bin = calloc(cal_len, 1);
        if (unlikely(!txn_bin))
            quit(1, "Failed to calloc txn_bin in __build_gbt_txns");
        if (unlikely(!hex2bin(txn_bin, txn, txn_len / 2)))
            quit(1, "Failed to hex2bin txn_bin");

        gen_hash(txn_bin, pool->txn_hashes + (32 * i), txn_len / 2);
        free(txn_bin);
    }
out:
    return ret;
}

static unsigned char *__gbt_merkleroot(struct pool *pool)
{
    unsigned char *merkle_hash;
    int i, txns;

    merkle_hash = calloc(32 * (pool->gbt_txns + 2), 1);
    if (unlikely(!merkle_hash))
        quit(1, "Failed to calloc merkle_hash in __gbt_merkleroot");

    gen_hash(pool->gbt_coinbase, merkle_hash, pool->coinbase_len);

    if (pool->gbt_txns)
        memcpy(merkle_hash + 32, pool->txn_hashes, pool->gbt_txns * 32);

    txns = pool->gbt_txns + 1;
    while (txns > 1) {
        if (txns % 2) {
            memcpy(&merkle_hash[txns * 32], &merkle_hash[(txns - 1) * 32], 32);
            txns++;
        }
        for (i = 0; i < txns; i += 2){
            unsigned char hashout[32];

            gen_hash(merkle_hash + (i * 32), hashout, 64);
            memcpy(merkle_hash + (i / 2 * 32), hashout, 32);
        }
        txns /= 2;
    }
    return merkle_hash;
}

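The while loop above implements the usual Bitcoin merkle-root rule: leaf 0 is the double-SHA256 of the coinbase, the remaining leaves are the stored transaction hashes, an odd-sized level duplicates its last entry, and adjacent pairs are hashed together until one 32-byte root remains. A minimal standalone sketch of the same rule, using OpenSSL's SHA256 in place of cgminer's gen_hash:

#include <string.h>
#include <openssl/sha.h>

/* Double SHA-256, as Bitcoin uses for merkle nodes. */
static void dsha256(const unsigned char *data, size_t len, unsigned char out[32])
{
    unsigned char tmp[32];

    SHA256(data, len, tmp);
    SHA256(tmp, 32, out);
}

/* Compute a merkle root over n 32-byte leaf hashes, in place.  `hashes` needs
 * room for n + 1 entries so an odd-sized level can duplicate its last entry,
 * mirroring the loop above.  The root ends up in hashes[0..31]. */
static void merkle_root(unsigned char *hashes, int n)
{
    unsigned char out[32];
    int i;

    while (n > 1) {
        if (n % 2) {
            memcpy(hashes + n * 32, hashes + (n - 1) * 32, 32);
            n++;
        }
        for (i = 0; i < n; i += 2) {
            dsha256(hashes + i * 32, 64, out);
            memcpy(hashes + (i / 2) * 32, out, 32);
        }
        n /= 2;
    }
}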
static void calc_diff(struct work *work, int known);

static void gen_gbt_work(struct pool *pool, struct work *work)
{
    unsigned char *merkleroot;
    char *cbhex;

    mutex_lock(&pool->gbt_lock);
    __build_gbt_coinbase(pool);
    merkleroot = __gbt_merkleroot(pool);

    memcpy(work->data, &pool->gbt_version, 4);
    memcpy(work->data + 4, pool->previousblockhash, 32);
    memcpy(work->data + 4 + 32 + 32, &pool->curtime, 4);
    memcpy(work->data + 4 + 32 + 32 + 4, &pool->gbt_bits, 4);

    memcpy(work->target, pool->gbt_target, 32);

    cbhex = bin2hex(pool->gbt_coinbase, pool->coinbase_len);
    sprintf(work->gbt_coinbase, "%s", cbhex);
    free(cbhex);

    /* For encoding the block data on submission */
    work->gbt_txns = pool->gbt_txns + 1;
    mutex_unlock(&pool->gbt_lock);

    memcpy(work->data + 4 + 32, merkleroot, 32);
    flip32(work->data + 4 + 32, merkleroot);
    free(merkleroot);
    memset(work->data + 4 + 32 + 32 + 4 + 4, 0, 4); /* nonce */

    hex2bin(work->data + 4 + 32 + 32 + 4 + 4 + 4, "000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000", 48);
    hex2bin(work->hash1, "00000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000010000", 64);

    if (opt_debug) {
        char *header = bin2hex(work->data, 128);

        applog(LOG_DEBUG, "Generated GBT header %s", header);
        applog(LOG_DEBUG, "Work coinbase %s", work->gbt_coinbase);
        free(header);
    }

    if (!jobj_binary(val, "midstate", work->midstate, sizeof(work->midstate), false)) {
    calc_midstate(work);
    local_work++;
    work->pool = pool;
    work->gbt = true;
    work->id = total_work++;
    work->longpoll = false;
    work->getwork_mode = GETWORK_MODE_GBT;
    work->work_block = work_block;
    calc_diff(work, 0);
    gettimeofday(&work->tv_staged, NULL);
}

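The memcpy offsets in gen_gbt_work lay out the standard 80-byte block header at the front of work->data; the 48 bytes written by the first hex2bin call appear to be the usual SHA-256 padding for an 80-byte message (in the word order the hashing kernels expect), and work->hash1 is the fixed padding block for the second hashing pass. As an editorial summary of the offsets (the enum names are invented, not from the diff):

/* Offsets into work->data assembled above (assumed standard header layout). */
enum gbt_header_offsets {
    GBT_HDR_VERSION    = 0,  /* 4 bytes,  pool->gbt_version (big-endian)  */
    GBT_HDR_PREVHASH   = 4,  /* 32 bytes, pool->previousblockhash         */
    GBT_HDR_MERKLEROOT = 36, /* 32 bytes, flip32() of the computed root   */
    GBT_HDR_TIME       = 68, /* 4 bytes,  pool->curtime                   */
    GBT_HDR_BITS       = 72, /* 4 bytes,  pool->gbt_bits                  */
    GBT_HDR_NONCE      = 76, /* 4 bytes,  zeroed here, set by the miner   */
    GBT_HDR_LEN        = 80  /* SHA-256 padding follows to fill 128 bytes */
};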
static bool gbt_decode(struct pool *pool, json_t *res_val)
{
    const char *previousblockhash;
    const char *target;
    const char *coinbasetxn;
    const char *longpollid;
    unsigned char hash_swap[32];
    int expires;
    int version;
    int curtime;
    bool submitold;
    const char *bits;

    previousblockhash = json_string_value(json_object_get(res_val, "previousblockhash"));
    target = json_string_value(json_object_get(res_val, "target"));
    coinbasetxn = json_string_value(json_object_get(json_object_get(res_val, "coinbasetxn"), "data"));
    longpollid = json_string_value(json_object_get(res_val, "longpollid"));
    expires = json_integer_value(json_object_get(res_val, "expires"));
    version = json_integer_value(json_object_get(res_val, "version"));
    curtime = json_integer_value(json_object_get(res_val, "curtime"));
    submitold = json_is_true(json_object_get(res_val, "submitold"));
    bits = json_string_value(json_object_get(res_val, "bits"));

    if (!previousblockhash || !target || !coinbasetxn || !longpollid ||
        !expires || !version || !curtime || !bits) {
        applog(LOG_ERR, "JSON failed to decode GBT");
        return false;
    }

    applog(LOG_DEBUG, "previousblockhash: %s", previousblockhash);
    applog(LOG_DEBUG, "target: %s", target);
    applog(LOG_DEBUG, "coinbasetxn: %s", coinbasetxn);
    applog(LOG_DEBUG, "longpollid: %s", longpollid);
    applog(LOG_DEBUG, "expires: %d", expires);
    applog(LOG_DEBUG, "version: %d", version);
    applog(LOG_DEBUG, "curtime: %d", curtime);
    applog(LOG_DEBUG, "submitold: %s", submitold ? "true" : "false");
    applog(LOG_DEBUG, "bits: %s", bits);

    mutex_lock(&pool->gbt_lock);
    free(pool->coinbasetxn);
    pool->coinbasetxn = strdup(coinbasetxn);
    free(pool->longpollid);
    pool->longpollid = strdup(longpollid);

    hex2bin(hash_swap, previousblockhash, 32);
    swap256(pool->previousblockhash, hash_swap);

    hex2bin(hash_swap, target, 32);
    swab256(pool->gbt_target, hash_swap);

    pool->gbt_expires = expires;
    pool->gbt_version = htobe32(version);
    pool->curtime = htobe32(curtime);
    pool->submit_old = submitold;

    hex2bin((unsigned char *)&pool->gbt_bits, bits, 4);

    __build_gbt_txns(pool, res_val);
    mutex_unlock(&pool->gbt_lock);

    return true;
}

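Note that gbt_decode reverses the hex-decoded previousblockhash with swap256 but the target with swab256; the NEWS entry "Swab, don't just swap the bytes in the GBT target" is about that distinction. Going by the miner.h helpers (the tail of swab256 is visible in the miner.h hunk further down), swap256 reverses the order of the eight 32-bit words while swab256 also byte-swaps each word, i.e. a full 32-byte reversal. A small self-contained illustration with local stand-ins for those inlines:

#include <stdint.h>
#include <stdio.h>

static uint32_t swab32_(uint32_t x)
{
    return (x >> 24) | ((x >> 8) & 0xff00) | ((x << 8) & 0xff0000) | (x << 24);
}

/* swap256: reverse the order of the eight 32-bit words only. */
static void swap256_(uint32_t dest[8], const uint32_t src[8])
{
    int i;

    for (i = 0; i < 8; i++)
        dest[i] = src[7 - i];
}

/* swab256: reverse the word order and the bytes inside each word,
 * i.e. reverse all 32 bytes. */
static void swab256_(uint32_t dest[8], const uint32_t src[8])
{
    int i;

    for (i = 0; i < 8; i++)
        dest[i] = swab32_(src[7 - i]);
}

int main(void)
{
    uint32_t in[8], a[8], b[8];
    unsigned char *p = (unsigned char *)in;
    int i;

    for (i = 0; i < 32; i++)
        p[i] = i;   /* bytes 00 01 02 ... 1f */
    swap256_(a, in);
    swab256_(b, in);
    /* First bytes: swap256 gives 1c 1d 1e 1f ..., swab256 gives 1f 1e 1d 1c ... */
    for (i = 0; i < 8; i++)
        printf("%02x %02x\n", ((unsigned char *)a)[i], ((unsigned char *)b)[i]);
    return 0;
}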
static bool getwork_decode(json_t *res_val, struct work *work)
{
    if (unlikely(!jobj_binary(res_val, "data", work->data, sizeof(work->data), true))) {
        applog(LOG_ERR, "JSON inval data");
        return false;
    }

    if (!jobj_binary(res_val, "midstate", work->midstate, sizeof(work->midstate), false)) {
        // Calculate it ourselves
        applog(LOG_DEBUG, "Calculating midstate locally");
        calc_midstate(work);
    }

    if (!jobj_binary(val, "hash1", work->hash1, sizeof(work->hash1), false)) {
    if (!jobj_binary(res_val, "hash1", work->hash1, sizeof(work->hash1), false)) {
        // Always the same anyway
        memcpy(work->hash1, "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x80\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1\0\0", 64);
    }

    if (unlikely(!jobj_binary(val, "target", work->target, sizeof(work->target), true))) {
    if (unlikely(!jobj_binary(res_val, "target", work->target, sizeof(work->target), true))) {
        applog(LOG_ERR, "JSON inval target");
        goto err_out;
        return false;
    }
    return true;
}

static bool work_decode(struct pool *pool, struct work *work, json_t *val)
{
    json_t *res_val = json_object_get(val, "result");
    bool ret = false;

    if (!res_val || json_is_null(res_val)) {
        applog(LOG_ERR, "JSON Failed to decode result");
        goto out;
    }

    if (pool->has_gbt) {
        if (unlikely(!gbt_decode(pool, res_val)))
            goto out;
        gettimeofday(&pool->tv_template, NULL);
        work->gbt = true;
        ret = true;
        goto out;
    } else if (unlikely(!getwork_decode(res_val, work)))
        goto out;

    memset(work->hash, 0, sizeof(work->hash));

    gettimeofday(&work->tv_staged, NULL);

    return true;
    ret = true;

err_out:
    return false;
out:
    return ret;
}

int dev_from_id(int thr_id)
@@ -1593,8 +1846,9 @@ static void curses_print_status(void)
        mvwprintw(statuswin, 4, 0, " Connected to multiple pools with%s LP",
            have_longpoll ? "": "out");
    } else {
        mvwprintw(statuswin, 4, 0, " Connected to %s with%s LP as user %s",
            pool->sockaddr_url, have_longpoll ? "": "out", pool->rpc_user);
        mvwprintw(statuswin, 4, 0, " Connected to %s with%s %s as user %s",
            pool->sockaddr_url, have_longpoll ? "": "out",
            pool->has_gbt ? "GBT" : "LP", pool->rpc_user);
    }
    wclrtoeol(statuswin);
    mvwprintw(statuswin, 5, 0, " Block: %s... Started: %s Best share: %s ", current_hash, blocktime, best_share);
@@ -1879,7 +2133,7 @@ share_result(json_t *val, json_t *res, json_t *err, const struct work *work,
    struct pool *pool = work->pool;
    struct cgpu_info *cgpu = thr_info[work->thr_id].cgpu;

    if (json_is_true(res)) {
    if (json_is_true(res) || (work->gbt && json_is_null(res))) {
        cgpu->accepted++;
        total_accepted++;
        pool->accepted++;
@@ -1937,6 +2191,7 @@ share_result(json_t *val, json_t *res, json_t *err, const struct work *work,
        else
            strcpy(where, "");

        if (!work->gbt)
            res = json_object_get(val, "reject-reason");
        if (res) {
            const char *reasontmp = json_string_value(res);
@@ -2025,7 +2280,7 @@ static bool submit_upstream_work(struct work *work, CURL *curl, bool resubmit)
{
    char *hexstr = NULL;
    json_t *val, *res, *err;
    char s[345], sd[345];
    char s[1024];
    bool rc = false;
    int thr_id = work->thr_id;
    struct cgpu_info *cgpu = thr_info[thr_id].cgpu;
@@ -2046,14 +2301,39 @@ static bool submit_upstream_work(struct work *work, CURL *curl, bool resubmit)
    hexstr = bin2hex(work->data, sizeof(work->data));

    /* build JSON-RPC request */
    sprintf(s,
        "{\"method\": \"getwork\", \"params\": [ \"%s\" ], \"id\":1}\r\n",
        hexstr);
    sprintf(sd,
        "{\"method\": \"getwork\", \"params\": [ \"%s\" ], \"id\":1}",
        hexstr);
    if (work->gbt) {
        char gbt_block[1024], *varint, *header;
        unsigned char data[80];

        applog(LOG_DEBUG, "DBG: sending %s submit RPC call: %s", pool->rpc_url, sd);
        flip80(data, work->data);
        header = bin2hex(data, 80);
        sprintf(gbt_block, "%s", header);
        free(header);

        if (work->gbt_txns < 0xfd) {
            uint8_t val = work->gbt_txns;

            varint = bin2hex((const unsigned char *)&val, 1);
        } else if (work->gbt_txns <= 0xffff) {
            uint16_t val = htole16(work->gbt_txns);

            strcat(gbt_block, "fd");
            varint = bin2hex((const unsigned char *)&val, 2);
        } else {
            uint32_t val = htole32(work->gbt_txns);

            strcat(gbt_block, "fe");
            varint = bin2hex((const unsigned char *)&val, 4);
        }
        strcat(gbt_block, varint);
        free(varint);
        strcat(gbt_block, work->gbt_coinbase);

        sprintf(s, "{\"id\": 0, \"method\": \"submitblock\", \"params\": [\"%s\", {}]}", gbt_block);
    } else
        sprintf(s, "{\"method\": \"getwork\", \"params\": [ \"%s\" ], \"id\":1}", hexstr);
    applog(LOG_DEBUG, "DBG: sending %s submit RPC call: %s", pool->rpc_url, s);
    strcat(s, "\n");

    gettimeofday(&tv_submit, NULL);
    /* issue JSON-RPC request */
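The fd/fe branches above emit Bitcoin's compactSize ("varint") encoding of the transaction count that precedes the transaction data in a serialized block. A generic sketch of the rule, written against plain buffers rather than hex strings (and including the 9-byte case a miner never needs for a transaction count):

#include <stddef.h>
#include <stdint.h>

/* Encode a Bitcoin compactSize integer into buf (little-endian payload).
 * Returns the number of bytes written: 1, 3, 5 or 9. */
static size_t write_compact_size(unsigned char *buf, uint64_t n)
{
    int i;

    if (n < 0xfd) {
        buf[0] = (unsigned char)n;
        return 1;
    } else if (n <= 0xffff) {
        buf[0] = 0xfd;
        buf[1] = n & 0xff;
        buf[2] = (n >> 8) & 0xff;
        return 3;
    } else if (n <= 0xffffffffULL) {
        buf[0] = 0xfe;
        for (i = 0; i < 4; i++)
            buf[1 + i] = (n >> (8 * i)) & 0xff;
        return 5;
    }
    buf[0] = 0xff;
    for (i = 0; i < 8; i++)
        buf[1 + i] = (n >> (8 * i)) & 0xff;
    return 9;
}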
@@ -2066,6 +2346,7 @@ static bool submit_upstream_work(struct work *work, CURL *curl, bool resubmit)
            pool->remotefail_occasions++;
            applog(LOG_WARNING, "Pool %d communication failure, caching submissions", pool->pool_no);
        }
        sleep(5);
        goto out;
    } else if (pool_tclear(pool, &pool->submit_fail))
        applog(LOG_WARNING, "Pool %d communication resumed, submitting work", pool->pool_no);
@@ -2158,9 +2439,6 @@ out:
    return rc;
}

static const char *rpc_req =
    "{\"method\": \"getwork\", \"params\": [], \"id\":0}\r\n";

/* In balanced mode, the amount of diff1 solutions per pool is monitored as a
 * rolling average per 10 minutes and if pools start getting more, it biases
 * away from them to distribute work evenly. The share count is reset to the
@@ -2290,18 +2568,18 @@ static bool get_upstream_work(struct work *work, CURL *curl)
    bool rc = false;
    char *url;

    applog(LOG_DEBUG, "DBG: sending %s get RPC call: %s", pool->rpc_url, rpc_req);
    applog(LOG_DEBUG, "DBG: sending %s get RPC call: %s", pool->rpc_url, pool->rpc_req);

    url = pool->rpc_url;

    gettimeofday(&(work->tv_getwork), NULL);

    val = json_rpc_call(curl, url, pool->rpc_userpass, rpc_req, false,
    val = json_rpc_call(curl, url, pool->rpc_userpass, pool->rpc_req, false,
            false, &work->rolltime, pool, false);
    pool_stats->getwork_attempts++;

    if (likely(val)) {
        rc = work_decode(json_object_get(val, "result"), work);
        rc = work_decode(pool, work, val);
        if (unlikely(!rc))
            applog(LOG_DEBUG, "Failed to decode work in get_upstream_work");
    } else
@@ -2761,6 +3039,29 @@ retry:
        goto out;
    }

    if (pool->has_gbt) {
        while (pool->idle) {
            struct pool *altpool = select_pool(true);

            sleep(5);
            if (altpool != pool) {
                wc->pool = altpool;
                inc_queued(altpool);
                dec_queued(pool);
                goto retry;
            }
        }
        ret_work = make_work();
        gen_gbt_work(pool, ret_work);
        if (unlikely(!stage_work(ret_work))) {
            applog(LOG_ERR, "Failed to stage gbt work in get_work_thread");
            kill_work();
            free(ret_work);
        }
        dec_queued(pool);
        goto out;
    }

    if (clone_available()) {
        dec_queued(pool);
        goto out;
@@ -2941,11 +3242,13 @@ static void *submit_work_thread(void *userdata)
                applog(LOG_WARNING, "Pool %d communication resumed, submitting work", pool->pool_no);
            applog(LOG_DEBUG, "Successfully submitted, adding to stratum_shares db");
        } else {
            applog(LOG_INFO, "Failed to submit stratum share");
            applog(LOG_WARNING, "Failed to submit stratum share to pool %d", pool->pool_no);
            mutex_lock(&sshare_lock);
            HASH_DEL(stratum_shares, sshare);
            mutex_unlock(&sshare_lock);
            free(sshare);
            pool->stale_shares++;
            total_stale++;

            if (!pool_tset(pool, &pool->submit_fail)) {
                total_ro++;
@@ -3016,6 +3319,8 @@ static struct pool *priority_pool(int choice)
    return ret;
}

static bool pool_active(struct pool *pool, bool pinging);

void switch_pools(struct pool *selected)
{
    struct pool *pool, *last_pool;
@@ -3084,8 +3389,13 @@ void switch_pools(struct pool *selected)
    if (opt_fail_only)
        pool_tset(pool, &pool->lagging);

    if (pool != last_pool)
    if (pool != last_pool) {
        applog(LOG_WARNING, "Switching to %s", pool->rpc_url);
        /* Get a fresh block template since we may not have an up to
         * date one */
        if (pool->has_gbt)
            pool_active(pool, true);
    }

    mutex_lock(&lp_lock);
    pthread_cond_broadcast(&lp_cond);
@@ -3274,8 +3584,8 @@ static bool test_work_current(struct work *work)

    if (!work->stratum) {
        if (work->longpoll) {
            applog(LOG_NOTICE, "LONGPOLL from pool %d detected new block",
                work->pool->pool_no);
            applog(LOG_NOTICE, "%sLONGPOLL from pool %d detected new block",
                work->gbt ? "GBT " : "", work->pool->pool_no);
            work->longpoll = false;
        } else if (have_longpoll)
            applog(LOG_NOTICE, "New block detected on network before longpoll");
@@ -3286,8 +3596,8 @@ static bool test_work_current(struct work *work)
    } else if (work->longpoll) {
        work->longpoll = false;
        if (work->pool == current_pool()) {
            applog(LOG_NOTICE, "LONGPOLL from pool %d requested work restart",
                work->pool->pool_no);
            applog(LOG_NOTICE, "%sLONGPOLL from pool %d requested work restart",
                work->gbt ? "GBT " : "", work->pool->pool_no);
            work_block++;
            restart_threads();
        }
@@ -4246,7 +4556,7 @@ static void stratum_share_result(json_t *val, json_t *res_val, json_t *err_val,

/* Parses stratum json responses and tries to find the id that the request
 * matched to and treat it accordingly. */
static bool parse_stratum_response(char *s)
static bool parse_stratum_response(struct pool *pool, char *s)
{
    json_t *val = NULL, *err_val, *res_val, *id_val;
    struct stratum_share *sshare;
@@ -4287,9 +4597,9 @@ static bool parse_stratum_response(char *s)
    mutex_unlock(&sshare_lock);
    if (!sshare) {
        if (json_is_true(res_val))
            applog(LOG_NOTICE, "Accepted untracked stratum share");
            applog(LOG_NOTICE, "Accepted untracked stratum share from pool %d", pool->pool_no);
        else
            applog(LOG_NOTICE, "Rejected untracked stratum share");
            applog(LOG_NOTICE, "Rejected untracked stratum share from pool %d", pool->pool_no);
        goto out;
    }
    stratum_share_result(val, res_val, err_val, sshare);
@@ -4305,6 +4615,28 @@ out:

static void pool_resus(struct pool *pool);

static void clear_stratum_shares(struct pool *pool)
{
    struct stratum_share *sshare, *tmpshare;
    int cleared = 0;

    mutex_lock(&sshare_lock);
    HASH_ITER(hh, stratum_shares, sshare, tmpshare) {
        if (sshare->work.pool == pool) {
            HASH_DEL(stratum_shares, sshare);
            free(sshare);
            cleared++;
        }
    }
    mutex_unlock(&sshare_lock);

    if (cleared) {
        applog(LOG_WARNING, "Lost %d shares due to stratum disconnect on pool %d", cleared, pool->pool_no);
        pool->stale_shares++;
        total_stale++;
    }
}

/* One stratum thread per pool that has stratum waits on the socket checking
 * for new messages and for the integrity of the socket connection. We reset
 * the connection based on the integrity of the receive side only as the send
@@ -4341,6 +4673,11 @@ static void *stratum_thread(void *userdata)
            pool->getfail_occasions++;
            total_go++;

            /* If the socket to our stratum pool disconnects, all
             * tracked submitted shares are lost and we will leak
             * the memory if we don't discard their records. */
            clear_stratum_shares(pool);

            if (initiate_stratum(pool) && auth_stratum(pool))
                continue;

@@ -4356,7 +4693,7 @@ static void *stratum_thread(void *userdata)
            continue;
        }

        if (!parse_method(pool, s) && !parse_stratum_response(s))
        if (!parse_method(pool, s) && !parse_stratum_response(pool, s))
            applog(LOG_INFO, "Unknown stratum msg: %s", s);
        free(s);
        if (pool->swork.clean) {
@@ -4412,6 +4749,9 @@ static bool pool_active(struct pool *pool, bool pinging)
    CURL *curl;
    int rolltime;

    if (pool->has_gbt)
        applog(LOG_DEBUG, "Retrieving block template from pool %s", pool->rpc_url);
    else
        applog(LOG_INFO, "Testing pool %s", pool->rpc_url);

    /* This is the central point we activate stratum when we can */
@@ -4439,9 +4779,47 @@ retry_stratum:
        return false;
    }

    /* Probe for GBT support on first pass */
    if (!pool->probed && !opt_fix_protocol) {
        applog(LOG_DEBUG, "Probing for GBT support");
        val = json_rpc_call(curl, pool->rpc_url, pool->rpc_userpass,
                gbt_req, true, false, &rolltime, pool, false);
        if (val) {
            json_t *res_val, *mutables;
            int i, mutsize = 0;

            res_val = json_object_get(val, "result");
            if (res_val) {
                mutables = json_object_get(res_val, "mutable");
                mutsize = json_array_size(mutables);
            }

            for (i = 0; i < mutsize; i++) {
                json_t *arrval = json_array_get(mutables, i);

                if (json_is_string(arrval)) {
                    const char *mutable = json_string_value(arrval);

                    /* Only use GBT if it supports coinbase append */
                    if (!strncasecmp(mutable, "coinbase/append", 15)) {
                        pool->has_gbt = true;
                        pool->rpc_req = gbt_req;
                        break;
                    }
                }
            }
            json_decref(val);
        }

        if (pool->has_gbt)
            applog(LOG_DEBUG, "GBT coinbase append support found, switching to GBT protocol");
        else
            applog(LOG_DEBUG, "No GBT coinbase append support found, using getwork protocol");
    }

    gettimeofday(&tv_getwork, NULL);
    val = json_rpc_call(curl, pool->rpc_url, pool->rpc_userpass, rpc_req,
            true, false, &rolltime, pool, false);
    val = json_rpc_call(curl, pool->rpc_url, pool->rpc_userpass,
            pool->rpc_req, true, false, &rolltime, pool, false);
    gettimeofday(&tv_getwork_reply, NULL);

    /* Detect if a http getwork pool has an X-Stratum header at startup,
@@ -4460,7 +4838,7 @@ retry_stratum:
        struct work *work = make_work();
        bool rc;

        rc = work_decode(json_object_get(val, "result"), work);
        rc = work_decode(pool, work, val);
        if (rc) {
            applog(LOG_DEBUG, "Successfully retrieved and deciphered work from pool %u %s",
                pool->pool_no, pool->rpc_url);
@@ -4635,7 +5013,15 @@ static bool reuse_work(struct work *work, struct pool *pool)
        if (!pool->stratum_active)
            return false;
        applog(LOG_DEBUG, "Reusing stratum work");
        gen_stratum_work(pool, work);;
        gen_stratum_work(pool, work);
        return true;
    }

    if (pool->has_gbt) {
        if (pool->idle)
            return false;
        applog(LOG_DEBUG, "Reusing GBT work");
        gen_gbt_work(pool, work);
        return true;
    }

@@ -4827,6 +5213,8 @@ static void get_work(struct work *work, struct thr_info *thr, const int thr_id)
        goto out;
    }

    /* Reset these flags in case we switch pools with these work structs */
    work->stratum = work->gbt = false;
retry:
    pool = current_pool();

@@ -4941,7 +5329,7 @@ static bool hashtest(struct thr_info *thr, struct work *work)
    unsigned char hash2[32];
    uint32_t *hash2_32 = (uint32_t *)hash2;
    struct pool *pool = work->pool;
    int i, diff;
    int i;

    for (i = 0; i < 80 / 4; i++)
        swap32[i] = swab32(data32[i]);
@@ -4967,11 +5355,15 @@ static bool hashtest(struct thr_info *thr, struct work *work)
    }

    if (work->stratum) {
        int diff;

        mutex_lock(&pool->pool_lock);
        diff = pool->swork.diff;
        mutex_unlock(&pool->pool_lock);

        if (unlikely(work->sdiff != diff)) {
        /* Retarget share only if pool diff has dropped since we
         * generated this work */
        if (unlikely(work->sdiff > diff)) {
            applog(LOG_DEBUG, "Share needs retargetting to match pool");
            set_work_target(work, diff);
        }
@@ -5230,7 +5622,7 @@ static void convert_to_work(json_t *val, int rolltime, struct pool *pool, struct

    work = make_work();

    rc = work_decode(json_object_get(val, "result"), work);
    rc = work_decode(pool, work, val);
    if (unlikely(!rc)) {
        applog(LOG_ERR, "Could not convert longpoll data to work");
        free_work(work);
@@ -5238,15 +5630,18 @@ static void convert_to_work(json_t *val, int rolltime, struct pool *pool, struct
    }
    work->pool = pool;
    work->rolltime = rolltime;
    work->longpoll = true;
    memcpy(&(work->tv_getwork), tv_lp, sizeof(struct timeval));
    memcpy(&(work->tv_getwork_reply), tv_lp_reply, sizeof(struct timeval));
    work->getwork_mode = GETWORK_MODE_LP;
    calc_diff(work, 0);

    if (pool->enabled == POOL_REJECTING)
        work->mandatory = true;

    if (pool->has_gbt)
        gen_gbt_work(pool, work);
    work->longpoll = true;
    work->getwork_mode = GETWORK_MODE_LP;

    /* We'll be checking this work item twice, but we already know it's
     * from a new block so explicitly force the new block detection now
     * rather than waiting for it to hit the stage thread. This also
@@ -5279,7 +5674,7 @@ static struct pool *select_longpoll_pool(struct pool *cp)
{
    int i;

    if (cp->hdr_path)
    if (cp->hdr_path || cp->has_gbt)
        return cp;
    for (i = 0; i < total_pools; i++) {
        struct pool *pool = pools[i];
@@ -5314,6 +5709,8 @@ static void *longpoll_thread(void *userdata)
    struct timeval start, reply, end;
    CURL *curl = NULL;
    int failures = 0;
    char lpreq[1024];
    char *lp_url;
    int rolltime;

    curl = curl_easy_init();
@@ -5337,10 +5734,18 @@ retry_pool:

    wait_lpcurrent(cp);

    if (pool->has_gbt) {
        lp_url = pool->rpc_url;
        applog(LOG_WARNING, "GBT longpoll ID activated for %s", lp_url);
    } else {
        strcpy(lpreq, getwork_req);

        lp_url = pool->lp_url;
        if (cp == pool)
            applog(LOG_WARNING, "Long-polling activated for %s", pool->lp_url);
            applog(LOG_WARNING, "Long-polling activated for %s", lp_url);
        else
            applog(LOG_WARNING, "Long-polling activated for pool %s via %s", cp->rpc_url, pool->lp_url);
            applog(LOG_WARNING, "Long-polling activated for pool %s via %s", cp->rpc_url, lp_url);
    }

    while (42) {
        json_t *val, *soval;
@@ -5349,13 +5754,23 @@ retry_pool:

        gettimeofday(&start, NULL);

        /* Update the longpollid every time, but do it under lock to
         * avoid races */
        if (pool->has_gbt) {
            mutex_lock(&pool->gbt_lock);
            sprintf(lpreq, "{\"id\": 0, \"method\": \"getblocktemplate\", \"params\": "
                "[{\"capabilities\": [\"coinbasetxn\", \"workid\", \"coinbase/append\"], "
                "\"longpollid\": \"%s\"}]}\n", pool->longpollid);
            mutex_unlock(&pool->gbt_lock);
        }

        /* Longpoll connections can be persistent for a very long time
         * and any number of issues could have come up in the meantime
         * so always establish a fresh connection instead of relying on
         * a persistent one. */
        curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1);
        val = json_rpc_call(curl, pool->lp_url, pool->rpc_userpass, rpc_req,
                false, true, &rolltime, pool, false);
        val = json_rpc_call(curl, lp_url, pool->rpc_userpass,
                lpreq, false, true, &rolltime, pool, false);

        gettimeofday(&reply, NULL);

@@ -5377,7 +5792,7 @@ retry_pool:
            if (end.tv_sec - start.tv_sec > 30)
                continue;
            if (failures == 1)
                applog(LOG_WARNING, "longpoll failed for %s, retrying every 30s", pool->lp_url);
                applog(LOG_WARNING, "longpoll failed for %s, retrying every 30s", lp_url);
            sleep(30);
        }
        if (pool != cp) {
@@ -5453,8 +5868,18 @@ static void *watchpool_thread(void __maybe_unused *userdata)
            if (pool->enabled == POOL_DISABLED || pool->has_stratum)
                continue;

            /* Apart from longpollid comms, we retrieve a fresh
             * template if more than 30 seconds has elapsed since
             * the last one to keep the data current and as a test
             * for when the pool dies. */
            if (!pool->idle && pool->has_gbt && pool == current_pool() &&
                now.tv_sec - pool->tv_template.tv_sec > 60) {
                if (!pool_active(pool, true))
                    pool_died(pool);
            }

            /* Test pool is idle once every minute */
            if (pool->idle && now.tv_sec - pool->tv_idle.tv_sec > 60) {
            if (pool->idle && now.tv_sec - pool->tv_idle.tv_sec > 30) {
                gettimeofday(&pool->tv_idle, NULL);
                if (pool_active(pool, true) && pool_tclear(pool, &pool->idle))
                    pool_resus(pool);
@@ -6425,6 +6850,7 @@ int main(int argc, char *argv[])
    for (i = 0; i < total_pools; i++) {
        struct pool *pool = pools[i];
        if (pool_active(pool, false)) {
            pool_tclear(pool, &pool->idle);
            if (!currentpool)
                currentpool = pool;
            applog(LOG_INFO, "Pool %d %s active", pool->pool_no, pool->rpc_url);
configure.ac

@@ -1,8 +1,8 @@
##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##
##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##
m4_define([v_maj], [2])
m4_define([v_min], [8])
m4_define([v_mic], [7])
m4_define([v_min], [9])
m4_define([v_mic], [1])
##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##
m4_define([v_ver], [v_maj.v_min.v_mic])
m4_define([lt_rev], m4_eval(v_maj + v_min))
@@ -70,6 +70,9 @@ WS2_LIBS=""
MATH_LIBS="-lm"

case $target in
  amd64-*)
    have_x86_64=true
    ;;
  x86_64-*)
    have_x86_64=true
    ;;
@@ -257,7 +260,7 @@ if test "x$curses" = "xno"; then
else
    AC_SEARCH_LIBS(addstr, ncurses pdcurses, [
        curses=yes
        cursesmsg="FOUND: ${ac_cv_search_addstr:2}"
        cursesmsg="FOUND: ${ac_cv_search_addstr}"
        AC_DEFINE([HAVE_CURSES], [1], [Defined to 1 if curses TUI support is wanted])
    ], [
        if test "x$curses" = "xyes"; then
miner.h (47 changed lines)

@@ -143,11 +143,15 @@ static inline int fsync (int fd)
 * htobe64 also won't exist */
#ifndef htobe32
# if __BYTE_ORDER == __LITTLE_ENDIAN
#  define htole16(x) (x)
#  define htole32(x) (x)
#  define be32toh(x) bswap_32(x)
#  define be64toh(x) bswap_64(x)
#  define htobe32(x) bswap_32(x)
#  define htobe64(x) bswap_64(x)
# elif __BYTE_ORDER == __BIG_ENDIAN
#  define htole16(x) bswap_16(x)
#  define htole32(x) bswap_32(x)
#  define be32toh(x) (x)
#  define be64toh(x) (x)
#  define htobe32(x) (x)
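These fallback definitions follow the usual convention that hto<order>32 converts a host-order 32-bit value to the named byte order, a no-op when the orders already match. A small self-contained check of the little-endian case (standalone code with its own byte-swap helper rather than the bswap_32 macro used above):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t bswap32(uint32_t x)
{
    return (x >> 24) | ((x >> 8) & 0xff00) | ((x << 8) & 0xff0000) | (x << 24);
}

int main(void)
{
    uint32_t host = 0x01020304, be, le;
    unsigned char b[4];
    int little = (*(unsigned char *)&host == 0x04);

    be = little ? bswap32(host) : host; /* htobe32 equivalent */
    le = little ? host : bswap32(host); /* htole32 equivalent */

    memcpy(b, &be, 4);
    printf("big-endian bytes:    %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
    memcpy(b, &le, 4);
    printf("little-endian bytes: %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
    return 0; /* prints 01 02 03 04, then 04 03 02 01, on an x86 host */
}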
@@ -544,6 +548,26 @@ static inline void swab256(void *dest_p, const void *src_p)
    dest[7] = swab32(src[0]);
}

static inline void flip32(void *dest_p, const void *src_p)
{
    uint32_t *dest = dest_p;
    const uint32_t *src = src_p;
    int i;

    for (i = 0; i < 8; i++)
        dest[i] = swab32(src[i]);
}

static inline void flip80(void *dest_p, const void *src_p)
{
    uint32_t *dest = dest_p;
    const uint32_t *src = src_p;
    int i;

    for (i = 0; i < 20; i++)
        dest[i] = swab32(src[i]);
}

extern void quit(int status, const char *format, ...);

static inline void mutex_lock(pthread_mutex_t *lock)
@@ -841,6 +865,7 @@ struct pool {
    double utility;
    int last_shares, shares;

    char *rpc_req;
    char *rpc_url;
    char *rpc_userpass;
    char *rpc_user, *rpc_pass;
@@ -882,6 +907,23 @@ struct pool {
    struct stratum_work swork;
    pthread_t stratum_thread;
    pthread_mutex_t stratum_lock;

    /* GBT variables */
    bool has_gbt;
    pthread_mutex_t gbt_lock;
    unsigned char previousblockhash[32];
    unsigned char gbt_target[32];
    char *coinbasetxn;
    char *longpollid;
    int gbt_expires;
    uint32_t gbt_version;
    uint32_t curtime;
    uint32_t gbt_bits;
    unsigned char *gbt_coinbase;
    unsigned char *txn_hashes;
    int gbt_txns;
    int coinbase_len;
    struct timeval tv_template;
};

#define GETWORK_MODE_TESTPOOL 'T'
@@ -889,6 +931,7 @@ struct pool {
#define GETWORK_MODE_LP 'L'
#define GETWORK_MODE_BENCHMARK 'B'
#define GETWORK_MODE_STRATUM 'S'
#define GETWORK_MODE_GBT 'G'

struct work {
    unsigned char data[128];
@@ -926,6 +969,10 @@ struct work {
    char ntime[16];
    int sdiff;

    bool gbt;
    char gbt_coinbase[512];
    int gbt_txns;

    unsigned int work_block;
    int id;
    UT_hash_handle hh;
util.c (3 changed lines)

@@ -451,8 +451,7 @@ json_t *json_rpc_call(CURL *curl, const char *url,
    res_val = json_object_get(val, "result");
    err_val = json_object_get(val, "error");

    if (!res_val || json_is_null(res_val) ||
        (err_val && !json_is_null(err_val))) {
    if (!res_val ||(err_val && !json_is_null(err_val))) {
        char *s;

        if (err_val)