
Provide a --disable-libcurl config option to build support for stratum mining only.
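Built this way, cgminer drops the getwork and GBT (getblocktemplate) code paths together with the libcurl dependency and talks to pools over stratum only; the changes below guard the curl-dependent code with HAVE_LIBCURL and substitute stubs when it is not defined. As a usage illustration (assuming the standard autotools flow), a stratum-only binary would be produced by running ./configure --disable-libcurl followed by make.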

nfactor-troky
Con Kolivas, 11 years ago
commit 466d3beca0
  1. api.c (1 line changed)
  2. cgminer.c (414 lines changed)
  3. configure.ac (28 lines changed)
  4. miner.h (16 lines changed)
  5. ocl.c (1 line changed)
  6. util.c (153 lines changed)
  7. util.h (6 lines changed)

api.c (1 line changed)

@@ -22,6 +22,7 @@
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#include <limits.h>
#include <sys/types.h>
#include "compat.h"

cgminer.c (414 lines changed)

@@ -27,6 +27,7 @@
#include <stdarg.h>
#include <assert.h>
#include <signal.h>
#include <limits.h>
#ifdef USE_USBUTILS
#include <semaphore.h>
@@ -42,7 +43,11 @@
#endif
#include <ccan/opt/opt.h>
#include <jansson.h>
#ifdef HAVE_LIBCURL
#include <curl/curl.h>
#else
char *curly = ":D";
#endif
#include <libgen.h>
#include <sha2.h>
@@ -1625,6 +1630,7 @@ static struct opt_table opt_cmdline_table[] = {
OPT_ENDTABLE
};
#ifdef HAVE_LIBCURL
static bool jobj_binary(const json_t *obj, const char *key,
void *buf, size_t buflen, bool required)
{
@@ -1647,6 +1653,7 @@ static bool jobj_binary(const json_t *obj, const char *key,
return true;
}
#endif
static void calc_midstate(struct work *work)
{
@@ -1695,7 +1702,10 @@ void free_work(struct work *work)
}
static void gen_hash(unsigned char *data, unsigned char *hash, int len);
static void calc_diff(struct work *work, int known);
char *workpadding = "000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000";
#ifdef HAVE_LIBCURL
/* Process transactions with GBT by storing the binary value of the first
* transaction, and the hashes of the remaining transactions since these
* remain constant with an altered coinbase when generating work. Must be
@@ -1776,7 +1786,6 @@ static unsigned char *__gbt_merkleroot(struct pool *pool)
return merkle_hash;
}
static void calc_diff(struct work *work, int known);
static bool work_decode(struct pool *pool, struct work *work, json_t *val);
static void update_gbt(struct pool *pool)
@@ -1815,8 +1824,6 @@ static void update_gbt(struct pool *pool)
curl_easy_cleanup(curl);
}
char *workpadding = "000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000";
static void gen_gbt_work(struct pool *pool, struct work *work)
{
unsigned char *merkleroot;
@@ -2023,6 +2030,13 @@ static bool work_decode(struct pool *pool, struct work *work, json_t *val)
out:
return ret;
}
#else /* HAVE_LIBCURL */
/* Always true with stratum */
#define pool_localgen(pool) (true)
#define json_rpc_call(curl, url, userpass, rpc_req, probe, longpoll, rolltime, pool, share) (NULL)
#define work_decode(pool, work, val) (false)
#define gen_gbt_work(pool, work) {}
#endif /* HAVE_LIBCURL */
int dev_from_id(int thr_id)
{
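The #else branch of the HAVE_LIBCURL conditional above is the core of the stratum-only build: the getwork/GBT entry points become macros that compile cleanly but always report that no work is available. A minimal standalone sketch of the same pattern, with a hypothetical HAVE_LIBFOO guard and fetch_remote_work name rather than the cgminer symbols:

    #include <stdbool.h>
    #include <stdio.h>

    /* HAVE_LIBFOO would normally be set by configure via AC_DEFINE. */
    #ifdef HAVE_LIBFOO
    #include <foo.h>
    #define fetch_remote_work(pool) foo_fetch(pool)        /* real library call */
    #else
    /* Stub: evaluate the argument (avoids unused warnings) and report "no work". */
    #define fetch_remote_work(pool) ((void)(pool), false)
    #endif

    int main(void)
    {
        void *pool = NULL;

        if (!fetch_remote_work(pool))
            printf("getwork support not built in, stratum only\n");
        return 0;
    }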
@@ -2190,18 +2204,6 @@ static void get_statline(char *buf, size_t bufsiz, struct cgpu_info *cgpu)
cgpu->drv->get_statline(buf, bufsiz, cgpu);
}
static void text_print_status(int thr_id)
{
struct cgpu_info *cgpu;
char logline[256];
cgpu = get_thr_cgpu(thr_id);
if (cgpu) {
get_statline(logline, sizeof(logline), cgpu);
printf("%s\n", logline);
}
}
#ifdef HAVE_CURSES
#define CURBUFSIZ 256
#define cg_mvwprintw(win, y, x, fmt, ...) do { \
@@ -2341,12 +2343,6 @@ static void curses_print_devstatus(struct cgpu_info *cgpu, int count)
}
#endif
static void print_status(int thr_id)
{
if (!curses_active)
text_print_status(thr_id);
}
#ifdef HAVE_CURSES
/* Check for window resize. Called with curses mutex locked */
static inline void change_logwinsize(void)
@@ -2638,6 +2634,25 @@ share_result(json_t *val, json_t *res, json_t *err, const struct work *work,
}
}
#ifdef HAVE_LIBCURL
static void text_print_status(int thr_id)
{
struct cgpu_info *cgpu;
char logline[256];
cgpu = get_thr_cgpu(thr_id);
if (cgpu) {
get_statline(logline, sizeof(logline), cgpu);
printf("%s\n", logline);
}
}
static void print_status(int thr_id)
{
if (!curses_active)
text_print_status(thr_id);
}
static bool submit_upstream_work(struct work *work, CURL *curl, bool resubmit)
{
char *hexstr = NULL;
@@ -2822,6 +2837,62 @@ out:
return rc;
}
static bool get_upstream_work(struct work *work, CURL *curl)
{
struct pool *pool = work->pool;
struct cgminer_pool_stats *pool_stats = &(pool->cgminer_pool_stats);
struct timeval tv_elapsed;
json_t *val = NULL;
bool rc = false;
char *url;
applog(LOG_DEBUG, "DBG: sending %s get RPC call: %s", pool->rpc_url, pool->rpc_req);
url = pool->rpc_url;
cgtime(&work->tv_getwork);
val = json_rpc_call(curl, url, pool->rpc_userpass, pool->rpc_req, false,
false, &work->rolltime, pool, false);
pool_stats->getwork_attempts++;
if (likely(val)) {
rc = work_decode(pool, work, val);
if (unlikely(!rc))
applog(LOG_DEBUG, "Failed to decode work in get_upstream_work");
} else
applog(LOG_DEBUG, "Failed json_rpc_call in get_upstream_work");
cgtime(&work->tv_getwork_reply);
timersub(&(work->tv_getwork_reply), &(work->tv_getwork), &tv_elapsed);
pool_stats->getwork_wait_rolling += ((double)tv_elapsed.tv_sec + ((double)tv_elapsed.tv_usec / 1000000)) * 0.63;
pool_stats->getwork_wait_rolling /= 1.63;
timeradd(&tv_elapsed, &(pool_stats->getwork_wait), &(pool_stats->getwork_wait));
if (timercmp(&tv_elapsed, &(pool_stats->getwork_wait_max), >)) {
pool_stats->getwork_wait_max.tv_sec = tv_elapsed.tv_sec;
pool_stats->getwork_wait_max.tv_usec = tv_elapsed.tv_usec;
}
if (timercmp(&tv_elapsed, &(pool_stats->getwork_wait_min), <)) {
pool_stats->getwork_wait_min.tv_sec = tv_elapsed.tv_sec;
pool_stats->getwork_wait_min.tv_usec = tv_elapsed.tv_usec;
}
pool_stats->getwork_calls++;
work->pool = pool;
work->longpoll = false;
work->getwork_mode = GETWORK_MODE_POOL;
calc_diff(work, 0);
total_getworks++;
pool->getwork_requested++;
if (likely(val))
json_decref(val);
return rc;
}
#endif /* HAVE_LIBCURL */
/* Specifies whether we can use this pool for work or not. */
static bool pool_unworkable(struct pool *pool)
{
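In get_upstream_work above, getwork_wait_rolling is updated as new = (old + 0.63 * sample) / 1.63, a decayed rolling average that gives roughly 39% weight to the newest wait time and 61% to the history. The same update as a standalone sketch (hypothetical roll_avg helper, not a cgminer function):

    #include <stdio.h>

    /* Decayed rolling average as used for getwork wait times:
     * newest sample weight = 0.63/1.63 (about 0.39),
     * accumulated history weight = 1/1.63 (about 0.61). */
    static double roll_avg(double old, double sample)
    {
        return (old + 0.63 * sample) / 1.63;
    }

    int main(void)
    {
        double rolling = 0.0;
        const double waits[] = { 1.0, 1.0, 4.0, 1.0 };  /* seconds */

        for (int i = 0; i < 4; i++) {
            rolling = roll_avg(rolling, waits[i]);
            printf("after %.1fs wait: rolling = %.3f\n", waits[i], rolling);
        }
        return 0;
    }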
@@ -3010,61 +3081,6 @@ static void get_benchmark_work(struct work *work)
calc_diff(work, 0);
}
static bool get_upstream_work(struct work *work, CURL *curl)
{
struct pool *pool = work->pool;
struct cgminer_pool_stats *pool_stats = &(pool->cgminer_pool_stats);
struct timeval tv_elapsed;
json_t *val = NULL;
bool rc = false;
char *url;
applog(LOG_DEBUG, "DBG: sending %s get RPC call: %s", pool->rpc_url, pool->rpc_req);
url = pool->rpc_url;
cgtime(&work->tv_getwork);
val = json_rpc_call(curl, url, pool->rpc_userpass, pool->rpc_req, false,
false, &work->rolltime, pool, false);
pool_stats->getwork_attempts++;
if (likely(val)) {
rc = work_decode(pool, work, val);
if (unlikely(!rc))
applog(LOG_DEBUG, "Failed to decode work in get_upstream_work");
} else
applog(LOG_DEBUG, "Failed json_rpc_call in get_upstream_work");
cgtime(&work->tv_getwork_reply);
timersub(&(work->tv_getwork_reply), &(work->tv_getwork), &tv_elapsed);
pool_stats->getwork_wait_rolling += ((double)tv_elapsed.tv_sec + ((double)tv_elapsed.tv_usec / 1000000)) * 0.63;
pool_stats->getwork_wait_rolling /= 1.63;
timeradd(&tv_elapsed, &(pool_stats->getwork_wait), &(pool_stats->getwork_wait));
if (timercmp(&tv_elapsed, &(pool_stats->getwork_wait_max), >)) {
pool_stats->getwork_wait_max.tv_sec = tv_elapsed.tv_sec;
pool_stats->getwork_wait_max.tv_usec = tv_elapsed.tv_usec;
}
if (timercmp(&tv_elapsed, &(pool_stats->getwork_wait_min), <)) {
pool_stats->getwork_wait_min.tv_sec = tv_elapsed.tv_sec;
pool_stats->getwork_wait_min.tv_usec = tv_elapsed.tv_usec;
}
pool_stats->getwork_calls++;
work->pool = pool;
work->longpoll = false;
work->getwork_mode = GETWORK_MODE_POOL;
calc_diff(work, 0);
total_getworks++;
pool->getwork_requested++;
if (likely(val))
json_decref(val);
return rc;
}
#ifdef HAVE_CURSES
static void disable_curses_windows(void)
{
@@ -3232,6 +3248,7 @@ static void sighandler(int __maybe_unused sig)
kill_work();
}
#ifdef HAVE_LIBCURL
/* Called with pool_lock held. Recruit an extra curl if none are available for
* this pool. */
static void recruit_curl(struct pool *pool)
@@ -3313,7 +3330,7 @@ static inline bool should_roll(struct work *work)
cgtime(&now);
if (now.tv_sec - work->tv_staged.tv_sec > expiry)
return false;
return true;
}
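should_roll above and roll_work in the next hunk implement work rolling: squeezing extra valid work items out of one getwork template before it expires, conventionally by bumping the ntime field of the 80-byte block header. A standalone sketch of that ntime-rolling idea, as an illustration of the technique rather than cgminer's roll_work (which also manages work ids and roll counters):

    #include <arpa/inet.h>   /* htonl/ntohl */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* In the 80-byte bitcoin block header, ntime sits at byte offset 68
     * (version 0..3, prev hash 4..35, merkle root 36..67, ntime 68..71,
     * bits 72..75, nonce 76..79). Incrementing it yields distinct work. */
    static void roll_ntime(unsigned char header[80])
    {
        uint32_t be_ntime;

        memcpy(&be_ntime, header + 68, 4);
        be_ntime = htonl(ntohl(be_ntime) + 1);
        memcpy(header + 68, &be_ntime, 4);
    }

    int main(void)
    {
        unsigned char header[80] = { 0 };
        uint32_t t = htonl(1700000000u);

        memcpy(header + 68, &t, 4);
        roll_ntime(header);
        memcpy(&t, header + 68, 4);
        printf("rolled ntime: %u\n", ntohl(t));   /* prints 1700000001 */
        return 0;
    }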
@@ -3344,36 +3361,47 @@ static void roll_work(struct work *work)
work->id = total_work++;
}
/* Duplicates any dynamically allocated arrays within the work struct to
* prevent a copied work struct from freeing ram belonging to another struct */
void __copy_work(struct work *work, struct work *base_work)
static void *submit_work_thread(void *userdata)
{
int id = work->id;
struct work *work = (struct work *)userdata;
struct pool *pool = work->pool;
bool resubmit = false;
struct curl_ent *ce;
clean_work(work);
memcpy(work, base_work, sizeof(struct work));
/* Keep the unique new id assigned during make_work to prevent copied
* work from having the same id. */
work->id = id;
if (base_work->job_id)
work->job_id = strdup(base_work->job_id);
if (base_work->nonce1)
work->nonce1 = strdup(base_work->nonce1);
if (base_work->ntime)
work->ntime = strdup(base_work->ntime);
if (base_work->coinbase)
work->coinbase = strdup(base_work->coinbase);
}
pthread_detach(pthread_self());
/* Generates a copy of an existing work struct, creating fresh heap allocations
* for all dynamically allocated arrays within the struct */
struct work *copy_work(struct work *base_work)
{
struct work *work = make_work();
RenameThread("submit_work");
__copy_work(work, base_work);
applog(LOG_DEBUG, "Creating extra submit work thread");
return work;
ce = pop_curl_entry(pool);
/* submit solution to bitcoin via JSON-RPC */
while (!submit_upstream_work(work, ce->curl, resubmit)) {
if (opt_lowmem) {
applog(LOG_NOTICE, "Pool %d share being discarded to minimise memory cache", pool->pool_no);
break;
}
resubmit = true;
if (stale_work(work, true)) {
applog(LOG_NOTICE, "Pool %d share became stale while retrying submit, discarding", pool->pool_no);
mutex_lock(&stats_lock);
total_stale++;
pool->stale_shares++;
total_diff_stale += work->work_difficulty;
pool->diff_stale += work->work_difficulty;
mutex_unlock(&stats_lock);
free_work(work);
break;
}
/* pause, then restart work-request loop */
applog(LOG_INFO, "json_rpc_call failed on submit_work, retrying");
}
push_curl_entry(ce, pool);
return NULL;
}
static struct work *make_clone(struct work *work)
@@ -3422,6 +3450,81 @@ out_unlock:
return cloned;
}
/* Clones work by rolling it if possible, and returning a clone instead of the
* original work item which gets staged again to possibly be rolled again in
* the future */
static struct work *clone_work(struct work *work)
{
int mrs = mining_threads + opt_queue - total_staged();
struct work *work_clone;
bool cloned;
if (mrs < 1)
return work;
cloned = false;
work_clone = make_clone(work);
while (mrs-- > 0 && can_roll(work) && should_roll(work)) {
applog(LOG_DEBUG, "Pushing rolled converted work to stage thread");
stage_work(work_clone);
roll_work(work);
work_clone = make_clone(work);
/* Roll it again to prevent duplicates should this be used
* directly later on */
roll_work(work);
cloned = true;
}
if (cloned) {
stage_work(work);
return work_clone;
}
free_work(work_clone);
return work;
}
#else /* HAVE_LIBCURL */
static void *submit_work_thread(void __maybe_unused *userdata)
{
pthread_detach(pthread_self());
return NULL;
}
#endif /* HAVE_LIBCURL */
/* Duplicates any dynamically allocated arrays within the work struct to
* prevent a copied work struct from freeing ram belonging to another struct */
void __copy_work(struct work *work, struct work *base_work)
{
int id = work->id;
clean_work(work);
memcpy(work, base_work, sizeof(struct work));
/* Keep the unique new id assigned during make_work to prevent copied
* work from having the same id. */
work->id = id;
if (base_work->job_id)
work->job_id = strdup(base_work->job_id);
if (base_work->nonce1)
work->nonce1 = strdup(base_work->nonce1);
if (base_work->ntime)
work->ntime = strdup(base_work->ntime);
if (base_work->coinbase)
work->coinbase = strdup(base_work->coinbase);
}
/* Generates a copy of an existing work struct, creating fresh heap allocations
* for all dynamically allocated arrays within the struct */
struct work *copy_work(struct work *base_work)
{
struct work *work = make_work();
__copy_work(work, base_work);
return work;
}
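__copy_work and copy_work above follow the usual C deep-copy discipline: copy the struct by value, then re-strdup every heap-owned pointer so each copy can be freed independently. The same idea in a self-contained form, using a hypothetical struct item rather than cgminer's struct work:

    #include <stdlib.h>
    #include <string.h>

    struct item {
        int id;
        char *name;   /* heap-owned string */
    };

    /* Copy all fields by value, then give the copy its own string so
     * freeing one item never touches memory still owned by the other. */
    static void deep_copy(struct item *dst, const struct item *src)
    {
        memcpy(dst, src, sizeof(*dst));
        if (src->name)
            dst->name = strdup(src->name);
    }

    int main(void)
    {
        struct item a = { 1, strdup("coinbase") }, b;

        deep_copy(&b, &a);
        free(a.name);
        free(b.name);   /* safe: b owns a separate allocation */
        return 0;
    }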
static void pool_died(struct pool *pool)
{
if (!pool_tset(pool, &pool->idle)) {
@@ -3565,49 +3668,6 @@ static void rebuild_hash(struct work *work)
static bool cnx_needed(struct pool *pool);
static void *submit_work_thread(void *userdata)
{
struct work *work = (struct work *)userdata;
struct pool *pool = work->pool;
bool resubmit = false;
struct curl_ent *ce;
pthread_detach(pthread_self());
RenameThread("submit_work");
applog(LOG_DEBUG, "Creating extra submit work thread");
ce = pop_curl_entry(pool);
/* submit solution to bitcoin via JSON-RPC */
while (!submit_upstream_work(work, ce->curl, resubmit)) {
if (opt_lowmem) {
applog(LOG_NOTICE, "Pool %d share being discarded to minimise memory cache", pool->pool_no);
break;
}
resubmit = true;
if (stale_work(work, true)) {
applog(LOG_NOTICE, "Pool %d share became stale while retrying submit, discarding", pool->pool_no);
mutex_lock(&stats_lock);
total_stale++;
pool->stale_shares++;
total_diff_stale += work->work_difficulty;
pool->diff_stale += work->work_difficulty;
mutex_unlock(&stats_lock);
free_work(work);
break;
}
/* pause, then restart work-request loop */
applog(LOG_INFO, "json_rpc_call failed on submit_work, retrying");
}
push_curl_entry(ce, pool);
return NULL;
}
/* Find the pool that currently has the highest priority */
static struct pool *priority_pool(int choice)
{
@@ -5754,41 +5814,6 @@ static struct work *hash_pop(void)
return work;
}
/* Clones work by rolling it if possible, and returning a clone instead of the
* original work item which gets staged again to possibly be rolled again in
* the future */
static struct work *clone_work(struct work *work)
{
int mrs = mining_threads + opt_queue - total_staged();
struct work *work_clone;
bool cloned;
if (mrs < 1)
return work;
cloned = false;
work_clone = make_clone(work);
while (mrs-- > 0 && can_roll(work) && should_roll(work)) {
applog(LOG_DEBUG, "Pushing rolled converted work to stage thread");
stage_work(work_clone);
roll_work(work);
work_clone = make_clone(work);
/* Roll it again to prevent duplicates should this be used
* directly later on */
roll_work(work);
cloned = true;
}
if (cloned) {
stage_work(work);
return work_clone;
}
free_work(work_clone);
return work;
}
static void gen_hash(unsigned char *data, unsigned char *hash, int len)
{
unsigned char hash1[32];
@@ -6461,6 +6486,7 @@ enum {
FAILURE_INTERVAL = 30,
};
#ifdef HAVE_LIBCURL
/* Stage another work item from the work returned in a longpoll */
static void convert_to_work(json_t *val, int rolltime, struct pool *pool, struct timeval *tv_lp, struct timeval *tv_lp_reply)
{
@@ -6531,6 +6557,7 @@ static struct pool *select_longpoll_pool(struct pool *cp)
}
return NULL;
}
#endif /* HAVE_LIBCURL */
/* This will make the longpoll thread wait till it's the current pool, or it
* has been flagged as rejecting, before attempting to open any connections.
@@ -6546,6 +6573,7 @@ static void wait_lpcurrent(struct pool *pool)
}
}
#ifdef HAVE_LIBCURL
static void *longpoll_thread(void *userdata)
{
struct pool *cp = (struct pool *)userdata;
@@ -6673,6 +6701,13 @@ out:
return NULL;
}
#else /* HAVE_LIBCURL */
static void *longpoll_thread(void __maybe_unused *userdata)
{
pthread_detach(pthread_self());
return NULL;
}
#endif /* HAVE_LIBCURL */
void reinit_device(struct cgpu_info *cgpu)
{
@@ -8143,7 +8178,6 @@ begin_bench:
int ts, max_staged = opt_queue;
struct pool *pool, *cp;
bool lagging = false;
struct curl_ent *ce;
struct work *work;
cp = current_pool();
@@ -8194,6 +8228,16 @@ retry:
continue;
}
if (opt_benchmark) {
get_benchmark_work(work);
applog(LOG_DEBUG, "Generated benchmark work");
stage_work(work);
continue;
}
#ifdef HAVE_LIBCURL
struct curl_ent *ce;
if (pool->has_gbt) {
while (pool->idle) {
struct pool *altpool = select_pool(true);
@@ -8216,13 +8260,6 @@ retry:
continue;
}
if (opt_benchmark) {
get_benchmark_work(work);
applog(LOG_DEBUG, "Generated benchmark work");
stage_work(work);
continue;
}
work->pool = pool;
ce = pop_curl_entry(pool);
/* obtain new work from bitcoin via JSON-RPC */
@@ -8245,6 +8282,7 @@ retry:
applog(LOG_DEBUG, "Generated getwork work");
stage_work(work);
push_curl_entry(ce, pool);
#endif
}
return 0;

configure.ac (28 lines changed)

@@ -354,15 +354,26 @@ fi
AC_SUBST(LIBUSB_LIBS)
AC_SUBST(LIBUSB_CFLAGS)
if test "x$have_win32" != xtrue; then
PKG_CHECK_MODULES([LIBCURL], [libcurl >= 7.25.0], [AC_DEFINE([CURL_HAS_KEEPALIVE], [1], [Defined if version of curl supports keepalive.])],
[PKG_CHECK_MODULES([LIBCURL], [libcurl >= 7.18.2], ,[AC_MSG_ERROR([Missing required libcurl dev >= 7.18.2])])])
AC_ARG_ENABLE([libcurl],
[AC_HELP_STRING([--disable-libcurl],[Disable building with libcurl for getwork and GBT support])],
[libcurl=$enableval]
)
if test "x$libcurl" != xno; then
if test "x$have_win32" != xtrue; then
PKG_CHECK_MODULES([LIBCURL], [libcurl >= 7.25.0], [AC_DEFINE([CURL_HAS_KEEPALIVE], [1], [Defined if version of curl supports keepalive.])],
[PKG_CHECK_MODULES([LIBCURL], [libcurl >= 7.18.2], ,[AC_MSG_ERROR([Missing required libcurl dev >= 7.18.2])])])
else
PKG_CHECK_MODULES([LIBCURL], [libcurl >= 7.25.0], ,[AC_MSG_ERROR([Missing required libcurl dev >= 7.25.0])])
AC_DEFINE([CURL_HAS_KEEPALIVE], [1])
fi
AC_DEFINE([HAVE_LIBCURL], [1], [Defined to 1 if libcurl support built in])
else
PKG_CHECK_MODULES([LIBCURL], [libcurl >= 7.25.0], ,[AC_MSG_ERROR([Missing required libcurl dev >= 7.25.0])])
AC_DEFINE([CURL_HAS_KEEPALIVE], [1])
LIBCURL_LIBS=""
fi
AC_SUBST(LIBCURL_LIBS)
#check execv signature
AC_COMPILE_IFELSE([AC_LANG_SOURCE([
#include <process.h>
@@ -451,8 +462,15 @@ echo
echo "Configuration Options Summary:"
echo
if test "x$libcurl" != xno; then
echo " libcurl(GBT+getwork).: Enabled: $LIBCURL_LIBS"
else
echo " libcurl(GBT+getwork).: Disabled"
fi
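With --disable-libcurl the pkg-config checks for libcurl are skipped entirely, HAVE_LIBCURL is never defined, and LIBCURL_LIBS is cleared so nothing links against curl; the summary above then reports the feature as Disabled. When the option is omitted (or --enable-libcurl is passed) the previous behaviour is unchanged and a sufficiently recent libcurl remains a hard requirement.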
echo " curses.TUI...........: $cursesmsg"
if test "x$opencl" != xno; then
if test $found_opencl = 1; then
echo " OpenCL...............: FOUND. GPU mining support enabled"

miner.h (16 lines changed)

@@ -8,7 +8,17 @@
#include <sys/time.h>
#include <pthread.h>
#include <jansson.h>
#ifdef HAVE_LIBCURL
#include <curl/curl.h>
#else
typedef char CURL;
extern char *curly;
#define curl_easy_init(curl) (curly)
#define curl_easy_cleanup(curl) {}
#define curl_global_cleanup() {}
#define CURL_GLOBAL_ALL 0
#define curl_global_init(X) (0)
#endif
#include <sched.h>
#include "elist.h"
@@ -912,10 +922,12 @@ extern int swork_id;
extern pthread_rwlock_t netacc_lock;
extern const uint32_t sha256_init_state[];
#ifdef HAVE_LIBCURL
extern json_t *json_rpc_call(CURL *curl, const char *url, const char *userpass,
const char *rpc_req, bool, bool, int *,
struct pool *pool, bool);
extern const char *proxytype(curl_proxytype proxytype);
#endif
extern const char *proxytype(proxytypes_t proxytype);
extern char *get_proxy(char *url, struct pool *pool);
extern char *bin2hex(const unsigned char *p, size_t len);
extern bool hex2bin(unsigned char *p, const char *hexstr, size_t len);
@@ -1155,7 +1167,7 @@ struct pool {
char *rpc_url;
char *rpc_userpass;
char *rpc_user, *rpc_pass;
curl_proxytype rpc_proxytype;
proxytypes_t rpc_proxytype;
char *rpc_proxy;
pthread_mutex_t pool_lock;

ocl.c (1 line changed)

@@ -14,6 +14,7 @@
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <limits.h>
#include <sys/types.h>
#ifdef WIN32

util.c (153 lines changed)

@@ -16,7 +16,9 @@
#include <stdarg.h>
#include <string.h>
#include <jansson.h>
#ifdef HAVE_LIBCURL
#include <curl/curl.h>
#endif
#include <time.h>
#include <errno.h>
#include <unistd.h>
@@ -45,6 +47,44 @@
#define DEFAULT_SOCKWAIT 60
bool successful_connect = false;
static void keep_sockalive(SOCKETTYPE fd)
{
const int tcp_one = 1;
#ifndef WIN32
const int tcp_keepidle = 45;
const int tcp_keepintvl = 30;
int flags = fcntl(fd, F_GETFL, 0);
fcntl(fd, F_SETFL, O_NONBLOCK | flags);
#else
u_long flags = 1;
ioctlsocket(fd, FIONBIO, &flags);
#endif
setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (const void *)&tcp_one, sizeof(tcp_one));
if (!opt_delaynet)
#ifndef __linux
setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one));
#else /* __linux */
setsockopt(fd, SOL_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one));
setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &tcp_one, sizeof(tcp_one));
setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &tcp_keepidle, sizeof(tcp_keepidle));
setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &tcp_keepintvl, sizeof(tcp_keepintvl));
#endif /* __linux */
#ifdef __APPLE_CC__
setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &tcp_keepintvl, sizeof(tcp_keepintvl));
#endif /* __APPLE_CC__ */
}
struct tq_ent {
void *data;
struct list_head q_node;
};
#ifdef HAVE_LIBCURL
struct timeval nettime;
struct data_buffer {
@@ -67,11 +107,6 @@ struct header_info {
bool hadexpire;
};
struct tq_ent {
void *data;
struct list_head q_node;
};
static void databuf_free(struct data_buffer *db)
{
if (!db)
@@ -202,36 +237,19 @@ out:
return ptrlen;
}
static void keep_sockalive(SOCKETTYPE fd)
static void last_nettime(struct timeval *last)
{
const int tcp_one = 1;
#ifndef WIN32
const int tcp_keepidle = 45;
const int tcp_keepintvl = 30;
int flags = fcntl(fd, F_GETFL, 0);
fcntl(fd, F_SETFL, O_NONBLOCK | flags);
#else
u_long flags = 1;
ioctlsocket(fd, FIONBIO, &flags);
#endif
setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (const void *)&tcp_one, sizeof(tcp_one));
if (!opt_delaynet)
#ifndef __linux
setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one));
#else /* __linux */
setsockopt(fd, SOL_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one));
setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &tcp_one, sizeof(tcp_one));
setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &tcp_keepidle, sizeof(tcp_keepidle));
setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &tcp_keepintvl, sizeof(tcp_keepintvl));
#endif /* __linux */
#ifdef __APPLE_CC__
setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &tcp_keepintvl, sizeof(tcp_keepintvl));
#endif /* __APPLE_CC__ */
rd_lock(&netacc_lock);
last->tv_sec = nettime.tv_sec;
last->tv_usec = nettime.tv_usec;
rd_unlock(&netacc_lock);
}
static void set_nettime(void)
{
wr_lock(&netacc_lock);
cgtime(&nettime);
wr_unlock(&netacc_lock);
}
#if CURL_HAS_KEEPALIVE
@@ -255,21 +273,6 @@ static void keep_curlalive(CURL *curl)
}
#endif
static void last_nettime(struct timeval *last)
{
rd_lock(&netacc_lock);
last->tv_sec = nettime.tv_sec;
last->tv_usec = nettime.tv_usec;
rd_unlock(&netacc_lock);
}
static void set_nettime(void)
{
wr_lock(&netacc_lock);
cgtime(&nettime);
wr_unlock(&netacc_lock);
}
static int curl_debug_cb(__maybe_unused CURL *handle, curl_infotype type,
__maybe_unused char *data, size_t size, void *userdata)
{
@@ -513,29 +516,35 @@ err_out:
curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1);
return NULL;
}
#define PROXY_HTTP CURLPROXY_HTTP
#define PROXY_HTTP_1_0 CURLPROXY_HTTP_1_0
#define PROXY_SOCKS4 CURLPROXY_SOCKS4
#define PROXY_SOCKS5 CURLPROXY_SOCKS5
#define PROXY_SOCKS4A CURLPROXY_SOCKS4A
#define PROXY_SOCKS5H CURLPROXY_SOCKS5_HOSTNAME
#else /* HAVE_LIBCURL */
#define PROXY_HTTP 0
#define PROXY_HTTP_1_0 1
#define PROXY_SOCKS4 2
#define PROXY_SOCKS5 3
#define PROXY_SOCKS4A 4
#define PROXY_SOCKS5H 5
#endif /* HAVE_LIBCURL */
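The PROXY_* names give the rest of the code a curl-independent vocabulary for proxy types: with libcurl built in they alias the CURLPROXY_* enum values, and without it they are plain small integers, matching the proxytypes_t typedef added to util.h below.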
#if (LIBCURL_VERSION_MAJOR == 7 && LIBCURL_VERSION_MINOR >= 10) || (LIBCURL_VERSION_MAJOR > 7)
static struct {
const char *name;
curl_proxytype proxytype;
proxytypes_t proxytype;
} proxynames[] = {
{ "http:", CURLPROXY_HTTP },
#if (LIBCURL_VERSION_MAJOR > 7) || (LIBCURL_VERSION_MINOR > 19) || (LIBCURL_VERSION_MINOR == 19 && LIBCURL_VERSION_PATCH >= 4)
{ "http0:", CURLPROXY_HTTP_1_0 },
#endif
#if (LIBCURL_VERSION_MAJOR > 7) || (LIBCURL_VERSION_MINOR > 15) || (LIBCURL_VERSION_MINOR == 15 && LIBCURL_VERSION_PATCH >= 2)
{ "socks4:", CURLPROXY_SOCKS4 },
#endif
{ "socks5:", CURLPROXY_SOCKS5 },
#if (LIBCURL_VERSION_MAJOR > 7) || (LIBCURL_VERSION_MINOR >= 18)
{ "socks4a:", CURLPROXY_SOCKS4A },
{ "socks5h:", CURLPROXY_SOCKS5_HOSTNAME },
#endif
{ "http:", PROXY_HTTP },
{ "http0:", PROXY_HTTP_1_0 },
{ "socks4:", PROXY_SOCKS4 },
{ "socks5:", PROXY_SOCKS5 },
{ "socks4a:", PROXY_SOCKS4A },
{ "socks5h:", PROXY_SOCKS5H },
{ NULL, 0 }
};
#endif
const char *proxytype(curl_proxytype proxytype)
const char *proxytype(proxytypes_t proxytype)
{
int i;
@@ -550,7 +559,6 @@ char *get_proxy(char *url, struct pool *pool)
{
pool->rpc_proxy = NULL;
#if (LIBCURL_VERSION_MAJOR == 7 && LIBCURL_VERSION_MINOR >= 10) || (LIBCURL_VERSION_MAJOR > 7)
char *split;
int plen, len, i;
@@ -573,7 +581,6 @@ char *get_proxy(char *url, struct pool *pool)
break;
}
}
#endif
return url;
}
@@ -1948,7 +1955,7 @@ static bool setup_stratum_socket(struct pool *pool)
if (!pool->rpc_proxy && opt_socks_proxy) {
pool->rpc_proxy = opt_socks_proxy;
extract_sockaddr(pool->rpc_proxy, &pool->sockaddr_proxy_url, &pool->sockaddr_proxy_port);
pool->rpc_proxytype = CURLPROXY_SOCKS5;
pool->rpc_proxytype = PROXY_SOCKS5;
}
if (pool->rpc_proxy) {
@@ -1995,24 +2002,24 @@ static bool setup_stratum_socket(struct pool *pool)
if (pool->rpc_proxy) {
switch (pool->rpc_proxytype) {
case CURLPROXY_HTTP_1_0:
case PROXY_HTTP_1_0:
if (!http_negotiate(pool, sockd, true))
return false;
break;
case CURLPROXY_HTTP:
case PROXY_HTTP:
if (!http_negotiate(pool, sockd, false))
return false;
break;
case CURLPROXY_SOCKS5:
case CURLPROXY_SOCKS5_HOSTNAME:
case PROXY_SOCKS5:
case PROXY_SOCKS5H:
if (!socks5_negotiate(pool, sockd))
return false;
break;
case CURLPROXY_SOCKS4:
case PROXY_SOCKS4:
if (!socks4_negotiate(pool, sockd, false))
return false;
break;
case CURLPROXY_SOCKS4A:
case PROXY_SOCKS4A:
if (!socks4_negotiate(pool, sockd, true))
return false;
break;

util.h (6 lines changed)

@@ -52,6 +52,12 @@
#define JSON_LOADS(str, err_ptr) json_loads((str), (err_ptr))
#endif
#ifdef HAVE_LIBCURL
typedef curl_proxytype proxytypes_t;
#else
typedef int proxytypes_t;
#endif /* HAVE_LIBCURL */
/* cgminer specific unnamed semaphore implementations to cope with osx not
* implementing them. */
#ifdef __APPLE__
