Browse Source

Merge remote-tracking branch 'upstream/master'

nfactor-troky
James Z.M. Gao 12 years ago
parent
commit
57fcd07bdd
  1. 4
      Makefile.am
  2. 44
      README
  3. 2
      api-example.c
  4. 16
      api.c
  5. 116
      cgminer.c
  6. 23
      configure.ac
  7. 1038
      driver-avalon.c
  8. 130
      driver-avalon.h
  9. 22
      fpgautils.c
  10. 2
      fpgautils.h
  11. 77
      hexdump.c
  12. 20
      miner.h
  13. 4
      usbutils.c
  14. 2
      usbutils.h
  15. 91
      util.c

4
Makefile.am

@ -100,6 +100,10 @@ if HAS_ICARUS @@ -100,6 +100,10 @@ if HAS_ICARUS
cgminer_SOURCES += driver-icarus.c
endif
if HAS_AVALON
cgminer_SOURCES += driver-avalon.c
endif
if HAS_MODMINER
cgminer_SOURCES += driver-modminer.c
bitstreamsdir = $(bindir)/bitstreams

44
README

@ -984,6 +984,10 @@ Q: Can I mine with Nvidia or Intel GPUs? @@ -984,6 +984,10 @@ Q: Can I mine with Nvidia or Intel GPUs?
A: Yes but their hashrate is very poor and likely you'll be using much more
energy than you'll be earning in coins.
Q: Can I mine on both Nvidia and AMD GPUs at the same time?
A: No, you must run one instance of cgminer with the --gpu-platform option for
each.
Q: Can I mine on Linux without running Xorg?
A: With Nvidia you can, but with AMD you cannot.
@ -1001,13 +1005,51 @@ A: You are generating garbage hashes due to your choice of settings. Your @@ -1001,13 +1005,51 @@ A: You are generating garbage hashes due to your choice of settings. Your
Work Utility (WU) value will confirm you are not generating garbage. You
should be getting about .9WU per kHash. If not, then try decreasing your
intensity, do not increase the number of gpu-threads, and consider adding
system RAM to match your GPU ram.
system RAM to match your GPU ram. You may also be using a bad combination
of driver and/or SDK.
Q: Scrypt fails to initialise the kernel every time?
A: Your parameters are too high. Don't add GPU threads, don't set intensity
too high, decrease thread concurrency. See the SCRYPT-README for a lot more
help.
Q: Cgminer stops mining (or my GPUs go DEAD) and I can't close it?
A: Once the driver has crashed, there is no way for cgminer to close cleanly.
You will have to kill it, and depending on how corrupted your driver state
has gotten, you may even need to reboot. Windows is known to reset drivers
when they fail and cgminer will be stuck trying to use the old driver instance.
Q: I can't get any monitoring of temperatures or fanspeed with cgminer when
I start it remotely?
A: With linux, make sure to export the DISPLAY variable. On windows, you
cannot access these monitoring values via RDP. This should work with tightVNC
or teamviewer though.
Q: I change my GPU engine/memory/voltage and cgminer reports back no change?
A: Cgminer asks the GPU using the ATI Display Library to change settings, but
the driver and hardware are free to do what it wants with that query, including
ignoring it. Some GPUs are locked with one or more of those properties as well.
Q: I have multiple GPUs and although many devices show up, it appears to be
working only on one GPU splitting it up.
A: Your driver setup is failing to properly use the accessory GPUs. Your
driver may be configured wrong or you have a driver version that needs a dummy
plug on all the GPUs that aren't connected to a monitor.
Q: I have some random GPU performance related problem not addressed above.
A: Seriously, it's the driver and/or SDK. Uninstall them and start again,
noting there is no clean way to uninstall them so you have to use extra tools
or do it manually.
Q: Do I need to recompile after updating my driver/SDK?
A: No. The software is unchanged regardless of which driver/SDK/ADL_SDK version
you are running. However if you change SDKs you should delete any generated
.bin files for them to be recreated with the new SDK.
Q: I switch users on windows and my mining stops working?
A: That's correct, it does. It's a permissions issue that there is no known
fix for.
Q: My network gets slower and slower and then dies for a minute?
A: Try the --net-delay option.

2
api-example.c

@ -145,7 +145,7 @@ @@ -145,7 +145,7 @@
static const char SEPARATOR = '|';
static const char COMMA = ',';
static const char EQ = '=';
static int ONLY = 0;
static int ONLY;
void display(char *buf)
{

16
api.c

@ -29,7 +29,7 @@ @@ -29,7 +29,7 @@
#include "util.h"
#include "driver-cpu.h" /* for algo_names[], TODO: re-factor dependency */
#if defined(USE_BFLSC)
#if defined(USE_BFLSC) || defined(USE_AVALON)
#define HAVE_AN_ASIC 1
#endif
@ -179,6 +179,9 @@ static const char *DEVICECODE = "" @@ -179,6 +179,9 @@ static const char *DEVICECODE = ""
#ifdef USE_ICARUS
"ICA "
#endif
#ifdef USE_AVALON
"AVA "
#endif
#ifdef USE_ZTEX
"ZTX "
#endif
@ -605,9 +608,6 @@ struct CODES { @@ -605,9 +608,6 @@ struct CODES {
static int my_thr_id = 0;
static bool bye;
#if defined(HAVE_OPENCL) || defined (HAVE_AN_ASIC) || defined(HAVE_AN_FPGA)
static bool ping = true;
#endif
// Used to control quit restart access to shutdown variables
static pthread_mutex_t quit_restart_lock;
@ -1178,6 +1178,10 @@ static int numascs() @@ -1178,6 +1178,10 @@ static int numascs()
rd_lock(&devices_lock);
for (i = 0; i < total_devices; i++) {
#ifdef USE_AVALON
if (devices[i]->drv->drv_id == DRIVER_AVALON)
count++;
#endif
#ifdef USE_BFLSC
if (devices[i]->drv->drv_id == DRIVER_BFLSC)
count++;
@ -1194,6 +1198,10 @@ static int ascdevice(int ascid) @@ -1194,6 +1198,10 @@ static int ascdevice(int ascid)
rd_lock(&devices_lock);
for (i = 0; i < total_devices; i++) {
#ifdef USE_AVALON
if (devices[i]->drv->drv_id == DRIVER_AVALON)
count++;
#endif
#ifdef USE_BFLSC
if (devices[i]->drv->drv_id == DRIVER_BFLSC)
count++;

116
cgminer.c

@ -48,6 +48,7 @@ @@ -48,6 +48,7 @@
#include "driver-opencl.h"
#include "bench_block.h"
#include "scrypt.h"
#include "driver-avalon.h"
#if defined(unix)
#include <errno.h>
@ -55,9 +56,9 @@ @@ -55,9 +56,9 @@
#include <sys/wait.h>
#endif
#if defined(USE_BITFORCE) || defined(USE_ICARUS) || defined(USE_MODMINER)
#if defined(USE_BITFORCE) || defined(USE_ICARUS) || defined(USE_AVALON) || defined(USE_MODMINER)
# define USE_FPGA
#if defined(USE_ICARUS)
#if defined(USE_ICARUS) || defined(USE_AVALON)
# define USE_FPGA_SERIAL
#endif
#elif defined(USE_ZTEX)
@ -138,6 +139,9 @@ bool opt_disable_pool; @@ -138,6 +139,9 @@ bool opt_disable_pool;
char *opt_icarus_options = NULL;
char *opt_icarus_timing = NULL;
bool opt_worktime;
#ifdef USE_AVALON
char *opt_avalon_options = NULL;
#endif
#ifdef USE_USBUTILS
char *opt_usb_select = NULL;
int opt_usbdump = -1;
@ -394,6 +398,7 @@ struct thr_info *get_thread(int thr_id) @@ -394,6 +398,7 @@ struct thr_info *get_thread(int thr_id)
rd_lock(&mining_thr_lock);
thr = mining_thr[thr_id];
rd_unlock(&mining_thr_lock);
return thr;
}
@ -411,6 +416,7 @@ struct cgpu_info *get_devices(int id) @@ -411,6 +416,7 @@ struct cgpu_info *get_devices(int id)
rd_lock(&devices_lock);
cgpu = devices[id];
rd_unlock(&devices_lock);
return cgpu;
}
@ -451,6 +457,7 @@ static void sharelog(const char*disposition, const struct work*work) @@ -451,6 +457,7 @@ static void sharelog(const char*disposition, const struct work*work)
ret = fwrite(s, rv, 1, sharelog_file);
fflush(sharelog_file);
mutex_unlock(&sharelog_lock);
if (ret != 1)
applog(LOG_ERR, "sharelog fwrite error");
}
@ -496,6 +503,7 @@ static bool pool_tset(struct pool *pool, bool *var) @@ -496,6 +503,7 @@ static bool pool_tset(struct pool *pool, bool *var)
ret = *var;
*var = true;
mutex_unlock(&pool->pool_lock);
return ret;
}
@ -507,6 +515,7 @@ bool pool_tclear(struct pool *pool, bool *var) @@ -507,6 +515,7 @@ bool pool_tclear(struct pool *pool, bool *var)
ret = *var;
*var = false;
mutex_unlock(&pool->pool_lock);
return ret;
}
@ -517,6 +526,7 @@ struct pool *current_pool(void) @@ -517,6 +526,7 @@ struct pool *current_pool(void)
cg_rlock(&control_lock);
pool = currentpool;
cg_runlock(&control_lock);
return pool;
}
@ -786,6 +796,7 @@ static void load_temp_cutoffs() @@ -786,6 +796,7 @@ static void load_temp_cutoffs()
devices[i]->cutofftemp = opt_cutofftemp;
}
rd_unlock(&devices_lock);
return;
}
if (device <= 1) {
@ -833,6 +844,15 @@ static char *set_icarus_timing(const char *arg) @@ -833,6 +844,15 @@ static char *set_icarus_timing(const char *arg)
}
#endif
#ifdef USE_AVALON
/* Option-table callback for --avalon-options: stores the raw argument
 * string into opt_avalon_options via cgminer's opt_set_charp helper so
 * the Avalon driver can parse it later (presumably in driver-avalon.c,
 * which is not visible here).  Returns NULL, the opt-table convention
 * for "no parse error". */
static char *set_avalon_options(const char *arg)
{
	opt_set_charp(arg, &opt_avalon_options);
	return NULL;
}
#endif
#ifdef USE_USBUTILS
static char *set_usb_select(const char *arg)
{
@ -1032,6 +1052,11 @@ static struct opt_table opt_config_table[] = { @@ -1032,6 +1052,11 @@ static struct opt_table opt_config_table[] = {
OPT_WITH_ARG("--icarus-timing",
set_icarus_timing, NULL, NULL,
opt_hidden),
#endif
#ifdef USE_AVALON
OPT_WITH_ARG("--avalon-options",
set_avalon_options, NULL, NULL,
opt_hidden),
#endif
OPT_WITHOUT_ARG("--load-balance",
set_loadbalance, &pool_strategy,
@ -1105,7 +1130,7 @@ static struct opt_table opt_config_table[] = { @@ -1105,7 +1130,7 @@ static struct opt_table opt_config_table[] = {
#ifdef USE_FPGA_SERIAL
OPT_WITH_ARG("--scan-serial|-S",
add_serial, NULL, NULL,
"Serial port to probe for FPGA Mining device"),
"Serial port to probe for Icarus FPGA Mining device"),
#endif
OPT_WITH_ARG("--scan-time|-s",
set_int_0_to_9999, opt_show_intval, &opt_scantime,
@ -1359,6 +1384,9 @@ static char *opt_verusage_and_exit(const char *extra) @@ -1359,6 +1384,9 @@ static char *opt_verusage_and_exit(const char *extra)
#ifdef USE_ICARUS
"icarus "
#endif
#ifdef USE_AVALON
"avalon "
#endif
#ifdef USE_MODMINER
"modminer "
#endif
@ -1470,9 +1498,11 @@ static struct work *make_work(void) @@ -1470,9 +1498,11 @@ static struct work *make_work(void)
if (unlikely(!work))
quit(1, "Failed to calloc work in make_work");
cg_wlock(&control_lock);
work->id = total_work++;
cg_wunlock(&control_lock);
return work;
}
@ -1865,6 +1895,7 @@ static int total_staged(void) @@ -1865,6 +1895,7 @@ static int total_staged(void)
mutex_lock(stgd_lock);
ret = __total_staged();
mutex_unlock(stgd_lock);
return ret;
}
@ -2040,10 +2071,8 @@ static void curses_print_status(void) @@ -2040,10 +2071,8 @@ static void curses_print_status(void)
pool->has_gbt ? "GBT" : "LP", pool->rpc_user);
}
wclrtoeol(statuswin);
cg_rlock(&ch_lock);
mvwprintw(statuswin, 5, 0, " Block: %s... Diff:%s Started: %s Best share: %s ",
current_hash, block_diff, blocktime, best_share);
cg_runlock(&ch_lock);
mvwhline(statuswin, 6, 0, '-', 80);
mvwhline(statuswin, statusy - 1, 0, '-', 80);
mvwprintw(statuswin, devcursor - 1, 1, "[P]ool management %s[S]ettings [D]isplay options [Q]uit",
@ -2916,7 +2945,6 @@ static void recruit_curl(struct pool *pool) @@ -2916,7 +2945,6 @@ static void recruit_curl(struct pool *pool)
list_add(&ce->node, &pool->curlring);
pool->curls++;
applog(LOG_DEBUG, "Recruited curl %d for pool %d", pool->curls, pool->pool_no);
}
/* Grab an available curl if there is one. If not, then recruit extra curls
@ -2927,23 +2955,29 @@ static void recruit_curl(struct pool *pool) @@ -2927,23 +2955,29 @@ static void recruit_curl(struct pool *pool)
static struct curl_ent *pop_curl_entry(struct pool *pool)
{
int curl_limit = opt_delaynet ? 5 : (mining_threads + opt_queue) * 2;
bool recruited = false;
struct curl_ent *ce;
mutex_lock(&pool->pool_lock);
retry:
if (!pool->curls)
if (!pool->curls) {
recruit_curl(pool);
else if (list_empty(&pool->curlring)) {
recruited = true;
} else if (list_empty(&pool->curlring)) {
if (pool->curls >= curl_limit) {
pthread_cond_wait(&pool->cr_cond, &pool->pool_lock);
goto retry;
} else
} else {
recruit_curl(pool);
recruited = true;
}
}
ce = list_entry(pool->curlring.next, struct curl_ent, node);
list_del(&ce->node);
mutex_unlock(&pool->pool_lock);
if (recruited)
applog(LOG_DEBUG, "Recruited curl for pool %d", pool->pool_no);
return ce;
}
@ -3073,7 +3107,6 @@ static bool clone_available(void) @@ -3073,7 +3107,6 @@ static bool clone_available(void)
roll_work(work);
work_clone = make_clone(work);
roll_work(work);
applog(LOG_DEBUG, "Pushing cloned available work to stage thread");
cloned = true;
break;
}
@ -3082,8 +3115,10 @@ static bool clone_available(void) @@ -3082,8 +3115,10 @@ static bool clone_available(void)
out_unlock:
mutex_unlock(stgd_lock);
if (cloned)
if (cloned) {
applog(LOG_DEBUG, "Pushing cloned available work to stage thread");
stage_work(work_clone);
}
return cloned;
}
@ -3133,10 +3168,12 @@ static bool stale_work(struct work *work, bool share) @@ -3133,10 +3168,12 @@ static bool stale_work(struct work *work, bool share)
}
same_job = true;
cg_rlock(&pool->data_lock);
if (strcmp(work->job_id, pool->swork.job_id))
same_job = false;
cg_runlock(&pool->data_lock);
if (!same_job) {
applog(LOG_DEBUG, "Work stale due to stratum job_id mismatch");
return true;
@ -3180,6 +3217,7 @@ static uint64_t share_diff(const struct work *work) @@ -3180,6 +3217,7 @@ static uint64_t share_diff(const struct work *work)
if (unlikely(!d64))
d64 = 1;
ret = diffone / d64;
cg_wlock(&control_lock);
if (ret > best_diff) {
best_diff = ret;
@ -3188,6 +3226,7 @@ static uint64_t share_diff(const struct work *work) @@ -3188,6 +3226,7 @@ static uint64_t share_diff(const struct work *work)
if (ret > work->pool->best_diff)
work->pool->best_diff = ret;
cg_wunlock(&control_lock);
return ret;
}
@ -3252,6 +3291,7 @@ static void *submit_work_thread(void *userdata) @@ -3252,6 +3291,7 @@ static void *submit_work_thread(void *userdata)
total_diff_stale += work->work_difficulty;
pool->diff_stale += work->work_difficulty;
mutex_unlock(&stats_lock);
goto out;
}
work->stale = true;
@ -3291,10 +3331,12 @@ static void *submit_work_thread(void *userdata) @@ -3291,10 +3331,12 @@ static void *submit_work_thread(void *userdata)
if (likely(stratum_send(pool, s, strlen(s)))) {
if (pool_tclear(pool, &pool->submit_fail))
applog(LOG_WARNING, "Pool %d communication resumed, submitting work", pool->pool_no);
mutex_lock(&sshare_lock);
HASH_ADD_INT(stratum_shares, id, sshare);
pool->sshares++;
mutex_unlock(&sshare_lock);
applog(LOG_DEBUG, "Successfully submitted, adding to stratum_shares db");
submitted = true;
break;
@ -3340,6 +3382,7 @@ static void *submit_work_thread(void *userdata) @@ -3340,6 +3382,7 @@ static void *submit_work_thread(void *userdata)
total_diff_stale += work->work_difficulty;
pool->diff_stale += work->work_difficulty;
mutex_unlock(&stats_lock);
break;
}
@ -3567,8 +3610,9 @@ static void set_curblock(char *hexstr, unsigned char *hash) @@ -3567,8 +3610,9 @@ static void set_curblock(char *hexstr, unsigned char *hash)
free(current_fullhash);
current_fullhash = bin2hex(block_hash_swap, 32);
get_timestamp(blocktime, &block_timeval);
applog(LOG_INFO, "New block: %s... diff %s", current_hash, block_diff);
cg_wunlock(&ch_lock);
applog(LOG_INFO, "New block: %s... diff %s", current_hash, block_diff);
}
/* Search to see if this string is from a block that has been seen before */
@ -3579,6 +3623,7 @@ static bool block_exists(char *hexstr) @@ -3579,6 +3623,7 @@ static bool block_exists(char *hexstr)
rd_lock(&blk_lock);
HASH_FIND_STR(blocks, hexstr, s);
rd_unlock(&blk_lock);
if (s)
return true;
return false;
@ -3667,6 +3712,7 @@ static bool test_work_current(struct work *work) @@ -3667,6 +3712,7 @@ static bool test_work_current(struct work *work)
quit (1, "test_work_current OOM");
strcpy(s->hash, hexstr);
s->block_no = new_blocks++;
wr_lock(&blk_lock);
/* Only keep the last hour's worth of blocks in memory since
* work from blocks before this is virtually impossible and we
@ -3683,6 +3729,7 @@ static bool test_work_current(struct work *work) @@ -3683,6 +3729,7 @@ static bool test_work_current(struct work *work)
HASH_ADD_STR(blocks, hash, s);
set_blockdiff(work);
wr_unlock(&blk_lock);
if (deleted_block)
applog(LOG_DEBUG, "Deleted block %d from database", deleted_block);
set_curblock(hexstr, work->data);
@ -4675,6 +4722,7 @@ static void hashmeter(int thr_id, struct timeval *diff, @@ -4675,6 +4722,7 @@ static void hashmeter(int thr_id, struct timeval *diff,
local_mhashes_done = 0;
out_unlock:
mutex_unlock(&hash_lock);
if (showlog) {
if (!curses_active) {
printf("%s \r", statusline);
@ -4737,6 +4785,7 @@ static bool parse_stratum_response(struct pool *pool, char *s) @@ -4737,6 +4785,7 @@ static bool parse_stratum_response(struct pool *pool, char *s)
}
id = json_integer_value(id_val);
mutex_lock(&sshare_lock);
HASH_FIND_INT(stratum_shares, &id, sshare);
if (sshare) {
@ -4744,6 +4793,7 @@ static bool parse_stratum_response(struct pool *pool, char *s) @@ -4744,6 +4793,7 @@ static bool parse_stratum_response(struct pool *pool, char *s)
pool->sshares--;
}
mutex_unlock(&sshare_lock);
if (!sshare) {
if (json_is_true(res_val))
applog(LOG_NOTICE, "Accepted untracked stratum share from pool %d", pool->pool_no);
@ -4814,6 +4864,7 @@ static int cp_prio(void) @@ -4814,6 +4864,7 @@ static int cp_prio(void)
cg_rlock(&control_lock);
prio = currentpool->prio;
cg_runlock(&control_lock);
return prio;
}
@ -4875,6 +4926,7 @@ static bool supports_resume(struct pool *pool) @@ -4875,6 +4926,7 @@ static bool supports_resume(struct pool *pool)
cg_rlock(&pool->data_lock);
ret = (pool->sessionid != NULL);
cg_runlock(&pool->data_lock);
return ret;
}
@ -5707,15 +5759,20 @@ static void fill_queue(struct thr_info *mythr, struct cgpu_info *cgpu, struct de @@ -5707,15 +5759,20 @@ static void fill_queue(struct thr_info *mythr, struct cgpu_info *cgpu, struct de
{
thread_reportout(mythr);
do {
struct work *work;
bool need_work;
rd_lock(&cgpu->qlock);
need_work = (HASH_COUNT(cgpu->queued_work) == cgpu->queued_count);
rd_unlock(&cgpu->qlock);
if (need_work) {
struct work *work = get_work(mythr, thr_id);
wr_lock(&cgpu->qlock);
if (HASH_COUNT(cgpu->queued_work) == cgpu->queued_count) {
work = get_work(mythr, thr_id);
work->device_diff = MIN(drv->max_diff, work->work_difficulty);
wr_lock(&cgpu->qlock);
HASH_ADD_INT(cgpu->queued_work, id, work);
}
wr_unlock(&cgpu->qlock);
}
/* The queue_full function should be used by the driver to
* actually place work items on the physical device if it
* does have a queue. */
@ -5789,6 +5846,7 @@ void work_completed(struct cgpu_info *cgpu, struct work *work) @@ -5789,6 +5846,7 @@ void work_completed(struct cgpu_info *cgpu, struct work *work)
cgpu->queued_count--;
HASH_DEL(cgpu->queued_work, work);
wr_unlock(&cgpu->qlock);
free_work(work);
}
@ -6129,6 +6187,7 @@ static void reap_curl(struct pool *pool) @@ -6129,6 +6187,7 @@ static void reap_curl(struct pool *pool)
int reaped = 0;
gettimeofday(&now, NULL);
mutex_lock(&pool->pool_lock);
list_for_each_entry_safe(ent, iter, &pool->curlring, node) {
if (pool->curls < 2)
@ -6142,6 +6201,7 @@ static void reap_curl(struct pool *pool) @@ -6142,6 +6201,7 @@ static void reap_curl(struct pool *pool)
}
}
mutex_unlock(&pool->pool_lock);
if (reaped)
applog(LOG_DEBUG, "Reaped %d curl%s from pool %d", reaped, reaped > 1 ? "s" : "", pool->pool_no);
}
@ -6264,6 +6324,7 @@ static void *watchdog_thread(void __maybe_unused *userdata) @@ -6264,6 +6324,7 @@ static void *watchdog_thread(void __maybe_unused *userdata)
applog(LOG_WARNING, "Will restart execution as scheduled at %02d:%02d",
schedstart.tm.tm_hour, schedstart.tm.tm_min);
sched_paused = true;
rd_lock(&mining_thr_lock);
for (i = 0; i < mining_threads; i++)
mining_thr[i]->pause = true;
@ -6538,15 +6599,20 @@ static void *test_pool_thread(void *arg) @@ -6538,15 +6599,20 @@ static void *test_pool_thread(void *arg)
if (pool_active(pool, false)) {
pool_tset(pool, &pool->lagging);
pool_tclear(pool, &pool->idle);
bool first_pool = false;
cg_wlock(&control_lock);
if (!pools_active) {
currentpool = pool;
if (pool->pool_no != 0)
applog(LOG_NOTICE, "Switching to pool %d %s - first alive pool", pool->pool_no, pool->rpc_url);
first_pool = true;
pools_active = true;
}
cg_wunlock(&control_lock);
if (unlikely(first_pool))
applog(LOG_NOTICE, "Switching to pool %d %s - first alive pool", pool->pool_no, pool->rpc_url);
pool_resus(pool);
} else
pool_died(pool);
@ -6751,6 +6817,10 @@ extern struct device_drv bitforce_drv; @@ -6751,6 +6817,10 @@ extern struct device_drv bitforce_drv;
extern struct device_drv icarus_drv;
#endif
#ifdef USE_AVALON
extern struct device_drv avalon_drv;
#endif
#ifdef USE_MODMINER
extern struct device_drv modminer_drv;
#endif
@ -6856,9 +6926,11 @@ void fill_device_drv(struct cgpu_info *cgpu) @@ -6856,9 +6926,11 @@ void fill_device_drv(struct cgpu_info *cgpu)
void enable_device(struct cgpu_info *cgpu)
{
cgpu->deven = DEV_ENABLED;
wr_lock(&devices_lock);
devices[cgpu->cgminer_id = cgminer_id_count++] = cgpu;
wr_unlock(&devices_lock);
if (hotplug_mode) {
new_threads += cgpu->threads;
#ifdef HAVE_CURSES
@ -6901,9 +6973,11 @@ bool add_cgpu(struct cgpu_info*cgpu) @@ -6901,9 +6973,11 @@ bool add_cgpu(struct cgpu_info*cgpu)
cgpu->device_id = d->lastid = 0;
HASH_ADD_STR(devids, name, d);
}
wr_lock(&devices_lock);
devices = realloc(devices, sizeof(struct cgpu_info *) * (total_devices + new_devices + 2));
wr_unlock(&devices_lock);
if (hotplug_mode)
devices[total_devices + new_devices++] = cgpu;
else
@ -6942,6 +7016,7 @@ static void hotplug_process() @@ -6942,6 +7016,7 @@ static void hotplug_process()
wr_lock(&mining_thr_lock);
mining_thr = realloc(mining_thr, sizeof(thr) * (mining_threads + new_threads + 1));
wr_unlock(&mining_thr_lock);
if (!mining_thr)
quit(1, "Failed to hotplug realloc mining_thr");
for (i = 0; i < new_threads; i++) {
@ -7270,6 +7345,11 @@ int main(int argc, char *argv[]) @@ -7270,6 +7345,11 @@ int main(int argc, char *argv[])
icarus_drv.drv_detect();
#endif
#ifdef USE_AVALON
if (!opt_scrypt)
avalon_drv.drv_detect();
#endif
#ifdef USE_BFLSC
if (!opt_scrypt)
bflsc_drv.drv_detect();

23
configure.ac

@ -252,6 +252,17 @@ if test "x$icarus" = xyes; then @@ -252,6 +252,17 @@ if test "x$icarus" = xyes; then
fi
AM_CONDITIONAL([HAS_ICARUS], [test x$icarus = xyes])
avalon="no"
AC_ARG_ENABLE([avalon],
[AC_HELP_STRING([--enable-avalon],[Compile support for Avalon (default disabled)])],
[avalon=$enableval]
)
if test "x$avalon" = xyes; then
AC_DEFINE([USE_AVALON], [1], [Defined to 1 if Avalon support is wanted])
fi
AM_CONDITIONAL([HAS_AVALON], [test x$avalon = xyes])
modminer="no"
AC_ARG_ENABLE([modminer],
@ -298,7 +309,7 @@ else @@ -298,7 +309,7 @@ else
])
fi
AM_CONDITIONAL([NEED_FPGAUTILS], [test x$icarus$bitforce$modminer$ztex != xnononono])
AM_CONDITIONAL([NEED_FPGAUTILS], [test x$avalon$icarus$bitforce$modminer$ztex != xnonononono])
AM_CONDITIONAL([NEED_USBUTILS_C], [test x$bitforce$modminer$bflsc != xnonono])
AM_CONDITIONAL([HAVE_CURSES], [test x$curses = xyes])
AM_CONDITIONAL([WANT_JANSSON], [test x$request_jansson = xtrue])
@ -523,14 +534,14 @@ if test "x$opencl" != xno; then @@ -523,14 +534,14 @@ if test "x$opencl" != xno; then
else
echo " OpenCL...............: NOT FOUND. GPU mining support DISABLED"
if test "x$cpumining$bitforce$icarus$ztex$modminer$bflsc" = xnononononono; then
if test "x$cpumining$bitforce$avalon$icarus$ztex$modminer$bflsc" = xnonononononono; then
AC_MSG_ERROR([No mining configured in])
fi
echo " scrypt...............: Disabled (needs OpenCL)"
fi
else
echo " OpenCL...............: Detection overrided. GPU mining support DISABLED"
if test "x$cpumining$bitforce$icarus$ztex$modminer$bflsc" = xnononononono; then
if test "x$cpumining$bitforce$icarus$avalon$ztex$modminer$bflsc" = xnonononononono; then
AC_MSG_ERROR([No mining configured in])
fi
echo " scrypt...............: Disabled (needs OpenCL)"
@ -547,6 +558,12 @@ else @@ -547,6 +558,12 @@ else
fi
echo
if test "x$avalon" = xyes; then
echo " Avalon.ASICs.........: Enabled"
else
echo " Avalon.ASICs.........: Disabled"
fi
if test "x$bflsc" = xyes; then
echo " BFL.ASICs............: Enabled"
else

1038
driver-avalon.c

File diff suppressed because it is too large Load Diff

130
driver-avalon.h

@ -0,0 +1,130 @@ @@ -0,0 +1,130 @@
/*
 * Copyright 2013 Avalon project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#ifndef AVALON_H
#define AVALON_H

#ifdef USE_AVALON

/*
 * Tuning/protocol constants for the Avalon ASIC miner.
 * NOTE(review): values imported with the driver; most meanings are
 * inferred from the names — confirm against driver-avalon.c and the
 * Avalon hardware documentation.
 */
#define AVALON_RESET_FAULT_DECISECONDS 1	/* timeout argument passed to serial_open() below */
#define AVALON_MINER_THREADS 1

#define AVALON_IO_SPEED 115200			/* serial baud rate */
#define AVALON_HASH_TIME_FACTOR ((float)1.67/0x32)
#define AVALON_RESET_PITCH (300*1000*1000)

#define AVALON_FAN_FACTOR 120
#define AVALON_DEFAULT_FAN_MAX_PWM 0xA0 /* 100% */
#define AVALON_DEFAULT_FAN_MIN_PWM 0x20 /* 20% */

#define AVALON_DEFAULT_TIMEOUT 0x32
#define AVALON_DEFAULT_FREQUENCY 256
#define AVALON_DEFAULT_MINER_NUM 0x20
#define AVALON_DEFAULT_ASIC_NUM 0xA

/*
 * Request packet sent to the device.  Packed and 4-byte aligned so the
 * in-memory layout can serve directly as the wire format
 * (AVALON_WRITE_SIZE below is sizeof this struct).  Field semantics are
 * inferred from the names — verify against driver-avalon.c.
 */
struct avalon_task {
	uint8_t reset		:1;	/* control flags packed into the first byte */
	uint8_t flush_fifo	:1;
	uint8_t fan_eft		:1;
	uint8_t timer_eft	:1;
	uint8_t asic_num	:4;

	uint8_t fan_pwm_data;
	uint8_t timeout_data;
	uint8_t miner_num;

	uint8_t nonce_elf	:1;
	uint8_t gate_miner_elf	:1;
	uint8_t asic_pll	:1;
	uint8_t gate_miner	:1;
	uint8_t _pad0		:4;
	uint8_t _pad1[3];
	uint32_t _pad2;

	uint8_t midstate[32];
	uint8_t data[12];
} __attribute__((packed, aligned(4)));

/*
 * Response packet read back from the device (AVALON_READ_SIZE is sizeof
 * this struct).  Appears to echo the submitted work (data/midstate)
 * alongside a nonce plus fan, temperature and FIFO telemetry — confirm
 * in driver-avalon.c.
 */
struct avalon_result {
	uint32_t nonce;
	uint8_t data[12];
	uint8_t midstate[32];

	uint8_t fan0;
	uint8_t fan1;
	uint8_t fan2;
	uint8_t temp0;
	uint8_t temp1;
	uint8_t temp2;
	uint8_t _pad0[2];

	uint16_t fifo_wp;
	uint16_t fifo_rp;
	uint8_t chip_num;
	uint8_t pwm_data;
	uint8_t timeout;
	uint8_t miner_num;
} __attribute__((packed, aligned(4)));

/* Per-device runtime state kept by the driver. */
struct avalon_info {
	int baud;
	int miner_count;
	int asic_count;
	int timeout;

	int fan0;
	int fan1;
	int fan2;

	int temp0;
	int temp1;
	int temp2;
	int temp_max;
	/* presumably rolling-average bookkeeping for the temperature
	 * readings — confirm in driver-avalon.c */
	int temp_history_count;
	int temp_history_index;
	int temp_sum;
	int temp_old;
	int fan_pwm;

	int no_matching_work;
	int matching_work[AVALON_DEFAULT_MINER_NUM];

	int frequency;
};

#define AVALON_WRITE_SIZE (sizeof(struct avalon_task))
#define AVALON_READ_SIZE (sizeof(struct avalon_result))

#define AVALON_ARRAY_SIZE 4

/* Status codes — presumably returned by the driver's serial read and
 * task send helpers in driver-avalon.c (diff not visible here). */
#define AVA_GETS_ERROR -1
#define AVA_GETS_OK 0
#define AVA_GETS_RESTART 1
#define AVA_GETS_TIMEOUT 2

#define AVA_SEND_ERROR -1
#define AVA_SEND_OK 0
#define AVA_SEND_BUFFER_EMPTY 1
#define AVA_SEND_BUFFER_FULL 2

#define AVA_BUFFER_FULL 0
#define AVA_BUFFER_EMPTY 1

/* Serial helpers: open with the short reset-fault timeout; the device
 * signals "buffer full" on the CTS modem line (see get_serial_cts()
 * in fpgautils.c). */
#define avalon_open2(devpath, baud, purge) serial_open(devpath, baud, AVALON_RESET_FAULT_DECISECONDS, purge)
#define avalon_open(devpath, baud) avalon_open2(devpath, baud, true)
#define avalon_close(fd) close(fd)
#define avalon_buffer_full(fd) get_serial_cts(fd)

/* Approximate seconds needed to transfer one result packet at the given
 * baud rate (8 bits per byte; start/stop bits not accounted for). */
#define AVALON_READ_TIME(baud) ((double)AVALON_READ_SIZE * (double)8.0 / (double)(baud))

/* Compile-time assertion: the array is given a negative size — a
 * compile error — when the condition is false. */
#define ASSERT1(condition) __maybe_unused static char sizeof_uint32_t_must_be_4[(condition)?1:-1]
ASSERT1(sizeof(uint32_t) == 4);

/* Per-device info table — presumably defined in driver-avalon.c. */
extern struct avalon_info **avalon_info;

#endif /* USE_AVALON */
#endif /* AVALON_H */

22
fpgautils.c

@ -1,4 +1,5 @@ @@ -1,4 +1,5 @@
/*
* Copyright 2013 Con Kolivas <kernel@kolivas.org>
* Copyright 2012 Luke Dashjr
* Copyright 2012 Andrew Smith
*
@ -19,6 +20,7 @@ @@ -19,6 +20,7 @@
#ifndef WIN32
#include <errno.h>
#include <termios.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
@ -32,10 +34,12 @@ @@ -32,10 +34,12 @@
#ifdef HAVE_LIBUDEV
#include <libudev.h>
#include <sys/ioctl.h>
#endif
#include "elist.h"
#include "logging.h"
#include "miner.h"
#include "fpgautils.h"
#ifdef HAVE_LIBUDEV
@ -382,6 +386,14 @@ int serial_open(const char *devpath, unsigned long baud, signed short timeout, b @@ -382,6 +386,14 @@ int serial_open(const char *devpath, unsigned long baud, signed short timeout, b
switch (baud) {
case 0:
break;
case 19200:
cfsetispeed(&my_termios, B19200);
cfsetospeed(&my_termios, B19200);
break;
case 38400:
cfsetispeed(&my_termios, B38400);
cfsetospeed(&my_termios, B38400);
break;
case 57600:
cfsetispeed(&my_termios, B57600);
cfsetospeed(&my_termios, B57600);
@ -570,4 +582,14 @@ size_t _select_write(int fd, char *buf, size_t siz, struct timeval *timeout) @@ -570,4 +582,14 @@ size_t _select_write(int fd, char *buf, size_t siz, struct timeval *timeout)
return wrote;
}
/* Read the state of the CTS (clear-to-send) modem-control line of a
 * serial port.
 *
 * fd: descriptor of an open serial device; 0 is treated as "not open"
 *     (matching the original !fd check's convention).
 *
 * Returns 1 when CTS is asserted, 0 when it is not, and -1 on error
 * (unopened/invalid descriptor, or a failed TIOCMGET ioctl). */
int get_serial_cts(int fd)
{
	int flags = 0;

	/* Reject the zero "not open" descriptor as before, and also any
	 * negative (invalid) descriptor, which the old !fd test let
	 * through to ioctl(). */
	if (fd <= 0)
		return -1;

	/* Previously the ioctl result was ignored, so a failure left
	 * flags uninitialised and the return value indeterminate. */
	if (ioctl(fd, TIOCMGET, &flags) < 0)
		return -1;

	return (flags & TIOCM_CTS) ? 1 : 0;
}
#endif // ! WIN32

2
fpgautils.h

@ -36,6 +36,8 @@ extern ssize_t _serial_read(int fd, char *buf, size_t buflen, char *eol); @@ -36,6 +36,8 @@ extern ssize_t _serial_read(int fd, char *buf, size_t buflen, char *eol);
extern FILE *open_bitstream(const char *dname, const char *filename);
extern int get_serial_cts(int fd);
#ifndef WIN32
extern const struct timeval tv_timeout_default;
extern const struct timeval tv_inter_char_default;

77
hexdump.c

@ -0,0 +1,77 @@ @@ -0,0 +1,77 @@
/*
 * hexdump implementation without dependencies on *printf()
 * output is equal to 'hexdump -C'
 * should be compatible with 64-bit architectures
*
* Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* Log a buffer in classic "hexdump -C" layout via applog():
 * <address>  <16 hex byte pairs> |<printable ASCII>| per line.  The
 * address field is 2 * sizeof(void *) hex digits wide. */
#define hex_print(p) applog(LOG_DEBUG, "%s", p)

static const char nibble[] = {
	'0', '1', '2', '3', '4', '5', '6', '7',
	'8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };

#define BYTES_PER_LINE 0x10

void hexdump(const uint8_t *p, unsigned int len)
{
	unsigned int i, addr;
	unsigned int wordlen = sizeof(void*);
	/* The highest index written below is wordlen * 2 + 70 (the
	 * terminating NUL), so the buffer needs wordlen * 2 + 71 bytes.
	 * The previous fixed size of BYTES_PER_LINE * 5 (80 bytes)
	 * overflowed on 64-bit builds, where wordlen is 8 and indices
	 * run up to 86. */
	unsigned char v, line[sizeof(void *) * 2 + 71];

	for (addr = 0; addr < len; addr += BYTES_PER_LINE) {
		/* clear line: spaces, the two '|' separators, the NUL */
		for (i = 0; i < sizeof(line); i++) {
			if (i == wordlen * 2 + 52 ||
			    i == wordlen * 2 + 69) {
				line[i] = '|';
				continue;
			}

			if (i == wordlen * 2 + 70) {
				line[i] = '\0';
				continue;
			}

			line[i] = ' ';
		}

		/* print address */
		for (i = 0; i < wordlen * 2; i++) {
			v = addr >> ((wordlen * 2 - i - 1) * 4);
			line[i] = nibble[v & 0xf];
		}

		/* dump content */
		for (i = 0; i < BYTES_PER_LINE; i++) {
			/* i / 8 inserts the extra gap after 8 bytes */
			int pos = (wordlen * 2) + 3 + (i / 8);

			if (addr + i >= len)
				break;

			v = p[addr + i];
			line[pos + (i * 3) + 0] = nibble[v >> 4];
			line[pos + (i * 3) + 1] = nibble[v & 0xf];

			/* character printable? */
			line[(wordlen * 2) + 53 + i] =
				(v >= ' ' && v <= '~') ? v : '.';
		}

		hex_print(line);
	}
}

20
miner.h

@ -120,9 +120,11 @@ static inline int fsync (int fd) @@ -120,9 +120,11 @@ static inline int fsync (int fd)
#if (!defined(WIN32) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))) \
|| (defined(WIN32) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)))
#define bswap_16 __builtin_bswap16
#define bswap_32 __builtin_bswap32
#define bswap_64 __builtin_bswap64
#ifndef bswap_16
#define bswap_16 __builtin_bswap16
#define bswap_32 __builtin_bswap32
#define bswap_64 __builtin_bswap64
#endif
#else
#if HAVE_BYTESWAP_H
#include <byteswap.h>
@ -421,10 +423,16 @@ struct cgpu_info { @@ -421,10 +423,16 @@ struct cgpu_info {
#ifdef USE_USBUTILS
struct cg_usb_device *usbdev;
#endif
#ifdef USE_ICARUS
#if defined(USE_ICARUS) || defined(USE_AVALON)
int device_fd;
#endif
};
#ifdef USE_AVALON
struct work **works;
int work_array;
int queued;
int results;
#endif
#ifdef USE_USBUTILS
struct cg_usb_info usbinfo;
#endif
@ -787,6 +795,9 @@ extern bool opt_restart; @@ -787,6 +795,9 @@ extern bool opt_restart;
extern char *opt_icarus_options;
extern char *opt_icarus_timing;
extern bool opt_worktime;
#ifdef USE_AVALON
extern char *opt_avalon_options;
#endif
#ifdef USE_USBUTILS
extern char *opt_usb_select;
extern int opt_usbdump;
@ -795,6 +806,7 @@ extern bool opt_usb_list_all; @@ -795,6 +806,7 @@ extern bool opt_usb_list_all;
#ifdef USE_BITFORCE
extern bool opt_bfl_noncerange;
#endif
extern bool ping;
extern int swork_id;
extern pthread_rwlock_t netacc_lock;

4
usbutils.c

@ -294,6 +294,8 @@ static const char *C_REQUESTQUEJOB_S = "RequestQueJob"; @@ -294,6 +294,8 @@ static const char *C_REQUESTQUEJOB_S = "RequestQueJob";
static const char *C_REQUESTQUEJOBSTATUS_S = "RequestQueJobStatus";
static const char *C_QUEJOB_S = "QueJob";
static const char *C_QUEJOBSTATUS_S = "QueJobStatus";
static const char *C_QUEFLUSH_S = "QueFlush";
static const char *C_QUEFLUSHREPLY_S = "QueFlushReply";
#ifdef EOL
#undef EOL
@@ -759,6 +761,8 @@ static void cgusb_check_init()
usb_commands[C_REQUESTQUEJOBSTATUS] = C_REQUESTQUEJOBSTATUS_S;
usb_commands[C_QUEJOB] = C_QUEJOB_S;
usb_commands[C_QUEJOBSTATUS] = C_QUEJOBSTATUS_S;
usb_commands[C_QUEFLUSH] = C_QUEFLUSH_S;
usb_commands[C_QUEFLUSHREPLY] = C_QUEFLUSHREPLY_S;
stats_initialised = true;
}

2
usbutils.h

@@ -131,6 +131,8 @@ enum usb_cmds {
C_REQUESTQUEJOBSTATUS,
C_QUEJOB,
C_QUEJOBSTATUS,
C_QUEFLUSH,
C_QUEFLUSHREPLY,
C_MAX
};

91
util.c

@@ -710,9 +710,7 @@ void tq_free(struct thread_q *tq)
static void tq_freezethaw(struct thread_q *tq, bool frozen)
{
mutex_lock(&tq->mutex);
tq->frozen = frozen;
pthread_cond_signal(&tq->cond);
mutex_unlock(&tq->mutex);
}
@@ -740,14 +738,12 @@ bool tq_push(struct thread_q *tq, void *data)
INIT_LIST_HEAD(&ent->q_node);
mutex_lock(&tq->mutex);
if (!tq->frozen) {
list_add_tail(&ent->q_node, &tq->q);
} else {
free(ent);
rc = false;
}
pthread_cond_signal(&tq->cond);
mutex_unlock(&tq->mutex);
@@ -761,7 +757,6 @@ void *tq_pop(struct thread_q *tq, const struct timespec *abstime)
int rc;
mutex_lock(&tq->mutex);
if (!list_empty(&tq->q))
goto pop;
@@ -773,16 +768,15 @@ void *tq_pop(struct thread_q *tq, const struct timespec *abstime)
goto out;
if (list_empty(&tq->q))
goto out;
pop:
ent = list_entry(tq->q.next, struct tq_ent, q_node);
rval = ent->data;
list_del(&ent->q_node);
free(ent);
out:
mutex_unlock(&tq->mutex);
return rval;
}
@@ -898,16 +892,20 @@ bool extract_sockaddr(struct pool *pool, char *url)
return true;
}
enum send_ret {
SEND_OK,
SEND_SELECTFAIL,
SEND_SENDFAIL,
SEND_INACTIVE
};
/* Send a single command across a socket, appending \n to it. This should all
* be done under stratum lock except when first establishing the socket */
static bool __stratum_send(struct pool *pool, char *s, ssize_t len)
static enum send_ret __stratum_send(struct pool *pool, char *s, ssize_t len)
{
SOCKETTYPE sock = pool->sock;
ssize_t ssent = 0;
if (opt_protocol)
applog(LOG_DEBUG, "SEND: %s", s);
strcat(s, "\n");
len++;
@@ -918,16 +916,12 @@ static bool __stratum_send(struct pool *pool, char *s, ssize_t len)
FD_ZERO(&wd);
FD_SET(sock, &wd);
if (select(sock + 1, NULL, &wd, NULL, &timeout) < 1) {
applog(LOG_DEBUG, "Write select failed on pool %d sock", pool->pool_no);
return false;
}
if (select(sock + 1, NULL, &wd, NULL, &timeout) < 1)
return SEND_SELECTFAIL;
sent = send(pool->sock, s + ssent, len, 0);
if (sent < 0) {
if (errno != EAGAIN && errno != EWOULDBLOCK) {
applog(LOG_DEBUG, "Failed to curl_easy_send in stratum_send");
return false;
}
if (errno != EAGAIN && errno != EWOULDBLOCK)
return SEND_SENDFAIL;
sent = 0;
}
ssent += sent;
@@ -937,21 +931,37 @@ static bool __stratum_send(struct pool *pool, char *s, ssize_t len)
pool->cgminer_pool_stats.times_sent++;
pool->cgminer_pool_stats.bytes_sent += ssent;
pool->cgminer_pool_stats.net_bytes_sent += ssent;
return true;
return SEND_OK;
}
bool stratum_send(struct pool *pool, char *s, ssize_t len)
{
bool ret = false;
enum send_ret ret = SEND_INACTIVE;
if (opt_protocol)
applog(LOG_DEBUG, "SEND: %s", s);
mutex_lock(&pool->stratum_lock);
if (pool->stratum_active)
ret = __stratum_send(pool, s, len);
else
applog(LOG_DEBUG, "Stratum send failed due to no pool stratum_active");
mutex_unlock(&pool->stratum_lock);
return ret;
/* This is to avoid doing applog under stratum_lock */
switch (ret) {
default:
case SEND_OK:
break;
case SEND_SELECTFAIL:
applog(LOG_DEBUG, "Write select failed on pool %d sock", pool->pool_no);
break;
case SEND_SENDFAIL:
applog(LOG_DEBUG, "Failed to curl_easy_send in stratum_send");
break;
case SEND_INACTIVE:
applog(LOG_DEBUG, "Stratum send failed due to no pool stratum_active");
break;
}
return (ret == SEND_OK);
}
static bool socket_full(struct pool *pool, bool wait)
@@ -1011,7 +1021,8 @@ static void recalloc_sock(struct pool *pool, size_t len)
if (new < pool->sockbuf_size)
return;
new = new + (RBUFSIZE - (new % RBUFSIZE));
applog(LOG_DEBUG, "Recallocing pool sockbuf to %d", new);
// Avoid potentially recursive locking
// applog(LOG_DEBUG, "Recallocing pool sockbuf to %d", new);
pool->sockbuf = realloc(pool->sockbuf, new);
if (!pool->sockbuf)
quit(1, "Failed to realloc pool sockbuf in recalloc_sock");
@@ -1019,6 +1030,12 @@ static void recalloc_sock(struct pool *pool, size_t len)
pool->sockbuf_size = new;
}
enum recv_ret {
RECV_OK,
RECV_CLOSED,
RECV_RECVFAIL
};
/* Peeks at a socket to find the first end of line and then reads just that
* from the socket and returns that as a malloced char */
char *recv_line(struct pool *pool)
@@ -1027,6 +1044,7 @@ char *recv_line(struct pool *pool)
char *tok, *sret = NULL;
if (!strstr(pool->sockbuf, "\n")) {
enum recv_ret ret = RECV_OK;
struct timeval rstart, now;
gettimeofday(&rstart, NULL);
@@ -1044,11 +1062,11 @@ char *recv_line(struct pool *pool)
memset(s, 0, RBUFSIZE);
n = recv(pool->sock, s, RECVSIZE, 0);
if (!n) {
applog(LOG_DEBUG, "Socket closed waiting in recv_line");
ret = RECV_CLOSED;
break;
}
if (n < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
applog(LOG_DEBUG, "Failed to recv sock in recv_line");
ret = RECV_RECVFAIL;
break;
}
slen = strlen(s);
@@ -1057,6 +1075,18 @@ char *recv_line(struct pool *pool)
gettimeofday(&now, NULL);
} while (tdiff(&now, &rstart) < 60 && !strstr(pool->sockbuf, "\n"));
mutex_unlock(&pool->stratum_lock);
switch (ret) {
default:
case RECV_OK:
break;
case RECV_CLOSED:
applog(LOG_DEBUG, "Socket closed waiting in recv_line");
break;
case RECV_RECVFAIL:
applog(LOG_DEBUG, "Failed to recv sock in recv_line");
break;
}
}
buflen = strlen(pool->sockbuf);
@@ -1441,6 +1471,7 @@ static bool setup_stratum_curl(struct pool *pool)
if (unlikely(!pool->stratum_curl))
quit(1, "Failed to curl_easy_init in initiate_stratum");
mutex_unlock(&pool->stratum_lock);
curl = pool->stratum_curl;
if (!pool->sockbuf) {
@@ -1470,6 +1501,8 @@ static bool setup_stratum_curl(struct pool *pool)
curl_easy_setopt(curl, CURLOPT_CONNECT_ONLY, 1);
if (curl_easy_perform(curl)) {
applog(LOG_INFO, "Stratum connect failed to pool %d: %s", pool->pool_no, curl_err_str);
curl_easy_cleanup(curl);
pool->stratum_curl = NULL;
return false;
}
curl_easy_getinfo(curl, CURLINFO_LASTSOCKET, (long *)&pool->sock);
@@ -1517,6 +1550,7 @@ void suspend_stratum(struct pool *pool)
{
clear_sockbuf(pool);
applog(LOG_INFO, "Closing socket for stratum pool %d", pool->pool_no);
mutex_lock(&pool->stratum_lock);
pool->stratum_active = pool->stratum_notify = false;
if (pool->stratum_curl) {
@@ -1561,7 +1595,7 @@ resend:
sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\"]}", swork_id++);
}
if (!__stratum_send(pool, s, strlen(s))) {
if (__stratum_send(pool, s, strlen(s)) != SEND_OK) {
applog(LOG_DEBUG, "Failed to send s in initiate_stratum");
goto out;
}
@@ -1654,6 +1688,7 @@ out:
free(pool->nonce1);
pool->sessionid = pool->nonce1 = NULL;
cg_wunlock(&pool->data_lock);
applog(LOG_DEBUG, "Failed to resume stratum, trying afresh");
noresume = true;
goto resend;

Loading…
Cancel
Save