
Rework the avalon reset sequence to include idling of the chips and waiting for them to go idle, followed by a 2nd reset and then checking the result.

nfactor-troky
Con Kolivas, 12 years ago
parent commit 930317e123

driver-avalon.c (226 lines changed)
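In outline, the reworked avalon_reset() in the diff below writes the "ad" reset command, sleeps for AVALON_RESET_PITCH, drains the FTDI read buffer, queues one idle task per miner via the new avalon_idle() helper, then issues a second "ad" reset and reads back an AVALON_READ_SIZE result whose first four bytes must be AA 55 AA 55 (the committed version additionally checks the remaining result bytes). A minimal sketch of that flow follows; the wrapper name avalon_reset_flow is hypothetical, error paths and logging are trimmed, and all other identifiers come from the patch itself.

	/* Sketch only: condensed from the avalon_reset() added in this commit.
	 * The wrapper name is hypothetical; error handling is trimmed. */
	static int avalon_reset_flow(struct cgpu_info *avalon, int fd)
	{
		struct avalon_result ar;
		uint8_t *buf = (uint8_t *)&ar;
		struct timespec p = { .tv_sec = 0, .tv_nsec = AVALON_RESET_PITCH };

		avalon_write(fd, "ad", 2);		/* first reset */
		nanosleep(&p, NULL);
		avalon_clear_readbuf(fd);		/* drop any stale reply */
		avalon_idle(avalon);			/* queue idle work to every miner, then wait */

		avalon_write(fd, "ad", 2);		/* second reset */
		avalon_read(fd, (char *)&ar, AVALON_READ_SIZE);
		nanosleep(&p, NULL);

		/* a live Avalon answers with an AA 55 AA 55 header */
		if (buf[0] != 0xAA || buf[1] != 0x55 || buf[2] != 0xAA || buf[3] != 0x55)
			return -1;
		return 0;
	}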

@@ -299,68 +299,139 @@ static bool avalon_decode_nonce(struct thr_info *thr, struct avalon_result *ar,
 	return true;
 }
 
-static void avalon_get_reset(int fd, struct avalon_result *ar)
-{
-	int read_amount = AVALON_READ_SIZE;
-	uint8_t result[AVALON_READ_SIZE];
-	struct timeval timeout = {1, 0};
-	ssize_t ret = 0, offset = 0;
-	fd_set rd;
-
-	memset(result, 0, AVALON_READ_SIZE);
-	memset(ar, 0, AVALON_READ_SIZE);
-	FD_ZERO(&rd);
-	FD_SET((SOCKETTYPE)fd, &rd);
-	ret = select(fd + 1, &rd, NULL, NULL, &timeout);
-	if (unlikely(ret < 0)) {
-		applog(LOG_WARNING, "Avalon: Error %d on select in avalon_get_reset", errno);
-		return;
-	}
-	if (!ret) {
-		applog(LOG_WARNING, "Avalon: Timeout on select in avalon_get_reset");
-		return;
-	}
-	do {
-		ret = read(fd, result + offset, read_amount);
-		if (unlikely(ret < 0)) {
-			applog(LOG_WARNING, "Avalon: Error %d on read in avalon_get_reset", errno);
-			return;
-		}
-		read_amount -= ret;
-		offset += ret;
-	} while (read_amount > 0);
-	if (opt_debug) {
-		applog(LOG_DEBUG, "Avalon: get:");
-		hexdump((uint8_t *)result, AVALON_READ_SIZE);
-	}
-	memcpy((uint8_t *)ar, result, AVALON_READ_SIZE);
-}
-
-static int avalon_reset(int fd, struct avalon_result *ar)
-{
-	struct avalon_task at;
+static int avalon_write(int fd, char *buf, ssize_t len)
+{
+	ssize_t wrote = 0;
+
+	while (len > 0) {
+		struct timeval timeout;
+		ssize_t ret;
+		fd_set wd;
+
+		timeout.tv_sec = 0;
+		timeout.tv_usec = 1000;
+		FD_ZERO(&wd);
+		FD_SET((SOCKETTYPE)fd, &wd);
+		ret = select(fd + 1, NULL, &wd, NULL, &timeout);
+		if (unlikely(ret < 1)) {
+			applog(LOG_WARNING, "Select error on avalon_write");
+			return AVA_SEND_ERROR;
+		}
+		ret = write(fd, buf + wrote, len);
+		if (unlikely(ret < 1)) {
+			applog(LOG_WARNING, "Write error on avalon_write");
+			return AVA_SEND_ERROR;
+		}
+		wrote += ret;
+		len -= ret;
+	}
+	return 0;
+}
+
+static int avalon_read(int fd, char *buf, ssize_t len)
+{
+	ssize_t aread = 0;
+
+	while (len > 0) {
+		struct timeval timeout;
+		ssize_t ret;
+		fd_set rd;
+
+		timeout.tv_sec = 0;
+		timeout.tv_usec = 1000;
+		FD_ZERO(&rd);
+		FD_SET((SOCKETTYPE)fd, &rd);
+		ret = select(fd + 1, &rd, NULL, NULL, &timeout);
+		if (unlikely(ret < 1)) {
+			applog(LOG_WARNING, "Select error on avalon_read");
+			return AVA_GETS_ERROR;
+		}
+		ret = read(fd, buf + aread, len);
+		if (unlikely(ret < 1)) {
+			applog(LOG_WARNING, "Read error on avalon_read");
+			return AVA_GETS_ERROR;
+		}
+		aread += ret;
+		len -= ret;
+	}
+
+	return 0;
+}
+
+/* Non blocking clearing of anything in the buffer */
+static void avalon_clear_readbuf(int fd)
+{
+	ssize_t ret;
+
+	do {
+		struct timeval timeout;
+		char buf[AVALON_FTDI_READSIZE];
+		fd_set rd;
+
+		timeout.tv_sec = timeout.tv_usec = 0;
+		FD_ZERO(&rd);
+		FD_SET((SOCKETTYPE)fd, &rd);
+		ret = select(fd + 1, &rd, NULL, NULL, &timeout);
+		if (ret > 0)
+			ret = read(fd, buf, AVALON_FTDI_READSIZE);
+	} while (ret > 0);
+}
+
+static void avalon_idle(struct cgpu_info *avalon)
+{
+	struct avalon_info *info = avalon->device_data;
+	int i, fd = avalon->device_fd;
+
+	for (i = 0; i < info->miner_count; i++) {
+		struct avalon_task at;
+		int ret;
+
+		if (unlikely(avalon_buffer_full(fd))) {
+			applog(LOG_WARNING, "Avalon buffer full in avalon_idle");
+			break;
+		}
+		avalon_init_task(&at, 0, 0, info->fan_pwm,
+				 info->timeout, info->asic_count,
+				 info->miner_count, 1, 1, info->frequency);
+		ret = avalon_write(fd, (char *)&at, AVALON_WRITE_SIZE);
+		if (unlikely(ret == AVA_SEND_ERROR))
+			break;
+	}
+	applog(LOG_ERR, "Avalon: Going to idle mode");
+	sleep(2);
+	avalon_clear_readbuf(fd);
+	applog(LOG_ERR, "Avalon: Idle");
+}
+
+static int avalon_reset(struct cgpu_info *avalon, int fd)
+{
+	struct avalon_result ar;
 	uint8_t *buf;
 	int ret, i = 0;
 	struct timespec p;
 
-	avalon_init_task(&at, 1, 0,
-			 AVALON_DEFAULT_FAN_MAX_PWM,
-			 AVALON_DEFAULT_TIMEOUT,
-			 AVALON_DEFAULT_ASIC_NUM,
-			 AVALON_DEFAULT_MINER_NUM,
-			 0, 0,
-			 AVALON_DEFAULT_FREQUENCY);
-	ret = avalon_send_task(fd, &at, NULL);
-	if (ret == AVA_SEND_ERROR)
-		return 1;
-
-	avalon_get_reset(fd, ar);
-
-	buf = (uint8_t *)ar;
-	/* Sometimes there is one extra 0 byte for some reason in the buffer,
-	 * so work around it. */
-	if (buf[0] == 0)
-		buf = (uint8_t *)(ar + 1);
+	/* Reset once, then send command to go idle */
+	ret = avalon_write(fd, "ad", 2);
+	if (unlikely(ret == AVA_SEND_ERROR))
+		return -1;
+	p.tv_sec = 0;
+	p.tv_nsec = AVALON_RESET_PITCH;
+	nanosleep(&p, NULL);
+	avalon_clear_readbuf(fd);
+	avalon_idle(avalon);
+
+	/* Reset again, then check result */
+	ret = avalon_write(fd, "ad", 2);
+	if (unlikely(ret == AVA_SEND_ERROR))
+		return -1;
+	ret = avalon_read(fd, (char *)&ar, AVALON_READ_SIZE);
+	if (unlikely(ret == AVA_GETS_ERROR))
+		return -1;
+	nanosleep(&p, NULL);
+
+	buf = (uint8_t *)&ar;
 	if (buf[0] == 0xAA && buf[1] == 0x55 &&
 	    buf[2] == 0xAA && buf[3] == 0x55) {
 		for (i = 4; i < 11; i++)
@@ -368,10 +439,6 @@ static int avalon_reset(int fd, struct avalon_result *ar)
 			break;
 	}
 
-	p.tv_sec = 0;
-	p.tv_nsec = AVALON_RESET_PITCH;
-	nanosleep(&p, NULL);
-
 	if (i != 11) {
 		applog(LOG_ERR, "Avalon: Reset failed! not an Avalon?"
 		       " (%d: %02x %02x %02x %02x)",
@@ -382,38 +449,6 @@ static int avalon_reset(int fd, struct avalon_result *ar)
 	return 0;
 }
 
-static void avalon_idle(struct cgpu_info *avalon)
-{
-	int i, ret;
-	struct avalon_task at;
-	int fd = avalon->device_fd;
-	struct avalon_info *info = avalon->device_data;
-	int avalon_get_work_count = info->miner_count;
-
-	i = 0;
-	while (true) {
-		avalon_init_task(&at, 0, 0, info->fan_pwm,
-				 info->timeout, info->asic_count,
-				 info->miner_count, 1, 1, info->frequency);
-		ret = avalon_send_task(fd, &at, avalon);
-		if (unlikely(ret == AVA_SEND_ERROR ||
-			     (ret == AVA_SEND_BUFFER_EMPTY &&
-			      (i + 1 == avalon_get_work_count * 2)))) {
-			applog(LOG_ERR, "AVA%i: Comms error", avalon->device_id);
-			return;
-		}
-		if (i + 1 == avalon_get_work_count * 2)
-			break;
-		if (ret == AVA_SEND_BUFFER_FULL)
-			break;
-		i++;
-	}
-	applog(LOG_ERR, "Avalon: Goto idle mode");
-}
-
 static void get_options(int this_option_offset, int *baud, int *miner_count,
 			int *asic_count, int *timeout, int *frequency)
 {
@@ -550,29 +585,9 @@ static void get_options(int this_option_offset, int *baud, int *miner_count,
 	}
 }
 
-/* Non blocking clearing of anything in the buffer */
-static void avalon_clear_readbuf(int fd)
-{
-	ssize_t ret;
-
-	do {
-		struct timeval timeout;
-		char buf[AVALON_FTDI_READSIZE];
-		fd_set rd;
-
-		timeout.tv_sec = timeout.tv_usec = 0;
-		FD_ZERO(&rd);
-		FD_SET((SOCKETTYPE)fd, &rd);
-		ret = select(fd + 1, &rd, NULL, NULL, &timeout);
-		if (ret > 0)
-			ret = read(fd, buf, AVALON_FTDI_READSIZE);
-	} while (ret > 0);
-}
-
 static bool avalon_detect_one(const char *devpath)
 {
 	struct avalon_info *info;
-	struct avalon_result ar;
 	int fd, ret;
 	int baud, miner_count, asic_count, timeout, frequency = 0;
 	struct cgpu_info *avalon;
@@ -600,7 +615,7 @@ static bool avalon_detect_one(const char *devpath)
 	avalon->threads = AVALON_MINER_THREADS;
 	add_cgpu(avalon);
 
-	ret = avalon_reset(fd, &ar);
+	ret = avalon_reset(avalon, fd);
 	if (ret) {
 		; /* FIXME: I think IT IS avalon and wait on reset;
 		   * avalon_close(fd);
@@ -640,8 +655,6 @@ static bool avalon_detect_one(const char *devpath)
 	info->temp_old = 0;
 	info->frequency = frequency;
 
-	/* Set asic to idle mode after detect */
-	avalon_idle(avalon);
 	avalon->device_fd = -1;
 
 	avalon_close(fd);
@@ -661,7 +674,6 @@ static void __avalon_init(struct cgpu_info *avalon)
 static void avalon_init(struct cgpu_info *avalon)
 {
 	struct avalon_info *info = avalon->device_data;
-	struct avalon_result ar;
 	int fd, ret;
 
 	avalon->device_fd = -1;
@@ -672,7 +684,7 @@ static void avalon_init(struct cgpu_info *avalon)
 		return;
 	}
 
-	ret = avalon_reset(fd, &ar);
+	ret = avalon_reset(avalon, fd);
 	if (ret) {
 		avalon_close(fd);
 		return;
@@ -727,14 +739,12 @@ static void avalon_free_work(struct thr_info *thr)
 static void do_avalon_close(struct thr_info *thr)
 {
-	struct avalon_result ar;
 	struct cgpu_info *avalon = thr->cgpu;
 	struct avalon_info *info = avalon->device_data;
 
 	avalon_free_work(thr);
 	sleep(1);
-	avalon_reset(avalon->device_fd, &ar);
-	avalon_idle(avalon);
+	avalon_reset(avalon, avalon->device_fd);
 	avalon_close(avalon->device_fd);
 	avalon->device_fd = -1;
