@@ -272,6 +272,7 @@ static int avalon_get_result(int fd, struct avalon_result *ar,
 	memset(result, 0, AVALON_READ_SIZE);
 	ret = avalon_gets(fd, result, read_count, thr, tv_finish);
 
+	memset(ar, 0, sizeof(struct avalon_result));
 	if (ret == AVA_GETS_OK) {
 		if (opt_debug) {
 			applog(LOG_DEBUG, "Avalon: get:");
@@ -283,35 +284,28 @@ static int avalon_get_result(int fd, struct avalon_result *ar,
 	return ret;
 }
 
-static int avalon_decode_nonce(struct thr_info *thr, struct work **work,
-			       struct avalon_result *ar, uint32_t *nonce)
+static bool avalon_decode_nonce(struct thr_info *thr, struct avalon_result *ar,
+				uint32_t *nonce)
 {
 	struct cgpu_info *avalon;
 	struct avalon_info *info;
 	int avalon_get_work_count, i;
-
-	if (unlikely(!work))
-		return -1;
+	struct work *work;
 
 	avalon = thr->cgpu;
-	info = avalon_info[avalon->device_id];
-	avalon_get_work_count = info->miner_count;
+	if (unlikely(!avalon->works))
+		return false;
 
-	for (i = 0; i < avalon_get_work_count; i++) {
-		if (work[i] &&
-		    !memcmp(ar->data, work[i]->data + 64, 12) &&
-		    !memcmp(ar->midstate, work[i]->midstate, 32))
-			break;
-	}
-	if (i == avalon_get_work_count)
-		return -1;
+	work = find_queued_work_bymidstate(avalon, ar->midstate, 32, ar->data, 64, 12);
+	if (!work)
+		return false;
 
+	info = avalon_info[avalon->device_id];
 	info->matching_work[i]++;
 	*nonce = htole32(ar->nonce);
+	submit_nonce(thr, work, *nonce);
 
-	applog(LOG_DEBUG, "Avalon: match to work[%d](%p): %d",i, work[i],
-	       info->matching_work[i]);
-	return i;
+	return true;
 }
 
 static int avalon_reset(int fd, struct avalon_result *ar)
@@ -661,16 +655,18 @@ static bool avalon_prepare(struct thr_info *thr)
 	return true;
 }
 
-static void avalon_free_work(struct thr_info *thr, struct work **works)
+static void avalon_free_work(struct thr_info *thr)
 {
 	struct cgpu_info *avalon;
 	struct avalon_info *info;
+	struct work **works;
 	int i;
 
-	if (unlikely(!works))
-		return;
-
 	avalon = thr->cgpu;
+	avalon->queued = 0;
+	if (unlikely(!avalon->works))
+		return;
+	works = avalon->works;
 	info = avalon_info[avalon->device_id];
 
 	for (i = 0; i < info->miner_count; i++) {
@@ -687,6 +683,7 @@ static void do_avalon_close(struct thr_info *thr)
 	struct cgpu_info *avalon = thr->cgpu;
 	struct avalon_info *info = avalon_info[avalon->device_id];
 
+	avalon_free_work(thr);
 	sleep(1);
 	avalon_reset(avalon->device_fd, &ar);
 	avalon_idle(avalon);
@@ -694,10 +691,6 @@ static void do_avalon_close(struct thr_info *thr)
 	avalon->device_fd = -1;
 
 	info->no_matching_work = 0;
-	avalon_free_work(thr, info->bulk0);
-	avalon_free_work(thr, info->bulk1);
-	avalon_free_work(thr, info->bulk2);
-	avalon_free_work(thr, info->bulk3);
 }
 
 static inline void record_temp_fan(struct avalon_info *info, struct avalon_result *ar, float *temp_avg)
|
@ -781,12 +774,11 @@ static int64_t avalon_scanhash(struct thr_info *thr) |
|
|
|
struct cgpu_info *avalon; |
|
|
|
struct cgpu_info *avalon; |
|
|
|
struct work **works; |
|
|
|
struct work **works; |
|
|
|
int fd, ret, full; |
|
|
|
int fd, ret, full; |
|
|
|
int64_t scanret = 0; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
struct avalon_info *info; |
|
|
|
struct avalon_info *info; |
|
|
|
struct avalon_task at; |
|
|
|
struct avalon_task at; |
|
|
|
struct avalon_result ar; |
|
|
|
struct avalon_result ar; |
|
|
|
int i, work_i0, work_i1, work_i2, work_i3; |
|
|
|
int i; |
|
|
|
int avalon_get_work_count; |
|
|
|
int avalon_get_work_count; |
|
|
|
|
|
|
|
|
|
|
|
struct timeval tv_start, tv_finish, elapsed; |
|
|
|
struct timeval tv_start, tv_finish, elapsed; |
|
|
@@ -806,8 +798,7 @@ static int64_t avalon_scanhash(struct thr_info *thr)
 			       avalon->device_id);
 			dev_error(avalon, REASON_DEV_COMMS_ERROR);
 			/* fail the device if the reopen attempt fails */
-			scanret = -1;
-			goto out;
+			return -1;
 		}
 	}
 	fd = avalon->device_fd;
@@ -815,15 +806,6 @@ static int64_t avalon_scanhash(struct thr_info *thr)
 	tcflush(fd, TCOFLUSH);
 #endif
 
-	for (i = 0; i < avalon_get_work_count; i++) {
-		info->bulk0[i] = info->bulk1[i];
-		info->bulk1[i] = info->bulk2[i];
-		info->bulk2[i] = info->bulk3[i];
-		info->bulk3[i] = works[i];
-		applog(LOG_DEBUG, "Avalon: bulk0/1/2 buffer [%d]: %p, %p, %p, %p",
-		       i, info->bulk0[i], info->bulk1[i], info->bulk2[i], info->bulk3[i]);
-	}
-
 	i = 0;
 	while (true) {
 		avalon_init_task(&at, 0, 0, info->fan_pwm,
@@ -835,10 +817,6 @@ static int64_t avalon_scanhash(struct thr_info *thr)
 		    (ret == AVA_SEND_BUFFER_EMPTY &&
 		     (i + 1 == avalon_get_work_count) &&
 		     first_try))) {
-			avalon_free_work(thr, info->bulk0);
-			avalon_free_work(thr, info->bulk1);
-			avalon_free_work(thr, info->bulk2);
-			avalon_free_work(thr, info->bulk3);
 			do_avalon_close(thr);
 			applog(LOG_ERR, "AVA%i: Comms error(buffer)",
 			       avalon->device_id);
@@ -846,12 +824,11 @@ static int64_t avalon_scanhash(struct thr_info *thr)
 			first_try = 0;
 			sleep(1);
 			avalon_init(avalon);
-			goto out; /* This should never happen */
+			return 0; /* This should never happen */
 		}
 		if (ret == AVA_SEND_BUFFER_EMPTY && (i + 1 == avalon_get_work_count)) {
 			first_try = 1;
-			ret = 0xffffffff;
-			goto out;
+			return 0xffffffff;
 		}
 
 		works[i]->blk.nonce = 0xffffffff;
@@ -871,8 +848,6 @@ static int64_t avalon_scanhash(struct thr_info *thr)
 	result_wrong = 0;
 	hash_count = 0;
 	while (true) {
-		work_i0 = work_i1 = work_i2 = work_i3 = -1;
-
 		full = avalon_buffer_full(fd);
 		applog(LOG_DEBUG, "Avalon: Buffer full: %s",
 		       ((full == AVA_BUFFER_FULL) ? "Yes" : "No"));
@@ -881,15 +856,11 @@ static int64_t avalon_scanhash(struct thr_info *thr)
 
 		ret = avalon_get_result(fd, &ar, thr, &tv_finish);
 		if (unlikely(ret == AVA_GETS_ERROR)) {
-			avalon_free_work(thr, info->bulk0);
-			avalon_free_work(thr, info->bulk1);
-			avalon_free_work(thr, info->bulk2);
-			avalon_free_work(thr, info->bulk3);
 			do_avalon_close(thr);
 			applog(LOG_ERR,
 			       "AVA%i: Comms error(read)", avalon->device_id);
 			dev_error(avalon, REASON_DEV_COMMS_ERROR);
-			goto out;
+			return 0;
 		}
 		if (unlikely(ret == AVA_GETS_TIMEOUT)) {
 			timersub(&tv_finish, &tv_start, &elapsed);
@@ -898,40 +869,22 @@ static int64_t avalon_scanhash(struct thr_info *thr)
 			continue;
 		}
 		if (unlikely(ret == AVA_GETS_RESTART)) {
-			avalon_free_work(thr, info->bulk0);
-			avalon_free_work(thr, info->bulk1);
-			avalon_free_work(thr, info->bulk2);
-			avalon_free_work(thr, info->bulk3);
 			break;
 		}
 		result_count++;
 
-		work_i0 = avalon_decode_nonce(thr, info->bulk0, &ar, &nonce);
-		if (work_i0 < 0) {
-			work_i1 = avalon_decode_nonce(thr, info->bulk1, &ar, &nonce);
-			if (work_i1 < 0) {
-				work_i2 = avalon_decode_nonce(thr, info->bulk2, &ar, &nonce);
-				if (work_i2 < 0) {
-					work_i3 = avalon_decode_nonce(thr, info->bulk3, &ar, &nonce);
-					if (work_i3 < 0) {
-						info->no_matching_work++;
-						result_wrong++;
-
-						if (opt_debug) {
-							timersub(&tv_finish, &tv_start, &elapsed);
-							applog(LOG_DEBUG,"Avalon: no matching work: %d"
-							       " (%ld.%06lds)", info->no_matching_work,
-							       elapsed.tv_sec, elapsed.tv_usec);
-						}
-						continue;
-					} else
-						submit_nonce(thr, info->bulk3[work_i3], nonce);
-				} else
-					submit_nonce(thr, info->bulk2[work_i2], nonce);
-			} else
-				submit_nonce(thr, info->bulk1[work_i1], nonce);
-		} else
-			submit_nonce(thr, info->bulk0[work_i0], nonce);
+		if (!avalon_decode_nonce(thr, &ar, &nonce)) {
+			info->no_matching_work++;
+			result_wrong++;
+
+			if (opt_debug) {
+				timersub(&tv_finish, &tv_start, &elapsed);
+				applog(LOG_DEBUG,"Avalon: no matching work: %d"
+				       " (%ld.%06lds)", info->no_matching_work,
+				       elapsed.tv_sec, elapsed.tv_usec);
+			}
+			continue;
+		}
 
 		hash_count += nonce;
 		if (opt_debug) {
@@ -945,10 +898,6 @@ static int64_t avalon_scanhash(struct thr_info *thr)
 	if (result_wrong && result_count == result_wrong) {
 		/* This mean FPGA controller give all wrong result
 		 * try to reset the Avalon */
-		avalon_free_work(thr, info->bulk0);
-		avalon_free_work(thr, info->bulk1);
-		avalon_free_work(thr, info->bulk2);
-		avalon_free_work(thr, info->bulk3);
 		do_avalon_close(thr);
 		applog(LOG_ERR,
 		       "AVA%i: FPGA controller mess up", avalon->device_id);
@@ -956,10 +905,10 @@ static int64_t avalon_scanhash(struct thr_info *thr)
 		do_avalon_close(thr);
 		sleep(1);
 		avalon_init(avalon);
-		goto out;
+		return 0;
 	}
 
-	avalon_free_work(thr, info->bulk0);
+	avalon_free_work(thr);
 
 	record_temp_fan(info, &ar, &(avalon->temp));
 	applog(LOG_INFO,
@@ -986,11 +935,7 @@ static int64_t avalon_scanhash(struct thr_info *thr)
 	 *
 	 * Any patch will be great.
 	 */
-	scanret = hash_count * 2;
-out:
-	avalon_free_work(thr, avalon->works);
-	avalon->queued = 0;
-	return scanret;
+	return hash_count * 2;
 }
 
 static struct api_data *avalon_api_stats(struct cgpu_info *cgpu)