mirror of https://github.com/GOSTSec/ccminer, synced 2025-01-08 22:07:56 +00:00
9eead77027
This will later allow more GPU candidates. Note: this is unfinished work; we keep the previous behavior for now. To finish it, all algo solutions should be migrated and the attributes of submitted nonces stored. This is required to handle a different share diff per nonce and to fix the possible solved-count error (when only 1 of 2 nonces is solved).
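A minimal sketch of what "storing submitted nonce attributes" could look like, assuming a hypothetical per-candidate record kept alongside struct work (the struct and field names below are illustrative only, not part of the ccminer code):

#include <stdint.h>
#include <stdbool.h>

#define MAX_NONCES 2  /* the current scan code reports at most two nonces per call */

/* Hypothetical per-candidate record; not an existing ccminer structure. */
struct nonce_candidate {
	uint32_t nonce;      /* nonce value returned by the GPU search         */
	double   sharediff;  /* share difficulty computed for this nonce       */
	bool     solved;     /* whether this nonce also meets the block target */
};

/* One entry per submitted nonce, so share diff and solved status can be
 * tracked per candidate instead of once per scanhash call. */
static struct nonce_candidate candidates[MAX_NONCES];

With something along these lines, the "1 of 2 nonces solved" case could be counted from the per-candidate solved flags rather than from a single return value.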
154 lines
4.1 KiB
Plaintext
/*
 * deepcoin algorithm
 *
 */

extern "C" {
#include "sph/sph_luffa.h"
#include "sph/sph_cubehash.h"
#include "sph/sph_shavite.h"
#include "sph/sph_simd.h"
#include "sph/sph_echo.h"
}

#include "miner.h"

#include "cuda_helper.h"
#include "x11/cuda_x11.h"

// per-GPU buffer holding the 64-byte intermediate hashes
static uint32_t *d_hash[MAX_GPUS];

// the 80-byte first round reuses the qubit luffa512 kernels
extern void qubit_luffa512_cpu_init(int thr_id, uint32_t threads);
extern void qubit_luffa512_cpu_setBlock_80(void *pdata);
extern void qubit_luffa512_cpu_hash_80(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_hash, int order);

// deepcoin hash on CPU: luffa512 (80-byte input) -> cubehash512 -> echo512, truncated to 256 bits
extern "C" void deephash(void *state, const void *input)
{
	uint8_t _ALIGN(64) hash[64];

	// luffa-80 cubehash-64 echo-64
	sph_luffa512_context ctx_luffa;
	sph_cubehash512_context ctx_cubehash;
	sph_echo512_context ctx_echo;

	sph_luffa512_init(&ctx_luffa);
	sph_luffa512 (&ctx_luffa, input, 80);
	sph_luffa512_close(&ctx_luffa, (void*) hash);

	sph_cubehash512_init(&ctx_cubehash);
	sph_cubehash512 (&ctx_cubehash, (const void*) hash, 64);
	sph_cubehash512_close(&ctx_cubehash, (void*) hash);

	sph_echo512_init(&ctx_echo);
	sph_echo512 (&ctx_echo, (const void*) hash, 64);
	sph_echo512_close(&ctx_echo, (void*) hash);

	memcpy(state, hash, 32);
}

static bool init[MAX_GPUS] = { 0 };

extern "C" int scanhash_deep(int thr_id, struct work* work, uint32_t max_nonce, unsigned long *hashes_done)
{
	uint32_t _ALIGN(64) endiandata[20];
	uint32_t *pdata = work->data;
	uint32_t *ptarget = work->target;
	const uint32_t first_nonce = pdata[19];
	uint32_t throughput = cuda_default_throughput(thr_id, 1U << 19); // 256*256*8
	if (init[thr_id]) throughput = min(throughput, (max_nonce - first_nonce));

	if (opt_benchmark)
		((uint32_t*)ptarget)[7] = 0x0000f; // fixed target in benchmark mode

	if (!init[thr_id])
	{
		cudaSetDevice(device_map[thr_id]);
		if (opt_cudaschedule == -1 && gpu_threads == 1) {
			cudaDeviceReset();
			// reduce cpu usage
			cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
			CUDA_LOG_ERROR();
		}
		gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);

		CUDA_SAFE_CALL(cudaMalloc(&d_hash[thr_id], (size_t) 64 * throughput));

		qubit_luffa512_cpu_init(thr_id, throughput);
		x11_cubehash512_cpu_init(thr_id, throughput);
		x11_echo512_cpu_init(thr_id, throughput);

		cuda_check_cpu_init(thr_id, throughput);

		init[thr_id] = true;
	}

	// encode the block header (without the nonce word) in big endian
	for (int k=0; k < 19; k++)
		be32enc(&endiandata[k], pdata[k]);

	qubit_luffa512_cpu_setBlock_80((void*)endiandata);
	cuda_check_cpu_setTarget(ptarget);

	do {
		int order = 0;

		// GPU chain: luffa on the 80-byte header, then cubehash and echo on the 64-byte hashes
		qubit_luffa512_cpu_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id], order++);
		x11_cubehash512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++);
		x11_echo512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++);

		*hashes_done = pdata[19] - first_nonce + throughput;

		uint32_t foundNonce = cuda_check_hash(thr_id, throughput, pdata[19], d_hash[thr_id]);
		if (foundNonce != UINT32_MAX)
		{
			uint32_t _ALIGN(64) vhash64[8];
			be32enc(&endiandata[19], foundNonce);
			deephash(vhash64, endiandata);

			if (vhash64[7] <= ptarget[7] && fulltest(vhash64, ptarget)) {
				int res = 1;
				// look for a second nonce in the same scan range
				uint32_t secNonce = cuda_check_hash_suppl(thr_id, throughput, pdata[19], d_hash[thr_id], 1);
				work_set_target_ratio(work, vhash64);
				if (secNonce != 0) {
					be32enc(&endiandata[19], secNonce);
					deephash(vhash64, endiandata);
					if (bn_hash_target_ratio(vhash64, ptarget) > work->shareratio[0])
						work_set_target_ratio(work, vhash64);
					pdata[21] = secNonce;
					res++;
				}
				pdata[19] = foundNonce;
				return res;
			}
			else {
				gpulog(LOG_WARNING, thr_id, "result for %08x does not validate on CPU!", foundNonce);
			}
		}

		if ((uint64_t)throughput + pdata[19] >= max_nonce) {
			pdata[19] = max_nonce;
			break;
		}

		pdata[19] += throughput;

	} while (!work_restart[thr_id].restart);

	*hashes_done = pdata[19] - first_nonce + 1;
	return 0;
}

// cleanup
extern "C" void free_deep(int thr_id)
{
	if (!init[thr_id])
		return;

	cudaThreadSynchronize();

	cudaFree(d_hash[thr_id]);

	cuda_check_cpu_free(thr_id);
	init[thr_id] = false;

	cudaDeviceSynchronize();
}