/**
 * Blake2-S 256 CUDA implementation — GOSTcoin support for the ccminer
 * project, compatible with most nvidia cards.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <memory.h>
#include "miner.h"
#define NATIVE_LITTLE_ENDIAN
extern "C" {
#include <sph/blake2s.h>
}
/* CPU-side blake2s states, one pair per miner thread:
   s_midstate holds the hash of the first 76 header bytes, s_ctx is the
   scratch copy used to finish each candidate hash (see blake2s_hash_end) */
static __thread blake2s_state ALIGN(64) s_midstate;
static __thread blake2s_state ALIGN(64) s_ctx;
/* define GPU_MIDSTATE to upload the precomputed state instead of raw header data */
//#define GPU_MIDSTATE
/* length of the constant part of the 80-byte header (nonce excluded) */
#define MIDLEN 76
/* host buffer alignment */
#define A 64
#include "cuda_helper.h"
#ifdef __INTELLISENSE__
/* keep MSVC IntelliSense happy: it does not know the CUDA __byte_perm intrinsic */
#define __byte_perm(x, y, b) x
#endif
#ifndef GPU_MIDSTATE
/* the 80-byte block header in constant memory (20 words, be32-encoded by scanhash) */
__constant__ uint32_t d_data[20];
#else
/* alternative: precomputed blake2s midstate in constant memory */
__constant__ blake2s_state ALIGN(8) d_state[1];
#endif
/* 16 adapters max */
static uint32_t *d_resNonce[MAX_GPUS];
static uint32_t *h_resNonce[MAX_GPUS];
/* threads per block */
#define TPB 512
/* max count of found nonces in one call */
#define NBN 2
/* nonces found beyond the first one in a single launch (see blake2s_host_hash) */
static uint32_t extra_results[NBN] = { UINT32_MAX };
/* Reference CPU implementation: blake2s-256 of a full 80-byte block header.
 * output receives the 32-byte digest. */
extern "C" void blake2s_hash(void *output, const void *input)
{
	blake2s_state ctx;
	uint8_t _ALIGN(A) digest[BLAKE2S_OUTBYTES];

	blake2s_init(&ctx, BLAKE2S_OUTBYTES);
	blake2s_update(&ctx, (uint8_t*) input, 80);
	blake2s_final(&ctx, digest, BLAKE2S_OUTBYTES);

	memcpy(output, digest, 32);
}
/* Finish a blake2s hash from the precomputed midstate (s_midstate): only the
 * last 4 header bytes (the nonce, input[19]) still have to be absorbed.
 * Assumes s_midstate was prepared by scanhash_blake2s for the current header. */
__host__
inline void blake2s_hash_end(uint32_t *output, const uint32_t *input)
{
s_ctx.buflen = MIDLEN;
// copy h[8] (32 bytes), t[2]+f[2] (16 bytes) and the first MIDLEN buffered
// bytes; presumably buflen lies past that span in the sph state layout, so
// the assignment above survives the copy -- verify if sph/blake2s.h changes
memcpy(&s_ctx, &s_midstate, 32 + 16 + MIDLEN);
blake2s_update(&s_ctx, (uint8_t*) &input[MIDLEN/4], 80-MIDLEN);
blake2s_final(&s_ctx, (uint8_t*) output, BLAKE2S_OUTBYTES);
}
/* Upload the work for one scan round to device constant memory:
 * either the raw 80-byte header (d_data), or, when GPU_MIDSTATE is defined,
 * the precomputed blake2s midstate (d_state). */
__host__
void blake2s_cpu_setBlock(uint32_t *penddata, blake2s_state *pstate)
{
#ifndef GPU_MIDSTATE
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_data, penddata, 80, 0, cudaMemcpyHostToDevice));
#else
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_state, pstate, sizeof(blake2s_state), 0, cudaMemcpyHostToDevice));
#endif
}
/* Direct 32-bit load; src must be 4-byte aligned. */
__device__ __forceinline__
uint32_t gpu_load32(const void *src) {
	const uint32_t *p = (const uint32_t*) src;
	return p[0];
}
/* Direct 32-bit store; dst must be 4-byte aligned. */
__device__ __forceinline__
void gpu_store32(void *dst, uint32_t dw) {
	uint32_t *p = (uint32_t*) dst;
	p[0] = dw;
}
/* Direct 64-bit store; dst must be 8-byte aligned. */
__device__ __forceinline__
void gpu_store64(void *dst, uint64_t lw) {
	uint64_t *p = (uint64_t*) dst;
	p[0] = lw;
}
/* Assemble a 48-bit value from 6 bytes, little-endian, no alignment required. */
__device__ __forceinline__
uint64_t gpu_load48(const void *src)
{
	const uint8_t *p = (const uint8_t *)src;
	uint64_t w = 0;
	#pragma unroll
	for (int i = 0; i < 6; i++)
		w |= (uint64_t) p[i] << (8 * i);
	return w;
}
/* Flag the state as hashing the last node of a tree (f[1] = all ones). */
__device__ __forceinline__
void gpu_blake2s_set_lastnode(blake2s_state *S) {
	S->f[1] = 0xFFFFFFFFU;
}
/* Clear the last-node finalization flag. */
__device__ __forceinline__
void gpu_blake2s_clear_lastnode(blake2s_state *S) {
	S->f[1] = 0;
}
/* Add inc to the 64-bit message byte counter kept as two 32-bit words. */
__device__ __forceinline__
void gpu_blake2s_increment_counter(blake2s_state *S, const uint32_t inc)
{
	const uint32_t lo = S->t[0] + inc;
	if (lo < inc)
		S->t[1]++; // carry out of the low word
	S->t[0] = lo;
}
/* Mark the final block before the closing compression (f[0], and f[1] when
 * hashing a tree's last node). */
__device__ __forceinline__
void gpu_blake2s_set_lastblock(blake2s_state *S)
{
	S->f[0] = 0xFFFFFFFFU;
	if (S->last_node)
		gpu_blake2s_set_lastnode(S);
}
/* BLAKE2s compression function: mixes one 64-byte message block into the
 * chaining value S->h. Ten rounds of the G quarter-round over the 16-word
 * working vector v, message words permuted per round by blake2s_sigma
 * (RFC 7693). */
__device__
void gpu_blake2s_compress(blake2s_state *S, const uint32_t *block)
{
uint32_t m[16];
uint32_t v[16];
const uint32_t blake2s_IV[8] = {
0x6A09E667UL, 0xBB67AE85UL, 0x3C6EF372UL, 0xA54FF53AUL,
0x510E527FUL, 0x9B05688CUL, 0x1F83D9ABUL, 0x5BE0CD19UL
};
/* per-round message word schedule */
const uint8_t blake2s_sigma[10][16] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0 },
};
#pragma unroll
for(int i = 0; i < 16; i++)
m[i] = block[i];
/* working vector: chaining value in v[0..7]; IV, counter and
 * finalization flags in v[8..15] */
#pragma unroll
for(int i = 0; i < 8; i++)
v[i] = S->h[i];
v[ 8] = blake2s_IV[0];
v[ 9] = blake2s_IV[1];
v[10] = blake2s_IV[2];
v[11] = blake2s_IV[3];
v[12] = S->t[0] ^ blake2s_IV[4];
v[13] = S->t[1] ^ blake2s_IV[5];
v[14] = S->f[0] ^ blake2s_IV[6];
v[15] = S->f[1] ^ blake2s_IV[7];
/* G mixing step; __byte_perm implements the byte-granular 16- and 8-bit rotations */
#define G(r,i,a,b,c,d) { \
a += b + m[blake2s_sigma[r][2*i+0]]; \
d = __byte_perm(d ^ a, 0, 0x1032); /* d = ROTR32(d ^ a, 16); */ \
c = c + d; \
b = ROTR32(b ^ c, 12); \
a += b + m[blake2s_sigma[r][2*i+1]]; \
d = __byte_perm(d ^ a, 0, 0x0321); /* ROTR32(d ^ a, 8); */ \
c = c + d; \
b = ROTR32(b ^ c, 7); \
}
/* one round: four column steps (i=0..3) then four diagonal steps (i=4..7) */
#define ROUND(r) { \
G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \
G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \
G(r,2,v[ 2],v[ 6],v[10],v[14]); \
G(r,3,v[ 3],v[ 7],v[11],v[15]); \
G(r,4,v[ 0],v[ 5],v[10],v[15]); \
G(r,5,v[ 1],v[ 6],v[11],v[12]); \
G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \
G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \
}
ROUND( 0 );
ROUND( 1 );
ROUND( 2 );
ROUND( 3 );
ROUND( 4 );
ROUND( 5 );
ROUND( 6 );
ROUND( 7 );
ROUND( 8 );
ROUND( 9 );
/* feed-forward: fold both halves of v back into the chaining value */
#pragma unroll
for(int i = 0; i < 8; i++)
S->h[i] = S->h[i] ^ v[i] ^ v[i + 8];
#undef G
#undef ROUND
}
/* Generic blake2s absorb: buffers input into S->buf and compresses one
 * 64-byte block at a time, always keeping at least one block buffered for
 * finalization (S->buf is a 2*BLAKE2S_BLOCKBYTES double buffer, as in the
 * reference implementation). */
__device__ __forceinline__
void gpu_blake2s_update(blake2s_state *S, const uint8_t *in, uint64_t inlen)
{
while(inlen > 0)
{
const int left = S->buflen;
size_t fill = 2 * BLAKE2S_BLOCKBYTES - left;
if(inlen > fill)
{
// more input than fits: top up the double buffer, compress the first
// block, and slide the second block down
memcpy(S->buf + left, in, fill); // Fill buffer
S->buflen += fill;
gpu_blake2s_increment_counter(S, BLAKE2S_BLOCKBYTES);
gpu_blake2s_compress(S, (uint32_t*) S->buf); // Compress
memcpy(S->buf, S->buf + BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES); // Shift buffer left
S->buflen -= BLAKE2S_BLOCKBYTES;
in += fill;
inlen -= fill;
}
else // inlen <= fill
{
memcpy(S->buf + left, in, (size_t) inlen);
S->buflen += (size_t) inlen; // Be lazy, do not compress
in += inlen;
inlen -= inlen;
}
}
}
/* Fast path for the miner: buffer the block header as ten 64-bit words.
 * Copies the full 80 bytes even though only the first 76 are "data"; the
 * final word is overwritten by gpu_blake2s_update_nonce() right after.
 * Both S->buf and in must be 8-byte aligned. */
__device__ __forceinline__
void gpu_blake2s_update76(blake2s_state *S, const void *in)
{
	uint64_t *dst = (uint64_t*) S->buf;
	const uint64_t *src = (const uint64_t*) in;
	#pragma unroll
	for (int i = 0; i < 10; i++)
		dst[i] = src[i];
}
/* Drop this thread's 4-byte nonce into header bytes 76..79 and mark the
 * full 80-byte header as buffered (finalization happens in gpu_blake2s_final). */
__device__ __forceinline__
void gpu_blake2s_update_nonce(blake2s_state *S, const uint32_t nonce)
{
	S->buflen = 80;
	gpu_store32(S->buf + 76, nonce);
}
/* Finalize and write the 8-word (32-byte) digest to out.
 * Specialized for this miner: assumes 64 < buflen <= 128 (here always 80,
 * set by update76 + update_nonce), so exactly one full block is compressed
 * first; also assumes the tail of S->buf is still zero from initialization,
 * which is why the reference padding memset below stays commented out. */
__device__ __forceinline__
void gpu_blake2s_final(blake2s_state *S, uint32_t *out)
{
//if (S->buflen > BLAKE2S_BLOCKBYTES)
{
// compress the first buffered 64-byte block
gpu_blake2s_increment_counter(S, BLAKE2S_BLOCKBYTES);
gpu_blake2s_compress(S, (uint32_t*) S->buf);
S->buflen -= BLAKE2S_BLOCKBYTES;
//memcpy(S->buf, S->buf + BLAKE2S_BLOCKBYTES, S->buflen);
}
// count the remaining bytes (80-64 = 16) and close the hash
gpu_blake2s_increment_counter(S, (uint32_t)S->buflen);
gpu_blake2s_set_lastblock(S);
//memset(&S->buf[S->buflen], 0, 2 * BLAKE2S_BLOCKBYTES - S->buflen); /* Padding */
gpu_blake2s_compress(S, (uint32_t*) (S->buf + BLAKE2S_BLOCKBYTES));
#pragma unroll
for (int i = 0; i < 8; i++)
out[i] = S->h[i];
}
/* Initialize the state from a parameter block: h = IV ^ P (first 8 words of
 * P), counters and flags cleared, and the whole double buffer zeroed --
 * gpu_blake2s_final relies on that zero padding. */
__device__ __forceinline__
void gpu_blake2s_init_param(blake2s_state *S, const blake2s_param *P)
{
	const uint32_t iv[8] = { // blake2s_IV
		0x6A09E667UL, 0xBB67AE85UL, 0x3C6EF372UL, 0xA54FF53AUL,
		0x510E527FUL, 0x9B05688CUL, 0x1F83D9ABUL, 0x5BE0CD19UL
	};
	const uint32_t *p = (const uint32_t*) P;

	/* IV XOR ParamBlock */
	#pragma unroll
	for (int i = 0; i < 8; i++)
		S->h[i] = iv[i] ^ gpu_load32(&p[i]);

	S->t[0] = 0; S->t[1] = 0;
	S->f[0] = 0; S->f[1] = 0;
	S->last_node = 0;
	S->buflen = 0;

	/* zero the full 2*BLAKE2S_BLOCKBYTES buffer */
	#pragma unroll
	for (int i = 0; i < (int)(sizeof(S->buf)/4); i++)
		gpu_store32(S->buf + (4*i), 0);
}
// Sequential (non-tree) blake2s initialization for an outlen-byte digest,
// no key: builds a minimal parameter block and derives the state from it.
__device__ __forceinline__
void gpu_blake2s_init(blake2s_state *S, const uint8_t outlen)
{
blake2s_param P[1];
// if (!outlen || outlen > BLAKE2S_OUTBYTES) return;
P->digest_length = outlen;
P->key_length = 0;
P->fanout = 1;
P->depth = 1;
P->leaf_length = 0;
// one 64-bit store zeroes node_offset (6 bytes) plus the adjacent
// node_depth and inner_length bytes -- assumes the sph blake2s_param
// field layout; verify if sph/blake2s.h ever changes
gpu_store64(P->node_offset, 0);
//P->node_depth = 0;
//P->inner_length = 0;
gpu_store64(&P->salt, 0);
gpu_store64(&P->personal, 0);
gpu_blake2s_init_param(S, P);
}
/* Copy a blake2s state as 64-bit words: h (32B) + t,f (16B) + buf (128B),
 * then the trailing buflen/last_node fields individually.
 * Both states must be 8-byte aligned. */
__device__ __forceinline__
void gpu_copystate(blake2s_state *dst, blake2s_state *src)
{
	const int qwords = (32 + 16 + 2 * BLAKE2S_BLOCKBYTES) / 8;
	uint64_t *d64 = (uint64_t*) dst;
	const uint64_t *s64 = (const uint64_t*) src;
	#pragma unroll
	for (int i = 0; i < qwords; i++)
		d64[i] = s64[i];
	dst->buflen = src->buflen;
	dst->last_node = src->last_node;
}
/* One nonce attempt per thread: rebuild (or copy) the blake2s state, absorb
 * the 80-byte header with this thread's nonce, and compare the two highest
 * hash words against the target.
 * Launch layout: 1D grid of TPB-thread blocks, rounded up to cover `threads`
 * nonces starting at startNonce. resNonce must hold NBN words preset to
 * UINT32_MAX. */
__global__
void blake2s_gpu_hash(const uint32_t threads, const uint32_t startNonce, uint32_t *resNonce, const uint2 target2)
{
	const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
	// tail guard: previously missing, so the rounded-up grid scanned nonces
	// past startNonce + threads (potentially beyond the caller's max_nonce)
	if (thread >= threads)
		return;
	const uint32_t nonce = startNonce + thread;
	blake2s_state ALIGN(8) blake2_ctx;
#ifndef GPU_MIDSTATE
	gpu_blake2s_init(&blake2_ctx, BLAKE2S_OUTBYTES);
	gpu_blake2s_update76(&blake2_ctx, (uint64_t*) d_data);
#else
	gpu_copystate(&blake2_ctx, &d_state[0]);
#endif
	gpu_blake2s_update_nonce(&blake2_ctx, nonce);
	uint32_t hash[8];
	gpu_blake2s_final(&blake2_ctx, hash);
	// target2 = (ptarget[7], ptarget[6]): the two most significant target words
	if (hash[7] <= target2.x && hash[6] <= target2.y) {
#if NBN == 2
		// racy by design: concurrent winners in one launch may overwrite each
		// other, but the CPU re-validates whatever nonce survives
		if (resNonce[0] != UINT32_MAX)
			resNonce[1] = nonce;
		else
			resNonce[0] = nonce;
#else
		resNonce[0] = nonce;
#endif
	}
}
/* Launch one scan round over `threads` nonces starting at startNonce.
 * Returns the first found nonce, or UINT32_MAX when none/on error; a second
 * nonce found in the same launch is stashed in extra_results[] for the
 * caller (scanhash_blake2s) to re-validate. */
__host__
uint32_t blake2s_host_hash(const int thr_id, const uint32_t threads, const uint32_t startNonce, const uint2 target2)
{
	uint32_t result = UINT32_MAX;
	dim3 grid((threads + TPB-1)/TPB); // ceil-div so every nonce is covered
	dim3 block(TPB);
	/* Check error on Ctrl+C or kill to prevent segfaults on exit */
	if (cudaMemset(d_resNonce[thr_id], 0xff, NBN*sizeof(uint32_t)) != cudaSuccess)
		return result;
	blake2s_gpu_hash <<<grid, block>>> (threads, startNonce, d_resNonce[thr_id], target2);
	// cudaThreadSynchronize() is deprecated; use the device-wide equivalent
	// (already used elsewhere in this file, see free_blake2s)
	cudaDeviceSynchronize();
	if (cudaSuccess == cudaMemcpy(h_resNonce[thr_id], d_resNonce[thr_id], NBN*sizeof(uint32_t), cudaMemcpyDeviceToHost)) {
		result = h_resNonce[thr_id][0];
		for (int n=0; n < (NBN-1); n++)
			extra_results[n] = h_resNonce[thr_id][n+1];
	}
	return result;
}
/* per-GPU one-time initialization flag for scanhash_blake2s() */
static bool init[MAX_GPUS] = { 0 };
/* Miner entry point: scan nonces [pdata[19], max_nonce) on GPU thr_id.
 * Returns the number of valid nonces found (0, 1 or 2); found nonces are
 * written to work->nonces[] and pdata[19], and *hashes_done is updated.
 * Returns -1 if the initial CUDA allocations fail. */
extern "C" int scanhash_blake2s(int thr_id, struct work *work, uint32_t max_nonce, unsigned long *hashes_done)
{
uint32_t _ALIGN(64) endiandata[20];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
int dev_id = device_map[thr_id];
// default scan intensity by GPU generation (2^26 or 2^22 nonces per call)
int intensity = (device_sm[dev_id] >= 500 && !is_windows()) ? 26 : 22;
if (device_sm[dev_id] < 350) intensity = 22;
uint32_t throughput = cuda_default_throughput(thr_id, 1U << intensity);
if (init[thr_id]) throughput = min(throughput, max_nonce - first_nonce);
if (opt_benchmark) {
// easy target so the benchmark regularly "finds" shares
ptarget[7] = swab32(0xff);
}
// one-time per-GPU setup: device selection and result buffers
if (!init[thr_id])
{
cudaSetDevice(dev_id);
if (opt_cudaschedule == -1 && gpu_threads == 1) {
cudaDeviceReset();
// reduce cpu usage (linux)
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
CUDA_LOG_ERROR();
}
CUDA_CALL_OR_RET_X(cudaMalloc(&d_resNonce[thr_id], NBN * sizeof(uint32_t)), -1);
CUDA_CALL_OR_RET_X(cudaMallocHost(&h_resNonce[thr_id], NBN * sizeof(uint32_t)), -1);
init[thr_id] = true;
}
// encode the 76 constant header bytes big-endian (nonce word excluded)
for (int i=0; i < 19; i++) {
be32enc(&endiandata[i], pdata[i]);
}
// midstate
memset(s_midstate.buf, 0, sizeof(s_midstate.buf));
blake2s_init(&s_midstate, BLAKE2S_OUTBYTES);
blake2s_update(&s_midstate, (uint8_t*) endiandata, MIDLEN);
memcpy(&s_ctx, &s_midstate, sizeof(blake2s_state));
blake2s_cpu_setBlock(endiandata, &s_midstate);
// the GPU only compares the two most significant target words
uint2 gpu_target = make_uint2(ptarget[7], ptarget[6]);
const uint32_t Htarg = ptarget[7];
do {
uint32_t foundNonce = blake2s_host_hash(thr_id, throughput, pdata[19], gpu_target);
if (foundNonce != UINT32_MAX)
{
uint32_t _ALIGN(A) vhashcpu[8];
//blake2s_hash(vhashcpu, endiandata);
// re-validate the GPU result on CPU against the full target
le32enc(&endiandata[19], foundNonce);
blake2s_hash_end(vhashcpu, endiandata);
if (vhashcpu[7] <= Htarg && fulltest(vhashcpu, ptarget)) {
work_set_target_ratio(work, vhashcpu);
*hashes_done = pdata[19] + throughput - first_nonce + 1;
pdata[19] = work->nonces[0] = swab32(foundNonce);
#if NBN > 1
// a second nonce may have been found in the same launch
if (extra_results[0] != UINT32_MAX) {
le32enc(&endiandata[19], extra_results[0]);
blake2s_hash_end(vhashcpu, endiandata);
if (vhashcpu[7] <= Htarg && fulltest(vhashcpu, ptarget)) {
work->nonces[1] = swab32(extra_results[0]);
// keep the best-ratio nonce in pdata[19]
if (bn_hash_target_ratio(vhashcpu, ptarget) > work->shareratio) {
work_set_target_ratio(work, vhashcpu);
xchg(work->nonces[1], pdata[19]);
}
return 2;
}
extra_results[0] = UINT32_MAX;
}
#endif
return 1;
} else {
gpulog(LOG_WARNING, thr_id, "result for %08x does not validate on CPU!", foundNonce);
applog_hex(pdata, 80);
applog_hex(ptarget, 32);
applog_hex(vhashcpu, 32);
}
}
pdata[19] += throughput;
// 64-bit compare guards against 32-bit wrap of throughput + pdata[19]
} while (!work_restart[thr_id].restart && max_nonce > (uint64_t)throughput + pdata[19]);
*hashes_done = pdata[19] - first_nonce + 1;
MyStreamSynchronize(NULL, 0, device_map[thr_id]);
return 0;
}
/* Release the per-GPU result buffers allocated by scanhash_blake2s(). */
extern "C" void free_blake2s(int thr_id)
{
	if (!init[thr_id])
		return; // nothing was allocated for this thread

	cudaDeviceSynchronize(); // let in-flight work drain before freeing
	cudaFree(d_resNonce[thr_id]);
	cudaFreeHost(h_resNonce[thr_id]);
	init[thr_id] = false;
	cudaDeviceSynchronize();
}