
blake2s improved

Based on the work of Alexis Provos, updated for the new work->nonces handling.
Branch: master
Author: Tanguy Pruvot (8 years ago)
Parent commit: 44bd244fc4

Changed file: Algo256/blake2s.cu (795 lines)

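The heart of the change is the new result reporting: instead of returning a single nonce through a UINT32_MAX sentinel (the old NBN == 2 scheme), each kernel thread that finds a candidate atomically increments a counter in resNonce[0] and appends its nonce to the following slots; scanhash_blake2s then re-hashes every candidate on the CPU and fills work->nonces[0..1]. Before the diff, a minimal standalone sketch of that reporting pattern; the kernel name find_candidates and the multiplicative stand-in hash are illustrative only, not part of this commit.

#include <cstdint>
#include <cstdio>
#include <cuda_runtime.h>

#define MAX_RESULTS 16 // mirrors the maxResults define in the diff below

// Each thread that finds a candidate bumps the counter held in slot 0
// and stores its nonce in the next free slot of the result buffer.
__global__ void find_candidates(uint32_t startNonce, uint32_t target, uint32_t *resNonce)
{
	uint32_t nonce = startNonce + blockIdx.x * blockDim.x + threadIdx.x;
	uint32_t h = nonce * 2654435761u; // stand-in for the real BLAKE2s hash
	if (h <= target) {
		uint32_t pos = atomicInc(&resNonce[0], 0xffffffff) + 1;
		if (pos < MAX_RESULTS) // hits beyond the buffer are simply dropped
			resNonce[pos] = nonce;
	}
}

int main()
{
	uint32_t *d_res, h_res[MAX_RESULTS];
	cudaMalloc(&d_res, MAX_RESULTS * sizeof(uint32_t));
	cudaMemset(d_res, 0, MAX_RESULTS * sizeof(uint32_t)); // counter starts at 0
	find_candidates<<<1024, 256>>>(0, 0x000000FFu, d_res);
	cudaMemcpy(h_res, d_res, sizeof(h_res), cudaMemcpyDeviceToHost);
	// h_res[0] holds the candidate count, h_res[1..] the nonces to verify on CPU
	for (uint32_t i = 1; i <= h_res[0] && i < MAX_RESULTS; i++)
		printf("candidate nonce: %08x\n", h_res[i]);
	cudaFree(d_res);
	return 0;
}

This mirrors what the diff does in scanhash_blake2s: only slot 0 is copied back per iteration, and the full buffer is fetched only when the counter is non-zero.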
@@ -1,133 +1,57 @@
 /**
- * Blake2-S 256 CUDA implementation
- * @author tpruvot@github March 2016
+ * Based on the SPH implementation of blake2s
+ * Provos Alexis - 2016
  */
-#include <stdio.h>
-#include <string.h>
-#include <stdint.h>
-#include <memory.h>
 #include "miner.h"
-extern "C" {
-#define NATIVE_LITTLE_ENDIAN
-#include <sph/blake2s.h>
-}
-//#define GPU_MIDSTATE
-#define MIDLEN 76
-#define A 64
-static __thread blake2s_state ALIGN(A) s_midstate;
-static __thread blake2s_state ALIGN(A) s_ctx;
-#include "cuda_helper.h"
-#include "sph/sph_types.h"
+#include <string.h>
+#include <stdint.h>
+#include "sph/blake2s.h"
 #ifdef __INTELLISENSE__
 #define __byte_perm(x, y, b) x
 #endif
-#ifndef GPU_MIDSTATE
-__constant__ uint2 d_data[10];
-#else
-__constant__ blake2s_state ALIGN(8) d_state[1];
-#endif
-/* 16 adapters max */
-static uint32_t *d_resNonce[MAX_GPUS];
-static uint32_t *h_resNonce[MAX_GPUS];
-/* threads per block */
-#define TPB 512
-/* max count of found nonces in one call */
-#define NBN 2
-#if NBN > 1
-static uint32_t extra_results[NBN] = { UINT32_MAX };
-#endif
-extern "C" void blake2s_hash(void *output, const void *input)
-{
-	uint8_t _ALIGN(A) hash[BLAKE2S_OUTBYTES];
-	blake2s_state blake2_ctx;
-	blake2s_init(&blake2_ctx, BLAKE2S_OUTBYTES);
-	blake2s_update(&blake2_ctx, (uint8_t*) input, 80);
-	blake2s_final(&blake2_ctx, hash, BLAKE2S_OUTBYTES);
-	memcpy(output, hash, 32);
-}
-__host__
-inline void blake2s_hash_end(uint32_t *output, const uint32_t *input)
-{
-	s_ctx.buflen = MIDLEN;
-	memcpy(&s_ctx, &s_midstate, 32 + 16 + MIDLEN);
-	blake2s_update(&s_ctx, (uint8_t*) &input[MIDLEN/4], 80-MIDLEN);
-	blake2s_final(&s_ctx, (uint8_t*) output, BLAKE2S_OUTBYTES);
-}
-__host__
-void blake2s_setBlock(uint32_t *penddata, blake2s_state *pstate)
-{
-#ifndef GPU_MIDSTATE
-	CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_data, penddata, 80, 0, cudaMemcpyHostToDevice));
-#else
-	CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_state, pstate, sizeof(blake2s_state), 0, cudaMemcpyHostToDevice));
-#endif
-}
-__device__ __forceinline__
-uint64_t gpu_load64(void *src) {
-	return *(uint64_t*)(src);
-}
-__device__ __forceinline__
-void gpu_store32(void *dst, uint32_t dw) {
-	*(uint32_t*)(dst) = dw;
-}
-__device__ __forceinline__
-void gpu_store64(void *dst, uint64_t lw) {
-	*(uint64_t*)(dst) = lw;
-}
-__device__ __forceinline__
-void gpu_blake2s_set_lastnode(blake2s_state *S) {
-	S->f[1] = ~0U;
-}
-__device__ __forceinline__
-void gpu_blake2s_clear_lastnode(blake2s_state *S) {
-	S->f[1] = 0U;
-}
-__device__ __forceinline__
-void gpu_blake2s_increment_counter(blake2s_state *S, const uint32_t inc)
-{
-	S->t[0] += inc;
-	S->t[1] += ( S->t[0] < inc );
-}
-__device__ __forceinline__
-void gpu_blake2s_set_lastblock(blake2s_state *S)
-{
-	if (S->last_node) gpu_blake2s_set_lastnode(S);
-	S->f[0] = ~0U;
-}
+#include "cuda_helper.h"
+#ifdef __CUDA_ARCH__
+__device__ __forceinline__
+uint32_t ROR8(const uint32_t a) {
+	return __byte_perm(a, 0, 0x0321);
+}
+__device__ __forceinline__
+uint32_t ROL16(const uint32_t a) {
+	return __byte_perm(a, 0, 0x1032);
+}
+#else
+#define ROR8(u)  (u >> 8)
+#define ROL16(u) (u << 16)
+#endif
+__device__ __forceinline__
+uint32_t xor3x(uint32_t a, uint32_t b, uint32_t c)
+{
+	uint32_t result;
+#if __CUDA_ARCH__ >= 500 && CUDA_VERSION >= 7050
+	asm ("lop3.b32 %0, %1, %2, %3, 0x96;" : "=r"(result) : "r"(a), "r"(b), "r"(c)); // 0x96 = 0xF0 ^ 0xCC ^ 0xAA
+#else
+	result = a ^ b ^ c;
+#endif
+	return result;
+}
-__device__
-void gpu_blake2s_compress(blake2s_state *S, const uint32_t *block)
-{
-	uint32_t m[16];
-	uint32_t v[16];
-	const uint32_t blake2s_IV[8] = {
+static const uint32_t blake2s_IV[8] = {
 	0x6A09E667UL, 0xBB67AE85UL, 0x3C6EF372UL, 0xA54FF53AUL,
 	0x510E527FUL, 0x9B05688CUL, 0x1F83D9ABUL, 0x5BE0CD19UL
 };
-	const uint8_t blake2s_sigma[10][16] = {
+static const uint8_t blake2s_sigma[10][16] = {
 	{  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15 },
 	{ 14, 10,  4,  8,  9, 15, 13,  6,  1, 12,  0,  2, 11,  7,  5,  3 },
 	{ 11,  8, 12,  0,  5,  2, 15, 13, 10, 14,  3,  6,  7,  1,  9,  4 },
@@ -138,265 +62,368 @@ void gpu_blake2s_compress(blake2s_state *S, const uint32_t *block)
 	{ 13, 11,  7, 14, 12,  1,  3,  9,  5,  0, 15,  4,  8,  6,  2, 10 },
 	{  6, 15, 14,  9, 11,  3,  0,  8, 12,  2, 13,  7,  1,  4, 10,  5 },
 	{ 10,  2,  8,  4,  7,  6,  1,  5, 15, 11,  9, 14,  3, 12, 13,  0 },
 };
-	#pragma unroll
-	for(int i = 0; i < 16; i++)
-		m[i] = block[i];
-	#pragma unroll
-	for(int i = 0; i < 8; i++)
-		v[i] = S->h[i];
-	v[ 8] = blake2s_IV[0];
-	v[ 9] = blake2s_IV[1];
-	v[10] = blake2s_IV[2];
-	v[11] = blake2s_IV[3];
-	v[12] = S->t[0] ^ blake2s_IV[4];
-	v[13] = S->t[1] ^ blake2s_IV[5];
-	v[14] = S->f[0] ^ blake2s_IV[6];
-	v[15] = S->f[1] ^ blake2s_IV[7];
-#define G(r,i,a,b,c,d) { \
-	a += b + m[blake2s_sigma[r][2*i+0]]; \
-	d = __byte_perm(d ^ a, 0, 0x1032); /* d = ROTR32(d ^ a, 16); */ \
-	c = c + d; \
-	b = ROTR32(b ^ c, 12); \
-	a += b + m[blake2s_sigma[r][2*i+1]]; \
-	d = __byte_perm(d ^ a, 0, 0x0321); /* ROTR32(d ^ a, 8); */ \
-	c = c + d; \
-	b = ROTR32(b ^ c, 7); \
-}
+#define G(r,i,a,b,c,d) \
+do { \
+	a = a + b + m[blake2s_sigma[r][2*i+0]]; \
+	d = SPH_ROTR32(d ^ a, 16); \
+	c = c + d; \
+	b = SPH_ROTR32(b ^ c, 12); \
+	a = a + b + m[blake2s_sigma[r][2*i+1]]; \
+	d = SPH_ROTR32(d ^ a, 8); \
+	c = c + d; \
+	b = SPH_ROTR32(b ^ c, 7); \
+} while(0)
-#define ROUND(r) { \
-	G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \
-	G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \
-	G(r,2,v[ 2],v[ 6],v[10],v[14]); \
-	G(r,3,v[ 3],v[ 7],v[11],v[15]); \
-	G(r,4,v[ 0],v[ 5],v[10],v[15]); \
-	G(r,5,v[ 1],v[ 6],v[11],v[12]); \
-	G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \
-	G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \
-}
+#define ROUND(r) \
+do { \
+	G(r,0,v[0],v[4],v[ 8],v[12]); \
+	G(r,1,v[1],v[5],v[ 9],v[13]); \
+	G(r,2,v[2],v[6],v[10],v[14]); \
+	G(r,3,v[3],v[7],v[11],v[15]); \
+	G(r,4,v[0],v[5],v[10],v[15]); \
+	G(r,5,v[1],v[6],v[11],v[12]); \
+	G(r,6,v[2],v[7],v[ 8],v[13]); \
+	G(r,7,v[3],v[4],v[ 9],v[14]); \
+} while(0)
-	ROUND( 0 );
-	ROUND( 1 );
-	ROUND( 2 );
-	ROUND( 3 );
-	ROUND( 4 );
-	ROUND( 5 );
-	ROUND( 6 );
-	ROUND( 7 );
-	ROUND( 8 );
-	ROUND( 9 );
-	#pragma unroll
-	for(int i = 0; i < 8; i++)
-		S->h[i] = S->h[i] ^ v[i] ^ v[i + 8];
-#undef G
-#undef ROUND
-}
-#if 0
-/* unused but kept as reference */
-__device__ __forceinline__
-void gpu_blake2s_update(blake2s_state *S, const uint8_t *in, uint64_t inlen)
-{
-	while(inlen > 0)
-	{
-		const int left = S->buflen;
-		size_t fill = 2 * BLAKE2S_BLOCKBYTES - left;
-		if(inlen > fill)
-		{
-			memcpy(S->buf + left, in, fill); // Fill buffer
-			S->buflen += fill;
-			gpu_blake2s_increment_counter(S, BLAKE2S_BLOCKBYTES);
-			gpu_blake2s_compress(S, (uint32_t*) S->buf); // Compress
-			memcpy(S->buf, S->buf + BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES); // Shift buffer left
-			S->buflen -= BLAKE2S_BLOCKBYTES;
-			in += fill;
-			inlen -= fill;
-		}
-		else // inlen <= fill
-		{
-			memcpy(S->buf + left, in, (size_t) inlen);
-			S->buflen += (size_t) inlen; // Be lazy, do not compress
-			in += inlen;
-			inlen -= inlen;
-		}
-	}
-}
-#endif
-#ifndef GPU_MIDSTATE
-__device__ __forceinline__
-void gpu_blake2s_fill_data(blake2s_state *S, const uint32_t nonce)
-{
-	uint2 *b2 = (uint2*) S->buf;
-	#pragma unroll
-	for (int i=0; i < 9; i++)
-		b2[i] = d_data[i];
-	b2[9].x = d_data[9].x;
-	b2[9].y = nonce;
-	S->buflen = 80;
-}
-#endif
+extern "C" void blake2s_hash(void *output, const void *input)
+{
+	uint32_t m[16];
+	uint32_t v[16];
+	uint32_t h[8];
+	uint32_t *in = (uint32_t*)input;
+	// COMPRESS
+	for(int i = 0; i < 16; ++i)
+		m[i] = in[i];
+	h[0] = 0x01010020 ^ blake2s_IV[0];
+	h[1] = blake2s_IV[1];
+	h[2] = blake2s_IV[2];
+	h[3] = blake2s_IV[3];
+	h[4] = blake2s_IV[4];
+	h[5] = blake2s_IV[5];
+	h[6] = blake2s_IV[6];
+	h[7] = blake2s_IV[7];
+	for(int i = 0; i < 8; ++i)
+		v[i] = h[i];
+	v[ 8] = blake2s_IV[0]; v[ 9] = blake2s_IV[1];
+	v[10] = blake2s_IV[2]; v[11] = blake2s_IV[3];
+	v[12] = 64 ^ blake2s_IV[4]; v[13] = blake2s_IV[5];
+	v[14] = blake2s_IV[6]; v[15] = blake2s_IV[7];
+	ROUND( 0 ); ROUND( 1 );
+	ROUND( 2 ); ROUND( 3 );
+	ROUND( 4 ); ROUND( 5 );
+	ROUND( 6 ); ROUND( 7 );
+	ROUND( 8 ); ROUND( 9 );
+	for(size_t i = 0; i < 8; ++i)
+		h[i] ^= v[i] ^ v[i + 8];
+	// COMPRESS
+	m[0] = in[16]; m[1] = in[17];
+	m[2] = in[18]; m[3] = in[19];
+	for(size_t i = 4; i < 16; ++i)
+		m[i] = 0;
+	for(size_t i = 0; i < 8; ++i)
+		v[i] = h[i];
+	v[ 8] = blake2s_IV[0]; v[ 9] = blake2s_IV[1];
+	v[10] = blake2s_IV[2]; v[11] = blake2s_IV[3];
+	v[12] = 0x50 ^ blake2s_IV[4]; v[13] = blake2s_IV[5];
+	v[14] = ~blake2s_IV[6]; v[15] = blake2s_IV[7];
+	ROUND( 0 ); ROUND( 1 );
+	ROUND( 2 ); ROUND( 3 );
+	ROUND( 4 ); ROUND( 5 );
+	ROUND( 6 ); ROUND( 7 );
+	ROUND( 8 ); ROUND( 9 );
+	for(size_t i = 0; i < 8; ++i)
+		h[i] ^= v[i] ^ v[i + 8];
+	memcpy(output, h, 32);
+}
-__device__ __forceinline__
-void gpu_blake2s_update_nonce(blake2s_state *S, const uint32_t nonce)
-{
-	gpu_store32(&S->buf[76], nonce);
-	S->buflen = 80;
-}
-__device__ __forceinline__
-uint2 gpu_blake2s_final(blake2s_state *S)
-{
-	//if (S->buflen > BLAKE2S_BLOCKBYTES)
-	{
-		gpu_blake2s_increment_counter(S, BLAKE2S_BLOCKBYTES);
-		gpu_blake2s_compress(S, (uint32_t*) S->buf);
-		S->buflen -= BLAKE2S_BLOCKBYTES;
-		//memcpy(S->buf, S->buf + BLAKE2S_BLOCKBYTES, S->buflen);
-	}
-	gpu_blake2s_increment_counter(S, (uint32_t)S->buflen);
-	gpu_blake2s_set_lastblock(S);
-	//memset(&S->buf[S->buflen], 0, 2 * BLAKE2S_BLOCKBYTES - S->buflen); /* Padding */
-	gpu_blake2s_compress(S, (uint32_t*) (S->buf + BLAKE2S_BLOCKBYTES));
-	//#pragma unroll
-	//for (int i = 0; i < 8; i++)
-	//	out[i] = S->h[i];
-	return make_uint2(S->h[6], S->h[7]);
-}
+#define TPB 1024
+#define NPT 256
+#define maxResults 16
+#define NBN 1
+__constant__ uint32_t _ALIGN(32) midstate[20];
+static uint32_t *d_resNonce[MAX_GPUS];
+static uint32_t *h_resNonce[MAX_GPUS];
+#define GS4(a,b,c,d,e,f,a1,b1,c1,d1,e1,f1,a2,b2,c2,d2,e2,f2,a3,b3,c3,d3,e3,f3) { \
+	a += b + e;            a1 += b1 + e1;          a2 += b2 + e2;          a3 += b3 + e3; \
+	d = ROL16(d ^ a);      d1 = ROL16(d1 ^ a1);    d2 = ROL16(d2 ^ a2);    d3 = ROL16(d3 ^ a3); \
+	c += d;                c1 += d1;               c2 += d2;               c3 += d3; \
+	b = ROTR32(b ^ c, 12); b1 = ROTR32(b1^c1, 12); b2 = ROTR32(b2^c2, 12); b3 = ROTR32(b3^c3, 12); \
+	a += b + f;            a1 += b1 + f1;          a2 += b2 + f2;          a3 += b3 + f3; \
+	d = ROR8(d ^ a);       d1 = ROR8(d1^a1);       d2 = ROR8(d2^a2);       d3 = ROR8(d3^a3); \
+	c += d;                c1 += d1;               c2 += d2;               c3 += d3; \
+	b = ROTR32(b ^ c, 7);  b1 = ROTR32(b1^c1, 7);  b2 = ROTR32(b2^c2, 7);  b3 = ROTR32(b3^c3, 7); \
+}
-/* init2 xors IV with input parameter block */
-__device__ __forceinline__
-void gpu_blake2s_init_param(blake2s_state *S, const blake2s_param *P)
-{
-	//blake2s_IV
-	S->h[0] = 0x6A09E667UL;
-	S->h[1] = 0xBB67AE85UL;
-	S->h[2] = 0x3C6EF372UL;
-	S->h[3] = 0xA54FF53AUL;
-	S->h[4] = 0x510E527FUL;
-	S->h[5] = 0x9B05688CUL;
-	S->h[6] = 0x1F83D9ABUL;
-	S->h[7] = 0x5BE0CD19UL;
-	S->t[0] = 0; S->t[1] = 0;
-	S->f[0] = 0; S->f[1] = 0;
-	S->last_node = 0;
-	S->buflen = 0;
-	#pragma unroll
-	for (int i = 8; i < sizeof(S->buf)/8; i++)
-		gpu_store64(S->buf + (8*i), 0);
-	uint64_t *p = (uint64_t*) P;
-	/* IV XOR ParamBlock */
-	#pragma unroll
-	for (int i = 0; i < 4; i++)
-		S->h[i] ^= gpu_load64(&p[i]);
-}
+__global__ __launch_bounds__(TPB,1)
+void blake2s_gpu_hash_nonce(const uint32_t threads, const uint32_t startNonce, uint32_t *resNonce, const uint32_t ptarget7)
+{
+	const uint32_t step = gridDim.x * blockDim.x;
+	uint32_t m[ 3];
+	uint32_t v[16];
+	m[0] = midstate[16];
+	m[1] = midstate[17];
+	m[2] = midstate[18];
+	const uint32_t h7 = midstate[19];
+	for(uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x; thread < threads; thread += step) {
+		#pragma unroll
+		for(int i=0; i<16; i++) {
+			v[ i] = midstate[ i];
+		}
+		uint32_t nonce = cuda_swab32(startNonce + thread);
+		// Round( 0 );
+		v[ 1] += nonce;
+		v[13] = ROR8(v[13] ^ v[ 1]);
+		v[ 9] += v[13];
+		v[ 5] = ROTR32(v[ 5] ^ v[ 9], 7);
+		v[ 1] += v[ 6];
+		v[ 0] += v[ 5];
+		v[12] = ROL16(v[12] ^ v[ 1]);
+		v[13] = ROL16(v[13] ^ v[ 2]);
+		v[15] = ROL16(v[15] ^ v[ 0]);
+		v[11] += v[12]; v[ 8] += v[13]; v[ 9] += v[14]; v[10] += v[15];
+		v[ 6] = ROTR32(v[ 6] ^ v[11], 12); v[ 7] = ROTR32(v[ 7] ^ v[ 8], 12); v[ 4] = ROTR32(v[ 4] ^ v[ 9], 12); v[ 5] = ROTR32(v[ 5] ^ v[10], 12);
+		v[ 1] += v[ 6]; v[ 2] += v[ 7]; v[ 3] += v[ 4]; v[ 0] += v[ 5];
+		v[12] = ROR8(v[12] ^ v[ 1]); v[13] = ROR8(v[13] ^ v[ 2]); v[14] = ROR8(v[14] ^ v[ 3]); v[15] = ROR8(v[15] ^ v[ 0]);
+		v[11] += v[12]; v[ 8] += v[13]; v[ 9] += v[14]; v[10] += v[15];
+		v[ 6] = ROTR32(v[ 6] ^ v[11], 7); v[ 7] = ROTR32(v[ 7] ^ v[ 8], 7); v[ 4] = ROTR32(v[ 4] ^ v[ 9], 7); v[ 5] = ROTR32(v[ 5] ^ v[10], 7);
+		GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],0,0, v[ 3],v[ 7],v[11],v[15],0,0);
+		GS4(v[ 0],v[ 5],v[10],v[15],m[ 1],0, v[ 1],v[ 6],v[11],v[12],m[ 0],m[ 2], v[ 2],v[ 7],v[ 8],v[13],0,0, v[ 3],v[ 4],v[ 9],v[14],0,nonce);
+		GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],0,m[ 0], v[ 2],v[ 6],v[10],v[14],0,m[ 2], v[ 3],v[ 7],v[11],v[15],0,0);
+		GS4(v[ 0],v[ 5],v[10],v[15],0,0, v[ 1],v[ 6],v[11],v[12],nonce,0, v[ 2],v[ 7],v[ 8],v[13],0,m[ 1], v[ 3],v[ 4],v[ 9],v[14],0,0);
+		GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],nonce,m[ 1], v[ 2],v[ 6],v[10],v[14],0,0, v[ 3],v[ 7],v[11],v[15],0,0);
+		GS4(v[ 0],v[ 5],v[10],v[15],m[ 2],0, v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],0,m[ 0], v[ 3],v[ 4],v[ 9],v[14],0,0);
+		GS4(v[ 0],v[ 4],v[ 8],v[12],0,m[ 0], v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],m[ 2],0, v[ 3],v[ 7],v[11],v[15],0,0);
+		GS4(v[ 0],v[ 5],v[10],v[15],0,m[ 1], v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],0,0, v[ 3],v[ 4],v[ 9],v[14],nonce,0);
+		GS4(v[ 0],v[ 4],v[ 8],v[12],m[ 2],0, v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],m[ 0],0, v[ 3],v[ 7],v[11],v[15],0,nonce);
+		GS4(v[ 0],v[ 5],v[10],v[15],0,0, v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],0,0, v[ 3],v[ 4],v[ 9],v[14],m[ 1],0);
+		GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],m[ 1],0, v[ 2],v[ 6],v[10],v[14],0,0, v[ 3],v[ 7],v[11],v[15],0,0);
+		GS4(v[ 0],v[ 5],v[10],v[15],m[ 0],0, v[ 1],v[ 6],v[11],v[12],0,nonce, v[ 2],v[ 7],v[ 8],v[13],0,m[ 2], v[ 3],v[ 4],v[ 9],v[14],0,0);
+		GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],0,m[ 1], v[ 3],v[ 7],v[11],v[15],nonce,0);
+		GS4(v[ 0],v[ 5],v[10],v[15],0,m[ 0], v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],0,0, v[ 3],v[ 4],v[ 9],v[14],m[ 2],0);
+		GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],0,nonce, v[ 3],v[ 7],v[11],v[15],m[ 0],0);
+		GS4(v[ 0],v[ 5],v[10],v[15],0,m[ 2], v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],m[ 1],0, v[ 3],v[ 4],v[ 9],v[14],0,0);
+		GS4(v[ 0],v[ 4],v[ 8],v[12],0,m[ 2], v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],0,0, v[ 3],v[ 7],v[11],v[15],m[ 1],0);
+		// GS(9,4,v[ 0],v[ 5],v[10],v[15]);
+		v[ 0] += v[ 5];
+		v[ 2] += v[ 7] + nonce;
+		v[15] = ROL16(v[15] ^ v[ 0]);
+		v[13] = ROL16(v[13] ^ v[ 2]);
+		v[10] += v[15];
+		v[ 8] += v[13];
+		v[ 5] = ROTR32(v[ 5] ^ v[10], 12);
+		v[ 7] = ROTR32(v[ 7] ^ v[ 8], 12);
+		v[ 0] += v[ 5];
+		v[ 2] += v[ 7];
+		v[15] = ROR8(v[15] ^ v[ 0]);
+		v[13] = ROR8(v[13] ^ v[ 2]);
+		v[ 8] += v[13];
+		v[ 7] = ROTR32(v[ 7] ^ v[ 8], 7);
+		if (xor3x(h7, v[ 7], v[15]) <= ptarget7) {
+			uint32_t pos = atomicInc(&resNonce[0], 0xffffffff) + 1;
+			if(pos < maxResults)
+				resNonce[pos] = nonce;
+			return;
+		}
+	}
+}
-// Sequential blake2s initialization
-__device__ __forceinline__
-void gpu_blake2s_init(blake2s_state *S, const uint8_t outlen)
-{
-	blake2s_param P[1];
-	// if (!outlen || outlen > BLAKE2S_OUTBYTES) return;
-	P->digest_length = outlen;
-	P->key_length = 0;
-	P->fanout = 1;
-	P->depth = 1;
-	P->leaf_length = 0;
-	gpu_store64(P->node_offset, 0);
-	//P->node_depth = 0;
-	//P->inner_length = 0;
-	gpu_store64(&P->salt, 0);
-	gpu_store64(&P->personal, 0);
-	gpu_blake2s_init_param(S, P);
-}
-__device__ __forceinline__
-void gpu_copystate(blake2s_state *dst, blake2s_state *src)
-{
-	uint64_t* d64 = (uint64_t*) dst;
-	uint64_t* s64 = (uint64_t*) src;
-	#pragma unroll
-	for (int i=0; i < (32 + 16 + 2 * BLAKE2S_BLOCKBYTES)/8; i++)
-		gpu_store64(&d64[i], s64[i]);
-	dst->buflen = src->buflen;
-	dst->last_node = src->last_node;
-}
-__global__
-void blake2s_gpu_hash(const uint32_t threads, const uint32_t startNonce, uint32_t *resNonce, const uint2 target2, const int swap)
-{
-	const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
-	const uint32_t nonce = swap ? cuda_swab32(startNonce + thread) : startNonce + thread;
-	blake2s_state ALIGN(8) blake2_ctx;
-#ifndef GPU_MIDSTATE
-	gpu_blake2s_init(&blake2_ctx, BLAKE2S_OUTBYTES);
-	//gpu_blake2s_update(&blake2_ctx, (uint8_t*) d_data, 76);
-	gpu_blake2s_fill_data(&blake2_ctx, nonce);
-#else
-	gpu_copystate(&blake2_ctx, &d_state[0]);
-	gpu_blake2s_update_nonce(&blake2_ctx, nonce);
-#endif
-	uint2 h2 = gpu_blake2s_final(&blake2_ctx);
-	if (h2.y <= target2.y && h2.x <= target2.x) {
-#if NBN == 2
-		if (resNonce[0] != UINT32_MAX)
-			resNonce[1] = nonce;
-		else
-			resNonce[0] = nonce;
-#else
-		resNonce[0] = nonce;
-#endif
-	}
-}
-static __inline uint32_t swab32_if(uint32_t val, bool iftrue) {
-	return iftrue ? swab32(val) : val;
-}
+__global__ __launch_bounds__(TPB,1)
+void blake2s_gpu_hash_nonce(const uint32_t threads, const uint32_t startNonce, uint32_t *resNonce)
+{
+	const uint32_t step = gridDim.x * blockDim.x;
+	uint32_t m[ 3];
+	uint32_t v[16];
+	m[0] = midstate[16];
+	m[1] = midstate[17];
+	m[2] = midstate[18];
+	const uint32_t h7 = midstate[19];
+	for(uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x; thread < threads; thread += step)
+	{
+		#pragma unroll
+		for(int i=0; i<16; i++) {
+			v[ i] = midstate[ i];
+		}
+		uint32_t nonce = cuda_swab32(startNonce + thread);
+		// Round( 0 );
+		v[ 1] += nonce;
+		v[13] = ROR8(v[13] ^ v[ 1]);
+		v[ 9] += v[13];
+		v[ 5] = ROTR32(v[ 5] ^ v[ 9], 7);
+		v[ 1] += v[ 6];
+		v[ 0] += v[ 5];
+		v[13] = ROL16(v[13] ^ v[ 2]); v[12] = ROL16(v[12] ^ v[ 1]); v[15] = ROL16(v[15] ^ v[ 0]);
+		v[ 8] += v[13]; v[11] += v[12]; v[ 9] += v[14]; v[10] += v[15];
+		v[ 7] = ROTR32(v[ 7] ^ v[ 8], 12); v[ 6] = ROTR32(v[ 6] ^ v[11], 12); v[ 4] = ROTR32(v[ 4] ^ v[ 9], 12); v[ 5] = ROTR32(v[ 5] ^ v[10], 12);
+		v[ 2] += v[ 7]; v[ 1] += v[ 6]; v[ 3] += v[ 4]; v[ 0] += v[ 5];
+		v[13] = ROR8(v[13] ^ v[ 2]); v[12] = ROR8(v[12] ^ v[ 1]); v[14] = ROR8(v[14] ^ v[ 3]); v[15] = ROR8(v[15] ^ v[ 0]);
+		v[ 8] += v[13]; v[11] += v[12]; v[ 9] += v[14]; v[10] += v[15];
+		v[ 6] = ROTR32(v[ 6] ^ v[11], 7); v[ 7] = ROTR32(v[ 7] ^ v[ 8], 7); v[ 4] = ROTR32(v[ 4] ^ v[ 9], 7); v[ 5] = ROTR32(v[ 5] ^ v[10], 7);
+		GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],0,0, v[ 3],v[ 7],v[11],v[15],0,0);
+		GS4(v[ 0],v[ 5],v[10],v[15],m[ 1],0, v[ 1],v[ 6],v[11],v[12],m[ 0],m[ 2], v[ 2],v[ 7],v[ 8],v[13],0,0, v[ 3],v[ 4],v[ 9],v[14],0,nonce);
+		GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],0,m[ 0], v[ 2],v[ 6],v[10],v[14],0,m[ 2], v[ 3],v[ 7],v[11],v[15],0,0);
+		GS4(v[ 0],v[ 5],v[10],v[15],0,0, v[ 1],v[ 6],v[11],v[12],nonce,0, v[ 2],v[ 7],v[ 8],v[13],0,m[ 1], v[ 3],v[ 4],v[ 9],v[14],0,0);
+		GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],nonce,m[ 1], v[ 2],v[ 6],v[10],v[14],0,0, v[ 3],v[ 7],v[11],v[15],0,0);
+		GS4(v[ 0],v[ 5],v[10],v[15],m[ 2],0, v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],0,m[ 0], v[ 3],v[ 4],v[ 9],v[14],0,0);
+		GS4(v[ 0],v[ 4],v[ 8],v[12],0,m[ 0], v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],m[ 2],0, v[ 3],v[ 7],v[11],v[15],0,0);
+		GS4(v[ 0],v[ 5],v[10],v[15],0,m[ 1], v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],0,0, v[ 3],v[ 4],v[ 9],v[14],nonce,0);
+		GS4(v[ 0],v[ 4],v[ 8],v[12],m[ 2],0, v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],m[ 0],0, v[ 3],v[ 7],v[11],v[15],0,nonce);
+		GS4(v[ 0],v[ 5],v[10],v[15],0,0, v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],0,0, v[ 3],v[ 4],v[ 9],v[14],m[ 1],0);
+		GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],m[ 1],0, v[ 2],v[ 6],v[10],v[14],0,0, v[ 3],v[ 7],v[11],v[15],0,0);
+		GS4(v[ 0],v[ 5],v[10],v[15],m[ 0],0, v[ 1],v[ 6],v[11],v[12],0,nonce, v[ 2],v[ 7],v[ 8],v[13],0,m[ 2], v[ 3],v[ 4],v[ 9],v[14],0,0);
+		GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],0,m[ 1], v[ 3],v[ 7],v[11],v[15],nonce,0);
+		GS4(v[ 0],v[ 5],v[10],v[15],0,m[ 0], v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],0,0, v[ 3],v[ 4],v[ 9],v[14],m[ 2],0);
+		GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],0,nonce, v[ 3],v[ 7],v[11],v[15],m[ 0],0);
+		GS4(v[ 0],v[ 5],v[10],v[15],0,m[ 2], v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],m[ 1],0, v[ 3],v[ 4],v[ 9],v[14],0,0);
+		GS4(v[ 0],v[ 4],v[ 8],v[12],0,m[ 2], v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],0,0, v[ 3],v[ 7],v[11],v[15],m[ 1],0);
+		v[ 0] += v[ 5];
+		v[ 2] += v[ 7] + nonce;
+		v[15] = ROL16(v[15] ^ v[ 0]);
+		v[13] = ROL16(v[13] ^ v[ 2]);
+		v[10] += v[15];
+		v[ 8] += v[13];
+		v[ 5] = ROTR32(v[ 5] ^ v[10], 12);
+		v[ 7] = ROTR32(v[ 7] ^ v[ 8], 12);
+		v[ 0] += v[ 5];
+		v[ 2] += v[ 7];
+		v[15] = ROTR32(v[15] ^ v[ 0], 1);
+		v[13] = ROR8(v[13] ^ v[ 2]);
+		v[ 8] += v[13];
+		if(xor3x(v[ 7], h7, v[ 8]) == v[15]) {
+			uint32_t pos = atomicInc(&resNonce[0], 0xffffffff) + 1;
+			if(pos < maxResults)
+				resNonce[pos] = nonce;
+			return;
+		}
+	}
+}
-__host__
-uint32_t blake2s_hash_cuda(const int thr_id, const uint32_t threads, const uint32_t startNonce, const uint2 target2, const int swap)
-{
-	uint32_t result = UINT32_MAX;
-	dim3 grid((threads + TPB-1)/TPB);
-	dim3 block(TPB);
-	/* Check error on Ctrl+C or kill to prevent segfaults on exit */
-	if (cudaMemset(d_resNonce[thr_id], 0xff, NBN*sizeof(uint32_t)) != cudaSuccess)
-		return result;
-	blake2s_gpu_hash <<<grid, block>>> (threads, startNonce, d_resNonce[thr_id], target2, swap);
-	cudaThreadSynchronize();
-	if (cudaSuccess == cudaMemcpy(h_resNonce[thr_id], d_resNonce[thr_id], NBN*sizeof(uint32_t), cudaMemcpyDeviceToHost)) {
-		result = swab32_if(h_resNonce[thr_id][0], swap);
-#if NBN > 1
-		for (int n=0; n < (NBN-1); n++)
-			extra_results[n] = swab32_if(h_resNonce[thr_id][n+1], swap);
-#endif
-	}
-	return result;
-}
+static void blake2s_setBlock(const uint32_t* input, const uint32_t ptarget7)
+{
+	uint32_t _ALIGN(64) m[16];
+	uint32_t _ALIGN(64) v[16];
+	uint32_t _ALIGN(64) h[21];
+	// COMPRESS
+	for(int i = 0; i < 16; ++i)
+		m[i] = input[i];
+	h[0] = 0x01010020 ^ blake2s_IV[0];
+	h[1] = blake2s_IV[1];
+	h[2] = blake2s_IV[2]; h[3] = blake2s_IV[3];
+	h[4] = blake2s_IV[4]; h[5] = blake2s_IV[5];
+	h[6] = blake2s_IV[6]; h[7] = blake2s_IV[7];
+	for(int i = 0; i < 8; ++i)
+		v[i] = h[i];
+	v[ 8] = blake2s_IV[0]; v[ 9] = blake2s_IV[1];
+	v[10] = blake2s_IV[2]; v[11] = blake2s_IV[3];
+	v[12] = 64 ^ blake2s_IV[4]; v[13] = blake2s_IV[5];
+	v[14] = blake2s_IV[6]; v[15] = blake2s_IV[7];
+	ROUND( 0 ); ROUND( 1 );
+	ROUND( 2 ); ROUND( 3 );
+	ROUND( 4 ); ROUND( 5 );
+	ROUND( 6 ); ROUND( 7 );
+	ROUND( 8 ); ROUND( 9 );
+	for(int i = 0; i < 8; ++i)
+		h[i] ^= v[i] ^ v[i + 8];
+	h[16] = input[16];
+	h[17] = input[17];
+	h[18] = input[18];
+	h[ 8] = 0x6A09E667; h[ 9] = 0xBB67AE85;
+	h[10] = 0x3C6EF372; h[11] = 0xA54FF53A;
+	h[12] = 0x510E522F; h[13] = 0x9B05688C;
+	h[14] = ~0x1F83D9AB; h[15] = 0x5BE0CD19;
+	h[ 0] += h[ 4] + h[16];
+	h[12] = SPH_ROTR32(h[12] ^ h[ 0], 16);
+	h[ 8] += h[12];
+	h[ 4] = SPH_ROTR32(h[ 4] ^ h[ 8], 12);
+	h[ 0] += h[ 4] + h[17];
+	h[12] = SPH_ROTR32(h[12] ^ h[ 0], 8);
+	h[ 8] += h[12];
+	h[ 4] = SPH_ROTR32(h[ 4] ^ h[ 8], 7);
+	h[ 1] += h[ 5] + h[18];
+	h[13] = SPH_ROTR32(h[13] ^ h[ 1], 16);
+	h[ 9] += h[13];
+	h[ 5] = ROTR32(h[ 5] ^ h[ 9], 12);
+	h[ 2] += h[ 6];
+	h[14] = SPH_ROTR32(h[14] ^ h[ 2], 16);
+	h[10] += h[14];
+	h[ 6] = SPH_ROTR32(h[ 6] ^ h[10], 12);
+	h[ 2] += h[ 6];
+	h[14] = SPH_ROTR32(h[14] ^ h[ 2], 8);
+	h[10] += h[14];
+	h[ 6] = SPH_ROTR32(h[ 6] ^ h[10], 7);
+	h[19] = h[7]; // constant h[7] for the nonce check
+	h[ 3] += h[ 7];
+	h[15] = SPH_ROTR32(h[15] ^ h[ 3], 16);
+	h[11] += h[15];
+	h[ 7] = SPH_ROTR32(h[ 7] ^ h[11], 12);
+	h[ 3] += h[ 7];
+	h[15] = SPH_ROTR32(h[15] ^ h[ 3], 8);
+	h[11] += h[15];
+	h[ 7] = SPH_ROTR32(h[ 7] ^ h[11], 7);
+	h[ 1] += h[ 5];
+	h[ 3] += h[ 4];
+	h[14] = SPH_ROTR32(h[14] ^ h[ 3], 16);
+	h[ 2] += h[ 7];
+	if(ptarget7 == 0) {
+		h[19] = SPH_ROTL32(h[19], 7); // align the rotation with v[7] v[15]
+	}
+	cudaMemcpyToSymbol(midstate, h, 20*sizeof(uint32_t), 0, cudaMemcpyHostToDevice);
+}
 static bool init[MAX_GPUS] = { 0 };
@@ -404,23 +431,21 @@ static bool init[MAX_GPUS] = { 0 };
 extern "C" int scanhash_blake2s(int thr_id, struct work *work, uint32_t max_nonce, unsigned long *hashes_done)
 {
 	uint32_t _ALIGN(64) endiandata[20];
 	uint32_t *pdata = work->data;
 	uint32_t *ptarget = work->target;
-	const int swap = 1; // to toggle nonce endian
+	uint32_t *resNonces;
 	const uint32_t first_nonce = pdata[19];
-	int dev_id = device_map[thr_id];
-	int intensity = (device_sm[dev_id] >= 500 && !is_windows()) ? 28 : 25;
-	if (device_sm[dev_id] < 350) intensity = 22;
+	const int dev_id = device_map[thr_id];
+	int rc = 0;
+	int intensity = 28;
 	uint32_t throughput = cuda_default_throughput(thr_id, 1U << intensity);
 	if (init[thr_id]) throughput = min(throughput, max_nonce - first_nonce);
-	if (opt_benchmark) {
-		ptarget[6] = swab32(0xFFFF0);
-		ptarget[7] = 0;
-	}
+	const dim3 grid((throughput + (NPT*TPB)-1)/(NPT*TPB));
+	const dim3 block(TPB);
 	if (!init[thr_id])
 	{
@@ -429,82 +454,93 @@ extern "C" int scanhash_blake2s(int thr_id, struct work *work, uint32_t max_nonc
 		cudaDeviceReset();
 		// reduce cpu usage (linux)
 		cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
+		cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
 		CUDA_LOG_ERROR();
 	}
 	gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
-	CUDA_CALL_OR_RET_X(cudaMalloc(&d_resNonce[thr_id], NBN * sizeof(uint32_t)), -1);
-	CUDA_CALL_OR_RET_X(cudaMallocHost(&h_resNonce[thr_id], NBN * sizeof(uint32_t)), -1);
+	CUDA_CALL_OR_RET_X(cudaMalloc(&d_resNonce[thr_id], maxResults * sizeof(uint32_t)), -1);
+	CUDA_CALL_OR_RET_X(cudaMallocHost(&h_resNonce[thr_id], maxResults * sizeof(uint32_t)), -1);
 	init[thr_id] = true;
 	}
+	resNonces = h_resNonce[thr_id];
 	for (int i=0; i < 19; i++) {
 		be32enc(&endiandata[i], pdata[i]);
 	}
-	// midstate
-	memset(s_midstate.buf, 0, sizeof(s_midstate.buf));
-	blake2s_init(&s_midstate, BLAKE2S_OUTBYTES);
-	blake2s_update(&s_midstate, (uint8_t*) endiandata, MIDLEN);
-	memcpy(&s_ctx, &s_midstate, sizeof(blake2s_state));
-	blake2s_setBlock(endiandata, &s_midstate);
-	const uint2 target = make_uint2(ptarget[6], ptarget[7]);
+	blake2s_setBlock(endiandata, ptarget[7]);
+	cudaMemset(d_resNonce[thr_id], 0x00, maxResults*sizeof(uint32_t));
 	do {
-		uint32_t foundNonce = blake2s_hash_cuda(thr_id, throughput, pdata[19], target, swap);
-		*hashes_done = pdata[19] - first_nonce + throughput;
-		if (foundNonce != UINT32_MAX)
-		{
-			uint32_t _ALIGN(A) vhashcpu[8];
-			//blake2s_hash(vhashcpu, endiandata);
-			endiandata[19] = swab32_if(foundNonce, swap);
-			blake2s_hash_end(vhashcpu, endiandata);
-			if (vhashcpu[7] <= target.y && fulltest(vhashcpu, ptarget)) {
-				work_set_target_ratio(work, vhashcpu);
-				work->nonces[0] = swab32_if(foundNonce, !swap);
-				work->valid_nonces = 1;
-#if NBN > 1
-				if (extra_results[0] != UINT32_MAX) {
-					endiandata[19] = swab32_if(extra_results[0], swap);
-					blake2s_hash_end(vhashcpu, endiandata);
-					if (vhashcpu[7] <= target.y && fulltest(vhashcpu, ptarget)) {
-						work->nonces[1] = swab32_if(extra_results[0], !swap);
-						if (bn_hash_target_ratio(vhashcpu, ptarget) > work->shareratio[0]) {
-							work->shareratio[1] = work->shareratio[0];
-							work->sharediff[1] = work->sharediff[0];
-							work_set_target_ratio(work, vhashcpu);
-							xchg(work->nonces[0], work->nonces[1]);
-						} else {
-							bn_set_target_ratio(work, vhashcpu, 1);
-						}
-						work->valid_nonces++;
-						pdata[19] = max(work->nonces[0], work->nonces[1]);
-						return 2;
-					}
-					extra_results[0] = UINT32_MAX;
-				}
-#endif
-				pdata[19] = max(work->nonces[0], work->nonces[1]);
-				return 1;
-			} else {
-				gpulog(LOG_WARNING, thr_id, "result for %08x does not validate on CPU!", foundNonce);
-			}
-		}
+		if(ptarget[7]) {
+			blake2s_gpu_hash_nonce<<<grid, block>>>(throughput, pdata[19], d_resNonce[thr_id], ptarget[7]);
+		} else {
+			blake2s_gpu_hash_nonce<<<grid, block>>>(throughput, pdata[19], d_resNonce[thr_id]);
+		}
+		cudaMemcpy(resNonces, d_resNonce[thr_id], sizeof(uint32_t), cudaMemcpyDeviceToHost);
+		if(resNonces[0])
+		{
+			cudaMemcpy(resNonces, d_resNonce[thr_id], maxResults*sizeof(uint32_t), cudaMemcpyDeviceToHost);
+			cudaMemset(d_resNonce[thr_id], 0x00, sizeof(uint32_t));
+			if(resNonces[0] >= maxResults) {
+				gpulog(LOG_WARNING, thr_id, "candidates flood: %u", resNonces[0]);
+				resNonces[0] = maxResults-1;
+			}
+			uint32_t vhashcpu[8];
+			uint32_t nonce = sph_bswap32(resNonces[1]);
+			be32enc(&endiandata[19], nonce);
+			blake2s_hash(vhashcpu, endiandata);
+			*hashes_done = pdata[19] - first_nonce + throughput;
+			if(vhashcpu[6] <= ptarget[6] && fulltest(vhashcpu, ptarget))
+			{
+				work_set_target_ratio(work, vhashcpu);
+				work->nonces[0] = nonce;
+				rc = work->valid_nonces = 1;
+				// search for 2nd best nonce
+				for(uint32_t j=2; j <= resNonces[0]; j++)
+				{
+					nonce = sph_bswap32(resNonces[j]);
+					be32enc(&endiandata[19], nonce);
+					blake2s_hash(vhashcpu, endiandata);
+					if(vhashcpu[6] <= ptarget[6] && fulltest(vhashcpu, ptarget))
+					{
+						gpulog(LOG_DEBUG, thr_id, "Multiple nonces: 1/%08x - %u/%08x", work->nonces[0], j, nonce);
+						work->nonces[1] = nonce;
+						if (bn_hash_target_ratio(vhashcpu, ptarget) > work->shareratio[0]) {
+							work->shareratio[1] = work->shareratio[0];
+							work->sharediff[1] = work->sharediff[0];
+							xchg(work->nonces[1], work->nonces[0]);
+							work_set_target_ratio(work, vhashcpu);
+						} else if (work->valid_nonces == 1) {
+							bn_set_target_ratio(work, vhashcpu, 1);
+						}
+						work->valid_nonces++;
+						rc = 2;
+						break;
+					}
+				}
+				pdata[19] = max(work->nonces[0], work->nonces[1]); // next scan start
+				return rc;
+			} else if (vhashcpu[7] > ptarget[7]) {
+				gpulog(LOG_WARNING, thr_id, "result for %08x does not validate on CPU!", resNonces[0]);
+			}
+		}
 		pdata[19] += throughput;
-	} while (!work_restart[thr_id].restart && max_nonce > (uint64_t)throughput + pdata[19]);
+	} while (!work_restart[thr_id].restart && (uint64_t)max_nonce > (uint64_t)throughput + pdata[19]);
 	*hashes_done = pdata[19] - first_nonce;
-	return 0;
+	return rc;
 }
 // cleanup
@@ -522,3 +558,4 @@ extern "C" void free_blake2s(int thr_id)
 	cudaDeviceSynchronize();
 }

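A note on the other half of the optimization: blake2s_setBlock now runs the nonce-free first compression (plus part of the first round of the final block) once on the host and uploads 20 precomputed words to constant memory, so every GPU thread resumes from that midstate instead of hashing the full 80-byte header. Below is a minimal, self-contained sketch of the midstate idea under simplified assumptions; compress64, set_block and scan are illustrative names, and the toy mixing function stands in for the real BLAKE2s compression.

#include <cstdint>
#include <cstring>
#include <cuda_runtime.h>

// Toy stand-in for a 64-byte block compression, NOT real BLAKE2s.
__host__ __device__ inline void compress64(uint32_t h[8], const uint32_t m[16])
{
	for (int i = 0; i < 16; i++)
		h[i % 8] = (h[i % 8] ^ m[i]) * 2654435761u;
}

__constant__ uint32_t c_mid[8];  // chaining value after the first block
__constant__ uint32_t c_tail[3]; // header words 16..18 (word 19 is the nonce)

void set_block(const uint32_t header[20])
{
	uint32_t h[8] = { 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
	                  0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19 };
	compress64(h, header); // nonce-free part, computed once per work unit
	cudaMemcpyToSymbol(c_mid, h, sizeof(h));
	cudaMemcpyToSymbol(c_tail, header + 16, 3 * sizeof(uint32_t));
}

__global__ void scan(uint32_t startNonce, uint32_t target, uint32_t *found)
{
	uint32_t h[8];
	uint32_t m[16] = { c_tail[0], c_tail[1], c_tail[2],
	                   startNonce + blockIdx.x * blockDim.x + threadIdx.x };
	memcpy(h, c_mid, sizeof(h)); // resume from the precomputed midstate
	compress64(h, m);            // only this final block varies per thread
	if (h[7] <= target)
		*found = m[3];
}

In the commit itself the resume state is richer than a plain chaining value: midstate[0..15] holds the v state with part of round 0 already applied, midstate[16..18] the three remaining message words, and midstate[19] a saved copy of h[7] used for the final nonce check.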