@@ -1,24 +1,38 @@
 /*
- * sha256 CUDA implementation.
+ * sha256 + ripemd CUDA implementation.
+ * tpruvot and alexis78
  */

 #include <stdio.h>
 #include <stdint.h>
 #include <memory.h>

 #include <cuda_helper.h>
+#include <cuda_vector_uint2x4.h>

 #include <miner.h>

-__constant__ static uint32_t __align__(8) c_midstate112[8];
-__constant__ static uint32_t __align__(8) c_dataEnd112[12];
+__constant__ static uint32_t _ALIGN(8) c_midstate112[8];
+__constant__ static uint32_t _ALIGN(8) c_midbuffer112[8];
+__constant__ static uint32_t _ALIGN(8) c_dataEnd112[12];

-const __constant__ uint32_t __align__(8) c_H256[8] = {
+__constant__ const uint32_t c_H256[8] = {
     0x6A09E667U, 0xBB67AE85U, 0x3C6EF372U, 0xA54FF53AU,
     0x510E527FU, 0x9B05688CU, 0x1F83D9ABU, 0x5BE0CD19U
 };
-__constant__ static uint32_t __align__(8) c_K[64];
+__constant__ static uint32_t _ALIGN(16) c_K[64] = {
+    0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
+    0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
+    0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
+    0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
+    0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
+    0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
+    0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
+    0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2
+};

 static __thread uint32_t* d_resNonces;
-__constant__ static uint32_t __align__(8) c_target[2];
+__constant__ static uint32_t _ALIGN(8) c_target[2];
 __device__ uint64_t d_target[1];

 #ifdef __INTELLISENSE__
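A __constant__ array can be populated either at module load through an initializer, as c_K now is, or at runtime through cudaMemcpyToSymbol (this file still performs the runtime copy in lbry_sha256_init further down). A minimal standalone sketch of both options, with illustrative names that are not from the patch:

#include <cuda_runtime.h>
#include <stdint.h>

// Option A: initialized at module load, no host call needed.
__constant__ uint32_t tbl_static[4] = { 1, 2, 3, 4 };

// Option B: filled at runtime from the host, before any kernel launch.
__constant__ uint32_t tbl_dynamic[4];

static void fill_dynamic_table()
{
    const uint32_t host_tbl[4] = { 1, 2, 3, 4 };
    // Copies host data into the __constant__ symbol (offset 0, host-to-device).
    cudaMemcpyToSymbol(tbl_dynamic, host_tbl, sizeof(host_tbl));
}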
@@ -43,32 +57,24 @@ static const uint32_t cpu_K[64] = {
     0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2
 };

-#define ROTR ROTR32

 __host__
 static void sha256_step1_host(uint32_t a, uint32_t b, uint32_t c, uint32_t &d,
-    uint32_t e, uint32_t f, uint32_t g, uint32_t &h,
-    uint32_t in, const uint32_t Kshared)
+    uint32_t e, uint32_t f, uint32_t g, uint32_t &h, uint32_t in, const uint32_t Kshared)
 {
-    uint32_t t1,t2;
     uint32_t vxandx = (((f) ^ (g)) & (e)) ^ (g); // xandx(e, f, g);
-    uint32_t bsg21 = ROTR(e, 6) ^ ROTR(e, 11) ^ ROTR(e, 25); // bsg2_1(e);
-    uint32_t bsg20 = ROTR(a, 2) ^ ROTR(a, 13) ^ ROTR(a, 22); //bsg2_0(a);
+    uint32_t bsg21 = ROTR32(e, 6) ^ ROTR32(e, 11) ^ ROTR32(e, 25); // bsg2_1(e);
+    uint32_t bsg20 = ROTR32(a, 2) ^ ROTR32(a, 13) ^ ROTR32(a, 22); //bsg2_0(a);
     uint32_t andorv = ((b) & (c)) | (((b) | (c)) & (a)); //andor32(a,b,c);

-    t1 = h + bsg21 + vxandx + Kshared + in;
-    t2 = bsg20 + andorv;
+    uint32_t t1 = h + bsg21 + vxandx + Kshared + in;
+    uint32_t t2 = bsg20 + andorv;
     d = d + t1;
     h = t1 + t2;
 }

 __host__
 static void sha256_step2_host(uint32_t a, uint32_t b, uint32_t c, uint32_t &d,
-    uint32_t e, uint32_t f, uint32_t g, uint32_t &h,
-    uint32_t* in, uint32_t pc, const uint32_t Kshared)
+    uint32_t e, uint32_t f, uint32_t g, uint32_t &h, uint32_t* in, uint32_t pc, const uint32_t Kshared)
 {
-    uint32_t t1,t2;

     int pcidx1 = (pc-2) & 0xF;
     int pcidx2 = (pc-7) & 0xF;
     int pcidx3 = (pc-15) & 0xF;
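For reference, sha256_step1_host is one FIPS 180-4 compression round; the bitwise tricks above are the usual Ch/Maj identities. A host-compilable restatement with the standard names (this helper is mine, not part of the patch):

#include <stdint.h>

static inline uint32_t rotr(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }

// One SHA-256 round, equivalent to sha256_step1_host above.
static void sha256_round_ref(uint32_t a, uint32_t b, uint32_t c, uint32_t &d,
    uint32_t e, uint32_t f, uint32_t g, uint32_t &h, uint32_t w, uint32_t k)
{
    const uint32_t ch   = (e & f) ^ (~e & g);          // == ((f ^ g) & e) ^ g
    const uint32_t maj  = (a & b) ^ (a & c) ^ (b & c); // == (b & c) | ((b | c) & a)
    const uint32_t sig1 = rotr(e,6) ^ rotr(e,11) ^ rotr(e,25);
    const uint32_t sig0 = rotr(a,2) ^ rotr(a,13) ^ rotr(a,22);
    const uint32_t t1 = h + sig1 + ch + k + w;
    const uint32_t t2 = sig0 + maj;
    d += t1;      // becomes the new e after the register rotation
    h  = t1 + t2; // becomes the new a
}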
@@ -78,12 +84,13 @@ static void sha256_step2_host(uint32_t a, uint32_t b, uint32_t c, uint32_t &d,
     uint32_t inx2 = in[pcidx2];
     uint32_t inx3 = in[pcidx3];

-    uint32_t ssg21 = ROTR(inx1, 17) ^ ROTR(inx1, 19) ^ SPH_T32((inx1) >> 10); //ssg2_1(inx1);
-    uint32_t ssg20 = ROTR(inx3, 7) ^ ROTR(inx3, 18) ^ SPH_T32((inx3) >> 3); //ssg2_0(inx3);
+    uint32_t ssg21 = ROTR32(inx1, 17) ^ ROTR32(inx1, 19) ^ SPH_T32((inx1) >> 10); //ssg2_1(inx1);
+    uint32_t ssg20 = ROTR32(inx3, 7) ^ ROTR32(inx3, 18) ^ SPH_T32((inx3) >> 3); //ssg2_0(inx3);
     uint32_t vxandx = (((f) ^ (g)) & (e)) ^ (g); // xandx(e, f, g);
-    uint32_t bsg21 = ROTR(e, 6) ^ ROTR(e, 11) ^ ROTR(e, 25); // bsg2_1(e);
-    uint32_t bsg20 = ROTR(a, 2) ^ ROTR(a, 13) ^ ROTR(a, 22); //bsg2_0(a);
+    uint32_t bsg21 = ROTR32(e, 6) ^ ROTR32(e, 11) ^ ROTR32(e, 25); // bsg2_1(e);
+    uint32_t bsg20 = ROTR32(a, 2) ^ ROTR32(a, 13) ^ ROTR32(a, 22); //bsg2_0(a);
     uint32_t andorv = ((b) & (c)) | (((b) | (c)) & (a)); //andor32(a,b,c);
+    uint32_t t1,t2;

     in[pc] = ssg21 + inx2 + ssg20 + inx0;
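The & 0xF indices work because the SHA-256 message schedule only ever looks back 16 words, so in[] can be updated in place as a 16-word ring: W[t] = ssg1(W[t-2]) + W[t-7] + ssg0(W[t-15]) + W[t-16]. The same idea in isolation, as a hedged standalone sketch:

#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }

// Expand one schedule word in place; w[] holds the last 16 words, pc = t & 0xF.
static void expand_w(uint32_t *w, uint32_t pc)
{
    const uint32_t w2  = w[(pc - 2)  & 0xF];
    const uint32_t w7  = w[(pc - 7)  & 0xF];
    const uint32_t w15 = w[(pc - 15) & 0xF];
    const uint32_t w16 = w[pc & 0xF]; // 16 back is the very slot being overwritten
    const uint32_t s1 = rotr32(w2, 17) ^ rotr32(w2, 19) ^ (w2 >> 10);  // ssg2_1
    const uint32_t s0 = rotr32(w15, 7) ^ rotr32(w15, 18) ^ (w15 >> 3); // ssg2_0
    w[pc & 0xF] = s1 + w7 + s0 + w16;
}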
@@ -152,55 +159,32 @@ static void sha256_round_body_host(uint32_t* in, uint32_t* state, const uint32_t
     state[7] += h;
 }

-__device__ __forceinline__
-uint32_t xor3b(const uint32_t a, const uint32_t b, const uint32_t c) {
-    uint32_t result;
-#if __CUDA_ARCH__ >= 500 && CUDA_VERSION >= 7050
-    asm ("lop3.b32 %0, %1, %2, %3, 0x96; // xor3b" //0x96 = 0xF0 ^ 0xCC ^ 0xAA
-        : "=r"(result) : "r"(a), "r"(b),"r"(c));
-#else
-    result = a^b^c;
-#endif
-    return result;
-}
+#define xor3b(a,b,c) ((a ^ b) ^ c)

 __device__ __forceinline__ uint32_t bsg2_0(const uint32_t x)
 {
-    uint32_t r1 = ROTR32(x,2);
-    uint32_t r2 = ROTR32(x,13);
-    uint32_t r3 = ROTR32(x,22);
-    return xor3b(r1,r2,r3);
+    return xor3b(ROTR32(x,2),ROTR32(x,13),ROTR32(x,22));
 }

 __device__ __forceinline__ uint32_t bsg2_1(const uint32_t x)
 {
-    uint32_t r1 = ROTR32(x,6);
-    uint32_t r2 = ROTR32(x,11);
-    uint32_t r3 = ROTR32(x,25);
-    return xor3b(r1,r2,r3);
+    return xor3b(ROTR32(x,6),ROTR32(x,11),ROTR32(x,25));
 }

 __device__ __forceinline__ uint32_t ssg2_0(const uint32_t x)
 {
-    uint64_t r1 = ROTR32(x,7);
-    uint64_t r2 = ROTR32(x,18);
-    uint64_t r3 = shr_t32(x,3);
-    return xor3b(r1,r2,r3);
+    return xor3b(ROTR32(x,7),ROTR32(x,18),(x>>3));
 }

 __device__ __forceinline__ uint32_t ssg2_1(const uint32_t x)
 {
-    uint64_t r1 = ROTR32(x,17);
-    uint64_t r2 = ROTR32(x,19);
-    uint64_t r3 = shr_t32(x,10);
-    return xor3b(r1,r2,r3);
+    return xor3b(ROTR32(x,17),ROTR32(x,19),(x>>10));
 }

 __device__ __forceinline__ uint32_t andor32(const uint32_t a, const uint32_t b, const uint32_t c)
 {
     uint32_t result;
-    asm("{\n\t"
-        ".reg .u32 m,n,o;\n\t"
+    asm("{ .reg .u32 m,n,o; // andor32 \n\t"
         "and.b32 m, %1, %2;\n\t"
         " or.b32 n, %1, %2;\n\t"
        "and.b32 o, n, %3;\n\t"
@@ -210,27 +194,21 @@ __device__ __forceinline__ uint32_t andor32(const uint32_t a, const uint32_t b,
     return result;
 }

-__device__ __forceinline__ uint2 vectorizeswap(uint64_t v) {
+__device__ __forceinline__ uint2 vectorizeswap(uint64_t v)
+{
     uint2 result;
-    asm("mov.b64 {%0,%1},%2; \n\t"
+    asm("mov.b64 {%0,%1},%2; // vectorizeswap \n\t"
        : "=r"(result.y), "=r"(result.x) : "l"(v));
     return result;
 }

 __device__
-static void sha2_step1(uint32_t a, uint32_t b, uint32_t c, uint32_t &d, uint32_t e, uint32_t f, uint32_t g, uint32_t &h,
-    uint32_t in, const uint32_t Kshared)
+__forceinline__
+static void sha2_step1(uint32_t a, uint32_t b, uint32_t c, uint32_t &d, uint32_t e, uint32_t f, uint32_t g, uint32_t &h, uint32_t in, const uint32_t Kshared)
 {
-    uint32_t t1,t2;
-    uint32_t vxandx = xandx(e, f, g);
-    uint32_t bsg21 = bsg2_1(e);
-    uint32_t bsg20 = bsg2_0(a);
-    uint32_t andorv = andor32(a,b,c);
-
-    t1 = h + bsg21 + vxandx + Kshared + in;
-    t2 = bsg20 + andorv;
-    d = d + t1;
-    h = t1 + t2;
+    uint32_t t1 = bsg2_1(e) + ((((f) ^ (g)) & (e)) ^ (g)) + Kshared + in;
+    d = d + h + t1;
+    h += t1 + bsg2_0(a) + (((b) & (c)) | (((b) | (c)) & (a)));
 }

 __device__
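The reworked sha2_step1 is the same round with h factored out of t1: writing t1' = Sigma1(e) + Ch(e,f,g) + K + W (no h), the old updates become d += h + t1' and h = (h + t1') + t2, which is exactly what the new three lines compute. A quick host-compilable equivalence check, with hypothetical helper names that are not from the patch:

#include <assert.h>
#include <stdint.h>

// Both formulations of the round update, minus the Sigma/Ch/Maj details.
static void update_old(uint32_t &d, uint32_t &h, uint32_t t1_with_h, uint32_t t2)
{
    d += t1_with_h;
    h = t1_with_h + t2;
}

static void update_new(uint32_t &d, uint32_t &h, uint32_t t1_no_h, uint32_t t2)
{
    d = d + h + t1_no_h; // t1_with_h == h + t1_no_h
    h += t1_no_h + t2;   // h_new = h + t1_no_h + t2
}

int main()
{
    uint32_t d1 = 7, h1 = 9, d2 = 7, h2 = 9;
    const uint32_t t1 = 0x12345678, t2 = 0x9ABCDEF0; // arbitrary round values
    update_old(d1, h1, h1 + t1, t2);
    update_new(d2, h2, t1, t2);
    assert(d1 == d2 && h1 == h2);
    return 0;
}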
@@ -263,6 +241,56 @@ static void sha2_step2(uint32_t a, uint32_t b, uint32_t c, uint32_t &d, uint32_t
     h = t1 + t2;
 }

+__device__ __forceinline__
+static void sha256_round_first(uint32_t* in,uint32_t *buf, uint32_t* state, uint32_t* const Kshared)
+{
+    uint32_t a = buf[0];
+    uint32_t b = buf[1];
+    uint32_t c = buf[2];
+    uint32_t d = buf[3];
+    uint32_t e = buf[4];
+    uint32_t f = buf[5];
+    uint32_t g = buf[6];
+    uint32_t h = buf[7];
+
+    // first 11 rounds (message words 0..10) were already done on the host
+    sha2_step1(f,g,h,a,b,c,d,e,in[11],Kshared[11]);
+    sha2_step1(e,f,g,h,a,b,c,d,in[12],Kshared[12]);
+    sha2_step1(d,e,f,g,h,a,b,c,in[13],Kshared[13]);
+    sha2_step1(c,d,e,f,g,h,a,b,in[14],Kshared[14]);
+    sha2_step1(b,c,d,e,f,g,h,a,in[15],Kshared[15]);
+
+    #pragma unroll
+    for (int i=0; i<3; i++)
+    {
+        sha2_step2(a,b,c,d,e,f,g,h,in,0, Kshared[16+16*i]);
+        sha2_step2(h,a,b,c,d,e,f,g,in,1, Kshared[17+16*i]);
+        sha2_step2(g,h,a,b,c,d,e,f,in,2, Kshared[18+16*i]);
+        sha2_step2(f,g,h,a,b,c,d,e,in,3, Kshared[19+16*i]);
+        sha2_step2(e,f,g,h,a,b,c,d,in,4, Kshared[20+16*i]);
+        sha2_step2(d,e,f,g,h,a,b,c,in,5, Kshared[21+16*i]);
+        sha2_step2(c,d,e,f,g,h,a,b,in,6, Kshared[22+16*i]);
+        sha2_step2(b,c,d,e,f,g,h,a,in,7, Kshared[23+16*i]);
+        sha2_step2(a,b,c,d,e,f,g,h,in,8, Kshared[24+16*i]);
+        sha2_step2(h,a,b,c,d,e,f,g,in,9, Kshared[25+16*i]);
+        sha2_step2(g,h,a,b,c,d,e,f,in,10,Kshared[26+16*i]);
+        sha2_step2(f,g,h,a,b,c,d,e,in,11,Kshared[27+16*i]);
+        sha2_step2(e,f,g,h,a,b,c,d,in,12,Kshared[28+16*i]);
+        sha2_step2(d,e,f,g,h,a,b,c,in,13,Kshared[29+16*i]);
+        sha2_step2(c,d,e,f,g,h,a,b,in,14,Kshared[30+16*i]);
+        sha2_step2(b,c,d,e,f,g,h,a,in,15,Kshared[31+16*i]);
+    }
+
+    buf[ 0] = state[0] + a;
+    buf[ 1] = state[1] + b;
+    buf[ 2] = state[2] + c;
+    buf[ 3] = state[3] + d;
+    buf[ 4] = state[4] + e;
+    buf[ 5] = state[5] + f;
+    buf[ 6] = state[6] + g;
+    buf[ 7] = state[7] + h;
+}

 __device__
 static void sha256_round_body(uint32_t* in, uint32_t* state, uint32_t* const Kshared)
 {
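sha256_round_first exists because, in the second 64-byte block of the 112-byte header, only word 11 (the nonce) differs per thread: words 0..10 are fixed, so rounds 0..10 run once on the CPU (see lbry_sha256_setBlock_112 below) and their working variables are broadcast through c_midbuffer112, while c_midstate112 keeps the state that entered the block for the final feed-forward. A hedged restatement of the split, stripped of the unrolling; names and the pre-expanded schedule are illustrative assumptions:

#include <stdint.h>

__device__ static uint32_t rotr_(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }

__device__ static void sha256_round_(uint32_t s[8], uint32_t w, uint32_t k)
{
    const uint32_t ch  = (s[4] & s[5]) ^ (~s[4] & s[6]);
    const uint32_t maj = (s[0] & s[1]) ^ (s[0] & s[2]) ^ (s[1] & s[2]);
    const uint32_t t1  = s[7] + (rotr_(s[4],6) ^ rotr_(s[4],11) ^ rotr_(s[4],25)) + ch + k + w;
    const uint32_t t2  = (rotr_(s[0],2) ^ rotr_(s[0],13) ^ rotr_(s[0],22)) + maj;
    const uint32_t e   = s[3] + t1;
    s[7]=s[6]; s[6]=s[5]; s[5]=s[4]; s[4]=e;
    s[3]=s[2]; s[2]=s[1]; s[1]=s[0]; s[0]=t1+t2;
}

// midstate  = chaining value entering the block (kept for the feed-forward),
// midbuffer = working variables after the host already ran rounds 0..10,
// w         = the 64 expanded schedule words (w[11] carries this thread's nonce).
__device__ void resume_at_round_11(const uint32_t midstate[8], const uint32_t midbuffer[8],
                                   const uint32_t w[64], const uint32_t K[64], uint32_t out[8])
{
    uint32_t s[8];
    for (int i = 0; i < 8; i++) s[i] = midbuffer[i];
    for (int t = 11; t < 64; t++) sha256_round_(s, w[t], K[t]); // resume where the host stopped
    for (int i = 0; i < 8; i++) out[i] = midstate[i] + s[i];    // feed-forward uses the input state
}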
@@ -328,166 +356,70 @@ uint64_t cuda_swab32ll(uint64_t x) {
     return MAKE_ULONGLONG(cuda_swab32(_LODWORD(x)), cuda_swab32(_HIDWORD(x)));
 }

-#if 0
-__global__
-void lbry_sha256_gpu_hash_112(const uint32_t threads, const uint32_t startNonce, const bool swabNonce, uint64_t *outputHash)
-{
-    const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
-    if (thread < threads)
-    {
-        const uint32_t nonce = startNonce + thread;
-
-        uint32_t dat[16];
-        #pragma unroll
-        for (int i=0;i<11;i++) dat[i] = c_dataEnd112[i]; // pre "swabed"
-        dat[11] = swabNonce ? cuda_swab32(nonce) : nonce;
-        dat[12] = 0x80000000;
-        dat[13] = 0;
-        dat[14] = 0;
-        dat[15] = 0x380;
-
-        uint32_t __align__(8) buf[8];
-        #pragma unroll
-        for (int i=0;i<8;i++) buf[i] = c_midstate112[i];
-
-        sha256_round_body(dat, buf, c_K);
-
-        // output
-        uint2* output = (uint2*) (&outputHash[thread * 8U]);
-        #pragma unroll
-        for (int i=0;i<4;i++) {
-            //output[i] = vectorize(cuda_swab32ll(((uint64_t*)buf)[i]));
-            output[i] = vectorize(((uint64_t*)buf)[i]); // out without swap, new sha256 after
-        }
-    }
-}
-
-__global__
-void lbry_sha256_gpu_hash_32(uint32_t threads, uint64_t *Hash512)
-{
-    const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
-    if (thread < threads)
-    {
-        uint32_t __align__(8) buf[8]; // align for vectorize
-        #pragma unroll
-        for (int i=0; i<8; i++) buf[i] = c_H256[i];
-
-        uint32_t* input = (uint32_t*) (&Hash512[thread * 8U]);
-
-        uint32_t dat[16];
-        #pragma unroll
-        //for (int i=0;i<8;i++) dat[i] = cuda_swab32(input[i]);
-        for (int i=0; i<8; i++) dat[i] = input[i];
-        dat[8] = 0x80000000;
-        #pragma unroll
-        for (int i=9; i<15; i++) dat[i] = 0;
-        dat[15] = 0x100;
-
-        sha256_round_body(dat, buf, c_K);
-
-        // output
-        uint2* output = (uint2*) input;
-        #pragma unroll
-        for (int i=0;i<4;i++) {
-            //output[i] = vectorize(cuda_swab32ll(((uint64_t*)buf)[i]));
-            output[i] = vectorizeswap(((uint64_t*)buf)[i]);
-        }
-#ifdef PAD_ZEROS
-        #pragma unroll
-        for (int i=4; i<8; i++) output[i] = vectorize(0);
-#endif
-    }
-}
-
-__host__
-void lbry_sha256_hash_112(int thr_id, uint32_t threads, uint32_t startNonce, uint32_t *d_outputHash, bool swabNonce, cudaStream_t stream)
-{
-    const int threadsperblock = 256;
-
-    dim3 grid(threads/threadsperblock);
-    dim3 block(threadsperblock);
-
-    lbry_sha256_gpu_hash_112 <<<grid, block, 0, stream>>> (threads, startNonce, swabNonce, (uint64_t*) d_outputHash);
-    cudaGetLastError();
-}
-
-__host__
-void lbry_sha256_hash_32(int thr_id, uint32_t threads, uint32_t *d_Hash, cudaStream_t stream)
-{
-    const int threadsperblock = 256;
-
-    dim3 grid(threads/threadsperblock);
-    dim3 block(threadsperblock);
-
-    lbry_sha256_gpu_hash_32 <<<grid, block, 0, stream>>> (threads, (uint64_t*) d_Hash);
-}
-#endif

 __global__
-void lbry_sha256d_gpu_hash_112(const uint32_t threads, const uint32_t startNonce, const bool swabNonce, uint64_t *outputHash)
+__launch_bounds__(512,2) /* to force 64 regs */
+void lbry_sha256d_gpu_hash_112(const uint32_t threads, const uint32_t startNonce, uint64_t *outputHash)
 {
     const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
     extern __shared__ uint32_t s_K[];
     //s_K[thread & 63] = c_K[thread & 63];
     if (threadIdx.x < 64U) s_K[threadIdx.x] = c_K[threadIdx.x];
     //__threadfence_block();
     if (thread < threads)
     {
-        const uint32_t nonce = startNonce + thread;
-
         uint32_t dat[16];
         #pragma unroll
         for (int i=0; i<11; i++) dat[i] = c_dataEnd112[i];
-        dat[11] = swabNonce ? cuda_swab32(nonce) : nonce;
+        dat[11] = startNonce + thread;
         dat[12] = 0x80000000;
         dat[13] = 0;
         dat[14] = 0;
         dat[15] = 0x380;

-        uint32_t __align__(8) buf[8];
-        #pragma unroll
-        for (int i=0;i<8;i++) buf[i] = c_midstate112[i];
+        uint32_t __align__(8) buf[8], state[8];

-        sha256_round_body(dat, buf, s_K);
+        *(uint2x4*)&state[0] = *(uint2x4*)&c_midstate112[0];
+        *(uint2x4*)&buf[0] = *(uint2x4*)&c_midbuffer112[0];
+
+        sha256_round_first(dat, buf, state, c_K); // no shared mem here

         // second sha256
-        #pragma unroll
-        for (int i=0; i<8; i++) dat[i] = buf[i];
+        *(uint2x4*)&dat[0] = *(uint2x4*)&buf[0];
         dat[8] = 0x80000000;
         #pragma unroll
         for (int i=9; i<15; i++) dat[i] = 0;
         dat[15] = 0x100;

-        #pragma unroll
-        for (int i=0; i<8; i++) buf[i] = c_H256[i];
+        *(uint2x4*)&buf[0] = *(uint2x4*)&c_H256[0];

         sha256_round_body(dat, buf, s_K);

         // output
-        uint2* output = (uint2*) (&outputHash[thread * 8U]);
-        #pragma unroll
-        for (int i=0;i<4;i++) {
-            //output[i] = vectorize(cuda_swab32ll(((uint64_t*)buf)[i]));
-            output[i] = vectorizeswap(((uint64_t*)buf)[i]);
-        }
+        *(uint2*)&buf[0] = vectorizeswap(((uint64_t*)buf)[0]);
+        *(uint2*)&buf[2] = vectorizeswap(((uint64_t*)buf)[1]);
+        *(uint2*)&buf[4] = vectorizeswap(((uint64_t*)buf)[2]);
+        *(uint2*)&buf[6] = vectorizeswap(((uint64_t*)buf)[3]);
+
+        *(uint2x4*)&outputHash[thread*8U] = *(uint2x4*)&buf[0];
     }
 }
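The *(uint2x4*)&x[0] = *(uint2x4*)&y[0] casts turn eight 32-bit moves into wide vector transactions; cuda_vector_uint2x4.h in this tree presumably defines uint2x4 as a 32-byte aligned aggregate of four uint2. The same trick with the standard uint4 type, as a self-contained sketch:

#include <stdint.h>

// Copy 8 uint32 words (32 bytes) as two 128-bit transactions.
// Both pointers must be 16-byte aligned for the vector accesses to be legal.
__device__ __forceinline__ void copy8_vec(uint32_t *dst, const uint32_t *src)
{
    *(uint4*)&dst[0] = *(const uint4*)&src[0];
    *(uint4*)&dst[4] = *(const uint4*)&src[4];
}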

 __host__
-void lbry_sha256d_hash_112(int thr_id, uint32_t threads, uint32_t startNonce, uint32_t *d_outputHash, bool swabNonce, cudaStream_t stream)
+void lbry_sha256d_hash_112(int thr_id, uint32_t threads, uint32_t startNonce, uint32_t *d_outputHash)
 {
-    const int threadsperblock = 256;
+    const int threadsperblock = 512;

     dim3 grid(threads/threadsperblock);
     dim3 block(threadsperblock);

-    lbry_sha256d_gpu_hash_112 <<<grid, block, 64*4, stream>>> (threads, startNonce, swabNonce, (uint64_t*) d_outputHash);
+    lbry_sha256d_gpu_hash_112 <<<grid, block, 64*4>>> (threads, startNonce, (uint64_t*) d_outputHash);
 }

 __host__
 void lbry_sha256_init(int thr_id)
 {
     //cudaMemcpyToSymbol(c_H256, cpu_H256, sizeof(cpu_H256), 0, cudaMemcpyHostToDevice);
     cudaMemcpyToSymbol(c_K, cpu_K, sizeof(cpu_K), 0, cudaMemcpyHostToDevice);
     CUDA_SAFE_CALL(cudaMalloc(&d_resNonces, 4*sizeof(uint32_t)));
 }
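The third launch-config argument (64*4) is the dynamic shared memory size in bytes backing extern __shared__ uint32_t s_K[]: 64 round constants times 4 bytes, staged once per block by the first 64 threads. A minimal sketch of the pattern with illustrative names; note the sketch adds a __syncthreads() where the kernels above leave only the commented-out __threadfence_block():

#include <stdint.h>

__constant__ uint32_t c_tbl[64];

__global__ void kernel_with_staged_table(uint32_t *out)
{
    extern __shared__ uint32_t s_tbl[];          // sized at launch time
    if (threadIdx.x < 64) s_tbl[threadIdx.x] = c_tbl[threadIdx.x];
    __syncthreads();                             // make the staged copy visible
    out[blockIdx.x * blockDim.x + threadIdx.x] = s_tbl[threadIdx.x & 63];
}

// launch: a table of 64 words needs 64*4 bytes of dynamic shared memory
// kernel_with_staged_table<<<grid, block, 64*4>>>(d_out);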
@@ -507,21 +439,53 @@ void lbry_sha256_setBlock_112(uint32_t *pdata, uint32_t *ptarget)
     for (int i=0;i<11;i++) end[i] = cuda_swab32(pdata[16+i]);
     sha256_round_body_host(in, buf, cpu_K);

-    CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_midstate112, buf, 32, 0, cudaMemcpyHostToDevice));
-    CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_dataEnd112, end, sizeof(end), 0, cudaMemcpyHostToDevice));
-    CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_target, &ptarget[6], sizeof(uint64_t), 0, cudaMemcpyHostToDevice));
-    CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_target, &ptarget[6], sizeof(uint64_t), 0, cudaMemcpyHostToDevice));
+    cudaMemcpyToSymbol(c_midstate112, buf, 32, 0, cudaMemcpyHostToDevice);
+    cudaMemcpyToSymbol(c_dataEnd112, end, sizeof(end), 0, cudaMemcpyHostToDevice);
+
+    uint32_t a = buf[0];
+    uint32_t b = buf[1];
+    uint32_t c = buf[2];
+    uint32_t d = buf[3];
+    uint32_t e = buf[4];
+    uint32_t f = buf[5];
+    uint32_t g = buf[6];
+    uint32_t h = buf[7];
+
+    sha256_step1_host(a,b,c,d,e,f,g,h,end[0], cpu_K[0]);
+    sha256_step1_host(h,a,b,c,d,e,f,g,end[1], cpu_K[1]);
+    sha256_step1_host(g,h,a,b,c,d,e,f,end[2], cpu_K[2]);
+    sha256_step1_host(f,g,h,a,b,c,d,e,end[3], cpu_K[3]);
+    sha256_step1_host(e,f,g,h,a,b,c,d,end[4], cpu_K[4]);
+    sha256_step1_host(d,e,f,g,h,a,b,c,end[5], cpu_K[5]);
+    sha256_step1_host(c,d,e,f,g,h,a,b,end[6], cpu_K[6]);
+    sha256_step1_host(b,c,d,e,f,g,h,a,end[7], cpu_K[7]);
+    sha256_step1_host(a,b,c,d,e,f,g,h,end[8], cpu_K[8]);
+    sha256_step1_host(h,a,b,c,d,e,f,g,end[9], cpu_K[9]);
+    sha256_step1_host(g,h,a,b,c,d,e,f,end[10],cpu_K[10]);
+
+    buf[0] = a;
+    buf[1] = b;
+    buf[2] = c;
+    buf[3] = d;
+    buf[4] = e;
+    buf[5] = f;
+    buf[6] = g;
+    buf[7] = h;
+
+    cudaMemcpyToSymbol(c_midbuffer112, buf, 32, 0, cudaMemcpyHostToDevice);
+
+    cudaMemcpyToSymbol(c_target, &ptarget[6], sizeof(uint64_t), 0, cudaMemcpyHostToDevice);
+    cudaMemcpyToSymbol(d_target, &ptarget[6], sizeof(uint64_t), 0, cudaMemcpyHostToDevice);
 }
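The host stops after exactly 11 rounds because end[11] is the nonce slot, and the device picks up with sha2_step1(f,g,h,a,b,c,d,e, in[11], K[11]). The argument shuffle is the usual rotation-by-naming trick, restated here as annotation (this table is mine, not from the patch):

// Each round logically rotates the 8 working variables by one; instead of
// moving data, these kernels rotate the *argument order*. For round t:
//   t mod 8 == 0: (a,b,c,d,e,f,g,h)
//   t mod 8 == 1: (h,a,b,c,d,e,f,g)
//   t mod 8 == 2: (g,h,a,b,c,d,e,f)   <- round 10, the last host round
//   t mod 8 == 3: (f,g,h,a,b,c,d,e)   <- round 11, where the device resumes
//   ... and so on, wrapping every 8 rounds, so after the loop the plain
//   (a..h) names again hold the state in standard order.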

 // ------------------------------------------------------------------------------------------

 static __constant__ uint32_t c_IV[5] = {
     0x67452301u, 0xEFCDAB89u, 0x98BADCFEu, 0x10325476u, 0xC3D2E1F0u
 };

 /*
- * Round functions for RIPEMD-128 and RIPEMD-160.
+ * Round functions for RIPEMD-160.
  */
-#if 1
 #define F1(x, y, z) ((x) ^ (y) ^ (z))
@@ -753,19 +717,21 @@ static __constant__ uint32_t c_IV[5] = {
 }

 __global__
-__launch_bounds__(256,3)
-void lbry_sha256d_gpu_hash_final(const uint32_t threads, const uint32_t startNonce, uint64_t *Hash512, uint32_t *resNonces)
+__launch_bounds__(640,2) /* 640,2 <= 48 regs, 512,2 <= 64 */
+void lbry_sha256d_gpu_hash_final(const uint32_t threads, uint64_t *Hash512, uint32_t *resNonces)
 {
     const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
+    extern __shared__ uint32_t s_K[];
+    if (threadIdx.x < 64U) s_K[threadIdx.x] = c_K[threadIdx.x];
+    //__threadfence_block();
     if (thread < threads)
     {
         uint32_t* input = (uint32_t*) (&Hash512[thread * 8U]);

-        uint32_t __align__(8) buf[8]; // align for vectorize
-        uint32_t dat[16];
-        #pragma unroll
-        for (int i=0; i<8; i++)
-            dat[i] = (input[i]);
+        uint32_t __align__(8) dat[16];
+
+        *(uint2x4*)&dat[0] = *(uint2x4*)&input[0];

         dat[8] = 0x80;

         #pragma unroll
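__launch_bounds__(maxThreadsPerBlock, minBlocksPerMultiprocessor) caps the per-thread register budget so the scheduler can keep the stated occupancy; per the comment in the patch, 640 threads with 2 resident blocks keeps this kernel at or under 48 registers. The general shape, with example values only:

#include <stdint.h>

// maxThreadsPerBlock bounds blockDim at launch; minBlocksPerMultiprocessor
// asks the compiler to fit that many blocks per SM, which in practice forces
// it to spill rather than exceed the implied registers-per-thread limit.
__global__ void __launch_bounds__(640, 2)
example_kernel(const uint32_t *in, uint32_t *out)
{
    const uint32_t i = blockDim.x * blockIdx.x + threadIdx.x;
    out[i] = in[i] + 1u;
}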
@@ -780,15 +746,15 @@ void lbry_sha256d_gpu_hash_final(const uint32_t threads, const uint32_t startNon
         RIPEMD160_ROUND_BODY(dat, h);

+        uint32_t __align__(8) buf[8]; // align for vectorize
         #pragma unroll
         for (int i=0; i<5; i++)
             buf[i] = h[i];

         // second 32 bytes block hash
-        #pragma unroll
-        for (int i=0; i<8; i++)
-            dat[i] = (input[8+i]);
+        *(uint2x4*)&dat[0] = *(uint2x4*)&input[8];

         dat[8] = 0x80;

         #pragma unroll
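dat[8] = 0x80; is the little-endian counterpart of SHA-256's 0x80000000: RIPEMD-160 reads its message words little-endian, so the mandatory 0x80 padding byte lands in the low byte of word 8, right after the 32-byte input. (The length words follow outside this hunk; for a 32-byte block they would encode 256 = 0x100 bits.) Both paddings side by side, as a hedged host-compilable sketch:

#include <stdint.h>

// Pad a single 32-byte message into one 16-word block, both conventions.
static void pad32_sha256(uint32_t w[16])    // big-endian bit ordering
{
    w[8] = 0x80000000u;                     // 0x80 byte at the *top* of word 8
    for (int i = 9; i < 15; i++) w[i] = 0;
    w[15] = 0x100;                          // 256-bit length in the last word
}

static void pad32_ripemd160(uint32_t w[16]) // little-endian bit ordering
{
    w[8] = 0x80u;                           // 0x80 byte at the *bottom* of word 8
    for (int i = 9; i < 14; i++) w[i] = 0;
    w[14] = 0x100;                          // low half of the 64-bit bit count
    w[15] = 0;                              // high half
}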
@@ -810,56 +776,50 @@ void lbry_sha256d_gpu_hash_final(const uint32_t threads, const uint32_t startNon
         for (int i=0;i<5;i++) dat[i+5] = cuda_swab32(h[i]);
         dat[10] = 0x80000000;
         #pragma unroll
-        for (int i=11;i<15;i++) dat[i] = 0;
+        for (int i=11; i<15; i++) dat[i] = 0;
         dat[15] = 0x140;

-        #pragma unroll
-        for (int i=0;i<8;i++) buf[i] = c_H256[i];
+        *(uint2x4*)&buf[0] = *(uint2x4*)&c_H256[0];

-        sha256_round_body(dat, buf, c_K);
+        sha256_round_body(dat, buf, c_K); // s_K uses too much regs

         // second sha256
-        #pragma unroll
-        for (int i=0;i<8;i++) dat[i] = buf[i];
+        *(uint2x4*)&dat[0] = *(uint2x4*)&buf[0];
         dat[8] = 0x80000000;
         #pragma unroll
-        for (int i=9;i<15;i++) dat[i] = 0;
+        for (int i=9; i<15; i++) dat[i] = 0;
         dat[15] = 0x100;

-        #pragma unroll
-        for (int i=0;i<8;i++) buf[i] = c_H256[i];
+        *(uint2x4*)&buf[0] = *(uint2x4*)&c_H256[0];

-        sha256_round_body(dat, buf, c_K);
+        sha256_round_body(dat, buf, s_K);

         // valid nonces
-        uint64_t high = cuda_swab32ll(((uint64_t*)buf)[3]);
+        const uint64_t high = cuda_swab32ll(((uint64_t*)buf)[3]);
         if (high <= d_target[0]) {
             // printf("%08x %08x - %016llx %016llx - %08x %08x\n", buf[7], buf[6], high, d_target[0], c_target[1], c_target[0]);
-            uint32_t nonce = startNonce + thread;
-            resNonces[1] = atomicExch(resNonces, nonce);
+            resNonces[1] = atomicExch(resNonces, thread);
             d_target[0] = high;
         }
     }
 }
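dat[15] = 0x140 because the final SHA-256 input is the two concatenated RIPEMD-160 digests: 2 x 160 bits = 320 = 0x140. The atomicExch keeps up to two candidate nonces without a mutex: the exchange installs the newest winner in resNonces[0] and returns the previous holder, which is parked in resNonces[1]. That reporting idiom on its own, as a hedged sketch with illustrative names:

#include <stdint.h>

__device__ uint64_t g_best; // current best (lowest) high word, like d_target

// Slot 0 takes the newest candidate, slot 1 keeps whoever it displaced,
// so the host sees at most two nonces per scan pass.
__device__ void report_candidate(uint32_t *resNonces, uint32_t thread, uint64_t high)
{
    if (high <= g_best) {
        resNonces[1] = atomicExch(&resNonces[0], thread);
        g_best = high; // unsynchronized update, matching the kernel above
    }
}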

 __host__
-void lbry_sha256d_hash_final(int thr_id, uint32_t threads, uint32_t startNonce, uint32_t *d_inputHash, uint32_t *resNonces, cudaStream_t stream)
+void lbry_sha256d_hash_final(int thr_id, uint32_t threads, uint32_t startNonce, uint32_t *d_inputHash, uint32_t *resNonces)
 {
-    const int threadsperblock = 256;
+    const int threadsperblock = 512;

     dim3 grid(threads/threadsperblock);
     dim3 block(threadsperblock);

-    CUDA_SAFE_CALL(cudaMemset(d_resNonces, 0xFF, 2 * sizeof(uint32_t)));
-    cudaThreadSynchronize();
-
-    lbry_sha256d_gpu_hash_final <<<grid, block, 0, stream>>> (threads, startNonce, (uint64_t*) d_inputHash, d_resNonces);
+    cudaMemset(d_resNonces, 0xFF, 2 * sizeof(uint32_t));
+
+    cudaThreadSynchronize();
+    lbry_sha256d_gpu_hash_final <<<grid, block, 64*4>>> (threads, (uint64_t*) d_inputHash, d_resNonces);

-    CUDA_SAFE_CALL(cudaMemcpy(resNonces, d_resNonces, 2 * sizeof(uint32_t), cudaMemcpyDeviceToHost));
+    cudaMemcpy(resNonces, d_resNonces, 2 * sizeof(uint32_t), cudaMemcpyDeviceToHost);
+    if (resNonces[0] == resNonces[1]) {
+        resNonces[1] = UINT32_MAX;
+    }
+    if (resNonces[0] != UINT32_MAX) resNonces[0] += startNonce;
+    if (resNonces[1] != UINT32_MAX) resNonces[1] += startNonce;
 }
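Since the kernel now records thread indices and the wrapper folds startNonce back in, a caller only has to test for UINT32_MAX. A hedged usage sketch; the scan-loop scaffolding and the placement of the intermediate 512-bit stage (which lives in another file) are illustrative assumptions, not code from this patch:

#include <stdint.h>

extern void lbry_sha256d_hash_112(int thr_id, uint32_t threads, uint32_t startNonce, uint32_t *d_outputHash);
extern void lbry_sha256d_hash_final(int thr_id, uint32_t threads, uint32_t startNonce, uint32_t *d_inputHash, uint32_t *resNonces);

// Hypothetical scan-pass fragment around the two wrappers above.
void scan_pass(int thr_id, uint32_t threads, uint32_t startNonce,
               uint32_t *d_hashes, uint32_t *d_inputHash)
{
    uint32_t resNonces[2];

    lbry_sha256d_hash_112(thr_id, threads, startNonce, d_hashes);
    // ... the stage producing the 512-bit hashes runs here (separate file) ...
    lbry_sha256d_hash_final(thr_id, threads, startNonce, d_inputHash, resNonces);

    if (resNonces[0] != UINT32_MAX) {
        // resNonces[0] is already absolute (startNonce was added on the host);
        // resNonces[1] may hold a second candidate from the same pass.
    }
}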