From 883fb19908fe069f1ce29cf77a48c029c80a630b Mon Sep 17 00:00:00 2001 From: Tanguy Pruvot Date: Thu, 28 Jul 2016 05:57:46 +0200 Subject: [PATCH] lbry: some changes from alexis, remove shared mem 105 LBC tipped ;) --- lbry/cuda_sha256_lbry.cu | 415 ++++++++++++++++++++++----------------- lbry/cuda_sha512_lbry.cu | 160 ++++++--------- lbry/lbry.cu | 45 +++-- 3 files changed, 315 insertions(+), 305 deletions(-) diff --git a/lbry/cuda_sha256_lbry.cu b/lbry/cuda_sha256_lbry.cu index 1af73f4..b6722c7 100644 --- a/lbry/cuda_sha256_lbry.cu +++ b/lbry/cuda_sha256_lbry.cu @@ -1,6 +1,6 @@ /* * sha256 + ripemd CUDA implementation. - * tpruvot and alexis78 + * tpruvot and Provos Alexis - JUL 2016 */ #include @@ -31,12 +31,13 @@ __constant__ static uint32_t _ALIGN(16) c_K[64] = { 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2 }; -static __thread uint32_t* d_resNonces; __constant__ static uint32_t _ALIGN(8) c_target[2]; __device__ uint64_t d_target[1]; #ifdef __INTELLISENSE__ #define atomicExch(p,y) y +#define __byte_perm(x,y,c) x +#define __CUDA_ARCH__ 520 #endif // ------------------------------------------------------------------------------------------------ @@ -181,19 +182,6 @@ __device__ __forceinline__ uint32_t ssg2_1(const uint32_t x) return xor3b(ROTR32(x,17),ROTR32(x,19),(x>>10)); } -__device__ __forceinline__ uint32_t andor32(const uint32_t a, const uint32_t b, const uint32_t c) -{ - uint32_t result; - asm("{ .reg .u32 m,n,o; // andor32 \n\t" - "and.b32 m, %1, %2;\n\t" - " or.b32 n, %1, %2;\n\t" - "and.b32 o, n, %3;\n\t" - " or.b32 %0, m, o ;\n\t" - "}\n\t" : "=r"(result) : "r"(a), "r"(b), "r"(c) - ); - return result; -} - __device__ __forceinline__ uint2 vectorizeswap(uint64_t v) { uint2 result; @@ -202,59 +190,42 @@ __device__ __forceinline__ uint2 vectorizeswap(uint64_t v) return result; } -__device__ -__forceinline__ -static void sha2_step1(uint32_t a, uint32_t b, uint32_t c, uint32_t &d, uint32_t e, uint32_t f, uint32_t g, uint32_t &h, uint32_t in, const uint32_t Kshared) -{ - uint32_t t1 = bsg2_1(e) + ((((f) ^ (g)) & (e)) ^ (g)) + Kshared + in; - d = d + h + t1; - h += t1 + bsg2_0(a) + (((b) & (c)) | (((b) | (c)) & (a))); +__device__ __forceinline__ +uint32_t Maj(const uint32_t a, const uint32_t b, const uint32_t c) { //Sha256 - Maj - andor + uint32_t result; +#if __CUDA_ARCH__ >= 500 && CUDA_VERSION >= 7050 + asm ("lop3.b32 %0, %1, %2, %3, 0xE8;" : "=r"(result) : "r"(a), "r"(b),"r"(c)); // 0xE8 = ((0xF0 & (0xCC | 0xAA)) | (0xCC & 0xAA)) +#else + result = ((a & (b | c)) | (b & c)); +#endif + return result; } -__device__ -static void sha2_step2(uint32_t a, uint32_t b, uint32_t c, uint32_t &d, uint32_t e, uint32_t f, uint32_t g, uint32_t &h, - uint32_t* in, uint32_t pc, const uint32_t Kshared) +__device__ __forceinline__ +static void sha2_step1(uint32_t a, uint32_t b, uint32_t c, uint32_t &d, + uint32_t e, uint32_t f, uint32_t g, uint32_t &h, uint32_t in, const uint32_t Kshared) { - uint32_t t1,t2; - - int pcidx1 = (pc-2) & 0xF; - int pcidx2 = (pc-7) & 0xF; - int pcidx3 = (pc-15) & 0xF; - - uint32_t inx0 = in[pc]; - uint32_t inx1 = in[pcidx1]; - uint32_t inx2 = in[pcidx2]; - uint32_t inx3 = in[pcidx3]; + const uint32_t t1 = h + bsg2_1(e) + xandx(e, f, g) + Kshared + in; + h = t1 + bsg2_0(a) + Maj(a, b, c); + d+= t1; +} - uint32_t ssg21 = ssg2_1(inx1); - uint32_t ssg20 = ssg2_0(inx3); - uint32_t vxandx = xandx(e, f, g); - uint32_t bsg21 = bsg2_1(e); - uint32_t bsg20 = bsg2_0(a); - uint32_t andorv = andor32(a,b,c); +#define sha2_step2 
sha2_step1 - in[pc] = ssg21 + inx2 + ssg20 + inx0; - - t1 = h + bsg21 + vxandx + Kshared + in[pc]; - t2 = bsg20 + andorv; - d = d + t1; - h = t1 + t2; -} __device__ __forceinline__ -static void sha256_round_first(uint32_t* in,uint32_t *buf, uint32_t* state, uint32_t* const Kshared) +static void sha256_round_first(uint32_t* in, uint32_t *buf, uint32_t* state, uint32_t* const Kshared) { - uint32_t a = buf[0]; + uint32_t a = buf[0] + in[11]; uint32_t b = buf[1]; uint32_t c = buf[2]; uint32_t d = buf[3]; - uint32_t e = buf[4]; + uint32_t e = buf[4] + in[11]; uint32_t f = buf[5]; uint32_t g = buf[6]; uint32_t h = buf[7]; - // 10 first steps made on host - sha2_step1(f,g,h,a,b,c,d,e,in[11],Kshared[11]); + // 12 first steps made on host sha2_step1(e,f,g,h,a,b,c,d,in[12],Kshared[12]); sha2_step1(d,e,f,g,h,a,b,c,in[13],Kshared[13]); sha2_step1(c,d,e,f,g,h,a,b,in[14],Kshared[14]); @@ -263,35 +234,39 @@ static void sha256_round_first(uint32_t* in,uint32_t *buf, uint32_t* state, uint #pragma unroll for (int i=0; i<3; i++) { - sha2_step2(a,b,c,d,e,f,g,h,in,0, Kshared[16+16*i]); - sha2_step2(h,a,b,c,d,e,f,g,in,1, Kshared[17+16*i]); - sha2_step2(g,h,a,b,c,d,e,f,in,2, Kshared[18+16*i]); - sha2_step2(f,g,h,a,b,c,d,e,in,3, Kshared[19+16*i]); - sha2_step2(e,f,g,h,a,b,c,d,in,4, Kshared[20+16*i]); - sha2_step2(d,e,f,g,h,a,b,c,in,5, Kshared[21+16*i]); - sha2_step2(c,d,e,f,g,h,a,b,in,6, Kshared[22+16*i]); - sha2_step2(b,c,d,e,f,g,h,a,in,7, Kshared[23+16*i]); - sha2_step2(a,b,c,d,e,f,g,h,in,8, Kshared[24+16*i]); - sha2_step2(h,a,b,c,d,e,f,g,in,9, Kshared[25+16*i]); - sha2_step2(g,h,a,b,c,d,e,f,in,10,Kshared[26+16*i]); - sha2_step2(f,g,h,a,b,c,d,e,in,11,Kshared[27+16*i]); - sha2_step2(e,f,g,h,a,b,c,d,in,12,Kshared[28+16*i]); - sha2_step2(d,e,f,g,h,a,b,c,in,13,Kshared[29+16*i]); - sha2_step2(c,d,e,f,g,h,a,b,in,14,Kshared[30+16*i]); - sha2_step2(b,c,d,e,f,g,h,a,in,15,Kshared[31+16*i]); + #pragma unroll 16 + for (int j = 0; j < 16; j++){ + in[j] = in[j] + in[(j + 9) & 15] + ssg2_0(in[(j + 1) & 15]) + ssg2_1(in[(j + 14) & 15]); + } + sha2_step2(a,b,c,d,e,f,g,h,in[0], Kshared[16+16*i]); + sha2_step2(h,a,b,c,d,e,f,g,in[1], Kshared[17+16*i]); + sha2_step2(g,h,a,b,c,d,e,f,in[2], Kshared[18+16*i]); + sha2_step2(f,g,h,a,b,c,d,e,in[3], Kshared[19+16*i]); + sha2_step2(e,f,g,h,a,b,c,d,in[4], Kshared[20+16*i]); + sha2_step2(d,e,f,g,h,a,b,c,in[5], Kshared[21+16*i]); + sha2_step2(c,d,e,f,g,h,a,b,in[6], Kshared[22+16*i]); + sha2_step2(b,c,d,e,f,g,h,a,in[7], Kshared[23+16*i]); + sha2_step2(a,b,c,d,e,f,g,h,in[8], Kshared[24+16*i]); + sha2_step2(h,a,b,c,d,e,f,g,in[9], Kshared[25+16*i]); + sha2_step2(g,h,a,b,c,d,e,f,in[10],Kshared[26+16*i]); + sha2_step2(f,g,h,a,b,c,d,e,in[11],Kshared[27+16*i]); + sha2_step2(e,f,g,h,a,b,c,d,in[12],Kshared[28+16*i]); + sha2_step2(d,e,f,g,h,a,b,c,in[13],Kshared[29+16*i]); + sha2_step2(c,d,e,f,g,h,a,b,in[14],Kshared[30+16*i]); + sha2_step2(b,c,d,e,f,g,h,a,in[15],Kshared[31+16*i]); } - buf[ 0] = state[0] + a; - buf[ 1] = state[1] + b; - buf[ 2] = state[2] + c; - buf[ 3] = state[3] + d; - buf[ 4] = state[4] + e; - buf[ 5] = state[5] + f; - buf[ 6] = state[6] + g; - buf[ 7] = state[7] + h; + buf[0] = state[0] + a; + buf[1] = state[1] + b; + buf[2] = state[2] + c; + buf[3] = state[3] + d; + buf[4] = state[4] + e; + buf[5] = state[5] + f; + buf[6] = state[6] + g; + buf[7] = state[7] + h; } -__device__ +__device__ __forceinline__ static void sha256_round_body(uint32_t* in, uint32_t* state, uint32_t* const Kshared) { uint32_t a = state[0]; @@ -323,22 +298,26 @@ static void sha256_round_body(uint32_t* in, 
uint32_t* state, uint32_t* const Ksh #pragma unroll for (int i=0; i<3; i++) { - sha2_step2(a,b,c,d,e,f,g,h,in,0, Kshared[16+16*i]); - sha2_step2(h,a,b,c,d,e,f,g,in,1, Kshared[17+16*i]); - sha2_step2(g,h,a,b,c,d,e,f,in,2, Kshared[18+16*i]); - sha2_step2(f,g,h,a,b,c,d,e,in,3, Kshared[19+16*i]); - sha2_step2(e,f,g,h,a,b,c,d,in,4, Kshared[20+16*i]); - sha2_step2(d,e,f,g,h,a,b,c,in,5, Kshared[21+16*i]); - sha2_step2(c,d,e,f,g,h,a,b,in,6, Kshared[22+16*i]); - sha2_step2(b,c,d,e,f,g,h,a,in,7, Kshared[23+16*i]); - sha2_step2(a,b,c,d,e,f,g,h,in,8, Kshared[24+16*i]); - sha2_step2(h,a,b,c,d,e,f,g,in,9, Kshared[25+16*i]); - sha2_step2(g,h,a,b,c,d,e,f,in,10,Kshared[26+16*i]); - sha2_step2(f,g,h,a,b,c,d,e,in,11,Kshared[27+16*i]); - sha2_step2(e,f,g,h,a,b,c,d,in,12,Kshared[28+16*i]); - sha2_step2(d,e,f,g,h,a,b,c,in,13,Kshared[29+16*i]); - sha2_step2(c,d,e,f,g,h,a,b,in,14,Kshared[30+16*i]); - sha2_step2(b,c,d,e,f,g,h,a,in,15,Kshared[31+16*i]); + #pragma unroll 16 + for (int j = 0; j < 16; j++) { + in[j] = in[j] + in[(j + 9) & 15] + ssg2_0(in[(j + 1) & 15]) + ssg2_1(in[(j + 14) & 15]); + } + sha2_step2(a, b, c, d, e, f, g, h, in[0], Kshared[16 + 16 * i]); + sha2_step2(h, a, b, c, d, e, f, g, in[1], Kshared[17 + 16 * i]); + sha2_step2(g, h, a, b, c, d, e, f, in[2], Kshared[18 + 16 * i]); + sha2_step2(f, g, h, a, b, c, d, e, in[3], Kshared[19 + 16 * i]); + sha2_step2(e, f, g, h, a, b, c, d, in[4], Kshared[20 + 16 * i]); + sha2_step2(d, e, f, g, h, a, b, c, in[5], Kshared[21 + 16 * i]); + sha2_step2(c, d, e, f, g, h, a, b, in[6], Kshared[22 + 16 * i]); + sha2_step2(b, c, d, e, f, g, h, a, in[7], Kshared[23 + 16 * i]); + sha2_step2(a, b, c, d, e, f, g, h, in[8], Kshared[24 + 16 * i]); + sha2_step2(h, a, b, c, d, e, f, g, in[9], Kshared[25 + 16 * i]); + sha2_step2(g, h, a, b, c, d, e, f, in[10], Kshared[26 + 16 * i]); + sha2_step2(f, g, h, a, b, c, d, e, in[11], Kshared[27 + 16 * i]); + sha2_step2(e, f, g, h, a, b, c, d, in[12], Kshared[28 + 16 * i]); + sha2_step2(d, e, f, g, h, a, b, c, in[13], Kshared[29 + 16 * i]); + sha2_step2(c, d, e, f, g, h, a, b, in[14], Kshared[30 + 16 * i]); + sha2_step2(b, c, d, e, f, g, h, a, in[15], Kshared[31 + 16 * i]); } state[0] += a; @@ -351,19 +330,92 @@ static void sha256_round_body(uint32_t* in, uint32_t* state, uint32_t* const Ksh state[7] += h; } -__device__ -uint64_t cuda_swab32ll(uint64_t x) { - return MAKE_ULONGLONG(cuda_swab32(_LODWORD(x)), cuda_swab32(_HIDWORD(x))); +__device__ __forceinline__ +static void sha256_round_body_final(uint32_t* in, uint32_t* state, uint32_t* const Kshared) +{ + uint32_t a = state[0]; + uint32_t b = state[1]; + uint32_t c = state[2]; + uint32_t d = state[3]; + uint32_t e = state[4]; + uint32_t f = state[5]; + uint32_t g = state[6]; + uint32_t h = state[7]; + + sha2_step1(a,b,c,d,e,f,g,h,in[0], Kshared[0]); + sha2_step1(h,a,b,c,d,e,f,g,in[1], Kshared[1]); + sha2_step1(g,h,a,b,c,d,e,f,in[2], Kshared[2]); + sha2_step1(f,g,h,a,b,c,d,e,in[3], Kshared[3]); + sha2_step1(e,f,g,h,a,b,c,d,in[4], Kshared[4]); + sha2_step1(d,e,f,g,h,a,b,c,in[5], Kshared[5]); + sha2_step1(c,d,e,f,g,h,a,b,in[6], Kshared[6]); + sha2_step1(b,c,d,e,f,g,h,a,in[7], Kshared[7]); + sha2_step1(a,b,c,d,e,f,g,h,in[8], Kshared[8]); + sha2_step1(h,a,b,c,d,e,f,g,in[9], Kshared[9]); + sha2_step1(g,h,a,b,c,d,e,f,in[10], Kshared[10]); + sha2_step1(f,g,h,a,b,c,d,e,in[11], Kshared[11]); + sha2_step1(e,f,g,h,a,b,c,d,in[12], Kshared[12]); + sha2_step1(d,e,f,g,h,a,b,c,in[13], Kshared[13]); + sha2_step1(c,d,e,f,g,h,a,b,in[14], Kshared[14]); + sha2_step1(b,c,d,e,f,g,h,a,in[15], Kshared[15]); 
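+	// rounds 16..61: the 16-word schedule is expanded in place (indices
+	// taken mod 16) and consumed; rounds 62..63 and six of the eight final
+	// state additions are skipped, since only state[6] and state[7] feed
+	// the 64-bit target comparison in lbry_sha256d_gpu_hash_final().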
+ + #pragma unroll + for (int i=0; i<2; i++) + { + #pragma unroll 16 + for (int j = 0; j < 16; j++) { + in[j] = in[j] + in[(j + 9) & 15] + ssg2_0(in[(j + 1) & 15]) + ssg2_1(in[(j + 14) & 15]); + } + sha2_step2(a, b, c, d, e, f, g, h, in[0], Kshared[16 + 16 * i]); + sha2_step2(h, a, b, c, d, e, f, g, in[1], Kshared[17 + 16 * i]); + sha2_step2(g, h, a, b, c, d, e, f, in[2], Kshared[18 + 16 * i]); + sha2_step2(f, g, h, a, b, c, d, e, in[3], Kshared[19 + 16 * i]); + sha2_step2(e, f, g, h, a, b, c, d, in[4], Kshared[20 + 16 * i]); + sha2_step2(d, e, f, g, h, a, b, c, in[5], Kshared[21 + 16 * i]); + sha2_step2(c, d, e, f, g, h, a, b, in[6], Kshared[22 + 16 * i]); + sha2_step2(b, c, d, e, f, g, h, a, in[7], Kshared[23 + 16 * i]); + sha2_step2(a, b, c, d, e, f, g, h, in[8], Kshared[24 + 16 * i]); + sha2_step2(h, a, b, c, d, e, f, g, in[9], Kshared[25 + 16 * i]); + sha2_step2(g, h, a, b, c, d, e, f, in[10], Kshared[26 + 16 * i]); + sha2_step2(f, g, h, a, b, c, d, e, in[11], Kshared[27 + 16 * i]); + sha2_step2(e, f, g, h, a, b, c, d, in[12], Kshared[28 + 16 * i]); + sha2_step2(d, e, f, g, h, a, b, c, in[13], Kshared[29 + 16 * i]); + sha2_step2(c, d, e, f, g, h, a, b, in[14], Kshared[30 + 16 * i]); + sha2_step2(b, c, d, e, f, g, h, a, in[15], Kshared[31 + 16 * i]); + } + #pragma unroll 16 + for (int j = 0; j < 16; j++) { + in[j] = in[j] + in[(j + 9) & 15] + ssg2_0(in[(j + 1) & 15]) + ssg2_1(in[(j + 14) & 15]); + } + sha2_step2(a, b, c, d, e, f, g, h, in[0], Kshared[16 + 16 * 2]); + sha2_step2(h, a, b, c, d, e, f, g, in[1], Kshared[17 + 16 * 2]); + sha2_step2(g, h, a, b, c, d, e, f, in[2], Kshared[18 + 16 * 2]); + sha2_step2(f, g, h, a, b, c, d, e, in[3], Kshared[19 + 16 * 2]); + sha2_step2(e, f, g, h, a, b, c, d, in[4], Kshared[20 + 16 * 2]); + sha2_step2(d, e, f, g, h, a, b, c, in[5], Kshared[21 + 16 * 2]); + sha2_step2(c, d, e, f, g, h, a, b, in[6], Kshared[22 + 16 * 2]); + sha2_step2(b, c, d, e, f, g, h, a, in[7], Kshared[23 + 16 * 2]); + sha2_step2(a, b, c, d, e, f, g, h, in[8], Kshared[24 + 16 * 2]); + sha2_step2(h, a, b, c, d, e, f, g, in[9], Kshared[25 + 16 * 2]); + sha2_step2(g, h, a, b, c, d, e, f, in[10], Kshared[26 + 16 * 2]); + sha2_step2(f, g, h, a, b, c, d, e, in[11], Kshared[27 + 16 * 2]); + sha2_step2(e, f, g, h, a, b, c, d, in[12], Kshared[28 + 16 * 2]); + sha2_step2(d, e, f, g, h, a, b, c, in[13], Kshared[29 + 16 * 2]); + + state[6] += g; + state[7] += h; } __global__ -__launch_bounds__(512,2) /* to force 64 regs */ +#if __CUDA_ARCH__ > 500 +__launch_bounds__(1024,2) /* to force 32 regs */ +#else +__launch_bounds__(768,2) /* to force 32 regs */ +#endif void lbry_sha256d_gpu_hash_112(const uint32_t threads, const uint32_t startNonce, uint64_t *outputHash) { const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); - extern __shared__ uint32_t s_K[]; - if (threadIdx.x < 64U) s_K[threadIdx.x] = c_K[threadIdx.x]; - //__threadfence_block(); + uint32_t buf[8], state[8]; if (thread < threads) { uint32_t dat[16]; @@ -375,8 +427,6 @@ void lbry_sha256d_gpu_hash_112(const uint32_t threads, const uint32_t startNonce dat[14] = 0; dat[15] = 0x380; - uint32_t __align__(8) buf[8], state[8]; - *(uint2x4*)&state[0] = *(uint2x4*)&c_midstate112[0]; *(uint2x4*)&buf[0] = *(uint2x4*)&c_midbuffer112[0]; @@ -394,7 +444,7 @@ void lbry_sha256d_gpu_hash_112(const uint32_t threads, const uint32_t startNonce *(uint2x4*)&buf[0] = *(uint2x4*)&c_H256[0]; - sha256_round_body(dat, buf, s_K); + sha256_round_body(dat, buf, c_K); //no shared mem at all // output *(uint2*)&buf[0] = 
vectorizeswap(((uint64_t*)buf)[0]); @@ -402,32 +452,33 @@ void lbry_sha256d_gpu_hash_112(const uint32_t threads, const uint32_t startNonce *(uint2*)&buf[4] = vectorizeswap(((uint64_t*)buf)[2]); *(uint2*)&buf[6] = vectorizeswap(((uint64_t*)buf)[3]); - *(uint2x4*)&outputHash[thread*8U] = *(uint2x4*)&buf[0]; + *(uint2x4*)&outputHash[thread<<3] = *(uint2x4*)&buf[0]; } } __host__ void lbry_sha256d_hash_112(int thr_id, uint32_t threads, uint32_t startNonce, uint32_t *d_outputHash) { - const int threadsperblock = 512; + int dev_id = device_map[thr_id]; - dim3 grid(threads/threadsperblock); + const uint32_t threadsperblock = (device_sm[dev_id] <= 500) ? 768 : 1024; + + dim3 grid((threads + threadsperblock - 1) / threadsperblock); dim3 block(threadsperblock); - lbry_sha256d_gpu_hash_112 <<>> (threads, startNonce, (uint64_t*) d_outputHash); + lbry_sha256d_gpu_hash_112 <<>> (threads, startNonce, (uint64_t*) d_outputHash); } __host__ void lbry_sha256_init(int thr_id) { cudaMemcpyToSymbol(c_K, cpu_K, sizeof(cpu_K), 0, cudaMemcpyHostToDevice); - CUDA_SAFE_CALL(cudaMalloc(&d_resNonces, 4*sizeof(uint32_t))); } __host__ void lbry_sha256_free(int thr_id) { - cudaFree(d_resNonces); + } __host__ @@ -462,6 +513,7 @@ void lbry_sha256_setBlock_112(uint32_t *pdata, uint32_t *ptarget) sha256_step1_host(a,b,c,d,e,f,g,h,end[8], cpu_K[8]); sha256_step1_host(h,a,b,c,d,e,f,g,end[9], cpu_K[9]); sha256_step1_host(g,h,a,b,c,d,e,f,end[10],cpu_K[10]); + sha256_step1_host(f, g, h, a, b, c, d, e, 0, cpu_K[11]); buf[0] = a; buf[1] = b; @@ -484,59 +536,58 @@ static __constant__ uint32_t c_IV[5] = { 0x67452301u, 0xEFCDAB89u, 0x98BADCFEu, 0x10325476u, 0xC3D2E1F0u }; +__device__ __forceinline__ +static uint32_t ROTATE(const uint32_t x,const int r){ + if(r==8) + return __byte_perm(x, 0, 0x2103); + else + return ROTL32(x,r); +} + /* * Round functions for RIPEMD-160. */ -#if 1 #define F1(x, y, z) ((x) ^ (y) ^ (z)) -#define F2(x, y, z) ((((y) ^ (z)) & (x)) ^ (z)) -#define F3(x, y, z) (((x) | ~(y)) ^ (z)) -#define F4(x, y, z) ((((x) ^ (y)) & (z)) ^ (y)) -#define F5(x, y, z) ((x) ^ ((y) | ~(z))) -#else -#define F1(x, y, z) xor3b(x,y,z) -#define F2(x, y, z) xandx(x,y,z) -#define F3(x, y, z) xornot64(x,y,z) -#define F4(x, y, z) xandx(z,x,y) -#define F5(x, y, z) xornt64(x,y,z) -#endif +#define F2(x, y, z) ((x & (y ^ z)) ^ z) +#define F3(x, y, z) ((x | ~y) ^ z) +#define F4(x, y, z) (y ^ ((x ^ y) & z)) +#define F5(x, y, z) (x ^ (y | ~z)) /* * Round constants for RIPEMD-160. 
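 * One constant per group of 16 steps: K11..K15 for the left line,
 * K21..K25 for the parallel right line.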
*/ -#define K11 0x00000000u -#define K12 0x5A827999u -#define K13 0x6ED9EBA1u -#define K14 0x8F1BBCDCu -#define K15 0xA953FD4Eu - -#define K21 0x50A28BE6u -#define K22 0x5C4DD124u -#define K23 0x6D703EF3u -#define K24 0x7A6D76E9u -#define K25 0x00000000u +#define K11 0 +#define K12 0x5A827999 +#define K13 0x6ED9EBA1 +#define K14 0x8F1BBCDC +#define K15 0xA953FD4E + +#define K21 0x50A28BE6 +#define K22 0x5C4DD124 +#define K23 0x6D703EF3 +#define K24 0x7A6D76E9 +#define K25 0 #define RR(a, b, c, d, e, f, s, r, k) { \ - a = SPH_T32(ROTL32(SPH_T32(a + f(b, c, d) + r + k), s) + e); \ + a = e + ROTATE((a + r + k + f(b, c, d)), s); \ c = ROTL32(c, 10); \ } #define ROUND1(a, b, c, d, e, f, s, r, k) \ - RR(a ## 1, b ## 1, c ## 1, d ## 1, e ## 1, f, s, r, K1 ## k) + RR(a[0], b[0], c[0], d[0], e[0], f, s, r, K1 ## k) #define ROUND2(a, b, c, d, e, f, s, r, k) \ - RR(a ## 2, b ## 2, c ## 2, d ## 2, e ## 2, f, s, r, K2 ## k) + RR(a[1], b[1], c[1], d[1], e[1], f, s, r, K2 ## k) #define RIPEMD160_ROUND_BODY(in, h) { \ - uint32_t A1, B1, C1, D1, E1; \ - uint32_t A2, B2, C2, D2, E2; \ + uint32_t A[2], B[2], C[2], D[2], E[2]; \ uint32_t tmp; \ \ - A1 = A2 = h[0]; \ - B1 = B2 = h[1]; \ - C1 = C2 = h[2]; \ - D1 = D2 = h[3]; \ - E1 = E2 = h[4]; \ + A[0] = A[1] = h[0]; \ + B[0] = B[1] = h[1]; \ + C[0] = C[1] = h[2]; \ + D[0] = D[1] = h[3]; \ + E[0] = E[1] = h[4]; \ \ ROUND1(A, B, C, D, E, F1, 11, in[ 0], 1); \ ROUND1(E, A, B, C, D, F1, 14, in[ 1], 1); \ @@ -708,29 +759,40 @@ static __constant__ uint32_t c_IV[5] = { ROUND2(C, D, E, A, B, F1, 11, in[ 9], 5); \ ROUND2(B, C, D, E, A, F1, 11, in[11], 5); \ \ - tmp = (h[1] + C1 + D2); \ - h[1] = (h[2] + D1 + E2); \ - h[2] = (h[3] + E1 + A2); \ - h[3] = (h[4] + A1 + B2); \ - h[4] = (h[0] + B1 + C2); \ + tmp = h[1] + C[0] + D[1]; \ + h[1] = h[2] + D[0] + E[1]; \ + h[2] = h[3] + E[0] + A[1]; \ + h[3] = h[4] + A[0] + B[1]; \ + h[4] = h[0] + B[0] + C[1]; \ h[0] = tmp; \ } +__device__ __forceinline__ +uint64_t swab64ll(const uint32_t x, const uint32_t y) { + uint64_t r; + asm("prmt.b32 %1, %1, 0, 0x0123; // swab64ll\n\t" + "prmt.b32 %2, %2, 0, 0x0123;\n\t" + "mov.b64 %0, {%1,%2};\n\t" + : "=l"(r): "r"(x), "r"(y) ); + return r; +} + __global__ -__launch_bounds__(640,2) /* 640,2 <= 48 regs, 512,2 <= 64 */ +#if __CUDA_ARCH__ > 500 +__launch_bounds__(1024,2) /* to force 32 regs */ +#else +__launch_bounds__(768,2) /* to force 32 regs */ +#endif void lbry_sha256d_gpu_hash_final(const uint32_t threads, uint64_t *Hash512, uint32_t *resNonces) { const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); - extern __shared__ uint32_t s_K[]; - if (threadIdx.x < 64U) s_K[threadIdx.x] = c_K[threadIdx.x]; - //__threadfence_block(); - if (thread < threads) + uint32_t dat[16]; + uint32_t h[5]; +// if (thread < threads) { uint32_t* input = (uint32_t*) (&Hash512[thread * 8U]); - uint32_t __align__(8) dat[16]; - - *(uint2x4*)&dat[0] = *(uint2x4*)&input[0]; + *(uint2x4*)&dat[0] = __ldg4((uint2x4*)&input[0]); dat[8] = 0x80; @@ -739,7 +801,6 @@ void lbry_sha256d_gpu_hash_final(const uint32_t threads, uint64_t *Hash512, uint dat[14] = 0x100; // size in bits - uint32_t h[5]; #pragma unroll for (int i=0; i<5; i++) h[i] = c_IV[i]; @@ -753,12 +814,12 @@ void lbry_sha256d_gpu_hash_final(const uint32_t threads, uint64_t *Hash512, uint // second 32 bytes block hash - *(uint2x4*)&dat[0] = *(uint2x4*)&input[8]; + *(uint2x4*)&dat[0] = __ldg4((uint2x4*)&input[8]); dat[8] = 0x80; #pragma unroll - for (int i=9;i<16;i++) dat[i] = 0; + for (int i=9; i<16; i++) dat[i] = 0; dat[14] = 0x100; // size in bits 
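Note: the swab64ll helper added above packs two byte-swapped 32-bit words into one 64-bit value with a prmt-based sequence, so the old cuda_swab32ll(((uint64_t*)buf)[3]) becomes the equivalent swab64ll(buf[6], buf[7]). A plain C reference of what the PTX computes, useful for sanity-checking (the _ref names are illustrative, not part of the patch):

static inline uint32_t bswap32_ref(const uint32_t v) {
	// same permutation as prmt.b32 with selector 0x0123: reverse the 4 bytes
	return (v >> 24) | ((v >> 8) & 0x0000FF00u) | ((v << 8) & 0x00FF0000u) | (v << 24);
}

static inline uint64_t swab64ll_ref(const uint32_t x, const uint32_t y) {
	// mov.b64 %0, {%1,%2} places the first register in the low 32 bits
	return ((uint64_t)bswap32_ref(y) << 32) | bswap32_ref(x);
}
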
@@ -771,9 +832,9 @@ void lbry_sha256d_gpu_hash_final(const uint32_t threads, uint64_t *Hash512, uint // first final sha256 #pragma unroll - for (int i=0;i<5;i++) dat[i] = cuda_swab32(buf[i]); + for (int i=0; i<5; i++) dat[i] = cuda_swab32(buf[i]); #pragma unroll - for (int i=0;i<5;i++) dat[i+5] = cuda_swab32(h[i]); + for (int i=0; i<5; i++) dat[i+5] = cuda_swab32(h[i]); dat[10] = 0x80000000; #pragma unroll for (int i=11; i<15; i++) dat[i] = 0; @@ -781,45 +842,39 @@ void lbry_sha256d_gpu_hash_final(const uint32_t threads, uint64_t *Hash512, uint *(uint2x4*)&buf[0] = *(uint2x4*)&c_H256[0]; - sha256_round_body(dat, buf, c_K); // s_K uses too much regs + sha256_round_body(dat, buf, c_K); // s_K uses too many regs // second sha256 *(uint2x4*)&dat[0] = *(uint2x4*)&buf[0]; + *(uint2x4*)&buf[0] = *(uint2x4*)&c_H256[0]; + dat[8] = 0x80000000; #pragma unroll for (int i=9; i<15; i++) dat[i] = 0; dat[15] = 0x100; - *(uint2x4*)&buf[0] = *(uint2x4*)&c_H256[0]; - - sha256_round_body(dat, buf, s_K); + sha256_round_body_final(dat, buf, c_K); // valid nonces - const uint64_t high = cuda_swab32ll(((uint64_t*)buf)[3]); + const uint64_t high = swab64ll(buf[6], buf[7]); if (high <= d_target[0]) { - resNonces[1] = atomicExch(resNonces, thread); + //resNonces[1] = atomicExch(resNonces, thread); + resNonces[1] = resNonces[0]; + resNonces[0] = thread; d_target[0] = high; } } } __host__ -void lbry_sha256d_hash_final(int thr_id, uint32_t threads, uint32_t startNonce, uint32_t *d_inputHash, uint32_t *resNonces) +void lbry_sha256d_hash_final(int thr_id, uint32_t threads, uint32_t *d_inputHash, uint32_t *d_resNonce) { - const int threadsperblock = 512; + int dev_id = device_map[thr_id]; + const uint32_t threadsperblock = (device_sm[dev_id] > 500) ? 1024 : 768; - dim3 grid(threads/threadsperblock); + dim3 grid((threads + threadsperblock - 1) / threadsperblock); dim3 block(threadsperblock); - cudaMemset(d_resNonces, 0xFF, 2 * sizeof(uint32_t)); - - lbry_sha256d_gpu_hash_final <<>> (threads, (uint64_t*) d_inputHash, d_resNonces); - - cudaMemcpy(resNonces, d_resNonces, 2 * sizeof(uint32_t), cudaMemcpyDeviceToHost); - if (resNonces[0] == resNonces[1]) { - resNonces[1] = UINT32_MAX; - } - if (resNonces[0] != UINT32_MAX) resNonces[0] += startNonce; - if (resNonces[1] != UINT32_MAX) resNonces[1] += startNonce; + lbry_sha256d_gpu_hash_final <<>> (threads, (uint64_t*) d_inputHash, d_resNonce); } diff --git a/lbry/cuda_sha512_lbry.cu b/lbry/cuda_sha512_lbry.cu index e4fdd25..fa070e7 100644 --- a/lbry/cuda_sha512_lbry.cu +++ b/lbry/cuda_sha512_lbry.cu @@ -1,17 +1,20 @@ /** * sha-512 CUDA implementation. 
+ * Tanguy Pruvot and Provos Alexis - JUL 2016 */ -#include -#include -#include - //#define USE_ROT_ASM_OPT 0 #include - -static __constant__ uint64_t K_512[80]; - -static const uint64_t K512[80] = { +#include +#include "miner.h" + +static __constant__ +#if __CUDA_ARCH__ > 500 +_ALIGN(16) +#else +_ALIGN(8) +#endif +uint64_t K_512[80] = { 0x428A2F98D728AE22, 0x7137449123EF65CD, 0xB5C0FBCFEC4D3B2F, 0xE9B5DBA58189DBBC, 0x3956C25BF348B538, 0x59F111F1B605D019, 0x923F82A4AF194F9B, 0xAB1C5ED5DA6D8118, 0xD807AA98A3030242, 0x12835B0145706FBE, 0x243185BE4EE4B28C, 0x550C7DC3D5FFB4E2, @@ -34,94 +37,46 @@ static const uint64_t K512[80] = { 0x4CC5D4BECB3E42B6, 0x597F299CFC657E2A, 0x5FCB6FAB3AD6FAEC, 0x6C44198C4A475817 }; -//#undef xor3 -//#define xor3(a,b,c) (a^b^c) +#undef xor3 +#define xor3(a,b,c) (a^b^c) -static __device__ __forceinline__ -uint64_t bsg5_0(const uint64_t x) -{ - uint64_t r1 = ROTR64(x,28); - uint64_t r2 = ROTR64(x,34); - uint64_t r3 = ROTR64(x,39); - return xor3(r1,r2,r3); -} +#define bsg5_0(x) xor3(ROTR64(x,28),ROTR64(x,34),ROTR64(x,39)) +#define bsg5_1(x) xor3(ROTR64(x,14),ROTR64(x,18),ROTR64(x,41)) +#define ssg5_0(x) xor3(ROTR64(x,1),ROTR64(x,8),x>>7) +#define ssg5_1(x) xor3(ROTR64(x,19),ROTR64(x,61),x>>6) -static __device__ __forceinline__ -uint64_t bsg5_1(const uint64_t x) -{ - uint64_t r1 = ROTR64(x,14); - uint64_t r2 = ROTR64(x,18); - uint64_t r3 = ROTR64(x,41); - return xor3(r1,r2,r3); -} -static __device__ __forceinline__ -uint64_t ssg5_0(const uint64_t x) -{ - uint64_t r1 = ROTR64(x,1); - uint64_t r2 = ROTR64(x,8); - uint64_t r3 = shr_t64(x,7); - return xor3(r1,r2,r3); -} +#define andor64(a,b,c) ((a & (b | c)) | (b & c)) +#define xandx64(e,f,g) (g ^ (e & (g ^ f))) static __device__ __forceinline__ -uint64_t ssg5_1(const uint64_t x) +void sha512_step2(uint64_t* r,const uint64_t W,const uint64_t K, const int ord) { - uint64_t r1 = ROTR64(x,19); - uint64_t r2 = ROTR64(x,61); - uint64_t r3 = shr_t64(x,6); - return xor3(r1,r2,r3); -} - -static __device__ __forceinline__ -uint64_t xandx64(const uint64_t a, const uint64_t b, const uint64_t c) -{ - uint64_t result; - asm("{ .reg .u64 m,n; // xandx64\n\t" - "xor.b64 m, %2,%3;\n\t" - "and.b64 n, m,%1;\n\t" - "xor.b64 %0, n,%3;\n\t" - "}" : "=l"(result) : "l"(a), "l"(b), "l"(c)); - return result; -} - -static __device__ __forceinline__ -void sha512_step2(uint64_t* r, uint64_t* W, uint64_t* K, const int ord, int i) -{ - int u = 8-ord; - uint64_t a = r[(0+u) & 7]; - uint64_t b = r[(1+u) & 7]; - uint64_t c = r[(2+u) & 7]; - uint64_t d = r[(3+u) & 7]; - uint64_t e = r[(4+u) & 7]; - uint64_t f = r[(5+u) & 7]; - uint64_t g = r[(6+u) & 7]; - uint64_t h = r[(7+u) & 7]; - - uint64_t T1 = h + bsg5_1(e) + xandx64(e,f,g) + W[i] + K[i]; - uint64_t T2 = bsg5_0(a) + andor(a,b,c); - r[(3+u)& 7] = d + T1; - r[(7+u)& 7] = T1 + T2; + const uint64_t T1 = r[(15-ord) & 7] + K + W + bsg5_1(r[(12-ord) & 7]) + xandx64(r[(12-ord) & 7],r[(13-ord) & 7],r[(14-ord) & 7]); + r[(15-ord)& 7] = andor64(r[( 8-ord) & 7],r[( 9-ord) & 7],r[(10-ord) & 7]) + bsg5_0(r[( 8-ord) & 7]) + T1; + r[(11-ord)& 7]+= T1; } /**************************************************************************************************/ -__global__ +__global__ __launch_bounds__(512,2) void lbry_sha512_gpu_hash_32(const uint32_t threads, uint64_t *g_hash) { const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); - //if (thread < threads) + const uint64_t IV512[8] = { + 0x6A09E667F3BCC908, 0xBB67AE8584CAA73B, 0x3C6EF372FE94F82B, 0xA54FF53A5F1D36F1, + 0x510E527FADE682D1, 0x9B05688C2B3E6C1F, 
0x1F83D9ABFB41BD6B, 0x5BE0CD19137E2179 + }; + uint64_t r[8]; + uint64_t W[16]; + if (thread < threads) { - uint64_t *pHash = &g_hash[thread * 8U]; + uint64_t *pHash = &g_hash[thread<<3]; - uint64_t W[80]; + *(uint2x4*)&r[ 0] = *(uint2x4*)&IV512[ 0]; + *(uint2x4*)&r[ 4] = *(uint2x4*)&IV512[ 4]; - #pragma unroll - for (int i = 0; i < 4; i++) { - // 32 bytes input - W[i] = pHash[i]; - //W[i] = cuda_swab64(pHash[i]); // made in sha256 - } + *(uint2x4*)&W[ 0] = __ldg4((uint2x4*)&pHash[ 0]); W[4] = 0x8000000000000000; // end tag @@ -130,46 +85,41 @@ void lbry_sha512_gpu_hash_32(const uint32_t threads, uint64_t *g_hash) W[15] = 0x100; // 256 bits - //#pragma unroll - //for (int i = 16; i < 78; i++) W[i] = 0; - - #pragma unroll - for (int i = 16; i < 80; i++) - W[i] = ssg5_1(W[i - 2]) + W[i - 7] + ssg5_0(W[i - 15]) + W[i - 16]; - - const uint64_t IV512[8] = { - 0x6A09E667F3BCC908, 0xBB67AE8584CAA73B, 0x3C6EF372FE94F82B, 0xA54FF53A5F1D36F1, - 0x510E527FADE682D1, 0x9B05688C2B3E6C1F, 0x1F83D9ABFB41BD6B, 0x5BE0CD19137E2179 - }; + #pragma unroll 16 + for (int i = 0; i < 16; i ++){ + sha512_step2(r, W[i], K_512[i], i&7); + } - uint64_t r[8]; #pragma unroll - for (int i = 0; i < 8; i++) - r[i] = IV512[i]; - - #pragma unroll 10 - for (int i = 0; i < 10; i++) { - #pragma unroll 8 - for (int ord=0; ord<8; ord++) - sha512_step2(r, W, K_512, ord, 8*i + ord); + for (int i = 16; i < 80; i+=16){ + #pragma unroll + for (int j = 0; j<16; j++) { + W[(i + j) & 15] += W[((i + j) - 7) & 15] + ssg5_0(W[((i + j) - 15) & 15]) + ssg5_1(W[((i + j) - 2) & 15]); + } + #pragma unroll + for (int j = 0; j<16; j++) { + sha512_step2(r, W[j], K_512[i+j], (i+j)&7); + } } #pragma unroll 8 for (int i = 0; i < 8; i++) - pHash[i] = cuda_swab64(r[i] + IV512[i]); + r[i] = cuda_swab64(r[i] + IV512[i]); + + *(uint2x4*)&pHash[0] = *(uint2x4*)&r[0]; + *(uint2x4*)&pHash[4] = *(uint2x4*)&r[4]; } } __host__ void lbry_sha512_hash_32(int thr_id, uint32_t threads, uint32_t *d_hash) { - const int threadsperblock = 256; + const uint32_t threadsperblock = 512; dim3 grid((threads + threadsperblock-1)/threadsperblock); dim3 block(threadsperblock); - size_t shared_size = 0; - lbry_sha512_gpu_hash_32 <<>> (threads, (uint64_t*)d_hash); + lbry_sha512_gpu_hash_32 <<>> (threads, (uint64_t*)d_hash); } /**************************************************************************************************/ @@ -177,5 +127,5 @@ void lbry_sha512_hash_32(int thr_id, uint32_t threads, uint32_t *d_hash) __host__ void lbry_sha512_init(int thr_id) { - cudaMemcpyToSymbol(K_512, K512, 80*sizeof(uint64_t), 0, cudaMemcpyHostToDevice); +// cudaMemcpyToSymbol(K_512, K512, 80*sizeof(uint64_t), 0, cudaMemcpyHostToDevice); } diff --git a/lbry/lbry.cu b/lbry/lbry.cu index 243cbde..ac1e4ce 100644 --- a/lbry/lbry.cu +++ b/lbry/lbry.cu @@ -68,7 +68,7 @@ extern void lbry_sha256_setBlock_112(uint32_t *pdata, uint32_t *ptarget); extern void lbry_sha256d_hash_112(int thr_id, uint32_t threads, uint32_t startNonce, uint32_t *d_outputHash); extern void lbry_sha512_init(int thr_id); extern void lbry_sha512_hash_32(int thr_id, uint32_t threads, uint32_t *d_hash); -extern void lbry_sha256d_hash_final(int thr_id, uint32_t threads, uint32_t startNonce, uint32_t *d_inputHash, uint32_t *resNonces); +extern void lbry_sha256d_hash_final(int thr_id, uint32_t threads, uint32_t *d_inputHash, uint32_t *d_resNonce); static __inline uint32_t swab32_if(uint32_t val, bool iftrue) { return iftrue ? 
swab32(val) : val; @@ -77,7 +77,7 @@ static __inline uint32_t swab32_if(uint32_t val, bool iftrue) { static bool init[MAX_GPUS] = { 0 }; static uint32_t *d_hash[MAX_GPUS]; - +static uint32_t *d_resNonce[MAX_GPUS]; // nonce position is different #define LBC_NONCE_OFT32 27 @@ -109,10 +109,12 @@ extern "C" int scanhash_lbry(int thr_id, struct work *work, uint32_t max_nonce, cudaDeviceReset(); // reduce cpu usage (linux) cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync); + cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); CUDA_LOG_ERROR(); } CUDA_SAFE_CALL(cudaMalloc(&d_hash[thr_id], (size_t) 64 * throughput)); + CUDA_SAFE_CALL(cudaMalloc(&d_resNonce[thr_id], 2 * sizeof(uint32_t))); lbry_sha256_init(thr_id); lbry_sha512_init(thr_id); @@ -126,38 +128,39 @@ extern "C" int scanhash_lbry(int thr_id, struct work *work, uint32_t max_nonce, } lbry_sha256_setBlock_112(endiandata, ptarget); + cudaMemset(d_resNonce[thr_id], 0xFF, 2 * sizeof(uint32_t)); do { // Hash with CUDA lbry_sha256d_hash_112(thr_id, throughput, pdata[LBC_NONCE_OFT32], d_hash[thr_id]); - CUDA_LOG_ERROR(); - lbry_sha512_hash_32(thr_id, throughput, d_hash[thr_id]); - CUDA_LOG_ERROR(); uint32_t resNonces[2] = { UINT32_MAX, UINT32_MAX }; - lbry_sha256d_hash_final(thr_id, throughput, pdata[LBC_NONCE_OFT32], d_hash[thr_id], resNonces); - CUDA_LOG_ERROR(); + lbry_sha256d_hash_final(thr_id, throughput, d_hash[thr_id], d_resNonce[thr_id]); + + cudaMemcpy(resNonces, d_resNonce[thr_id], 2 * sizeof(uint32_t), cudaMemcpyDeviceToHost); - uint32_t foundNonce = resNonces[0]; *hashes_done = pdata[LBC_NONCE_OFT32] - first_nonce + throughput; - if (foundNonce != UINT32_MAX) + if (resNonces[0] != UINT32_MAX) { - endiandata[LBC_NONCE_OFT32] = swab32_if(foundNonce, !swap); + const uint32_t startNonce = pdata[LBC_NONCE_OFT32]; + resNonces[0] += startNonce; + + endiandata[LBC_NONCE_OFT32] = swab32_if(resNonces[0], !swap); lbry_hash(vhash, endiandata); if (vhash[7] <= ptarget[7] && fulltest(vhash, ptarget)) { int res = 1; - uint32_t secNonce = resNonces[1]; - work->nonces[0] = swab32_if(foundNonce, swap); + work->nonces[0] = swab32_if(resNonces[0], swap); work_set_target_ratio(work, vhash); - if (secNonce != UINT32_MAX) { + if (resNonces[1] != UINT32_MAX) { + resNonces[1] += startNonce; if (opt_debug) - gpulog(LOG_BLUE, thr_id, "found second nonce %08x", swab32(secNonce)); - endiandata[LBC_NONCE_OFT32] = swab32_if(secNonce, !swap); + gpulog(LOG_BLUE, thr_id, "found second nonce %08x", resNonces[1]); + endiandata[LBC_NONCE_OFT32] = swab32_if(resNonces[1], !swap); lbry_hash(vhash, endiandata); - work->nonces[1] = swab32_if(secNonce, swap); + work->nonces[1] = swab32_if(resNonces[1], swap); if (bn_hash_target_ratio(vhash, ptarget) > work->shareratio) { work_set_target_ratio(work, vhash); xchg(work->nonces[0], work->nonces[1]); @@ -166,8 +169,9 @@ extern "C" int scanhash_lbry(int thr_id, struct work *work, uint32_t max_nonce, } pdata[LBC_NONCE_OFT32] = work->nonces[0]; return res; - } else { - gpulog(LOG_WARNING, thr_id, "result for %08x does not validate on CPU %08x > %08x!", foundNonce, vhash[7], ptarget[7]); + } else if (vhash[7] > ptarget[7]) { + gpulog(LOG_WARNING, thr_id, "result for %08x does not validate on CPU %08x > %08x!", resNonces[0], vhash[7], ptarget[7]); + cudaMemset(d_resNonce[thr_id], 0xFF, 2 * sizeof(uint32_t)); } } @@ -180,13 +184,13 @@ extern "C" int scanhash_lbry(int thr_id, struct work *work, uint32_t max_nonce, } while (!work_restart[thr_id].restart); - *hashes_done = pdata[LBC_NONCE_OFT32] - first_nonce; + *hashes_done = 
pdata[LBC_NONCE_OFT32] - first_nonce + 1; return 0; } // cleanup -void free_lbry(int thr_id) +extern "C" void free_lbry(int thr_id) { if (!init[thr_id]) return; @@ -194,6 +198,7 @@ void free_lbry(int thr_id) cudaThreadSynchronize(); cudaFree(d_hash[thr_id]); + cudaFree(d_resNonce[thr_id]); lbry_sha256_free(thr_id); init[thr_id] = false;
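
Taken together, result handling now keeps a persistent two-slot nonce buffer per GPU instead of allocating, resetting, and reading it back inside the sha256 module on every call. A condensed sketch of the pattern, assuming the names used in the patch (d_resNonce, d_target, LBC_NONCE_OFT32); error checks and the full ccminer scan loop are omitted:

// device side, on a candidate hash below the current target:
//   resNonces[1] = resNonces[0];   // keep the previous find as a second nonce
//   resNonces[0] = thread;         // record the newest find
//   d_target[0]  = high;           // tighten the on-GPU target
// (two plain stores instead of atomicExch: cheaper, and a rare same-launch
//  race at worst drops or duplicates a candidate, which the CPU re-check
//  filters out anyway)

uint32_t resNonces[2] = { UINT32_MAX, UINT32_MAX };
cudaMemset(d_resNonce[thr_id], 0xFF, 2 * sizeof(uint32_t)); // once, before the scan loop

lbry_sha256d_hash_final(thr_id, throughput, d_hash[thr_id], d_resNonce[thr_id]);
cudaMemcpy(resNonces, d_resNonce[thr_id], 2 * sizeof(uint32_t), cudaMemcpyDeviceToHost);

if (resNonces[0] != UINT32_MAX) {
	resNonces[0] += pdata[LBC_NONCE_OFT32];      // thread index -> absolute nonce
	if (resNonces[1] != UINT32_MAX)
		resNonces[1] += pdata[LBC_NONCE_OFT32];
	// ...then the CPU-side lbry_hash()/fulltest() verification as in
	// scanhash_lbry() above, with a fresh 0xFF memset on a failed check.
}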