From 8fd2739a65db4a46055406b78e85eb314a01e50e Mon Sep 17 00:00:00 2001
From: Tanguy Pruvot
Date: Wed, 14 Oct 2015 00:42:28 +0000
Subject: [PATCH] lyra2: support for SM 2.1 cards (GTX 460)

Also fix the build (scrypt) for this arch.
For reference: 318.26 kH/s on a GTX 460...
---
 lyra2/cuda_lyra2.cu      |  20 +++-
 lyra2/cuda_lyra2_sm2.cuh | 225 +++++++++++++++++++++++++++++++++++++++
 lyra2/cuda_lyra2v2.cu    |  11 +-
 lyra2/lyra2RE.cu         |   3 +-
 scrypt/nv_kernel2.cu     |   5 +
 scrypt/titan_kernel.cu   |   4 +
 6 files changed, 258 insertions(+), 10 deletions(-)
 create mode 100644 lyra2/cuda_lyra2_sm2.cuh

diff --git a/lyra2/cuda_lyra2.cu b/lyra2/cuda_lyra2.cu
index 665c364..e2757ea 100644
--- a/lyra2/cuda_lyra2.cu
+++ b/lyra2/cuda_lyra2.cu
@@ -6,11 +6,15 @@
 #include <stdio.h>
 #include <memory.h>
 
-#include "cuda_lyra2_vectors.h"
-
 #define TPB50 16
 #define TPB52 8
 
+#include "cuda_lyra2_sm2.cuh"
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 500
+
+#include "cuda_lyra2_vectors.h"
+
 #define uint2x4 uint28
 
 #define memshift 3
@@ -238,6 +242,11 @@ void lyra2_gpu_hash_32(uint32_t threads, uint32_t startNounce, uint2 *g_hash)
 		g_hash[thread + threads*3] = ((uint2*)state)[3];
 	}
 }
+#else
+/* for unsupported SM arch */
+__device__ void* DMatrix;
+__global__ void lyra2_gpu_hash_32(uint32_t threads, uint32_t startNounce, uint2 *g_hash) {}
+#endif
 
 __host__
 void lyra2_cpu_init(int thr_id, uint32_t threads, uint64_t* d_matrix)
@@ -252,9 +261,14 @@ void lyra2_cpu_hash_32(int thr_id, uint32_t threads, uint32_t startNounce, uint6
 	int dev_id = device_map[thr_id % MAX_GPUS];
 	uint32_t tpb = TPB52;
 	if (device_sm[dev_id] == 500) tpb = TPB50;
+	if (device_sm[dev_id] <= 300) tpb = TPB30;
 
 	dim3 grid((threads + tpb - 1) / tpb);
 	dim3 block(tpb);
 
-	lyra2_gpu_hash_32 <<< grid, block >>> (threads, startNounce, (uint2*)d_hash);
+	if (device_sm[dev_id] >= 500)
+		lyra2_gpu_hash_32 <<< grid, block >>> (threads, startNounce, (uint2*)d_hash);
+	else
+		lyra2_gpu_hash_32_sm2 <<< grid, block >>> (threads, startNounce, d_hash);
+
 }

diff --git a/lyra2/cuda_lyra2_sm2.cuh b/lyra2/cuda_lyra2_sm2.cuh
new file mode 100644
index 0000000..b0ff1ec
--- /dev/null
+++ b/lyra2/cuda_lyra2_sm2.cuh
@@ -0,0 +1,225 @@
+#include <stdio.h>
+
+#ifdef __INTELLISENSE__
+/* just for vstudio code colors */
+#undef __CUDA_ARCH__
+#define __CUDA_ARCH__ 300
+#endif
+
+#include "cuda_helper.h"
+
+#define TPB30 160
+
+#if __CUDA_ARCH__ >= 200 && __CUDA_ARCH__ <= 350
+
+static __constant__ uint2 blake2b_IV[8] = {
+	{ 0xf3bcc908, 0x6a09e667 },
+	{ 0x84caa73b, 0xbb67ae85 },
+	{ 0xfe94f82b, 0x3c6ef372 },
+	{ 0x5f1d36f1, 0xa54ff53a },
+	{ 0xade682d1, 0x510e527f },
+	{ 0x2b3e6c1f, 0x9b05688c },
+	{ 0xfb41bd6b, 0x1f83d9ab },
+	{ 0x137e2179, 0x5be0cd19 }
+};
+
+#define reduceDuplexRow(rowIn, rowInOut, rowOut) { \
+	for (int i = 0; i < 8; i++) { \
+		for (int j = 0; j < 12; j++) \
+			state[j] ^= Matrix[12 * i + j][rowIn] + Matrix[12 * i + j][rowInOut]; \
+		round_lyra(state); \
+		for (int j = 0; j < 12; j++) \
+			Matrix[j + 12 * i][rowOut] ^= state[j]; \
+		Matrix[0 + 12 * i][rowInOut] ^= state[11]; \
+		Matrix[1 + 12 * i][rowInOut] ^= state[0]; \
+		Matrix[2 + 12 * i][rowInOut] ^= state[1]; \
+		Matrix[3 + 12 * i][rowInOut] ^= state[2]; \
+		Matrix[4 + 12 * i][rowInOut] ^= state[3]; \
+		Matrix[5 + 12 * i][rowInOut] ^= state[4]; \
+		Matrix[6 + 12 * i][rowInOut] ^= state[5]; \
+		Matrix[7 + 12 * i][rowInOut] ^= state[6]; \
+		Matrix[8 + 12 * i][rowInOut] ^= state[7]; \
+		Matrix[9 + 12 * i][rowInOut] ^= state[8]; \
+		Matrix[10+ 12 * i][rowInOut] ^= state[9]; \
+		Matrix[11+ 12 * i][rowInOut] ^= state[10]; \
+	} \
+  }
+
+#define absorbblock(in) { \
+	state[0] ^= Matrix[0][in]; \
+	state[1] ^= Matrix[1][in]; \
+	state[2] ^= Matrix[2][in]; \
+	state[3] ^= Matrix[3][in]; \
+	state[4] ^= Matrix[4][in]; \
+	state[5] ^= Matrix[5][in]; \
+	state[6] ^= Matrix[6][in]; \
+	state[7] ^= Matrix[7][in]; \
+	state[8] ^= Matrix[8][in]; \
+	state[9] ^= Matrix[9][in]; \
+	state[10] ^= Matrix[10][in]; \
+	state[11] ^= Matrix[11][in]; \
+	round_lyra(state); \
+	round_lyra(state); \
+	round_lyra(state); \
+	round_lyra(state); \
+	round_lyra(state); \
+	round_lyra(state); \
+	round_lyra(state); \
+	round_lyra(state); \
+	round_lyra(state); \
+	round_lyra(state); \
+	round_lyra(state); \
+	round_lyra(state); \
+  }
+
+static __device__ __forceinline__
+void Gfunc(uint2 &a, uint2 &b, uint2 &c, uint2 &d)
+{
+	a += b; d ^= a; d = SWAPUINT2(d);
+	c += d; b ^= c; b = ROR2(b, 24);
+	a += b; d ^= a; d = ROR2(d, 16);
+	c += d; b ^= c; b = ROR2(b, 63);
+}
+
+__device__ __forceinline__
+static void round_lyra(uint2 *s)
+{
+	Gfunc(s[0], s[4], s[8], s[12]);
+	Gfunc(s[1], s[5], s[9], s[13]);
+	Gfunc(s[2], s[6], s[10], s[14]);
+	Gfunc(s[3], s[7], s[11], s[15]);
+	Gfunc(s[0], s[5], s[10], s[15]);
+	Gfunc(s[1], s[6], s[11], s[12]);
+	Gfunc(s[2], s[7], s[8], s[13]);
+	Gfunc(s[3], s[4], s[9], s[14]);
+}
+
+__device__ __forceinline__
+void reduceDuplexRowSetup(const int rowIn, const int rowInOut, const int rowOut, uint2 state[16], uint2 Matrix[96][8])
+{
+#if __CUDA_ARCH__ > 500
+	#pragma unroll
+#endif
+	for (int i = 0; i < 8; i++)
+	{
+		#pragma unroll
+		for (int j = 0; j < 12; j++)
+			state[j] ^= Matrix[12 * i + j][rowIn] + Matrix[12 * i + j][rowInOut];
+
+		round_lyra(state);
+
+		#pragma unroll
+		for (int j = 0; j < 12; j++)
+			Matrix[j + 84 - 12 * i][rowOut] = Matrix[12 * i + j][rowIn] ^ state[j];
+
+		Matrix[0 + 12 * i][rowInOut] ^= state[11];
+		Matrix[1 + 12 * i][rowInOut] ^= state[0];
+		Matrix[2 + 12 * i][rowInOut] ^= state[1];
+		Matrix[3 + 12 * i][rowInOut] ^= state[2];
+		Matrix[4 + 12 * i][rowInOut] ^= state[3];
+		Matrix[5 + 12 * i][rowInOut] ^= state[4];
+		Matrix[6 + 12 * i][rowInOut] ^= state[5];
+		Matrix[7 + 12 * i][rowInOut] ^= state[6];
+		Matrix[8 + 12 * i][rowInOut] ^= state[7];
+		Matrix[9 + 12 * i][rowInOut] ^= state[8];
+		Matrix[10 + 12 * i][rowInOut] ^= state[9];
+		Matrix[11 + 12 * i][rowInOut] ^= state[10];
+	}
+}
+
+__global__ __launch_bounds__(TPB30, 1)
+void lyra2_gpu_hash_32_sm2(uint32_t threads, uint32_t startNounce, uint64_t *g_hash)
+{
+	uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
+	if (thread < threads)
+	{
+		uint2 state[16];
+
+		#pragma unroll
+		for (int i = 0; i<4; i++) {
+			LOHI(state[i].x, state[i].y, g_hash[threads*i + thread]);
+		} //password
+
+		#pragma unroll
+		for (int i = 0; i<4; i++) {
+			state[i + 4] = state[i];
+		} //salt
+
+		#pragma unroll
+		for (int i = 0; i<8; i++) {
+			state[i + 8] = blake2b_IV[i];
+		}
+
+		// blake2blyra x2
+		//#pragma unroll 24
+		for (int i = 0; i<24; i++) {
+			round_lyra(state);
+		} //because 12 is not enough
+
+		uint2 Matrix[96][8]; // not cool
+
+		// reducedSqueezeRow0
+		#pragma unroll 8
+		for (int i = 0; i < 8; i++)
+		{
+			#pragma unroll 12
+			for (int j = 0; j<12; j++) {
+				Matrix[j + 84 - 12 * i][0] = state[j];
+			}
+			round_lyra(state);
+		}
+
+		// reducedSqueezeRow1
+		#pragma unroll 8
+		for (int i = 0; i < 8; i++)
+		{
+			#pragma unroll 12
+			for (int j = 0; j<12; j++) {
+				state[j] ^= Matrix[j + 12 * i][0];
+			}
+			round_lyra(state);
+			#pragma unroll 12
+			for (int j = 0; j<12; j++) {
+				Matrix[j + 84 - 12 * i][1] = Matrix[j + 12 * i][0] ^ state[j];
+			}
+		}
+
+		reduceDuplexRowSetup(1, 0, 2, state, Matrix);
+		reduceDuplexRowSetup(2, 1, 3, state, Matrix);
+		reduceDuplexRowSetup(3, 0, 4, state, Matrix);
+		reduceDuplexRowSetup(4, 3, 5, state, Matrix);
+		reduceDuplexRowSetup(5, 2, 6, state, Matrix);
+		reduceDuplexRowSetup(6, 1, 7, state, Matrix);
+
+		uint32_t rowa;
+		rowa = state[0].x & 7;
+		reduceDuplexRow(7, rowa, 0);
+		rowa = state[0].x & 7;
+		reduceDuplexRow(0, rowa, 3);
+		rowa = state[0].x & 7;
+		reduceDuplexRow(3, rowa, 6);
+		rowa = state[0].x & 7;
+		reduceDuplexRow(6, rowa, 1);
+		rowa = state[0].x & 7;
+		reduceDuplexRow(1, rowa, 4);
+		rowa = state[0].x & 7;
+		reduceDuplexRow(4, rowa, 7);
+		rowa = state[0].x & 7;
+		reduceDuplexRow(7, rowa, 2);
+		rowa = state[0].x & 7;
+		reduceDuplexRow(2, rowa, 5);
+
+		absorbblock(rowa);
+
+		#pragma unroll
+		for (int i = 0; i<4; i++) {
+			g_hash[threads*i + thread] = devectorize(state[i]);
+		}
+
+	} //thread
+}
+
+#else
+/* if __CUDA_ARCH__ < 200 .. host */
+__global__ void lyra2_gpu_hash_32_sm2(uint32_t threads, uint32_t startNounce, uint64_t *g_hash) {}
+#endif

diff --git a/lyra2/cuda_lyra2v2.cu b/lyra2/cuda_lyra2v2.cu
index 5b4bd07..9661a9a 100644
--- a/lyra2/cuda_lyra2v2.cu
+++ b/lyra2/cuda_lyra2v2.cu
@@ -1,4 +1,5 @@
 #include <stdio.h>
+#include <stdint.h>
 #include <memory.h>
 
 #ifdef __INTELLISENSE__
@@ -9,16 +10,12 @@
 #define TPB52 10
 #define TPB50 16
 
-#include "cuda_lyra2_vectors.h"
-
 #include "cuda_lyra2v2_sm3.cuh"
 
-#ifndef __CUDA_ARCH__
-__device__ void *DMatrix;
-#endif
-
 #if __CUDA_ARCH__ >= 500
 
+#include "cuda_lyra2_vectors.h"
+
 #define Nrow 4
 #define Ncol 4
 #define u64type uint2
@@ -346,6 +343,8 @@ void lyra2v2_gpu_hash_32(uint32_t threads, uint32_t startNounce, uint2 *outputHa
 	}
 }
 #else
+#include "cuda_helper.h"
+__device__ void* DMatrix;
 __global__ void lyra2v2_gpu_hash_32(uint32_t threads, uint32_t startNounce, uint2 *outputHash) {}
 #endif
 
diff --git a/lyra2/lyra2RE.cu b/lyra2/lyra2RE.cu
index f1fbd0a..05ffb52 100644
--- a/lyra2/lyra2RE.cu
+++ b/lyra2/lyra2RE.cu
@@ -15,6 +15,7 @@ static uint64_t* d_matrix[MAX_GPUS];
 extern void blake256_cpu_init(int thr_id, uint32_t threads);
 extern void blake256_cpu_hash_80(const int thr_id, const uint32_t threads, const uint32_t startNonce, uint64_t *Hash, int order);
 extern void blake256_cpu_setBlock_80(uint32_t *pdata);
+
 extern void keccak256_cpu_hash_32(int thr_id, uint32_t threads, uint32_t startNonce, uint64_t *d_outputHash, int order);
 extern void keccak256_cpu_init(int thr_id, uint32_t threads);
 extern void keccak256_cpu_free(int thr_id);
@@ -110,7 +111,7 @@ extern "C" int scanhash_lyra2(int thr_id, struct work* work, uint32_t max_nonce,
 		init[thr_id] = true;
 	}
 
-	uint32_t endiandata[20];
+	uint32_t _ALIGN(128) endiandata[20];
 	for (int k=0; k < 20; k++)
 		be32enc(&endiandata[k], pdata[k]);
 
diff --git a/scrypt/nv_kernel2.cu b/scrypt/nv_kernel2.cu
index d875aba..0e94106 100644
--- a/scrypt/nv_kernel2.cu
+++ b/scrypt/nv_kernel2.cu
@@ -24,6 +24,8 @@
 #define __ldg(x) (*(x))
 #endif
 
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
+
 // grab lane ID
 static __device__ __inline__ unsigned int __laneId() { unsigned int laneId; asm( "mov.u32 %0, %%laneid;" : "=r"( laneId ) ); return laneId; }
 
@@ -635,3 +637,6 @@ template __global__ void nv2_scrypt_core_kernelB_LG(uint32_t *g_odata
 
 	__transposed_write_BC(B, C, (uint4*)(g_odata), 1);
 }
+
+#endif /* prevent SM 2 */
+
diff --git a/scrypt/titan_kernel.cu b/scrypt/titan_kernel.cu
index 13b047c..e27b832 100644
--- a/scrypt/titan_kernel.cu
+++ b/scrypt/titan_kernel.cu
@@ -28,6 +28,8 @@ typedef enum
 #define __ldg(x) (*(x))
 #endif
 
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
+
 // scratchbuf constants (pointers to scratch buffer for each warp, i.e. 32 hashes)
 __constant__ uint32_t* c_V[TOTAL_WARP_LIMIT];
 
@@ -736,3 +738,5 @@ bool TitanKernel::run_kernel(dim3 grid, dim3 threads, int WARPS_PER_BLOCK, int t
 
 	return success;
 }
+
+#endif /* prevent SM 2 */
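
Note on the pattern this patch applies in every file it touches: a compile-time
__CUDA_ARCH__ guard, so each -gencode device pass only compiles code its target
arch can run (with an empty stub kernel keeping the symbol defined elsewhere),
paired with a runtime check of the device's compute capability that picks the
kernel and threads-per-block. Below is a minimal standalone sketch of that
pattern, not part of the patch: hash_kernel and hash_kernel_sm2 are
hypothetical stand-ins for lyra2_gpu_hash_32 and lyra2_gpu_hash_32_sm2, and the
cudaGetDeviceProperties query stands in for ccminer's device_sm[] table.

#include <cstdint>
#include <cuda_runtime.h>

// Compile-time side: nvcc runs one device pass per -gencode target, and
// __CUDA_ARCH__ selects the body each target gets. The empty stub keeps the
// symbol defined on archs that cannot run the real body, so the host-side
// <<<...>>> launch still compiles and links everywhere.
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 500
__global__ void hash_kernel(uint32_t threads, uint32_t *out)
{
	uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x;
	if (thread < threads)
		out[thread] = thread ^ 0x9e3779b9u; // placeholder for the SM 5.x path
}
#else
__global__ void hash_kernel(uint32_t threads, uint32_t *out) {}
#endif

#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 500
__global__ void hash_kernel_sm2(uint32_t threads, uint32_t *out)
{
	uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x;
	if (thread < threads)
		out[thread] = thread + 1u; // placeholder for the legacy SM 2.x path
}
#else
__global__ void hash_kernel_sm2(uint32_t threads, uint32_t *out) {}
#endif

int main()
{
	cudaDeviceProp prop;
	cudaGetDeviceProperties(&prop, 0);
	int sm = prop.major * 100 + prop.minor * 10; // e.g. 210 on a GTX 460

	const uint32_t threads = 1u << 20;
	uint32_t *d_out = nullptr;
	cudaMalloc((void**)&d_out, threads * sizeof(uint32_t));

	// Runtime side: choose the kernel and its threads-per-block per device,
	// as lyra2_cpu_hash_32() does with TPB52/TPB50/TPB30 and device_sm[].
	uint32_t tpb = (sm >= 500) ? 8 : 160;
	dim3 grid((threads + tpb - 1) / tpb), block(tpb);

	if (sm >= 500)
		hash_kernel <<< grid, block >>> (threads, d_out);
	else
		hash_kernel_sm2 <<< grid, block >>> (threads, d_out);

	cudaDeviceSynchronize();
	cudaFree(d_out);
	return 0;
}

Compiling both kernels (one real body, one stub) into each target of the fat
binary is what lets a single build serve both a GTX 460 (SM 2.1) and Maxwell
cards: the launch site always links, and the runtime test routes each device
to the path that was actually compiled for it.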