From 8f98bde4fb20e5af1607542132c3f564e1967c33 Mon Sep 17 00:00:00 2001
From: Tanguy Pruvot
Date: Sun, 6 Sep 2015 13:49:52 +0200
Subject: [PATCH] lyra2v2: improve cubehash with uint2

---
 Algo256/cuda_cubehash256.cu | 85 ++++++++++++++++++++++++++++++-------
 configure.ac                |  2 +-
 lyra2/cuda_lyra2v2.cu       | 22 ++++++----
 3 files changed, 84 insertions(+), 25 deletions(-)

diff --git a/Algo256/cuda_cubehash256.cu b/Algo256/cuda_cubehash256.cu
index b52ed55..76b9c52 100644
--- a/Algo256/cuda_cubehash256.cu
+++ b/Algo256/cuda_cubehash256.cu
@@ -9,6 +9,12 @@
 #define LROT(x, bits) __funnelshift_l(x, x, bits)
 #endif
 
+#if __CUDA_ARCH__ < 500
+#define TPB 576
+#else
+#define TPB 1024
+#endif
+
 #define ROTATEUPWARDS7(a) LROT(a,7)
 #define ROTATEUPWARDS11(a) LROT(a,11)
 
@@ -186,19 +192,65 @@ void Final(uint32_t x[2][2][2][2][2], uint32_t *hashval)
 	hash_fromx(hashval, x);
 }
 
 
+#if __CUDA_ARCH__ >= 500
+
+__global__ __launch_bounds__(TPB, 1)
+void cubehash256_gpu_hash_32(uint32_t threads, uint32_t startNounce, uint2 *g_hash)
+{
+	uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
+	if (thread < threads)
+	{
+		uint2 Hash[4];
+
+		Hash[0] = __ldg(&g_hash[thread]);
+		Hash[1] = __ldg(&g_hash[thread + 1 * threads]);
+		Hash[2] = __ldg(&g_hash[thread + 2 * threads]);
+		Hash[3] = __ldg(&g_hash[thread + 3 * threads]);
+
+		uint32_t x[2][2][2][2][2] =
+		{
+			0xEA2BD4B4, 0xCCD6F29F, 0x63117E71, 0x35481EAE,
+			0x22512D5B, 0xE5D94E63, 0x7E624131, 0xF4CC12BE,
+			0xC2D0B696, 0x42AF2070, 0xD0720C35, 0x3361DA8C,
+			0x28CCECA4, 0x8EF8AD83, 0x4680AC00, 0x40E5FBAB,
+			0xD89041C3, 0x6107FBD5, 0x6C859D41, 0xF0B26679,
+			0x09392549, 0x5FA25603, 0x65C892FD, 0x93CB6285,
+			0x2AF2B5AE, 0x9E4B4E60, 0x774ABFDD, 0x85254725,
+			0x15815AEB, 0x4AB6AAD6, 0x9CDAF8AF, 0xD6032C0A
+		};
+
+		x[0][0][0][0][0] ^= Hash[0].x;
+		x[0][0][0][0][1] ^= Hash[0].y;
+		x[0][0][0][1][0] ^= Hash[1].x;
+		x[0][0][0][1][1] ^= Hash[1].y;
+		x[0][0][1][0][0] ^= Hash[2].x;
+		x[0][0][1][0][1] ^= Hash[2].y;
+		x[0][0][1][1][0] ^= Hash[3].x;
+		x[0][0][1][1][1] ^= Hash[3].y;
+
+		rrounds(x);
+		x[0][0][0][0][0] ^= 0x80U;
+		rrounds(x);
+
+		Final(x, (uint32_t*) Hash);
+
+		g_hash[thread] = Hash[0];
+		g_hash[1 * threads + thread] = Hash[1];
+		g_hash[2 * threads + thread] = Hash[2];
+		g_hash[3 * threads + thread] = Hash[3];
+	}
+}
 
-// Die Hash-Funktion
-#if __CUDA_ARCH__ <500
-__global__ __launch_bounds__(576,1)
 #else
-__global__ __launch_bounds__(576,1)
-#endif
-void cubehash256_gpu_hash_32(uint32_t threads, uint32_t startNounce, uint64_t *g_hash)
+
+__global__ __launch_bounds__(TPB, 1)
+void cubehash256_gpu_hash_32(uint32_t threads, uint32_t startNounce, uint2 *d_hash)
 {
-	uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
-	if (thread < threads)
-	{
-		uint32_t Hash[8]; // = &g_hash[16 * hashPosition];
+	uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
+	if (thread < threads)
+	{
+		uint32_t Hash[8];
+		uint64_t* g_hash = (uint64_t*) d_hash;
 
 		LOHI(Hash[0], Hash[1], __ldg(&g_hash[thread]));
 		LOHI(Hash[2], Hash[3], __ldg(&g_hash[thread + 1 * threads]));
@@ -207,7 +259,7 @@ void cubehash256_gpu_hash_32(uint32_t threads, uint32_t startNounce, uint64_t *g
 
 		uint32_t x[2][2][2][2][2] =
 		{
-			0xEA2BD4B4, 0xCCD6F29F, 0x63117E71, 0x35481EAE, 
+			0xEA2BD4B4, 0xCCD6F29F, 0x63117E71, 0x35481EAE,
 			0x22512D5B, 0xE5D94E63, 0x7E624131, 0xF4CC12BE,
 			0xC2D0B696, 0x42AF2070, 0xD0720C35, 0x3361DA8C,
 			0x28CCECA4, 0x8EF8AD83, 0x4680AC00, 0x40E5FBAB,
@@ -236,17 +288,18 @@ void cubehash256_gpu_hash_32(uint32_t threads, uint32_t startNounce, uint64_t *g
 		g_hash[thread] = ((uint64_t*)Hash)[0];
 		g_hash[1 * threads + thread] = ((uint64_t*)Hash)[1];
 		g_hash[2 * threads + thread] = ((uint64_t*)Hash)[2];
 		g_hash[3 * threads + thread] = ((uint64_t*)Hash)[3];
-	}
+	}
 }
+#endif
 
 __host__
 void cubehash256_cpu_hash_32(int thr_id, uint32_t threads, uint32_t startNounce, uint64_t *d_hash, int order)
 {
-	uint32_t tpb = 576;
+	uint32_t tpb = TPB;
 
-	dim3 grid((threads + tpb-1)/tpb);
-	dim3 block(tpb);
+	dim3 grid((threads + tpb-1)/tpb);
+	dim3 block(tpb);
 
-	cubehash256_gpu_hash_32 <<<grid, block>>> (threads, startNounce, d_hash);
+	cubehash256_gpu_hash_32 <<<grid, block>>> (threads, startNounce, (uint2*) d_hash);
 }
diff --git a/configure.ac b/configure.ac
index 9ccf7f2..18bd776 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,4 +1,4 @@
-AC_INIT([ccminer], [1.6.6])
+AC_INIT([ccminer], [1.6.7-dev])
 
 AC_PREREQ([2.59c])
 AC_CANONICAL_SYSTEM
diff --git a/lyra2/cuda_lyra2v2.cu b/lyra2/cuda_lyra2v2.cu
index 0797dc3..ac48277 100644
--- a/lyra2/cuda_lyra2v2.cu
+++ b/lyra2/cuda_lyra2v2.cu
@@ -78,12 +78,16 @@ void reduceDuplex(vectype state[4], uint32_t thread)
 		uint32_t s1 = ps1 + i*memshift;
 		uint32_t s2 = ps2 - i*memshift;
 
+		#pragma unroll
 		for (int j = 0; j < 3; j++)
 			state1[j] = __ldg4(&(DMatrix+s1)[j]);
 
 		for (int j = 0; j < 3; j++)
 			state[j] ^= state1[j];
+
 		round_lyra_v35(state);
+
+		#pragma unroll
 		for (int j = 0; j < 3; j++)
 			state1[j] ^= state[j];
 
@@ -353,6 +357,7 @@ void lyra2v2_gpu_hash_32_v3(uint32_t threads, uint32_t startNounce, uint2 *outpu
 
 	for (int i = 0; i<12; i++)
 		round_lyra_v35(state);
+
 	state[0] ^= shuffle4(((vectype*)padding)[0], 0);
 	state[1] ^= shuffle4(((vectype*)padding)[1], 0);
 
@@ -417,14 +422,14 @@ void lyra2v2_gpu_hash_32(uint32_t threads, uint32_t startNounce, uint2 *outputHa
 	if (threadIdx.x == 0) {
 
 		((uint16*)blake2b_IV)[0] = make_uint16(
-			0xf3bcc908, 0x6a09e667 , 0x84caa73b, 0xbb67ae85 ,
-			0xfe94f82b, 0x3c6ef372 , 0x5f1d36f1, 0xa54ff53a ,
-			0xade682d1, 0x510e527f , 0x2b3e6c1f, 0x9b05688c ,
-			0xfb41bd6b, 0x1f83d9ab , 0x137e2179, 0x5be0cd19
+			0xf3bcc908, 0x6a09e667, 0x84caa73b, 0xbb67ae85,
+			0xfe94f82b, 0x3c6ef372, 0x5f1d36f1, 0xa54ff53a,
+			0xade682d1, 0x510e527f, 0x2b3e6c1f, 0x9b05688c,
+			0xfb41bd6b, 0x1f83d9ab, 0x137e2179, 0x5be0cd19
 		);
 		((uint16*)padding)[0] = make_uint16(
-			0x20, 0x0 , 0x20, 0x0 , 0x20, 0x0 , 0x01, 0x0 ,
-			0x04, 0x0 , 0x04, 0x0 , 0x80, 0x0 , 0x0, 0x01000000
+			0x20, 0x0, 0x20, 0x0, 0x20, 0x0, 0x01, 0x0,
+			0x04, 0x0, 0x04, 0x0, 0x80, 0x0, 0x0, 0x01000000
 		);
 	}
 
@@ -497,9 +502,10 @@ __global__ void lyra2v2_gpu_hash_32_v3(uint32_t threads, uint32_t startNounce, u
 #endif
 
 __host__
-void lyra2v2_cpu_init(int thr_id, uint32_t threads,uint64_t *hash)
+void lyra2v2_cpu_init(int thr_id, uint32_t threads, uint64_t *d_hash2)
 {
-	cudaMemcpyToSymbol(DMatrix, &hash, sizeof(hash), 0, cudaMemcpyHostToDevice);
+	// just assign the device pointer allocated in main loop
+	cudaMemcpyToSymbol(DMatrix, &d_hash2, sizeof(uint64_t*), 0, cudaMemcpyHostToDevice);
 }
 
 __host__