
bmw512: indent and restore SM 3.0 compat

This could also be the source of the problem seen with CUDA 7.

Restores the code as it was before the sp/klaus changes, for SM 3.0 devices.
Tanguy Pruvot, 10 years ago
commit 4f43abb402
Files changed (lines touched):
1. miner.h (1)
2. quark/cuda_bmw512.cu (151)
3. quark/cuda_bmw512_30.cu (253)
4. util.cpp (9)
5. x11/cuda_x11_simd512.cu (4)
6. x11/x11.cu (2)
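For context: the commit brings back the pre-sp/klaus, plain-uint64_t BMW-512 kernels in a new file (quark/cuda_bmw512_30.cu) and picks between them and the newer uint2-based kernels at launch time, from the device's compute capability. Below is a minimal, self-contained sketch of that dispatch pattern; the kernel bodies and the sm_of() helper are simplified stand-ins, not the miner's code (the real launchers consult the device_sm[] and cuda_arch[] tables that cuda_get_arch() fills in).

#include <cstdio>
#include <cstdint>
#include <cuda_runtime.h>

// Stand-in for the uint2-based kernel used on SM > 3.0.
__global__ void hash_kernel(uint32_t threads, uint64_t *g_hash)
{
	uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x;
	if (thread < threads)
		g_hash[thread] ^= 0x0555555555555555ull; // placeholder work
}

// Stand-in for the restored plain-uint64_t kernel (the *_30 variant).
__global__ void hash_kernel_30(uint32_t threads, uint64_t *g_hash)
{
	uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x;
	if (thread < threads)
		g_hash[thread] += thread; // placeholder work
}

// Hypothetical helper: compute capability as one integer (3.0 -> 300),
// the same encoding the miner's device_sm[] table uses.
static int sm_of(int dev)
{
	cudaDeviceProp prop;
	cudaGetDeviceProperties(&prop, dev);
	return prop.major * 100 + prop.minor * 10;
}

int main()
{
	const uint32_t threads = 1U << 20;
	uint64_t *d_hash = NULL;
	cudaMalloc(&d_hash, threads * sizeof(uint64_t));
	cudaMemset(d_hash, 0, threads * sizeof(uint64_t));

	const uint32_t threadsperblock = 128;
	dim3 grid((threads + threadsperblock - 1) / threadsperblock);
	dim3 block(threadsperblock);

	// Launch-time dispatch, as in quark_bmw512_cpu_hash_64/_80 below:
	// SM 3.0 devices take the restored legacy kernel.
	if (sm_of(0) > 300)
		hash_kernel<<<grid, block>>>(threads, d_hash);
	else
		hash_kernel_30<<<grid, block>>>(threads, d_hash);

	cudaDeviceSynchronize();
	printf("done: %s\n", cudaGetErrorString(cudaGetLastError()));
	cudaFree(d_hash);
	return 0;
}

Checking the compiled architecture (cuda_arch) on top of the hardware one (device_sm) matters because a binary built only with SM 3.0 code, as can happen under CUDA 7, must take the fallback path even on newer GPUs; that is the situation the commit message suspects.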

miner.h (1)

@@ -689,6 +689,7 @@ void x14hash(void *output, const void *input);
void x15hash(void *output, const void *input);
void x17hash(void *output, const void *input);
void zr5hash(void *output, const void *input);
void zr5hash_pok(void *output, uint32_t *pdata);
#ifdef __cplusplus
}

quark/cuda_bmw512.cu (151)

@@ -5,6 +5,12 @@
__constant__ uint64_t c_PaddedMessage80[16]; // padded message (80 bytes + padding)
#include "cuda_bmw512_30.cu"
#undef SHL
#undef SHR
#undef CONST_EXP2
//#define SHL(x, n) ((x) << (n))
//#define SHR(x, n) ((x) >> (n))
#define SHR(x, n) SHR2(x, n)
@@ -14,10 +20,11 @@ __constant__ uint64_t c_PaddedMessage80[16]; // padded message (80 bytes + paddi
#define ROTL64 ROL2
#define CONST_EXP2(i) \
q[i+0] + ROTL64(q[i+1], 5) + q[i+2] + ROTL64(q[i+3], 11) + \
q[i+4] + ROTL64(q[i+5], 27) + q[i+6] + SWAPUINT2(q[i+7]) + \
q[i+8] + ROTL64(q[i+9], 37) + q[i+10] + ROTL64(q[i+11], 43) + \
q[i+12] + ROTL64(q[i+13], 53) + (SHR(q[i+14],1) ^ q[i+14]) + (SHR(q[i+15],2) ^ q[i+15])
__device__ void Compression512_64_first(uint2 *msg, uint2 *hash)
{
@@ -111,15 +118,14 @@ __device__ void Compression512_64_first(uint2 *msg, uint2 *hash)
((make_uint2(0xFFFFFFF9, 0x6FFFFFFF) + ROTL64(msg[5], 5 + 1) +
ROTL64(msg[5 + 3], 5 + 4) - ROTL64(msg[5 + 10], 5 + 11)) ^ hash[5 + 7]);
#pragma unroll 3
for (int i = 6; i<9; i++) {
q[i + 16] = CONST_EXP2(i) +
((vectorize((i + 16)*(0x0555555555555555ull)) + ROTL64(msg[i], i + 1) -
ROTL64(msg[i - 6], (i - 6) + 1)) ^ hash[i + 7]);
}
#pragma unroll 4
for (int i = 9; i<13; i++) {
q[i + 16] = CONST_EXP2(i) +
((vectorize((i + 16)*(0x0555555555555555ull)) +
@@ -159,7 +165,8 @@ __device__ void Compression512_64_first(uint2 *msg, uint2 *hash)
hash[15] = ROTL64(hash[3], 16) + (XH64 ^ q[31] ^ msg[15]) + (SHR(XL64, 2) ^ q[22] ^ q[15]);
}
__device__
void Compression512(uint2 *msg, uint2 *hash)
{
// Compression ref. implementation
uint2 q[32];
@@ -198,7 +205,7 @@ __device__ void Compression512(uint2 *msg, uint2 *hash)
tmp = (msg[12] ^ hash[12]) - (msg[ 4] ^ hash[ 4]) - (msg[ 6] ^ hash[ 6]) - (msg[ 9] ^ hash[ 9]) + (msg[13] ^ hash[13]);
q[15] = (SHR(tmp, 1) ^ SHL(tmp, 3) ^ ROTL64(tmp, 4) ^ ROTL64(tmp, 37)) + hash[0];
q[0+16] =
(SHR(q[0], 1) ^ SHL(q[0], 2) ^ ROTL64(q[0], 13) ^ ROTL64(q[0], 43)) +
(SHR(q[0+1], 2) ^ SHL(q[0+1], 1) ^ ROTL64(q[0+1], 19) ^ ROTL64(q[0+1], 53)) +
(SHR(q[0+2], 2) ^ SHL(q[0+2], 2) ^ ROTL64(q[0+2], 28) ^ ROTL64(q[0+2], 59)) +
@@ -216,52 +223,51 @@ __device__ void Compression512(uint2 *msg, uint2 *hash)
(SHR(q[0+14], 2) ^ SHL(q[0+14], 2) ^ ROTL64(q[0+14], 28) ^ ROTL64(q[0+14], 59)) +
(SHR(q[0+15], 1) ^ SHL(q[0+15], 3) ^ ROTL64(q[0+15], 4) ^ ROTL64(q[0+15], 37)) +
((make_uint2(0x55555550ul, 0x55555555) + ROTL64(msg[0], 0 + 1) +
ROTL64(msg[0+3], 0+4) - ROTL64(msg[0+10], 0+11) ) ^ hash[0+7]);
q[1 + 16] =
(SHR(q[1], 1) ^ SHL(q[1], 2) ^ ROTL64(q[1], 13) ^ ROTL64(q[1], 43)) +
(SHR(q[1 + 1], 2) ^ SHL(q[1 + 1], 1) ^ ROTL64(q[1 + 1], 19) ^ ROTL64(q[1 + 1], 53)) +
(SHR(q[1 + 2], 2) ^ SHL(q[1 + 2], 2) ^ ROTL64(q[1 + 2], 28) ^ ROTL64(q[1 + 2], 59)) +
(SHR(q[1 + 3], 1) ^ SHL(q[1 + 3], 3) ^ ROTL64(q[1 + 3], 4) ^ ROTL64(q[1 + 3], 37)) +
(SHR(q[1 + 4], 1) ^ SHL(q[1 + 4], 2) ^ ROTL64(q[1 + 4], 13) ^ ROTL64(q[1 + 4], 43)) +
(SHR(q[1 + 5], 2) ^ SHL(q[1 + 5], 1) ^ ROTL64(q[1 + 5], 19) ^ ROTL64(q[1 + 5], 53)) +
(SHR(q[1 + 6], 2) ^ SHL(q[1 + 6], 2) ^ ROTL64(q[1 + 6], 28) ^ ROTL64(q[1 + 6], 59)) +
(SHR(q[1 + 7], 1) ^ SHL(q[1 + 7], 3) ^ ROTL64(q[1 + 7], 4) ^ ROTL64(q[1 + 7], 37)) +
(SHR(q[1 + 8], 1) ^ SHL(q[1 + 8], 2) ^ ROTL64(q[1 + 8], 13) ^ ROTL64(q[1 + 8], 43)) +
(SHR(q[1 + 9], 2) ^ SHL(q[1 + 9], 1) ^ ROTL64(q[1 + 9], 19) ^ ROTL64(q[1 + 9], 53)) +
(SHR(q[1 + 10], 2) ^ SHL(q[1 + 10], 2) ^ ROTL64(q[1 + 10], 28) ^ ROTL64(q[1 + 10], 59)) +
(SHR(q[1 + 11], 1) ^ SHL(q[1 + 11], 3) ^ ROTL64(q[1 + 11], 4) ^ ROTL64(q[1 + 11], 37)) +
(SHR(q[1 + 12], 1) ^ SHL(q[1 + 12], 2) ^ ROTL64(q[1 + 12], 13) ^ ROTL64(q[1 + 12], 43)) +
(SHR(q[1 + 13], 2) ^ SHL(q[1 + 13], 1) ^ ROTL64(q[1 + 13], 19) ^ ROTL64(q[1 + 13], 53)) +
(SHR(q[1 + 14], 2) ^ SHL(q[1 + 14], 2) ^ ROTL64(q[1 + 14], 28) ^ ROTL64(q[1 + 14], 59)) +
(SHR(q[1 + 15], 1) ^ SHL(q[1 + 15], 3) ^ ROTL64(q[1 + 15], 4) ^ ROTL64(q[1 + 15], 37)) +
((make_uint2(0xAAAAAAA5, 0x5AAAAAAA) + ROTL64(msg[1], 1 + 1) +
ROTL64(msg[1 + 3], 1 + 4) - ROTL64(msg[1 + 10], 1 + 11)) ^ hash[1 + 7]);
q[2 + 16] = CONST_EXP2(2) +
((make_uint2(0xFFFFFFFA, 0x5FFFFFFF) + ROTL64(msg[2], 2 + 1) +
ROTL64(msg[2+3], 2+4) - ROTL64(msg[2+10], 2+11) ) ^ hash[2+7]);
q[3 + 16] = CONST_EXP2(3) +
((make_uint2(0x5555554F, 0x65555555) + ROTL64(msg[3], 3 + 1) +
ROTL64(msg[3 + 3], 3 + 4) - ROTL64(msg[3 + 10], 3 + 11)) ^ hash[3 + 7]);
q[4 + 16] = CONST_EXP2(4) +
((make_uint2(0xAAAAAAA4, 0x6AAAAAAA) + ROTL64(msg[4], 4 + 1) +
ROTL64(msg[4 + 3], 4 + 4) - ROTL64(msg[4 + 10], 4 + 11)) ^ hash[4 + 7]);
q[5 + 16] = CONST_EXP2(5) +
((make_uint2(0xFFFFFFF9, 0x6FFFFFFF) + ROTL64(msg[5], 5 + 1) +
ROTL64(msg[5 + 3], 5 + 4) - ROTL64(msg[5 + 10], 5 + 11)) ^ hash[5 + 7]);
q[6 + 16] = CONST_EXP2(6) +
((make_uint2(0x5555554E, 0x75555555)+ ROTL64(msg[6], 6 + 1) +
ROTL64(msg[6 + 3], 6 + 4) - ROTL64(msg[6 - 6], (6 - 6) + 1)) ^ hash[6 + 7]);
q[7 + 16] = CONST_EXP2(7) +
((make_uint2(0xAAAAAAA3, 0x7AAAAAAA) + ROTL64(msg[7], 7 + 1) +
ROTL64(msg[7 + 3], 7 + 4) - ROTL64(msg[7 - 6], (7 - 6) + 1)) ^ hash[7 + 7]);
q[8 + 16] = CONST_EXP2(8) +
((make_uint2(0xFFFFFFF8, 0x7FFFFFFF) + ROTL64(msg[8], 8 + 1) +
ROTL64(msg[8 + 3], 8 + 4) - ROTL64(msg[8 - 6], (8 - 6) + 1)) ^ hash[8 + 7]);
q[9 + 16] = CONST_EXP2(9) +
((make_uint2(0x5555554D, 0x85555555) + ROTL64(msg[9], 9 + 1) +
ROTL64(msg[9 + 3], 9 + 4) - ROTL64(msg[9 - 6], (9 - 6) + 1)) ^ hash[9 - 9]);
q[10 + 16] = CONST_EXP2(10) +
((make_uint2(0xAAAAAAA2, 0x8AAAAAAA) + ROTL64(msg[10], 10 + 1) +
@@ -272,18 +278,15 @@ __device__ void Compression512(uint2 *msg, uint2 *hash)
q[12 + 16] = CONST_EXP2(12) +
((make_uint2(0x5555554C, 0x95555555) + ROTL64(msg[12], 12 + 1) +
ROTL64(msg[12 + 3], 12 + 4) - ROTL64(msg[12 - 6], (12 - 6) + 1)) ^ hash[12 - 9]);
q[13 + 16] = CONST_EXP2(13) +
((make_uint2(0xAAAAAAA1, 0x9AAAAAAA) + ROTL64(msg[13], 13 + 1) +
ROTL64(msg[13 - 13], (13 - 13) + 1) - ROTL64(msg[13 - 6], (13 - 6) + 1)) ^ hash[13 - 9]);
q[14 + 16] = CONST_EXP2(14) +
((make_uint2(0xFFFFFFF6, 0x9FFFFFFF) + ROTL64(msg[14], 14 + 1) +
ROTL64(msg[14 - 13], (14 - 13) + 1) - ROTL64(msg[14 - 6], (14 - 6) + 1)) ^ hash[14 - 9]);
q[15 + 16] = CONST_EXP2(15) +
((make_uint2(0x5555554B, 0xA5555555) + ROTL64(msg[15], 15 + 1) +
ROTL64(msg[15 - 13], (15 - 13) + 1) - ROTL64(msg[15 - 6], (15 - 6) + 1)) ^ hash[15 - 9]);
uint2 XL64 = q[16]^q[17]^q[18]^q[19]^q[20]^q[21]^q[22]^q[23];
uint2 XH64 = XL64^q[24] ^ q[25] ^ q[26] ^ q[27] ^ q[28] ^ q[29] ^ q[30] ^ q[31];
@@ -438,6 +441,7 @@ void quark_bmw512_gpu_hash_80(uint32_t threads, uint32_t startNounce, uint64_t *
__host__
void quark_bmw512_cpu_init(int thr_id, uint32_t threads)
{
cuda_get_arch(thr_id);
}
__host__
@@ -449,28 +453,33 @@ void quark_bmw512_cpu_setBlock_80(void *pdata)
uint64_t *message = (uint64_t*)PaddedMessage;
message[10] = SPH_C64(0x80);
message[15] = SPH_C64(640);
cudaMemcpyToSymbol(c_PaddedMessage80, PaddedMessage, 16*sizeof(uint64_t), 0, cudaMemcpyHostToDevice);
}
__host__
void quark_bmw512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order)
{
const uint32_t threadsperblock = 32;
dim3 grid((threads + threadsperblock-1)/threadsperblock);
dim3 block(threadsperblock);
quark_bmw512_gpu_hash_64<<<grid, block>>>(threads, startNounce, (uint64_t*)d_hash, d_nonceVector);
// MyStreamSynchronize(NULL, order, thr_id);
int dev_id = device_map[thr_id];
if (device_sm[dev_id] > 300 && cuda_arch[dev_id] > 300)
quark_bmw512_gpu_hash_64<<<grid, block>>>(threads, startNounce, (uint64_t*)d_hash, d_nonceVector);
else
quark_bmw512_gpu_hash_64_30<<<grid, block>>>(threads, startNounce, (uint64_t*)d_hash, d_nonceVector);
}
__host__
void quark_bmw512_cpu_hash_80(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_hash, int order)
{
const uint32_t threadsperblock = 128;
dim3 grid((threads + threadsperblock-1)/threadsperblock);
dim3 block(threadsperblock);
quark_bmw512_gpu_hash_80<<<grid, block>>>(threads, startNounce, (uint64_t*)d_hash);
int dev_id = device_map[thr_id];
if (device_sm[dev_id] > 300 && cuda_arch[dev_id] > 300)
quark_bmw512_gpu_hash_80<<<grid, block>>>(threads, startNounce, (uint64_t*)d_hash);
else
quark_bmw512_gpu_hash_80_30<<<grid, block>>>(threads, startNounce, (uint64_t*)d_hash);
}
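Why two implementations exist at all: the sp/klaus kernels store every 64-bit word as a uint2 (low word in .x, high word in .y) so 64-bit rotates lower to 32-bit funnel shifts, and those intrinsics need SM 3.2+, which SM 3.0 devices lack. The restored file below therefore sticks to plain uint64_t and ROTL64. As a conceptual reference only (this is not ccminer's funnel-shift ROL2), the two views are equivalent in the following sense, assuming 0 < n < 64:

// Rotate a 64-bit value held as a (lo, hi) uint2 pair by going through
// a plain uint64_t; ROL2 computes the same result with 32-bit shifts.
__device__ uint2 rotl64_pair(uint2 v, unsigned int n)
{
	uint64_t x = ((uint64_t)v.y << 32) | v.x; // devectorize: .y is the high word
	x = (x << n) | (x >> (64u - n)); // plain 64-bit rotate left
	return make_uint2((uint32_t)x, (uint32_t)(x >> 32)); // vectorize back
}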

quark/cuda_bmw512_30.cu (253)

@@ -0,0 +1,253 @@
#include <stdio.h>
#include <memory.h>
#include "cuda_helper.h"
#define SHL(x, n) ((x) << (n))
#define SHR(x, n) ((x) >> (n))
#define CONST_EXP2 \
q[i+0] + ROTL64(q[i+1], 5) + q[i+2] + ROTL64(q[i+3], 11) + \
q[i+4] + ROTL64(q[i+5], 27) + q[i+6] + SWAPDWORDS(q[i+7]) + \
q[i+8] + ROTL64(q[i+9], 37) + q[i+10] + ROTL64(q[i+11], 43) + \
q[i+12] + ROTL64(q[i+13], 53) + (SHR(q[i+14],1) ^ q[i+14]) + (SHR(q[i+15],2) ^ q[i+15])
static __constant__ uint64_t d_constMem[16];
static uint64_t h_constMem[16] = {
SPH_C64(0x8081828384858687),
SPH_C64(0x88898A8B8C8D8E8F),
SPH_C64(0x9091929394959697),
SPH_C64(0x98999A9B9C9D9E9F),
SPH_C64(0xA0A1A2A3A4A5A6A7),
SPH_C64(0xA8A9AAABACADAEAF),
SPH_C64(0xB0B1B2B3B4B5B6B7),
SPH_C64(0xB8B9BABBBCBDBEBF),
SPH_C64(0xC0C1C2C3C4C5C6C7),
SPH_C64(0xC8C9CACBCCCDCECF),
SPH_C64(0xD0D1D2D3D4D5D6D7),
SPH_C64(0xD8D9DADBDCDDDEDF),
SPH_C64(0xE0E1E2E3E4E5E6E7),
SPH_C64(0xE8E9EAEBECEDEEEF),
SPH_C64(0xF0F1F2F3F4F5F6F7),
SPH_C64(0xF8F9FAFBFCFDFEFF)
};
__device__
void Compression512_30(uint64_t *msg, uint64_t *hash)
{
// Compression ref. implementation
uint64_t tmp;
uint64_t q[32];
tmp = (msg[ 5] ^ hash[ 5]) - (msg[ 7] ^ hash[ 7]) + (msg[10] ^ hash[10]) + (msg[13] ^ hash[13]) + (msg[14] ^ hash[14]);
q[0] = (SHR(tmp, 1) ^ SHL(tmp, 3) ^ ROTL64(tmp, 4) ^ ROTL64(tmp, 37)) + hash[1];
tmp = (msg[ 6] ^ hash[ 6]) - (msg[ 8] ^ hash[ 8]) + (msg[11] ^ hash[11]) + (msg[14] ^ hash[14]) - (msg[15] ^ hash[15]);
q[1] = (SHR(tmp, 1) ^ SHL(tmp, 2) ^ ROTL64(tmp, 13) ^ ROTL64(tmp, 43)) + hash[2];
tmp = (msg[ 0] ^ hash[ 0]) + (msg[ 7] ^ hash[ 7]) + (msg[ 9] ^ hash[ 9]) - (msg[12] ^ hash[12]) + (msg[15] ^ hash[15]);
q[2] = (SHR(tmp, 2) ^ SHL(tmp, 1) ^ ROTL64(tmp, 19) ^ ROTL64(tmp, 53)) + hash[3];
tmp = (msg[ 0] ^ hash[ 0]) - (msg[ 1] ^ hash[ 1]) + (msg[ 8] ^ hash[ 8]) - (msg[10] ^ hash[10]) + (msg[13] ^ hash[13]);
q[3] = (SHR(tmp, 2) ^ SHL(tmp, 2) ^ ROTL64(tmp, 28) ^ ROTL64(tmp, 59)) + hash[4];
tmp = (msg[ 1] ^ hash[ 1]) + (msg[ 2] ^ hash[ 2]) + (msg[ 9] ^ hash[ 9]) - (msg[11] ^ hash[11]) - (msg[14] ^ hash[14]);
q[4] = (SHR(tmp, 1) ^ tmp) + hash[5];
tmp = (msg[ 3] ^ hash[ 3]) - (msg[ 2] ^ hash[ 2]) + (msg[10] ^ hash[10]) - (msg[12] ^ hash[12]) + (msg[15] ^ hash[15]);
q[5] = (SHR(tmp, 1) ^ SHL(tmp, 3) ^ ROTL64(tmp, 4) ^ ROTL64(tmp, 37)) + hash[6];
tmp = (msg[ 4] ^ hash[ 4]) - (msg[ 0] ^ hash[ 0]) - (msg[ 3] ^ hash[ 3]) - (msg[11] ^ hash[11]) + (msg[13] ^ hash[13]);
q[6] = (SHR(tmp, 1) ^ SHL(tmp, 2) ^ ROTL64(tmp, 13) ^ ROTL64(tmp, 43)) + hash[7];
tmp = (msg[ 1] ^ hash[ 1]) - (msg[ 4] ^ hash[ 4]) - (msg[ 5] ^ hash[ 5]) - (msg[12] ^ hash[12]) - (msg[14] ^ hash[14]);
q[7] = (SHR(tmp, 2) ^ SHL(tmp, 1) ^ ROTL64(tmp, 19) ^ ROTL64(tmp, 53)) + hash[8];
tmp = (msg[ 2] ^ hash[ 2]) - (msg[ 5] ^ hash[ 5]) - (msg[ 6] ^ hash[ 6]) + (msg[13] ^ hash[13]) - (msg[15] ^ hash[15]);
q[8] = (SHR(tmp, 2) ^ SHL(tmp, 2) ^ ROTL64(tmp, 28) ^ ROTL64(tmp, 59)) + hash[9];
tmp = (msg[ 0] ^ hash[ 0]) - (msg[ 3] ^ hash[ 3]) + (msg[ 6] ^ hash[ 6]) - (msg[ 7] ^ hash[ 7]) + (msg[14] ^ hash[14]);
q[9] = (SHR(tmp, 1) ^ tmp) + hash[10];
tmp = (msg[ 8] ^ hash[ 8]) - (msg[ 1] ^ hash[ 1]) - (msg[ 4] ^ hash[ 4]) - (msg[ 7] ^ hash[ 7]) + (msg[15] ^ hash[15]);
q[10] = (SHR(tmp, 1) ^ SHL(tmp, 3) ^ ROTL64(tmp, 4) ^ ROTL64(tmp, 37)) + hash[11];
tmp = (msg[ 8] ^ hash[ 8]) - (msg[ 0] ^ hash[ 0]) - (msg[ 2] ^ hash[ 2]) - (msg[ 5] ^ hash[ 5]) + (msg[ 9] ^ hash[ 9]);
q[11] = (SHR(tmp, 1) ^ SHL(tmp, 2) ^ ROTL64(tmp, 13) ^ ROTL64(tmp, 43)) + hash[12];
tmp = (msg[ 1] ^ hash[ 1]) + (msg[ 3] ^ hash[ 3]) - (msg[ 6] ^ hash[ 6]) - (msg[ 9] ^ hash[ 9]) + (msg[10] ^ hash[10]);
q[12] = (SHR(tmp, 2) ^ SHL(tmp, 1) ^ ROTL64(tmp, 19) ^ ROTL64(tmp, 53)) + hash[13];
tmp = (msg[ 2] ^ hash[ 2]) + (msg[ 4] ^ hash[ 4]) + (msg[ 7] ^ hash[ 7]) + (msg[10] ^ hash[10]) + (msg[11] ^ hash[11]);
q[13] = (SHR(tmp, 2) ^ SHL(tmp, 2) ^ ROTL64(tmp, 28) ^ ROTL64(tmp, 59)) + hash[14];
tmp = (msg[ 3] ^ hash[ 3]) - (msg[ 5] ^ hash[ 5]) + (msg[ 8] ^ hash[ 8]) - (msg[11] ^ hash[11]) - (msg[12] ^ hash[12]);
q[14] = (SHR(tmp, 1) ^ tmp) + hash[15];
tmp = (msg[12] ^ hash[12]) - (msg[ 4] ^ hash[ 4]) - (msg[ 6] ^ hash[ 6]) - (msg[ 9] ^ hash[ 9]) + (msg[13] ^ hash[13]);
q[15] = (SHR(tmp, 1) ^ SHL(tmp, 3) ^ ROTL64(tmp, 4) ^ ROTL64(tmp, 37)) + hash[0];
// Expand 1
#pragma unroll 2
for(int i=0;i<2;i++)
{
q[i+16] =
(SHR(q[i], 1) ^ SHL(q[i], 2) ^ ROTL64(q[i], 13) ^ ROTL64(q[i], 43)) +
(SHR(q[i+1], 2) ^ SHL(q[i+1], 1) ^ ROTL64(q[i+1], 19) ^ ROTL64(q[i+1], 53)) +
(SHR(q[i+2], 2) ^ SHL(q[i+2], 2) ^ ROTL64(q[i+2], 28) ^ ROTL64(q[i+2], 59)) +
(SHR(q[i+3], 1) ^ SHL(q[i+3], 3) ^ ROTL64(q[i+3], 4) ^ ROTL64(q[i+3], 37)) +
(SHR(q[i+4], 1) ^ SHL(q[i+4], 2) ^ ROTL64(q[i+4], 13) ^ ROTL64(q[i+4], 43)) +
(SHR(q[i+5], 2) ^ SHL(q[i+5], 1) ^ ROTL64(q[i+5], 19) ^ ROTL64(q[i+5], 53)) +
(SHR(q[i+6], 2) ^ SHL(q[i+6], 2) ^ ROTL64(q[i+6], 28) ^ ROTL64(q[i+6], 59)) +
(SHR(q[i+7], 1) ^ SHL(q[i+7], 3) ^ ROTL64(q[i+7], 4) ^ ROTL64(q[i+7], 37)) +
(SHR(q[i+8], 1) ^ SHL(q[i+8], 2) ^ ROTL64(q[i+8], 13) ^ ROTL64(q[i+8], 43)) +
(SHR(q[i+9], 2) ^ SHL(q[i+9], 1) ^ ROTL64(q[i+9], 19) ^ ROTL64(q[i+9], 53)) +
(SHR(q[i+10], 2) ^ SHL(q[i+10], 2) ^ ROTL64(q[i+10], 28) ^ ROTL64(q[i+10], 59)) +
(SHR(q[i+11], 1) ^ SHL(q[i+11], 3) ^ ROTL64(q[i+11], 4) ^ ROTL64(q[i+11], 37)) +
(SHR(q[i+12], 1) ^ SHL(q[i+12], 2) ^ ROTL64(q[i+12], 13) ^ ROTL64(q[i+12], 43)) +
(SHR(q[i+13], 2) ^ SHL(q[i+13], 1) ^ ROTL64(q[i+13], 19) ^ ROTL64(q[i+13], 53)) +
(SHR(q[i+14], 2) ^ SHL(q[i+14], 2) ^ ROTL64(q[i+14], 28) ^ ROTL64(q[i+14], 59)) +
(SHR(q[i+15], 1) ^ SHL(q[i+15], 3) ^ ROTL64(q[i+15], 4) ^ ROTL64(q[i+15], 37)) +
(( ((i+16)*(0x0555555555555555ull)) + ROTL64(msg[i], i+1) +
ROTL64(msg[i+3], i+4) - ROTL64(msg[i+10], i+11) ) ^ hash[i+7]);
}
#pragma unroll 4
for(int i=2;i<6;i++) {
q[i+16] = CONST_EXP2 +
(( ((i+16)*(0x0555555555555555ull)) + ROTL64(msg[i], i+1) +
ROTL64(msg[i+3], i+4) - ROTL64(msg[i+10], i+11) ) ^ hash[i+7]);
}
#pragma unroll 3
for(int i=6;i<9;i++) {
q[i+16] = CONST_EXP2 +
(( ((i+16)*(0x0555555555555555ull)) + ROTL64(msg[i], i+1) +
ROTL64(msg[i+3], i+4) - ROTL64(msg[i-6], (i-6)+1) ) ^ hash[i+7]);
}
#pragma unroll 4
for(int i=9;i<13;i++) {
q[i+16] = CONST_EXP2 +
(( ((i+16)*(0x0555555555555555ull)) + ROTL64(msg[i], i+1) +
ROTL64(msg[i+3], i+4) - ROTL64(msg[i-6], (i-6)+1) ) ^ hash[i-9]);
}
#pragma unroll 3
for(int i=13;i<16;i++) {
q[i+16] = CONST_EXP2 +
(( ((i+16)*(0x0555555555555555ull)) + ROTL64(msg[i], i+1) +
ROTL64(msg[i-13], (i-13)+1) - ROTL64(msg[i-6], (i-6)+1) ) ^ hash[i-9]);
}
uint64_t XL64 = q[16]^q[17]^q[18]^q[19]^q[20]^q[21]^q[22]^q[23];
uint64_t XH64 = XL64^q[24]^q[25]^q[26]^q[27]^q[28]^q[29]^q[30]^q[31];
hash[0] = (SHL(XH64, 5) ^ SHR(q[16],5) ^ msg[ 0]) + ( XL64 ^ q[24] ^ q[ 0]);
hash[1] = (SHR(XH64, 7) ^ SHL(q[17],8) ^ msg[ 1]) + ( XL64 ^ q[25] ^ q[ 1]);
hash[2] = (SHR(XH64, 5) ^ SHL(q[18],5) ^ msg[ 2]) + ( XL64 ^ q[26] ^ q[ 2]);
hash[3] = (SHR(XH64, 1) ^ SHL(q[19],5) ^ msg[ 3]) + ( XL64 ^ q[27] ^ q[ 3]);
hash[4] = (SHR(XH64, 3) ^ q[20] ^ msg[ 4]) + ( XL64 ^ q[28] ^ q[ 4]);
hash[5] = (SHL(XH64, 6) ^ SHR(q[21],6) ^ msg[ 5]) + ( XL64 ^ q[29] ^ q[ 5]);
hash[6] = (SHR(XH64, 4) ^ SHL(q[22],6) ^ msg[ 6]) + ( XL64 ^ q[30] ^ q[ 6]);
hash[7] = (SHR(XH64,11) ^ SHL(q[23],2) ^ msg[ 7]) + ( XL64 ^ q[31] ^ q[ 7]);
hash[ 8] = ROTL64(hash[4], 9) + ( XH64 ^ q[24] ^ msg[ 8]) + (SHL(XL64,8) ^ q[23] ^ q[ 8]);
hash[ 9] = ROTL64(hash[5],10) + ( XH64 ^ q[25] ^ msg[ 9]) + (SHR(XL64,6) ^ q[16] ^ q[ 9]);
hash[10] = ROTL64(hash[6],11) + ( XH64 ^ q[26] ^ msg[10]) + (SHL(XL64,6) ^ q[17] ^ q[10]);
hash[11] = ROTL64(hash[7],12) + ( XH64 ^ q[27] ^ msg[11]) + (SHL(XL64,4) ^ q[18] ^ q[11]);
hash[12] = ROTL64(hash[0],13) + ( XH64 ^ q[28] ^ msg[12]) + (SHR(XL64,3) ^ q[19] ^ q[12]);
hash[13] = ROTL64(hash[1],14) + ( XH64 ^ q[29] ^ msg[13]) + (SHR(XL64,4) ^ q[20] ^ q[13]);
hash[14] = ROTL64(hash[2],15) + ( XH64 ^ q[30] ^ msg[14]) + (SHR(XL64,7) ^ q[21] ^ q[14]);
hash[15] = ROTL64(hash[3],16) + ( XH64 ^ q[31] ^ msg[15]) + (SHR(XL64,2) ^ q[22] ^ q[15]);
}
__global__
void quark_bmw512_gpu_hash_64_30(uint32_t threads, uint32_t startNounce, uint64_t *g_hash, uint32_t *g_nonceVector)
{
int thread = (blockDim.x * blockIdx.x + threadIdx.x);
if (thread < threads)
{
uint32_t nounce = (g_nonceVector != NULL) ? g_nonceVector[thread] : (startNounce + thread);
int hashPosition = nounce - startNounce;
uint64_t *inpHash = &g_hash[8 * hashPosition];
// Init
uint64_t h[16];
/*
h[ 0] = SPH_C64(0x8081828384858687);
h[ 1] = SPH_C64(0x88898A8B8C8D8E8F);
h[ 2] = SPH_C64(0x9091929394959697);
h[ 3] = SPH_C64(0x98999A9B9C9D9E9F);
h[ 4] = SPH_C64(0xA0A1A2A3A4A5A6A7);
h[ 5] = SPH_C64(0xA8A9AAABACADAEAF);
h[ 6] = SPH_C64(0xB0B1B2B3B4B5B6B7);
h[ 7] = SPH_C64(0xB8B9BABBBCBDBEBF);
h[ 8] = SPH_C64(0xC0C1C2C3C4C5C6C7);
h[ 9] = SPH_C64(0xC8C9CACBCCCDCECF);
h[10] = SPH_C64(0xD0D1D2D3D4D5D6D7);
h[11] = SPH_C64(0xD8D9DADBDCDDDEDF);
h[12] = SPH_C64(0xE0E1E2E3E4E5E6E7);
h[13] = SPH_C64(0xE8E9EAEBECEDEEEF);
h[14] = SPH_C64(0xF0F1F2F3F4F5F6F7);
h[15] = SPH_C64(0xF8F9FAFBFCFDFEFF);
*/
// Copy the message (note: the message is 64 bytes,
// but BMW works on 128 bytes!)
uint64_t message[16];
#pragma unroll 8
for(int i=0;i<8;i++)
message[i] = inpHash[i];
#pragma unroll 6
for(int i=9;i<15;i++)
message[i] = 0;
message[8] = SPH_C64(0x80);
// 64 Bytes * 8 = 512 Bits
message[15] = SPH_C64(512);
#pragma unroll 16
for(int i=0;i<16;i++)
h[i] = d_constMem[i];
// Compression 1
Compression512_30(message, h);
// Final
#pragma unroll 16
for(int i=0;i<16;i++)
message[i] = 0xaaaaaaaaaaaaaaa0ull + (uint64_t)i;
Compression512_30(h, message);
uint64_t *outpHash = &g_hash[8 * hashPosition];
#pragma unroll 8
for(int i=0;i<8;i++)
outpHash[i] = message[i+8];
}
}
__global__
void quark_bmw512_gpu_hash_80_30(uint32_t threads, uint32_t startNounce, uint64_t *g_hash)
{
int thread = (blockDim.x * blockIdx.x + threadIdx.x);
if (thread < threads)
{
uint32_t nounce = startNounce + thread;
// Init
uint64_t h[16];
#pragma unroll 16
for(int i=0;i<16;i++)
h[i] = d_constMem[i];
uint64_t message[16];
#pragma unroll 16
for(int i=0;i<16;i++)
message[i] = c_PaddedMessage80[i];
message[9] = REPLACE_HIWORD(message[9], cuda_swab32(nounce));
// Compression 1
Compression512_30(message, h);
// Final
#pragma unroll 16
for(int i=0;i<16;i++)
message[i] = 0xaaaaaaaaaaaaaaa0ull + (uint64_t)i;
Compression512_30(h, message);
uint2 *outpHash = (uint2*) (&g_hash[thread * 8U]);
#pragma unroll 8
for(int i=0;i<8;i++)
outpHash[i] = ((uint2*)message)[i+8];
}
}

util.cpp (9)

@@ -1696,13 +1696,13 @@ void do_gpu_tests(void)
work_restart[0].restart = 1;
tgt[7] = 0xffff;
memset(buf, 0, sizeof buf);
//memcpy(buf, zrtest, 80);
scanhash_zr5(0, (uint32_t*)buf, tgt, zrtest[19]+1, &done);
//memset(buf, 0, sizeof buf);
//scanhash_skeincoin(0, (uint32_t*)buf, tgt, 1, &done);
//memset(buf, 0, sizeof buf);
//memcpy(buf, zrtest, 80);
//scanhash_zr5(0, (uint32_t*)buf, tgt, zrtest[19]+1, &done);
memset(buf, 0, sizeof buf);
scanhash_x11(0, (uint32_t*)buf, tgt, 1, &done);
@@ -1718,7 +1718,6 @@ void do_gpu_tests(void)
opt_tracegpu = false;
#endif
}
extern "C" void zr5hash_pok(void *output, uint32_t *pdata);
void print_hash_tests(void)
{

x11/cuda_x11_simd512.cu (4)

@@ -646,6 +646,8 @@ void x11_simd512_gpu_final_64(uint32_t threads, uint32_t *g_hash, uint4 *g_fft4,
__host__
int x11_simd512_cpu_init(int thr_id, uint32_t threads)
{
cuda_get_arch(thr_id);
CUDA_CALL_OR_RET_X(cudaMalloc(&d_temp4[thr_id], 64*sizeof(uint4)*threads), (int) err); /* todo: prevent -i 21 */
CUDA_CALL_OR_RET_X(cudaMalloc(&d_state[thr_id], 32*sizeof(int)*threads), (int) err);
#ifndef DEVICE_DIRECT_CONSTANTS
@@ -686,7 +688,7 @@ void x11_simd512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce,
x11_simd512_gpu_expand_64 <<<gridX8, block>>> (threads, d_hash, d_temp4[thr_id]);
if (device_sm[device_map[thr_id]] >= 500) {
if (device_sm[device_map[thr_id]] >= 500 && cuda_arch[device_map[thr_id]] >= 500) {
x11_simd512_gpu_compress_64_maxwell <<< grid, block >>> (threads, d_hash, d_temp4[thr_id], d_state[thr_id]);
} else {
x11_simd512_gpu_compress1_64 <<< grid, block >>> (threads, d_hash, d_temp4[thr_id], d_state[thr_id]);

x11/x11.cu (2)

@@ -159,9 +159,9 @@ extern "C" int scanhash_x11(int thr_id, uint32_t *pdata,
cudaSetDevice(device_map[thr_id]);
quark_blake512_cpu_init(thr_id, throughput);
quark_bmw512_cpu_init(thr_id, throughput);
quark_groestl512_cpu_init(thr_id, throughput);
quark_skein512_cpu_init(thr_id, throughput);
quark_bmw512_cpu_init(thr_id, throughput);
quark_keccak512_cpu_init(thr_id, throughput);
quark_jh512_cpu_init(thr_id, throughput);
x11_luffaCubehash512_cpu_init(thr_id, throughput);
