#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <memory.h>

// from cpu-miner.c: maps logical mining-thread ids to physical CUDA device ids
extern int device_map[8];

// when enabled, the kernel copies the round-constant table into dynamic
// shared memory and reads it from there (see heftyLookUp / hefty_gpu_hash)
#define USE_SHARED 1

// NOTE(review): replace these ad-hoc typedefs with a proper header later
typedef unsigned int uint32_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
// device properties of the selected GPU; filled in by the init function
static cudaDeviceProp props;

// global memory for the Hefty hashes of all threads (one buffer per GPU)
uint32_t *d_heftyHashes[8];

/* hash tables */
__constant__ uint32_t hefty_gpu_constantTable[64];

#if USE_SHARED
// read a round constant from the shared-memory copy that the kernel sets up
// in the dynamic shared array `heftytab`
#define heftyLookUp(x) (*((uint32_t*)heftytab + (x)))
#else
#define heftyLookUp(x) hefty_gpu_constantTable[x]
#endif

// block header words that still need to be expanded per nonce
__constant__ uint32_t hefty_gpu_blockHeader[16]; // 2x512 bit message
__constant__ uint32_t hefty_gpu_register[8];
__constant__ uint32_t hefty_gpu_sponge[4];
// SHA-256 initial hash values (FIPS 180-4).
// NOTE(review): the 5th value (0x510e527fUL) fell into a damaged region of
// the source and was restored from the SHA-256 specification — verify.
uint32_t hefty_cpu_hashTable[] = {
    0x6a09e667UL,
    0xbb67ae85UL,
    0x3c6ef372UL,
    0xa54ff53aUL,
    0x510e527fUL,
    0x9b05688cUL,
    0x1f83d9abUL,
    0x5be0cd19UL };
|
uint32_t hefty_cpu_constantTable[] = { |
|
|
|
uint32_t hefty_cpu_constantTable[] = { |
|
|
|
0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL, |
|
|
|
0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL, |
|
|
|
0x3956c25bUL, 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL, |
|
|
|
0x3956c25bUL, 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL, |
|
|
|
0xd807aa98UL, 0x12835b01UL, 0x243185beUL, 0x550c7dc3UL, |
|
|
|
0xd807aa98UL, 0x12835b01UL, 0x243185beUL, 0x550c7dc3UL, |
|
|
|
0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL, 0xc19bf174UL, |
|
|
|
0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL, 0xc19bf174UL, |
|
|
@ -48,350 +63,352 @@ uint32_t hefty_cpu_constantTable[] = { |
|
|
|
0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL |
|
|
|
0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL |
|
|
|
}; |
|
|
|
}; |
|
|
|
|
|
|
|
|
|
|
|
//#define S(x, n) (((x) >> (n)) | ((x) << (32 - (n))))
// right-rotate x by n bits; a real function instead of the macro above so the
// data-dependent rotate amounts used in Mangle()/Br() are evaluated once
static __host__ __device__ uint32_t S(uint32_t x, int n)
{
    return (((x) >> (n)) | ((x) << (32 - (n))));
}
// logical shift right
#define R(x, n) ((x) >> (n))
// SHA-256 choose / majority functions (optimized forms)
#define Ch(x, y, z) ((x & (y ^ z)) ^ z)
#define Maj(x, y, z) ((x & (y | z)) | (y & z))
// SHA-256 big-sigma and small-sigma functions
#define S0(x) (S(x, 2) ^ S(x, 13) ^ S(x, 22))
#define S1(x) (S(x, 6) ^ S(x, 11) ^ S(x, 25))
#define s0(x) (S(x, 7) ^ S(x, 18) ^ R(x, 3))
#define s1(x) (S(x, 17) ^ S(x, 19) ^ R(x, 10))
// byte-swap a 32 bit word (endianness conversion)
#define SWAB32(x) ( ((x & 0x000000FF) << 24) | ((x & 0x0000FF00) << 8) | ((x & 0x00FF0000) >> 8) | ((x & 0xFF000000) >> 24) )

// uint8_t: fold the two nibbles of a byte into 4 bits
#define smoosh4(x) ( ((x)>>4) ^ ((x) & 0x0F) )

// fold a 32 bit word down to 2 bits and scale the result to a byte offset
// in bits (24, 16, 8 or 0) — callers feed it straight into getByte()
__host__ __forceinline__ __device__ uint8_t smoosh2(uint32_t x)
{
    uint16_t w = (x >> 16) ^ (x & 0xffff);
    uint8_t n = smoosh4( (uint8_t)( (w >> 8) ^ (w & 0xFF) ) );
    return 24 - (((n >> 2) ^ (n & 0x03)) << 3);
}
// four smoosh4 folds at once, one per byte lane of a 32 bit word
#define smoosh4Quad(x) ( (((x)>>4) ^ (x)) & 0x0F0F0F0F )
// extract the byte of x starting at bit offset y (y in {0, 8, 16, 24})
#define getByte(x,y) ( ((x) >> (y)) & 0xFF )
// In-place mixing of the 4-word sponge state.
// Branch-free form: the original data-dependent switch over smoosh2() is
// replaced by mask arithmetic (bits 3 and 4 of the smoosh2 byte offset
// broadcast to full-word masks) so GPU warps do not diverge here.
__host__ __forceinline__ __device__ void Mangle(uint32_t *inp)
{
    uint32_t r = smoosh4Quad(inp[0]);
    uint32_t inp0org;
    uint32_t tmp0Mask, tmp1Mask;
    uint32_t in1, in2, isAddition;
    uint32_t tmp;
    uint8_t b;

    inp[1] = inp[1] ^ S(inp[0], getByte(r, 24));

    // stage 2: update inp[2]; the operation is selected by smoosh2(inp[1])
    r += 0x01010101;
    tmp = smoosh2(inp[1]);
    b = getByte(r,tmp);
    inp0org = S(inp[0], b);
    tmp0Mask = -((tmp >> 3)&1); // bit 3 broadcast to a full-word mask
    tmp1Mask = -((tmp >> 4)&1); // bit 4 broadcast to a full-word mask
    in1 = (inp[2] & ~inp0org) |
          (tmp1Mask & ~inp[2] & inp0org) |
          (~tmp0Mask & ~inp[2] & inp0org);
    in2 = inp[2] += ~inp0org;
    isAddition = ~tmp0Mask & tmp1Mask;
    inp[2] = isAddition ? in2 : in1;

    // stage 3: update inp[3] the same way, keyed off inp[1] ^ inp[2]
    r += 0x01010101;
    tmp = smoosh2(inp[1] ^ inp[2]);
    b = getByte(r,tmp);
    inp0org = S(inp[0], b);
    tmp0Mask = -((tmp >> 3)&1); // bit 3 broadcast to a full-word mask
    tmp1Mask = -((tmp >> 4)&1); // bit 4 broadcast to a full-word mask
    in1 = (inp[3] & ~inp0org) |
          (tmp1Mask & ~inp[3] & inp0org) |
          (~tmp0Mask & ~inp[3] & inp0org);
    in2 = inp[3] += ~inp0org;
    isAddition = ~tmp0Mask & tmp1Mask;
    inp[3] = isAddition ? in2 : in1;

    inp[0] ^= (inp[1] ^ inp[2]) + inp[3];
}
|
|
|
|
|
|
|
|
// absorb one word into the sponge state, then mix it forward
__host__ __forceinline__ __device__ void Absorb(uint32_t *inp, uint32_t x)
{
    inp[0] ^= x;
    Mangle(inp);
}
// emit the current first sponge word, then mix the state forward
__host__ __forceinline__ __device__ uint32_t Squeeze(uint32_t *inp)
{
    uint32_t y = inp[0];
    Mangle(inp);
    return y;
}
// "Branch" operation: manipulates a single bit of x (position t, chosen by a
// word squeezed from the sponge). Branch-free equivalent of the original
// switch on the two low bits of r:
//   00 -> x unchanged, 01 -> x & ~y, 10 -> x | y, 11 -> x ^ y
__host__ __forceinline__ __device__ uint32_t Br(uint32_t *sponge, uint32_t x)
{
    uint32_t r = Squeeze(sponge);

    uint32_t t = ((r >> 8) & 0x1F);     // which bit to operate on
    uint32_t y = 1 << t;                // single-bit mask

    uint32_t a = (((r>>1) & 0x01) << t) & y;  // y if bit 1 of r set, else 0
    uint32_t b = ((r & 0x01) << t) & y;       // y if bit 0 of r set, else 0
    uint32_t c = x & y;                       // current value of the bit

    uint32_t retVal = (x & ~y) | (~b & c) | (a & ~c);
    return retVal;
}
// one Hefty round on the GPU: SHA-256-like round with Br()-perturbed inputs;
// shifts the register file down by one and inserts the new values
__forceinline__ __device__ void hefty_gpu_round(uint32_t *regs, uint32_t W, uint32_t K, uint32_t *sponge)
{
    uint32_t tmpBr;

    uint32_t brG = Br(sponge, regs[6]);
    uint32_t brF = Br(sponge, regs[5]);
    uint32_t tmp1 = Ch(regs[4], brF, brG) + regs[7] + W + K;
    uint32_t brE = Br(sponge, regs[4]);
    uint32_t tmp2 = tmp1 + S1(brE);
    uint32_t brC = Br(sponge, regs[2]);
    uint32_t brB = Br(sponge, regs[1]);
    uint32_t brA = Br(sponge, regs[0]);
    uint32_t tmp3 = Maj(brA, brB, brC);
    tmpBr = Br(sponge, regs[0]);
    uint32_t tmp4 = tmp3 + S0(tmpBr);
    tmpBr = Br(sponge, tmp2);

#pragma unroll 7
    for (int k=6; k >= 0; k--) regs[k+1] = regs[k];
    regs[0] = tmp2 + tmp4;
    regs[4] += tmpBr;
}
// one Hefty round on the CPU — must stay in lockstep with hefty_gpu_round,
// since the CPU computes the shared first-round midstate for all threads
__host__ void hefty_cpu_round(uint32_t *regs, uint32_t W, uint32_t K, uint32_t *sponge)
{
    uint32_t tmpBr;

    uint32_t brG = Br(sponge, regs[6]);
    uint32_t brF = Br(sponge, regs[5]);
    uint32_t tmp1 = Ch(regs[4], brF, brG) + regs[7] + W + K;
    uint32_t brE = Br(sponge, regs[4]);
    uint32_t tmp2 = tmp1 + S1(brE);
    uint32_t brC = Br(sponge, regs[2]);
    uint32_t brB = Br(sponge, regs[1]);
    uint32_t brA = Br(sponge, regs[0]);
    uint32_t tmp3 = Maj(brA, brB, brC);
    tmpBr = Br(sponge, regs[0]);
    uint32_t tmp4 = tmp3 + S0(tmpBr);
    tmpBr = Br(sponge, tmp2);

    for (int k=6; k >= 0; k--) regs[k+1] = regs[k];
    regs[0] = tmp2 + tmp4;
    regs[4] += tmpBr;
}
// The hash kernel: one thread computes the Hefty hash for one nonce.
// Launch expectations: when USE_SHARED is enabled, dynamic shared memory must
// hold at least the 64-word round-constant table, and blockDim.x must be
// >= 64 (the table is loaded by threads with threadIdx.x < 64).
__global__ void hefty_gpu_hash(int threads, uint32_t startNounce, void *outputHash)
{
#if USE_SHARED
    extern __shared__ char heftytab[];
    if(threadIdx.x < 64)
    {
        *((uint32_t*)heftytab + threadIdx.x) = hefty_gpu_constantTable[threadIdx.x];
    }

    // barrier is outside the divergent load above: all threads reach it
    __syncthreads();
#endif

    int thread = (blockDim.x * blockIdx.x + threadIdx.x);
    if (thread < threads)
    {
        // the nonce handled by this thread
        uint32_t nounce = startNounce + thread;

        // W is kept as two sliding 16-word windows instead of a full
        // 64-word array (reduction from 256 bytes to 128 bytes per thread)
        uint32_t W1[16];
        uint32_t W2[16];

        // initialize registers a..h and the sponge from the CPU midstate
        uint32_t regs[8];
        uint32_t hash[8];
        uint32_t sponge[4];

#pragma unroll 4
        for(int k=0; k < 4; k++)
            sponge[k] = hefty_gpu_sponge[k];

        // pre
#pragma unroll 8
        for (int k=0; k < 8; k++)
        {
            regs[k] = hefty_gpu_register[k];
            hash[k] = regs[k];
        }

        // remaining 20 bytes of block 2 plus padding
#pragma unroll 16
        for(int k=0;k<16;k++)
            W1[k] = hefty_gpu_blockHeader[k];
        W1[3] = SWAB32(nounce);

        // 2nd round
#pragma unroll 16
        for(int j=0;j<16;j++)
            Absorb(sponge, W1[j] ^ heftyLookUp(j));

        // progress W1 (bytes 0...63)
#pragma unroll 16
        for(int j=0;j<16;j++)
        {
            Absorb(sponge, regs[3] ^ regs[7]);
            hefty_gpu_round(regs, W1[j], heftyLookUp(j), sponge);
        }

        // progress W2 (bytes 64...127), then W3 (bytes 128...191), ...
#pragma unroll 3
        for(int k=0;k<3;k++)
        {
#pragma unroll 2
            for(int j=0;j<2;j++)
                W2[j] = s1(W1[14+j]) + W1[9+j] + s0(W1[1+j]) + W1[j];
#pragma unroll 5
            for(int j=2;j<7;j++)
                W2[j] = s1(W2[j-2]) + W1[9+j] + s0(W1[1+j]) + W1[j];

#pragma unroll 8
            for(int j=7;j<15;j++)
                W2[j] = s1(W2[j-2]) + W2[j-7] + s0(W1[1+j]) + W1[j];

            W2[15] = s1(W2[13]) + W2[8] + s0(W2[0]) + W1[15];

#pragma unroll 16
            for(int j=0;j<16;j++)
            {
                Absorb(sponge, regs[3] + regs[7]);
                hefty_gpu_round(regs, W2[j], heftyLookUp(j + 16 * (k+1)), sponge);
            }
#pragma unroll 16
            for(int j=0;j<16;j++)
                W1[j] = W2[j];
        }

#pragma unroll 8
        for(int k=0;k<8;k++)
            hash[k] += regs[k];

#pragma unroll 8
        for(int k=0;k<8;k++)
            ((uint32_t*)outputHash)[8*thread+k] = SWAB32(hash[k]);
    }
}
// setup functions
// NOTE(review): CUDA API return codes are not checked anywhere in this file
__host__ void hefty_cpu_init(int thr_id, int threads)
{
    // select the physical device mapped to this mining thread
    cudaSetDevice(device_map[thr_id]);

    // query device properties (used later to pick the launch configuration)
    cudaGetDeviceProperties(&props, device_map[thr_id]);

    // copy the round-constant table into GPU constant memory
    cudaMemcpyToSymbol( hefty_gpu_constantTable,
                        hefty_cpu_constantTable,
                        sizeof(uint32_t) * 64 );

    // allocate global memory for all Hefty1 hashes
    cudaMalloc(&d_heftyHashes[thr_id], 8 * sizeof(uint32_t) * threads);
}
// data must be 84 bytes!
__host__ void hefty_cpu_setBlock(int thr_id, int threads, void *data)
{
    // expand and set the message
    uint32_t msgBlock[32];

    memset(msgBlock, 0, sizeof(uint32_t) * 32);
    memcpy(&msgBlock[0], data, 84);
    msgBlock[21] |= 0x80;       // padding bit directly after the 84 data bytes
    msgBlock[31] = 672;         // bit length (84 * 8)

    for(int i=0;i<31;i++)       // swap byte order (length word stays as-is)
        msgBlock[i] = SWAB32(msgBlock[i]);

    // the first round is computed on the CPU because it is identical for
    // all threads; the resulting midstate is then handed to the GPU

    // build the expanded block W
    uint32_t W[64];
    memcpy(W, &msgBlock[0], sizeof(uint32_t) * 16);
    for(int j=16;j<64;j++)
        W[j] = s1(W[j-2]) + W[j-7] + s0(W[j-15]) + W[j-16];

    // initialize registers a..h from the hash table
    uint32_t regs[8];
    uint32_t hash[8];
    uint32_t sponge[4];

    // pre
    memset(sponge, 0, sizeof(uint32_t) * 4);
    for (int k=0; k < 8; k++)
    {
        regs[k] = hefty_cpu_hashTable[k];
        hash[k] = regs[k];
    }

    // 1st round
    for(int j=0;j<16;j++)
        Absorb(sponge, W[j] ^ hefty_cpu_constantTable[j]);

    for(int j=0;j<16;j++)
    {
        Absorb(sponge, regs[3] ^ regs[7]);
        hefty_cpu_round(regs, W[j], hefty_cpu_constantTable[j], sponge);
    }

    for(int j=16;j<64;j++)
    {
        Absorb(sponge, regs[3] + regs[7]);
        hefty_cpu_round(regs, W[j], hefty_cpu_constantTable[j], sponge);
    }

    for(int k=0;k<8;k++)
        hash[k] += regs[k];

    // store the sponge state
    cudaMemcpyToSymbol( hefty_gpu_sponge,
                        sponge,
                        sizeof(uint32_t) * 4 );
    // store the midstate hash
    cudaMemcpyToSymbol( hefty_gpu_register,
                        hash,
                        sizeof(uint32_t) * 8 );

    // set the block header (the correct nonce is still missing in it)
    cudaMemcpyToSymbol( hefty_gpu_blockHeader,
                        &msgBlock[16],
                        64);
}
// launch the Hefty kernel for `threads` nonces starting at startNounce
__host__ void hefty_cpu_hash(int thr_id, int threads, int startNounce)
{
    // drive compute 3.x and 5.x devices best with 768 threads per block,
    // all other devices with 512 threads
    int threadsperblock = (props.major >= 3) ? 768 : 512;

    // compute how many thread blocks we need
    dim3 grid((threads + threadsperblock-1)/threadsperblock);
    dim3 block(threadsperblock);

    // size of the dynamic shared memory area
#if USE_SHARED
    size_t shared_size = 8 * 64 * sizeof(uint32_t);
#else
    size_t shared_size = 0;
#endif

//  fprintf(stderr, "threads=%d, %d blocks, %d threads per block, %d bytes shared\n", threads, grid.x, block.x, shared_size);

    hefty_gpu_hash<<<grid, block, shared_size>>>(threads, startNounce, (void*)d_heftyHashes[thr_id]);
}