Christian Buchner
11 years ago
16 changed files with 3020 additions and 1354 deletions
@ -0,0 +1,292 @@
@@ -0,0 +1,292 @@
|
||||
|
||||
extern "C" |
||||
{ |
||||
#include "sph/sph_blake.h" |
||||
#include "sph/sph_bmw.h" |
||||
#include "sph/sph_groestl.h" |
||||
#include "sph/sph_skein.h" |
||||
#include "sph/sph_jh.h" |
||||
#include "sph/sph_keccak.h" |
||||
#include "miner.h" |
||||
} |
||||
|
||||
#include <stdint.h> |
||||
|
||||
// From cpu-miner.c: maps logical thread ids to physical CUDA device ids.
extern int device_map[8];

// Per-GPU device buffer holding the input/output of the chained hash
// functions (16 x 32-bit words = 64 bytes per nonce).
static uint32_t *d_hash[8];

// Per-GPU device buffers used to build the nonce vectors for the
// conditional (branching) hash steps.
static uint32_t *d_animeNonces[8];
static uint32_t *d_branch1Nonces[8];
static uint32_t *d_branch2Nonces[8];
static uint32_t *d_branch3Nonces[8];
||||
|
||||
extern void quark_blake512_cpu_init(int thr_id, int threads); |
||||
extern void quark_blake512_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); |
||||
|
||||
extern void quark_bmw512_cpu_init(int thr_id, int threads); |
||||
extern void quark_bmw512_cpu_setBlock_80(void *pdata); |
||||
extern void quark_bmw512_cpu_hash_80(int thr_id, int threads, uint32_t startNounce, uint32_t *d_outputHash, int order); |
||||
extern void quark_bmw512_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_outputHash, int order); |
||||
|
||||
extern void quark_groestl512_cpu_init(int thr_id, int threads); |
||||
extern void quark_groestl512_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); |
||||
extern void quark_doublegroestl512_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); |
||||
|
||||
extern void quark_skein512_cpu_init(int thr_id, int threads); |
||||
extern void quark_skein512_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); |
||||
|
||||
extern void quark_keccak512_cpu_init(int thr_id, int threads); |
||||
extern void quark_keccak512_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); |
||||
|
||||
extern void quark_jh512_cpu_init(int thr_id, int threads); |
||||
extern void quark_jh512_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); |
||||
|
||||
extern void quark_check_cpu_init(int thr_id, int threads); |
||||
extern void quark_check_cpu_setTarget(const void *ptarget); |
||||
extern uint32_t quark_check_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_inputHash, int order); |
||||
|
||||
extern void quark_compactTest_cpu_init(int thr_id, int threads); |
||||
extern void quark_compactTest_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *inpHashes, uint32_t *d_validNonceTable, |
||||
uint32_t *d_nonces1, size_t *nrm1, |
||||
uint32_t *d_nonces2, size_t *nrm2, |
||||
int order); |
||||
extern void quark_compactTest_single_false_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *inpHashes, uint32_t *d_validNonceTable, |
||||
uint32_t *d_nonces1, size_t *nrm1, |
||||
int order); |
||||
|
||||
// Original Quarkhash Funktion aus einem miner Quelltext |
||||
// Reference CPU implementation of the "anime" chained hash, adapted from an
// original miner source. Used to re-verify candidate nonces found on the GPU.
//
// Chain: BMW512(80-byte header) -> Blake512 -> [Groestl512 | Skein512] ->
//        Groestl512 -> JH512 -> [Blake512 | BMW512] -> Keccak512 ->
//        Skein512 -> [Keccak512 | JH512]
// Each bracketed step picks its branch from bit 3 of the first byte of the
// current 64-byte intermediate hash (hash[0] & 0x8) — this must match the
// GPU-side QuarkTrueTest/QuarkFalseTest predicates.
//
// state: out — receives the first 32 bytes of the final 64-byte digest.
// input: in  — the 80-byte block header.
inline void animehash(void *state, const void *input)
{
	sph_blake512_context ctx_blake;
	sph_bmw512_context ctx_bmw;
	sph_groestl512_context ctx_groestl;
	sph_jh512_context ctx_jh;
	sph_keccak512_context ctx_keccak;
	sph_skein512_context ctx_skein;

	unsigned char hash[64];

	// Round 1: BMW512 over the full 80-byte header.
	sph_bmw512_init(&ctx_bmw);
	sph_bmw512 (&ctx_bmw, (const void*) input, 80);
	sph_bmw512_close(&ctx_bmw, (void*) hash);

	// Round 2: Blake512.
	sph_blake512_init(&ctx_blake);
	sph_blake512 (&ctx_blake, hash, 64);
	sph_blake512_close(&ctx_blake, (void*) hash);

	// Round 3 (conditional): Groestl512 if bit 3 set, else Skein512.
	if (hash[0] & 0x8)
	{
		sph_groestl512_init(&ctx_groestl);
		sph_groestl512 (&ctx_groestl, (const void*) hash, 64);
		sph_groestl512_close(&ctx_groestl, (void*) hash);
	}
	else
	{
		sph_skein512_init(&ctx_skein);
		sph_skein512 (&ctx_skein, (const void*) hash, 64);
		sph_skein512_close(&ctx_skein, (void*) hash);
	}

	// Round 4: Groestl512 (unconditional).
	sph_groestl512_init(&ctx_groestl);
	sph_groestl512 (&ctx_groestl, (const void*) hash, 64);
	sph_groestl512_close(&ctx_groestl, (void*) hash);

	// Round 5: JH512.
	sph_jh512_init(&ctx_jh);
	sph_jh512 (&ctx_jh, (const void*) hash, 64);
	sph_jh512_close(&ctx_jh, (void*) hash);

	// Round 6 (conditional): Blake512 if bit 3 set, else BMW512.
	if (hash[0] & 0x8)
	{
		sph_blake512_init(&ctx_blake);
		sph_blake512 (&ctx_blake, (const void*) hash, 64);
		sph_blake512_close(&ctx_blake, (void*) hash);
	}
	else
	{
		sph_bmw512_init(&ctx_bmw);
		sph_bmw512 (&ctx_bmw, (const void*) hash, 64);
		sph_bmw512_close(&ctx_bmw, (void*) hash);
	}

	// Round 7: Keccak512.
	sph_keccak512_init(&ctx_keccak);
	sph_keccak512 (&ctx_keccak, (const void*) hash, 64);
	sph_keccak512_close(&ctx_keccak, (void*) hash);

	// Round 8: Skein512.
	sph_skein512_init(&ctx_skein);
	sph_skein512 (&ctx_skein, (const void*) hash, 64);
	sph_skein512_close(&ctx_skein, (void*) hash);

	// Round 9 (conditional): Keccak512 if bit 3 set, else JH512.
	if (hash[0] & 0x8)
	{
		sph_keccak512_init(&ctx_keccak);
		sph_keccak512 (&ctx_keccak, (const void*) hash, 64);
		sph_keccak512_close(&ctx_keccak, (void*) hash);
	}
	else
	{
		sph_jh512_init(&ctx_jh);
		sph_jh512 (&ctx_jh, (const void*) hash, 64);
		sph_jh512_close(&ctx_jh, (void*) hash);
	}

	// Only the first 256 bits of the final digest form the result.
	memcpy(state, hash, 32);
}
||||
|
||||
|
||||
// Device functor: selects nonces whose current hash has bit 3 of word 0 set.
// Used to partition the nonce stream into the two conditional hash branches.
struct HashPredicate
{
	HashPredicate(uint32_t *hashes, uint32_t startNonce)
		: m_hashes(hashes), m_startNonce(startNonce) { }

	// x is an absolute nonce; the hash table is indexed relative to
	// m_startNonce, 16 x 32-bit words (64 bytes) per entry.
	__device__ bool operator()(const uint32_t x)
	{
		const uint32_t idx = x - m_startNonce;
		return (m_hashes[idx * 16] & 0x8) != 0;
	}

	uint32_t *m_hashes;    // device pointer to the packed 64-byte hashes
	uint32_t m_startNonce; // first nonce of the current batch
};
||||
|
||||
extern bool opt_benchmark; |
||||
|
||||
// Scans nonces [pdata[19], max_nonce) for the "anime" algorithm on the GPU.
// Orchestrates the chained kernels; conditional rounds are handled by
// compacting the nonce list into branch-specific vectors so each branch
// kernel only processes the nonces it applies to.
//
// Returns 1 with pdata[19] set to the winning nonce, 0 when the range is
// exhausted or a restart was requested. hashes_done is updated either way.
// NOTE(review): the result is divided by 2 — presumably to account for the
// branch kernels each touching only half the nonces; confirm before changing.
extern "C" int scanhash_anime(int thr_id, uint32_t *pdata,
	const uint32_t *ptarget, uint32_t max_nonce,
	unsigned long *hashes_done)
{
	const uint32_t first_nonce = pdata[19];

	// TODO: remove before release — benchmark mode forces an easy target
	// by casting away const on ptarget (intentional hack, test-only).
	if (opt_benchmark)
		((uint32_t*)ptarget)[7] = 0x00000f;

	const uint32_t Htarg = ptarget[7];

	// Nonces processed per kernel launch.
	const int throughput = 256*2048; // 100;

	// One-time per-GPU initialization: select device, allocate the hash and
	// nonce-vector buffers, upload each algorithm's constants.
	// NOTE(review): cudaMalloc return codes are not checked here.
	static bool init[8] = {0,0,0,0,0,0,0,0};
	if (!init[thr_id])
	{
		cudaSetDevice(device_map[thr_id]);

		cudaMalloc(&d_hash[thr_id], 16 * sizeof(uint32_t) * throughput);
		quark_blake512_cpu_init(thr_id, throughput);
		quark_groestl512_cpu_init(thr_id, throughput);
		quark_skein512_cpu_init(thr_id, throughput);
		quark_bmw512_cpu_init(thr_id, throughput);
		quark_keccak512_cpu_init(thr_id, throughput);
		quark_jh512_cpu_init(thr_id, throughput);
		quark_check_cpu_init(thr_id, throughput);
		quark_compactTest_cpu_init(thr_id, throughput);
		cudaMalloc(&d_animeNonces[thr_id], sizeof(uint32_t)*throughput);
		cudaMalloc(&d_branch1Nonces[thr_id], sizeof(uint32_t)*throughput);
		cudaMalloc(&d_branch2Nonces[thr_id], sizeof(uint32_t)*throughput);
		cudaMalloc(&d_branch3Nonces[thr_id], sizeof(uint32_t)*throughput);
		init[thr_id] = true;
	}

	// Byte-swap the 80-byte header into big-endian form for hashing.
	uint32_t endiandata[20];
	for (int k=0; k < 20; k++)
		be32enc(&endiandata[k], ((uint32_t*)pdata)[k]);

	quark_bmw512_cpu_setBlock_80((void*)endiandata);
	quark_check_cpu_setTarget(ptarget);

	do {
		// 'order' sequences the stream synchronization of the kernels.
		int order = 0;
		size_t nrm1=0, nrm2=0, nrm3=0;

		// Round 1: BMW512 over the 80-byte header.
		quark_bmw512_cpu_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id], order++);

		// Round 2: Blake512 (unconditional branch).
		quark_blake512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++);

		// Build the base nonce vector (branch3 = all surviving nonces).
		quark_compactTest_single_false_cpu_hash_64(thr_id, throughput, pdata[19], d_hash[thr_id], NULL,
			d_branch3Nonces[thr_id], &nrm3,
			order++);

		// Round 3: follow only the Skein branch here.
		// NOTE(review): the Groestl half of this conditional round is not
		// launched — presumably handled implicitly; confirm against the
		// CPU reference (animehash) before touching this sequence.
		quark_skein512_cpu_hash_64(thr_id, nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id], order++);

		// Round 4: Groestl512 (unconditional).
		quark_groestl512_cpu_hash_64(thr_id, nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id], order++);

		// Round 5: JH512 (unconditional).
		quark_jh512_cpu_hash_64(thr_id, nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id], order++);

		// Split nonces into branch1/branch2 according to (hash[0] & 0x8).
		quark_compactTest_cpu_hash_64(thr_id, nrm3, pdata[19], d_hash[thr_id], d_branch3Nonces[thr_id],
			d_branch1Nonces[thr_id], &nrm1,
			d_branch2Nonces[thr_id], &nrm2,
			order++);

		// Round 6a: conditional Blake512 branch.
		quark_blake512_cpu_hash_64(thr_id, nrm1, pdata[19], d_branch1Nonces[thr_id], d_hash[thr_id], order++);

		// Round 6b: conditional BMW512 branch.
		quark_bmw512_cpu_hash_64(thr_id, nrm2, pdata[19], d_branch2Nonces[thr_id], d_hash[thr_id], order++);

		// Round 7: Keccak512 (unconditional).
		quark_keccak512_cpu_hash_64(thr_id, nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id], order++);

		// Round 8: Skein512 (unconditional).
		quark_skein512_cpu_hash_64(thr_id, nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id], order++);

		// Split again for the final conditional round.
		quark_compactTest_cpu_hash_64(thr_id, nrm3, pdata[19], d_hash[thr_id], d_branch3Nonces[thr_id],
			d_branch1Nonces[thr_id], &nrm1,
			d_branch2Nonces[thr_id], &nrm2,
			order++);

		// Round 9a: conditional Keccak512 branch.
		quark_keccak512_cpu_hash_64(thr_id, nrm1, pdata[19], d_branch1Nonces[thr_id], d_hash[thr_id], order++);

		// Round 9b: conditional JH512 branch.
		quark_jh512_cpu_hash_64(thr_id, nrm2, pdata[19], d_branch2Nonces[thr_id], d_hash[thr_id], order++);

		// Scan for winning hashes on the GPU.
		uint32_t foundNonce = quark_check_cpu_hash_64(thr_id, nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id], order++);
		if (foundNonce != 0xffffffff)
		{
			// Re-verify the candidate on the CPU before reporting it.
			uint32_t vhash64[8];
			be32enc(&endiandata[19], foundNonce);
			animehash(vhash64, endiandata);

			if ((vhash64[7]<=Htarg) && fulltest(vhash64, ptarget)) {

				pdata[19] = foundNonce;
				*hashes_done = (foundNonce - first_nonce + 1)/2;
				return 1;
			} else {
				applog(LOG_INFO, "GPU #%d: result for nonce $%08X does not validate on CPU!", thr_id, foundNonce);
			}
		}

		pdata[19] += throughput;

	} while (pdata[19] < max_nonce && !work_restart[thr_id].restart);

	*hashes_done = (pdata[19] - first_nonce + 1)/2;
	return 0;
}
@ -0,0 +1,473 @@
@@ -0,0 +1,473 @@
|
||||
#if 1 |
||||
|
||||
#include <cuda.h> |
||||
#include "cuda_runtime.h" |
||||
#include "device_launch_parameters.h" |
||||
|
||||
#include <stdio.h> |
||||
#include <memory.h> |
||||
|
||||
// Folgende Definitionen später durch header ersetzen |
||||
typedef unsigned char uint8_t; |
||||
typedef unsigned int uint32_t; |
||||
|
||||
// Endian Drehung für 32 Bit Typen |
||||
/* |
||||
static __device__ uint32_t cuda_swab32(uint32_t x) |
||||
{ |
||||
return (((x << 24) & 0xff000000u) | ((x << 8) & 0x00ff0000u) |
||||
| ((x >> 8) & 0x0000ff00u) | ((x >> 24) & 0x000000ffu)); |
||||
} |
||||
*/ |
||||
// Byte-swap a 32-bit word (endianness conversion) using the PRMT
// byte-permute intrinsic; selector 0x0123 reverses the four bytes.
static __device__ uint32_t cuda_swab32(uint32_t x)
{
	return __byte_perm(x, 0, 0x0123);
}
||||
// Byte-swap a 64-bit word: byte-swap each 32-bit half, then exchange the halves.
static __device__ unsigned long long cuda_swab64(unsigned long long x) {
	const uint32_t hi = (uint32_t)(x >> 32);
	const uint32_t lo = (uint32_t)(x & 0xFFFFFFFFULL);
	unsigned long long result = cuda_swab32(lo);
	result <<= 32;
	result |= cuda_swab32(hi);
	return result;
}
||||
|
||||
// Extract the high 32-bit word of a 64-bit value. On sm_13+ this goes through
// a double reinterpret (__longlong_as_double/__double2hiint), which the
// compiler maps to a plain register move instead of a 64-bit shift.
static __device__ uint32_t HIWORD(const unsigned long long &x) {
#if __CUDA_ARCH__ >= 130
	return (uint32_t)__double2hiint(__longlong_as_double(x));
#else
	return (uint32_t)(x >> 32);
#endif
}
||||
|
||||
// Return x with its high 32-bit word replaced by y (low word preserved).
static __device__ unsigned long long REPLACE_HIWORD(const unsigned long long &x, const uint32_t &y) {
	return (x & 0xFFFFFFFFULL) | (((unsigned long long)y) << 32ULL);
}
||||
|
||||
// Extract the low 32-bit word of a 64-bit value (register-move trick on
// sm_13+, plain mask otherwise — see HIWORD).
static __device__ uint32_t LOWORD(const unsigned long long &x) {
#if __CUDA_ARCH__ >= 130
	return (uint32_t)__double2loint(__longlong_as_double(x));
#else
	return (uint32_t)(x & 0xFFFFFFFFULL);
#endif
}
||||
|
||||
// Combine two 32-bit words into a 64-bit value (LO = low word, HI = high
// word), using the double reinterpret trick on sm_13+.
static __device__ unsigned long long MAKE_ULONGLONG(uint32_t LO, uint32_t HI)
{
#if __CUDA_ARCH__ >= 130
	return __double_as_longlong(__hiloint2double(HI, LO));
#else
	return (unsigned long long)LO | (((unsigned long long)HI) << 32ULL);
#endif
}
||||
|
||||
// Return x with its low 32-bit word replaced by y (high word preserved).
static __device__ unsigned long long REPLACE_LOWORD(const unsigned long long &x, const uint32_t &y) {
	return (x & 0xFFFFFFFF00000000ULL) | ((unsigned long long)y);
}
||||
|
||||
// Experimental wrapper class for a 64-bit value built from two 32-bit
// registers. Currently DISABLED (#if 1): plain unsigned long long is used
// instead. The #else branch is kept for reference only.
// NOTE(review): the disabled class uses `unsigned long long(*this)` as a
// functional cast, which is not valid C++ for multi-word type names — it
// would need fixing before re-enabling.
#if 1
typedef unsigned long long uint64_t;
#else
typedef class uint64
{
public:
	__device__ uint64()
	{
	}
	__device__ uint64(unsigned long long init)
	{
		val = make_uint2( LOWORD(init), HIWORD(init) );
	}
	__device__ uint64(uint32_t lo, uint32_t hi)
	{
		val = make_uint2( lo, hi );
	}
	__device__ const uint64 operator^(uint64 const& rhs) const
	{
		return uint64(val.x ^ rhs.val.x, val.y ^ rhs.val.y);
	}
	__device__ const uint64 operator|(uint64 const& rhs) const
	{
		return uint64(val.x | rhs.val.x, val.y | rhs.val.y);
	}
	__device__ const uint64 operator+(unsigned long long const& rhs) const
	{
		return *this+uint64(rhs);
	}
	// 64-bit add implemented as a 32-bit add with carry chain in PTX.
	__device__ const uint64 operator+(uint64 const& rhs) const
	{
		uint64 res;
		asm ("add.cc.u32 %0, %2, %4;\n\t"
			"addc.cc.u32 %1, %3, %5;\n\t"
			: "=r"(res.val.x), "=r"(res.val.y)
			: "r"( val.x), "r"( val.y),
			"r"(rhs.val.x), "r"(rhs.val.y));
		return res;
	}
	// 64-bit subtract implemented as a 32-bit subtract with borrow chain.
	__device__ const uint64 operator-(uint64 const& rhs) const
	{
		uint64 res;
		asm ("sub.cc.u32 %0, %2, %4;\n\t"
			"subc.cc.u32 %1, %3, %5;\n\t"
			: "=r"(res.val.x), "=r"(res.val.y)
			: "r"( val.x), "r"( val.y),
			"r"(rhs.val.x), "r"(rhs.val.y));
		return res;
	}
	__device__ const uint64 operator<<(int n) const
	{
		return uint64(unsigned long long(*this)<<n);
	}
	__device__ const uint64 operator>>(int n) const
	{
		return uint64(unsigned long long(*this)>>n);
	}
	__device__ operator unsigned long long() const
	{
		return MAKE_ULONGLONG(val.x, val.y);
	}
	uint2 val;
} uint64_t;
#endif
||||
|
||||
// aus heavy.cu |
||||
extern cudaError_t MyStreamSynchronize(cudaStream_t stream, int situation, int thr_id); |
||||
|
||||
// die Message it Padding zur Berechnung auf der GPU |
||||
__constant__ uint64_t c_PaddedMessage80[16]; // padded message (80 bytes + padding) |
||||
|
||||
#define SPH_C64(x) ((uint64_t)(x ## ULL)) |
||||
|
||||
// aus heavy.cu |
||||
extern cudaError_t MyStreamSynchronize(cudaStream_t stream, int situation, int thr_id); |
||||
|
||||
// 64-bit left-rotate. On Compute 3.5+ this uses the funnel-shifter
// (shf.l.wrap) on the two 32-bit halves; for a rotate amount >= 32 the
// halves are fed in swapped order so the wrap-around stays correct.
// On older architectures a plain shift/or macro is used.
// NOTE(review): the macro form evaluates x and n more than once and is UB
// for n == 0 or n == 64 — callers in this file always pass 1..63 literals.
#if __CUDA_ARCH__ >= 350
__forceinline__ __device__ uint64_t ROTL64(const uint64_t value, const int offset) {
	uint2 result;
	if(offset >= 32) {
		asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(__double2loint(__longlong_as_double(value))), "r"(__double2hiint(__longlong_as_double(value))), "r"(offset));
		asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(__double2hiint(__longlong_as_double(value))), "r"(__double2loint(__longlong_as_double(value))), "r"(offset));
	} else {
		asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(__double2hiint(__longlong_as_double(value))), "r"(__double2loint(__longlong_as_double(value))), "r"(offset));
		asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(__double2loint(__longlong_as_double(value))), "r"(__double2hiint(__longlong_as_double(value))), "r"(offset));
	}
	return __double_as_longlong(__hiloint2double(result.y, result.x));
}
#else
#define ROTL64(x, n) (((x) << (n)) | ((x) >> (64 - (n))))
#endif
// Plain shift helpers used by the BMW s-functions below.
#define SHL(x, n) ((x) << (n))
#define SHR(x, n) ((x) >> (n))

// Common sub-expression of the BMW512 "expand2" rounds (uses loop index i
// and the q[] array of the enclosing scope).
#define CONST_EXP2 q[i+0] + ROTL64(q[i+1], 5) + q[i+2] + ROTL64(q[i+3], 11) + \
	q[i+4] + ROTL64(q[i+5], 27) + q[i+6] + ROTL64(q[i+7], 32) + \
	q[i+8] + ROTL64(q[i+9], 37) + q[i+10] + ROTL64(q[i+11], 43) + \
	q[i+12] + ROTL64(q[i+13], 53) + (SHR(q[i+14],1) ^ q[i+14]) + (SHR(q[i+15],2) ^ q[i+15])
||||
|
||||
// BMW-512 compression function (reference-style implementation).
// msg:  16 x 64-bit message words (one 128-byte block).
// hash: in: 16-word chaining value; out: overwritten with the new chaining
//       value. The f0/f1/f2 structure follows the BMW specification:
//       q[0..15] from the s-functions, q[16..31] from expand1/expand2,
//       then the folded output via XL64/XH64.
__device__ void Compression512(uint64_t *msg, uint64_t *hash)
{
	// Compression ref. implementation
	uint64_t tmp;
	uint64_t q[32];

	// f0: first 16 quad words from the W/s functions.
	tmp = (msg[ 5] ^ hash[ 5]) - (msg[ 7] ^ hash[ 7]) + (msg[10] ^ hash[10]) + (msg[13] ^ hash[13]) + (msg[14] ^ hash[14]);
	q[0] = (SHR(tmp, 1) ^ SHL(tmp, 3) ^ ROTL64(tmp, 4) ^ ROTL64(tmp, 37)) + hash[1];
	tmp = (msg[ 6] ^ hash[ 6]) - (msg[ 8] ^ hash[ 8]) + (msg[11] ^ hash[11]) + (msg[14] ^ hash[14]) - (msg[15] ^ hash[15]);
	q[1] = (SHR(tmp, 1) ^ SHL(tmp, 2) ^ ROTL64(tmp, 13) ^ ROTL64(tmp, 43)) + hash[2];
	tmp = (msg[ 0] ^ hash[ 0]) + (msg[ 7] ^ hash[ 7]) + (msg[ 9] ^ hash[ 9]) - (msg[12] ^ hash[12]) + (msg[15] ^ hash[15]);
	q[2] = (SHR(tmp, 2) ^ SHL(tmp, 1) ^ ROTL64(tmp, 19) ^ ROTL64(tmp, 53)) + hash[3];
	tmp = (msg[ 0] ^ hash[ 0]) - (msg[ 1] ^ hash[ 1]) + (msg[ 8] ^ hash[ 8]) - (msg[10] ^ hash[10]) + (msg[13] ^ hash[13]);
	q[3] = (SHR(tmp, 2) ^ SHL(tmp, 2) ^ ROTL64(tmp, 28) ^ ROTL64(tmp, 59)) + hash[4];
	tmp = (msg[ 1] ^ hash[ 1]) + (msg[ 2] ^ hash[ 2]) + (msg[ 9] ^ hash[ 9]) - (msg[11] ^ hash[11]) - (msg[14] ^ hash[14]);
	q[4] = (SHR(tmp, 1) ^ tmp) + hash[5];
	tmp = (msg[ 3] ^ hash[ 3]) - (msg[ 2] ^ hash[ 2]) + (msg[10] ^ hash[10]) - (msg[12] ^ hash[12]) + (msg[15] ^ hash[15]);
	q[5] = (SHR(tmp, 1) ^ SHL(tmp, 3) ^ ROTL64(tmp, 4) ^ ROTL64(tmp, 37)) + hash[6];
	tmp = (msg[ 4] ^ hash[ 4]) - (msg[ 0] ^ hash[ 0]) - (msg[ 3] ^ hash[ 3]) - (msg[11] ^ hash[11]) + (msg[13] ^ hash[13]);
	q[6] = (SHR(tmp, 1) ^ SHL(tmp, 2) ^ ROTL64(tmp, 13) ^ ROTL64(tmp, 43)) + hash[7];
	tmp = (msg[ 1] ^ hash[ 1]) - (msg[ 4] ^ hash[ 4]) - (msg[ 5] ^ hash[ 5]) - (msg[12] ^ hash[12]) - (msg[14] ^ hash[14]);
	q[7] = (SHR(tmp, 2) ^ SHL(tmp, 1) ^ ROTL64(tmp, 19) ^ ROTL64(tmp, 53)) + hash[8];
	tmp = (msg[ 2] ^ hash[ 2]) - (msg[ 5] ^ hash[ 5]) - (msg[ 6] ^ hash[ 6]) + (msg[13] ^ hash[13]) - (msg[15] ^ hash[15]);
	q[8] = (SHR(tmp, 2) ^ SHL(tmp, 2) ^ ROTL64(tmp, 28) ^ ROTL64(tmp, 59)) + hash[9];
	tmp = (msg[ 0] ^ hash[ 0]) - (msg[ 3] ^ hash[ 3]) + (msg[ 6] ^ hash[ 6]) - (msg[ 7] ^ hash[ 7]) + (msg[14] ^ hash[14]);
	q[9] = (SHR(tmp, 1) ^ tmp) + hash[10];
	tmp = (msg[ 8] ^ hash[ 8]) - (msg[ 1] ^ hash[ 1]) - (msg[ 4] ^ hash[ 4]) - (msg[ 7] ^ hash[ 7]) + (msg[15] ^ hash[15]);
	q[10] = (SHR(tmp, 1) ^ SHL(tmp, 3) ^ ROTL64(tmp, 4) ^ ROTL64(tmp, 37)) + hash[11];
	tmp = (msg[ 8] ^ hash[ 8]) - (msg[ 0] ^ hash[ 0]) - (msg[ 2] ^ hash[ 2]) - (msg[ 5] ^ hash[ 5]) + (msg[ 9] ^ hash[ 9]);
	q[11] = (SHR(tmp, 1) ^ SHL(tmp, 2) ^ ROTL64(tmp, 13) ^ ROTL64(tmp, 43)) + hash[12];
	tmp = (msg[ 1] ^ hash[ 1]) + (msg[ 3] ^ hash[ 3]) - (msg[ 6] ^ hash[ 6]) - (msg[ 9] ^ hash[ 9]) + (msg[10] ^ hash[10]);
	q[12] = (SHR(tmp, 2) ^ SHL(tmp, 1) ^ ROTL64(tmp, 19) ^ ROTL64(tmp, 53)) + hash[13];
	tmp = (msg[ 2] ^ hash[ 2]) + (msg[ 4] ^ hash[ 4]) + (msg[ 7] ^ hash[ 7]) + (msg[10] ^ hash[10]) + (msg[11] ^ hash[11]);
	q[13] = (SHR(tmp, 2) ^ SHL(tmp, 2) ^ ROTL64(tmp, 28) ^ ROTL64(tmp, 59)) + hash[14];
	tmp = (msg[ 3] ^ hash[ 3]) - (msg[ 5] ^ hash[ 5]) + (msg[ 8] ^ hash[ 8]) - (msg[11] ^ hash[11]) - (msg[12] ^ hash[12]);
	q[14] = (SHR(tmp, 1) ^ tmp) + hash[15];
	tmp = (msg[12] ^ hash[12]) - (msg[ 4] ^ hash[ 4]) - (msg[ 6] ^ hash[ 6]) - (msg[ 9] ^ hash[ 9]) + (msg[13] ^ hash[13]);
	q[15] = (SHR(tmp, 1) ^ SHL(tmp, 3) ^ ROTL64(tmp, 4) ^ ROTL64(tmp, 37)) + hash[0];

	// Expand 1: q[16..17] via the full 16-term s-function sum.
#pragma unroll 2
	for(int i=0;i<2;i++)
	{
		q[i+16] =
		(SHR(q[i], 1) ^ SHL(q[i], 2) ^ ROTL64(q[i], 13) ^ ROTL64(q[i], 43)) +
		(SHR(q[i+1], 2) ^ SHL(q[i+1], 1) ^ ROTL64(q[i+1], 19) ^ ROTL64(q[i+1], 53)) +
		(SHR(q[i+2], 2) ^ SHL(q[i+2], 2) ^ ROTL64(q[i+2], 28) ^ ROTL64(q[i+2], 59)) +
		(SHR(q[i+3], 1) ^ SHL(q[i+3], 3) ^ ROTL64(q[i+3], 4) ^ ROTL64(q[i+3], 37)) +
		(SHR(q[i+4], 1) ^ SHL(q[i+4], 2) ^ ROTL64(q[i+4], 13) ^ ROTL64(q[i+4], 43)) +
		(SHR(q[i+5], 2) ^ SHL(q[i+5], 1) ^ ROTL64(q[i+5], 19) ^ ROTL64(q[i+5], 53)) +
		(SHR(q[i+6], 2) ^ SHL(q[i+6], 2) ^ ROTL64(q[i+6], 28) ^ ROTL64(q[i+6], 59)) +
		(SHR(q[i+7], 1) ^ SHL(q[i+7], 3) ^ ROTL64(q[i+7], 4) ^ ROTL64(q[i+7], 37)) +
		(SHR(q[i+8], 1) ^ SHL(q[i+8], 2) ^ ROTL64(q[i+8], 13) ^ ROTL64(q[i+8], 43)) +
		(SHR(q[i+9], 2) ^ SHL(q[i+9], 1) ^ ROTL64(q[i+9], 19) ^ ROTL64(q[i+9], 53)) +
		(SHR(q[i+10], 2) ^ SHL(q[i+10], 2) ^ ROTL64(q[i+10], 28) ^ ROTL64(q[i+10], 59)) +
		(SHR(q[i+11], 1) ^ SHL(q[i+11], 3) ^ ROTL64(q[i+11], 4) ^ ROTL64(q[i+11], 37)) +
		(SHR(q[i+12], 1) ^ SHL(q[i+12], 2) ^ ROTL64(q[i+12], 13) ^ ROTL64(q[i+12], 43)) +
		(SHR(q[i+13], 2) ^ SHL(q[i+13], 1) ^ ROTL64(q[i+13], 19) ^ ROTL64(q[i+13], 53)) +
		(SHR(q[i+14], 2) ^ SHL(q[i+14], 2) ^ ROTL64(q[i+14], 28) ^ ROTL64(q[i+14], 59)) +
		(SHR(q[i+15], 1) ^ SHL(q[i+15], 3) ^ ROTL64(q[i+15], 4) ^ ROTL64(q[i+15], 37)) +
		(( ((i+16)*(0x0555555555555555ull)) + ROTL64(msg[i], i+1) +
		ROTL64(msg[i+3], i+4) - ROTL64(msg[i+10], i+11) ) ^ hash[i+7]);
	}

	// Expand 2: q[18..31] via CONST_EXP2; the four loops differ only in the
	// (wrapped) message-word and hash-word indices of the AddElement term.
#pragma unroll 4
	for(int i=2;i<6;i++) {
		q[i+16] = CONST_EXP2 +
		(( ((i+16)*(0x0555555555555555ull)) + ROTL64(msg[i], i+1) +
		ROTL64(msg[i+3], i+4) - ROTL64(msg[i+10], i+11) ) ^ hash[i+7]);
	}
#pragma unroll 3
	for(int i=6;i<9;i++) {
		q[i+16] = CONST_EXP2 +
		(( ((i+16)*(0x0555555555555555ull)) + ROTL64(msg[i], i+1) +
		ROTL64(msg[i+3], i+4) - ROTL64(msg[i-6], (i-6)+1) ) ^ hash[i+7]);
	}
#pragma unroll 4
	for(int i=9;i<13;i++) {
		q[i+16] = CONST_EXP2 +
		(( ((i+16)*(0x0555555555555555ull)) + ROTL64(msg[i], i+1) +
		ROTL64(msg[i+3], i+4) - ROTL64(msg[i-6], (i-6)+1) ) ^ hash[i-9]);
	}
#pragma unroll 3
	for(int i=13;i<16;i++) {
		q[i+16] = CONST_EXP2 +
		(( ((i+16)*(0x0555555555555555ull)) + ROTL64(msg[i], i+1) +
		ROTL64(msg[i-13], (i-13)+1) - ROTL64(msg[i-6], (i-6)+1) ) ^ hash[i-9]);
	}

	// f2: fold the 32 quad words into the new 16-word chaining value.
	uint64_t XL64 = q[16]^q[17]^q[18]^q[19]^q[20]^q[21]^q[22]^q[23];
	uint64_t XH64 = XL64^q[24]^q[25]^q[26]^q[27]^q[28]^q[29]^q[30]^q[31];

	hash[0] = (SHL(XH64, 5) ^ SHR(q[16],5) ^ msg[ 0]) + ( XL64 ^ q[24] ^ q[ 0]);
	hash[1] = (SHR(XH64, 7) ^ SHL(q[17],8) ^ msg[ 1]) + ( XL64 ^ q[25] ^ q[ 1]);
	hash[2] = (SHR(XH64, 5) ^ SHL(q[18],5) ^ msg[ 2]) + ( XL64 ^ q[26] ^ q[ 2]);
	hash[3] = (SHR(XH64, 1) ^ SHL(q[19],5) ^ msg[ 3]) + ( XL64 ^ q[27] ^ q[ 3]);
	hash[4] = (SHR(XH64, 3) ^ q[20] ^ msg[ 4]) + ( XL64 ^ q[28] ^ q[ 4]);
	hash[5] = (SHL(XH64, 6) ^ SHR(q[21],6) ^ msg[ 5]) + ( XL64 ^ q[29] ^ q[ 5]);
	hash[6] = (SHR(XH64, 4) ^ SHL(q[22],6) ^ msg[ 6]) + ( XL64 ^ q[30] ^ q[ 6]);
	hash[7] = (SHR(XH64,11) ^ SHL(q[23],2) ^ msg[ 7]) + ( XL64 ^ q[31] ^ q[ 7]);

	hash[ 8] = ROTL64(hash[4], 9) + ( XH64 ^ q[24] ^ msg[ 8]) + (SHL(XL64,8) ^ q[23] ^ q[ 8]);
	hash[ 9] = ROTL64(hash[5],10) + ( XH64 ^ q[25] ^ msg[ 9]) + (SHR(XL64,6) ^ q[16] ^ q[ 9]);
	hash[10] = ROTL64(hash[6],11) + ( XH64 ^ q[26] ^ msg[10]) + (SHL(XL64,6) ^ q[17] ^ q[10]);
	hash[11] = ROTL64(hash[7],12) + ( XH64 ^ q[27] ^ msg[11]) + (SHL(XL64,4) ^ q[18] ^ q[11]);
	hash[12] = ROTL64(hash[0],13) + ( XH64 ^ q[28] ^ msg[12]) + (SHR(XL64,3) ^ q[19] ^ q[12]);
	hash[13] = ROTL64(hash[1],14) + ( XH64 ^ q[29] ^ msg[13]) + (SHR(XL64,4) ^ q[20] ^ q[13]);
	hash[14] = ROTL64(hash[2],15) + ( XH64 ^ q[30] ^ msg[14]) + (SHR(XL64,7) ^ q[21] ^ q[14]);
	hash[15] = ROTL64(hash[3],16) + ( XH64 ^ q[31] ^ msg[15]) + (SHR(XL64,2) ^ q[22] ^ q[15]);
}
||||
// BMW-512 initial chaining value (the byte-counting IV 0x80,0x81,...,0xFF
// from the BMW specification), stored on the host and uploaded into GPU
// constant memory by quark_bmw512_cpu_init.
static __constant__ uint64_t d_constMem[16];
static uint64_t h_constMem[16] = {
	SPH_C64(0x8081828384858687),
	SPH_C64(0x88898A8B8C8D8E8F),
	SPH_C64(0x9091929394959697),
	SPH_C64(0x98999A9B9C9D9E9F),
	SPH_C64(0xA0A1A2A3A4A5A6A7),
	SPH_C64(0xA8A9AAABACADAEAF),
	SPH_C64(0xB0B1B2B3B4B5B6B7),
	SPH_C64(0xB8B9BABBBCBDBEBF),
	SPH_C64(0xC0C1C2C3C4C5C6C7),
	SPH_C64(0xC8C9CACBCCCDCECF),
	SPH_C64(0xD0D1D2D3D4D5D6D7),
	SPH_C64(0xD8D9DADBDCDDDEDF),
	SPH_C64(0xE0E1E2E3E4E5E6E7),
	SPH_C64(0xE8E9EAEBECEDEEEF),
	SPH_C64(0xF0F1F2F3F4F5F6F7),
	SPH_C64(0xF8F9FAFBFCFDFEFF)
};
||||
|
||||
// BMW512 kernel over 64-byte inputs: one thread hashes one entry of g_hash
// in place. g_nonceVector, when non-NULL, maps thread index -> nonce
// (compacted branch); otherwise nonces are startNounce + thread.
__global__ void quark_bmw512_gpu_hash_64(int threads, uint32_t startNounce, uint64_t *g_hash, uint32_t *g_nonceVector)
{
	int thread = (blockDim.x * blockIdx.x + threadIdx.x);
	if (thread < threads)
	{
		uint32_t nounce = (g_nonceVector != NULL) ? g_nonceVector[thread] : (startNounce + thread);

		// Hashes are stored relative to the batch's start nonce.
		int hashPosition = nounce - startNounce;
		uint64_t *inpHash = &g_hash[8 * hashPosition];

		// Load the BMW-512 initial chaining value from constant memory.
		uint64_t h[16];
#pragma unroll 16
		for(int i=0;i<16;i++)
			h[i] = d_constMem[i];
		// Copy the message. Note: the input is 64 bytes, but BMW
		// operates on 128-byte blocks, so the rest is padding.
		uint64_t message[16];
#pragma unroll 8
		for(int i=0;i<8;i++)
			message[i] = inpHash[i];
		// Zero words 9..14 (word 8 and 15 are set explicitly below).
#pragma unroll 6
		for(int i=9;i<15;i++)
			message[i] = 0;

		// Padding bit directly after the 64-byte message.
		message[8] = SPH_C64(0x80);
		// Message length in bits: 64 bytes * 8 = 512.
		message[15] = SPH_C64(512);

		// Compression 1: message block into the chaining value.
		Compression512(message, h);

		// Finalization: compress the chaining value with the constant
		// final block 0xaaa...a0 + i.
#pragma unroll 16
		for(int i=0;i<16;i++)
			message[i] = 0xaaaaaaaaaaaaaaa0ull + (uint64_t)i;

		Compression512(h, message);

		// The digest is the upper half of the final state, written back
		// in place over the input hash.
		uint64_t *outpHash = &g_hash[8 * hashPosition];

#pragma unroll 8
		for(int i=0;i<8;i++)
			outpHash[i] = message[i+8];
	}
}
||||
|
||||
// BMW512 kernel over the pre-padded 80-byte block header held in constant
// memory (c_PaddedMessage80): one thread hashes one nonce of the batch and
// writes its 64-byte digest to g_hash.
__global__ void quark_bmw512_gpu_hash_80(int threads, uint32_t startNounce, uint64_t *g_hash)
{
	int thread = (blockDim.x * blockIdx.x + threadIdx.x);
	if (thread < threads)
	{
		uint32_t nounce = startNounce + thread;

		// Load the BMW-512 initial chaining value from constant memory.
		uint64_t h[16];
#pragma unroll 16
		for(int i=0;i<16;i++)
			h[i] = d_constMem[i];

		// Copy the padded 128-byte message (80 bytes of header + padding
		// prepared by quark_bmw512_cpu_setBlock_80).
		uint64_t message[16];
#pragma unroll 16
		for(int i=0;i<16;i++)
			message[i] = c_PaddedMessage80[i];

		// Patch in this thread's nonce: header bytes 76..79 are the high
		// word of 64-bit word 9, stored big-endian (hence the swab).
		message[9] = REPLACE_HIWORD(message[9], cuda_swab32(nounce));

		// Compression 1: message block into the chaining value.
		Compression512(message, h);

		// Finalization with the constant final block 0xaaa...a0 + i.
#pragma unroll 16
		for(int i=0;i<16;i++)
			message[i] = 0xaaaaaaaaaaaaaaa0ull + (uint64_t)i;

		Compression512(h, message);

		// Digest = upper half of the final state.
		uint64_t *outpHash = &g_hash[8 * thread];

#pragma unroll 8
		for(int i=0;i<8;i++)
			outpHash[i] = message[i+8];
	}
}
||||
|
||||
// Setup: upload the BMW-512 initial chaining value into GPU constant memory.
// (Originally a no-op — "nix zu tun" — until the IV was moved to constMem.)
__host__ void quark_bmw512_cpu_init(int thr_id, int threads)
{
	cudaMemcpyToSymbol( d_constMem,
		h_constMem,
		sizeof(h_constMem),
		0, cudaMemcpyHostToDevice);
}
||||
|
||||
// BMW512 for 80-byte input: prepares the padded 128-byte message block and
// uploads it to constant memory. Only the nonce (bytes 76..79) still needs
// to be patched in per thread by the kernel.
__host__ void quark_bmw512_cpu_setBlock_80(void *pdata)
{
	unsigned char PaddedMessage[128];
	memcpy(PaddedMessage, pdata, 80);
	memset(PaddedMessage+80, 0, 48);
	uint64_t *message = (uint64_t*)PaddedMessage;
	// Padding bit directly after the 80-byte header (word 10 = byte 80).
	message[10] = SPH_C64(0x80);
	// Message length in bits: 80 bytes * 8 = 640.
	message[15] = SPH_C64(640);

	// Upload the prepared message for the GPU kernels.
	cudaMemcpyToSymbol( c_PaddedMessage80, PaddedMessage, 16*sizeof(uint64_t), 0, cudaMemcpyHostToDevice);
}
||||
|
||||
// Host launcher for the 64-byte BMW512 kernel; synchronizes the stream
// according to 'order' afterwards.
__host__ void quark_bmw512_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order)
{
	const int tpb = 256; // threads per block

	// Number of blocks needed to cover all threads (ceiling division).
	const int nblocks = (threads + tpb - 1) / tpb;
	dim3 grid(nblocks);
	dim3 block(tpb);

	// No dynamic shared memory is required by this kernel.
	const size_t shared_size = 0;

	quark_bmw512_gpu_hash_64<<<grid, block, shared_size>>>(threads, startNounce, (uint64_t*)d_hash, d_nonceVector);
	MyStreamSynchronize(NULL, order, thr_id);
}
||||
|
||||
// Host launcher for the 80-byte (header) BMW512 kernel; synchronizes the
// stream according to 'order' afterwards.
__host__ void quark_bmw512_cpu_hash_80(int thr_id, int threads, uint32_t startNounce, uint32_t *d_hash, int order)
{
	const int tpb = 256; // threads per block

	// Number of blocks needed to cover all threads (ceiling division).
	const int nblocks = (threads + tpb - 1) / tpb;
	dim3 grid(nblocks);
	dim3 block(tpb);

	// No dynamic shared memory is required by this kernel.
	const size_t shared_size = 0;

	quark_bmw512_gpu_hash_80<<<grid, block, shared_size>>>(threads, startNounce, (uint64_t*)d_hash);
	MyStreamSynchronize(NULL, order, thr_id);
}
||||
|
||||
#endif |
@ -0,0 +1,363 @@
@@ -0,0 +1,363 @@
|
||||
#include <cuda.h> |
||||
#include "cuda_runtime.h" |
||||
#include "device_launch_parameters.h" |
||||
#include "sm_30_intrinsics.h" |
||||
|
||||
#include <stdio.h> |
||||
#include <memory.h> |
||||
#include <stdint.h> |
||||
|
||||
// aus cpu-miner.c |
||||
extern "C" int device_map[8]; |
||||
|
||||
// diese Struktur wird in der Init Funktion angefordert |
||||
static cudaDeviceProp props[8]; |
||||
|
||||
static uint32_t *d_tempBranch1Nonces[8]; |
||||
static uint32_t *d_numValid[8]; |
||||
static uint32_t *h_numValid[8]; |
||||
|
||||
static uint32_t *d_partSum[2][8]; // für bis zu vier partielle Summen |
||||
|
||||
// aus heavy.cu |
||||
extern cudaError_t MyStreamSynchronize(cudaStream_t stream, int situation, int thr_id); |
||||
|
||||
// True/False tester |
||||
typedef uint32_t(*cuda_compactTestFunction_t)(uint32_t *inpHash); |
||||
|
||||
// Branch predicate: 1 when bit 3 of the first hash word is set, else 0.
__device__ uint32_t QuarkTrueTest(uint32_t *inpHash)
{
	return (inpHash[0] >> 3) & 1;
}
||||
|
||||
// Branch predicate: 1 when bit 3 of the first hash word is clear, else 0.
__device__ uint32_t QuarkFalseTest(uint32_t *inpHash)
{
	return !(inpHash[0] & 0x08);
}
||||
|
||||
__device__ cuda_compactTestFunction_t d_QuarkTrueFunction = QuarkTrueTest, d_QuarkFalseFunction = QuarkFalseTest; |
||||
cuda_compactTestFunction_t h_QuarkTrueFunction[8], h_QuarkFalseFunction[8]; |
||||
|
||||
// Per-GPU setup for the compaction machinery: fetch device properties,
// resolve the device-side predicate function pointers into host-usable
// handles, and allocate the scratch buffers for the scan/scatter passes.
__host__ void quark_compactTest_cpu_init(int thr_id, int threads)
{
	cudaGetDeviceProperties(&props[thr_id], device_map[thr_id]);

	// Copy the device function pointers to the host-side tables.
	cudaMemcpyFromSymbol(&h_QuarkTrueFunction[thr_id], d_QuarkTrueFunction, sizeof(cuda_compactTestFunction_t));
	cudaMemcpyFromSymbol(&h_QuarkFalseFunction[thr_id], d_QuarkFalseFunction, sizeof(cuda_compactTestFunction_t));

	// Device scratch memory for the scan results plus the two counters
	// (true/false match counts), the latter mirrored in pinned host memory.
	cudaMalloc(&d_tempBranch1Nonces[thr_id], sizeof(uint32_t) * threads * 2);
	cudaMalloc(&d_numValid[thr_id], 2 * sizeof(uint32_t));
	cudaMallocHost(&h_numValid[thr_id], 2 * sizeof(uint32_t));

	// Partial-sum buffers: two entries per 256-thread block.
	const uint32_t partCount = (threads / 256) * 2;
	cudaMalloc(&d_partSum[0][thr_id], sizeof(uint32_t) * partCount);
	cudaMalloc(&d_partSum[1][thr_id], sizeof(uint32_t) * partCount);
}
||||
|
||||
// Inclusive prefix-sum (scan) kernel, adapted from the NVIDIA SDK
// shfl_scan sample. Two operating modes:
//  - testFunc != NULL: each thread evaluates the branch predicate on its
//    hash (16 uint32 words per hash) and the 0/1 results are scanned
//    into data[].
//  - testFunc == NULL: data[] already holds values and is scanned in place.
// `width` is the shuffle segment width (callers pass at most 32, matching
// the 32-word dynamic shared memory they allocate). When `partial_sums`
// is given, the last thread of each block writes the block total there so
// the caller can chain further scan passes over it.
__global__ void quark_compactTest_gpu_SCAN(uint32_t *data, int width, uint32_t *partial_sums=NULL, cuda_compactTestFunction_t testFunc=NULL, int threads=0, uint32_t startNounce=0, uint32_t *inpHashes=NULL, uint32_t *d_validNonceTable=NULL)
{
	extern __shared__ uint32_t sums[];
	int id = ((blockIdx.x * blockDim.x) + threadIdx.x);
	//int lane_id = id % warpSize;
	int lane_id = id % width;
	// determine a warp_id within a block
	//int warp_id = threadIdx.x / warpSize;
	int warp_id = threadIdx.x / width;

	sums[lane_id] = 0;

	// Below is the basic structure of using a shfl instruction
	// for a scan.
	// Record "value" as a variable - we accumulate it along the way
	uint32_t value;
	if(testFunc != NULL)
	{
		if (id < threads)
		{
			uint32_t *inpHash;
			if(d_validNonceTable == NULL)
			{
				// no nonce list: the thread id indexes the hash directly
				inpHash = &inpHashes[id<<4];
			}else
			{
				// nonce list available: translate nonce to hash position
				int nonce = d_validNonceTable[id] - startNounce;
				inpHash = &inpHashes[nonce<<4];
			}
			value = (*testFunc)(inpHash);
		}else
		{
			value = 0;
		}
	}else
	{
		value = data[id];
	}

	__syncthreads();

	// Now accumulate in log steps up the chain
	// compute sums, with another thread's value who is
	// distance delta away (i). Note
	// those threads where the thread 'i' away would have
	// been out of bounds of the warp are unaffected. This
	// creates the scan sum.
#pragma unroll

	for (int i=1; i<=width; i*=2)
	{
		uint32_t n = __shfl_up((int)value, i, width);

		if (lane_id >= i) value += n;
	}

	// value now holds the scan value for the individual thread
	// next sum the largest values for each warp

	// write the sum of the warp to smem
	//if (threadIdx.x % warpSize == warpSize-1)
	if (threadIdx.x % width == width-1)
	{
		sums[warp_id] = value;
	}

	__syncthreads();

	//
	// scan sum the warp sums
	// the same shfl scan operation, but performed on warp sums
	//
	if (warp_id == 0)
	{
		uint32_t warp_sum = sums[lane_id];

		for (int i=1; i<=width; i*=2)
		{
			uint32_t n = __shfl_up((int)warp_sum, i, width);

			if (lane_id >= i) warp_sum += n;
		}

		sums[lane_id] = warp_sum;
	}

	__syncthreads();

	// perform a uniform add across warps in the block
	// read neighbouring warp's sum and add it to threads value
	uint32_t blockSum = 0;

	if (warp_id > 0)
	{
		blockSum = sums[warp_id-1];
	}

	value += blockSum;

	// Now write out our result
	data[id] = value;

	// last thread has sum, write write out the block's sum
	if (partial_sums != NULL && threadIdx.x == blockDim.x-1)
	{
		partial_sums[blockIdx.x] = value;
	}
}
||||
|
||||
// Uniform add: distribute the per-block partial sums back onto the
// scanned data array (data[id] += partial_sums[blockIdx.x]).
// `len` is the number of valid elements in data[].
__global__ void quark_compactTest_gpu_ADD(uint32_t *data, uint32_t *partial_sums, int len)
{
	__shared__ uint32_t buf;
	int id = ((blockIdx.x * blockDim.x) + threadIdx.x);

	// Bounds guard. The original used `id > len`, which would let the
	// thread with id == len read/write one element past the end; valid
	// indices are 0..len-1.
	if (id >= len) return;

	// One thread per block fetches the partial sum for this block.
	if (threadIdx.x == 0)
	{
		buf = partial_sums[blockIdx.x];
	}

	__syncthreads();
	data[id] += buf;
}
||||
|
||||
// Scatter pass: compacts the nonces that pass `testFunc` into `outp`.
// `sum` holds the inclusive prefix sums produced by the SCAN/ADD passes,
// so a passing thread writes its nonce at output index sum[id]-1.
__global__ void quark_compactTest_gpu_SCATTER(uint32_t *sum, uint32_t *outp, cuda_compactTestFunction_t testFunc, int threads=0, uint32_t startNounce=0, uint32_t *inpHashes=NULL, uint32_t *d_validNonceTable=NULL)
{
	int id = ((blockIdx.x * blockDim.x) + threadIdx.x);
	uint32_t actNounce = id;
	uint32_t value;
	if (id < threads)
	{
//		uint32_t nounce = startNounce + id;
		uint32_t *inpHash;
		if(d_validNonceTable == NULL)
		{
			// no nonce list: the thread id indexes the hash directly
			inpHash = &inpHashes[id<<4];
		}else
		{
			// nonce list available: translate nonce to hash position
			int nonce = d_validNonceTable[id] - startNounce;
			actNounce = nonce;
			inpHash = &inpHashes[nonce<<4];
		}

		value = (*testFunc)(inpHash);
	}else
	{
		value = 0;
	}

	if( value )
	{
		// inclusive scan -> this thread's output slot is sum[id]-1
		int idx = sum[id];
		if(idx > 0)
			outp[idx-1] = startNounce + actNounce;
	}
}
||||
|
||||
// Round `val` up to the next power of two (returns `val` unchanged when
// it already is one, and 0 for an input of 0). Implemented with the
// classic bit-smearing trick instead of scanning for the top bit; for
// inputs above 0x80000000 the result wraps to 0, matching the original
// mask<<1 overflow behaviour.
__host__ static uint32_t quark_compactTest_roundUpExp(uint32_t val)
{
	if(val == 0)
		return 0;

	uint32_t v = val - 1;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	return v + 1;
}
||||
|
||||
// Host driver for one compaction pass: scan the predicate `function`
// over all hashes, chain scans over the per-block partial sums, add the
// partial sums back (uniform add), then scatter the passing nonces into
// d_nonces1. *nrm receives the total number of matches.
// `threads` is rounded up to a power of two for the scan passes; the
// predicate itself is only evaluated for the first `orgThreads` entries.
__host__ void quark_compactTest_cpu_singleCompaction(int thr_id, int threads, uint32_t *nrm,
													 uint32_t *d_nonces1, cuda_compactTestFunction_t function,
													 uint32_t startNounce, uint32_t *inpHashes, uint32_t *d_validNonceTable)
{
	int orgThreads = threads;
	threads = (int)quark_compactTest_roundUpExp((uint32_t)threads);
	// work out the launch geometry
	int blockSize = 256;
	int nSummen = threads / blockSize;   // number of per-block partial sums

	int thr1 = (threads+blockSize-1) / blockSize;
	int thr2 = threads / (blockSize*blockSize);
	int blockSize2 = (nSummen < blockSize) ? nSummen : blockSize;
	int thr3 = (nSummen + blockSize2-1) / blockSize2;

	// a third scan level is only needed for large thread counts
	bool callThrid = (thr2 > 0) ? true : false;

	// first, initial scan over the predicate results
	quark_compactTest_gpu_SCAN<<<thr1,blockSize, 32*sizeof(uint32_t)>>>(
		d_tempBranch1Nonces[thr_id], 32, d_partSum[0][thr_id], function, orgThreads, startNounce, inpHashes, d_validNonceTable);

	// further scan passes over the partial sums
	if(callThrid)
	{
		quark_compactTest_gpu_SCAN<<<thr2,blockSize, 32*sizeof(uint32_t)>>>(d_partSum[0][thr_id], 32, d_partSum[1][thr_id]);
		quark_compactTest_gpu_SCAN<<<1, thr2, 32*sizeof(uint32_t)>>>(d_partSum[1][thr_id], (thr2>32) ? 32 : thr2);
	}else
	{
		quark_compactTest_gpu_SCAN<<<thr3,blockSize2, 32*sizeof(uint32_t)>>>(d_partSum[0][thr_id], (blockSize2>32) ? 32 : blockSize2);
	}

	// sync, then read back the total match count (last scanned element)
	cudaStreamSynchronize(NULL);

	if(callThrid)
		cudaMemcpy(nrm, &(d_partSum[1][thr_id])[thr2-1], sizeof(uint32_t), cudaMemcpyDeviceToHost);
	else
		cudaMemcpy(nrm, &(d_partSum[0][thr_id])[nSummen-1], sizeof(uint32_t), cudaMemcpyDeviceToHost);


	// uniform add: fold the higher-level partial sums back down
	// (the first block needs no correction, hence the +blockSize offset
	// and the thr-1 grid size)
	if(callThrid)
	{
		quark_compactTest_gpu_ADD<<<thr2-1, blockSize>>>(d_partSum[0][thr_id]+blockSize, d_partSum[1][thr_id], blockSize*thr2);
	}
	quark_compactTest_gpu_ADD<<<thr1-1, blockSize>>>(d_tempBranch1Nonces[thr_id]+blockSize, d_partSum[0][thr_id], threads);

	// scatter the passing nonces into the output buffer
	quark_compactTest_gpu_SCATTER<<<thr1,blockSize,0>>>(d_tempBranch1Nonces[thr_id], d_nonces1,
		function, orgThreads, startNounce, inpHashes, d_validNonceTable);

	// sync
	cudaStreamSynchronize(NULL);
}
||||
|
||||
////// ATTENTION: currently this function only works with threads > 65536 (ideally 256*1024 or 256*2048)
// Runs the compaction twice over the same input: once with the "true"
// predicate (hash bit 3 set -> nrm[0] / d_nonces1) and once with the
// "false" predicate (bit 3 clear -> nrm[1] / d_nonces2).
// The previous hand-inlined variant of this function was kept here as a
// large commented-out block; it duplicated quark_compactTest_cpu_singleCompaction
// and has been removed as dead code.
__host__ void quark_compactTest_cpu_dualCompaction(int thr_id, int threads, uint32_t *nrm,
												   uint32_t *d_nonces1, uint32_t *d_nonces2,
												   uint32_t startNounce, uint32_t *inpHashes, uint32_t *d_validNonceTable)
{
	quark_compactTest_cpu_singleCompaction(thr_id, threads, &nrm[0], d_nonces1, h_QuarkTrueFunction[thr_id], startNounce, inpHashes, d_validNonceTable);
	quark_compactTest_cpu_singleCompaction(thr_id, threads, &nrm[1], d_nonces2, h_QuarkFalseFunction[thr_id], startNounce, inpHashes, d_validNonceTable);
}
||||
|
||||
// Host entry point: split the hashes reached via d_validNonceTable (or
// all `threads` hashes when the table is NULL) into the true-branch
// nonces (d_nonces1 / *nrm1) and false-branch nonces (d_nonces2 / *nrm2).
// When a validNonceTable is used, `threads` must be set to its length.
__host__ void quark_compactTest_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *inpHashes, uint32_t *d_validNonceTable,
											uint32_t *d_nonces1, size_t *nrm1,
											uint32_t *d_nonces2, size_t *nrm2,
											int order)
{
	quark_compactTest_cpu_dualCompaction(thr_id, threads,
		h_numValid[thr_id], d_nonces1, d_nonces2,
		startNounce, inpHashes, d_validNonceTable);

	// Busy sync costs some CPU, but is probably the better choice here.
	cudaStreamSynchronize(NULL);
	*nrm2 = (size_t)h_numValid[thr_id][1];
	*nrm1 = (size_t)h_numValid[thr_id][0];
}
||||
|
||||
// Host entry point: single compaction with the "false" predicate (hash
// bit 3 clear). The matching nonces go to d_nonces1 and their count to
// *nrm1. When a validNonceTable is used, `threads` must be its length.
__host__ void quark_compactTest_single_false_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *inpHashes, uint32_t *d_validNonceTable,
											uint32_t *d_nonces1, size_t *nrm1,
											int order)
{
	quark_compactTest_cpu_singleCompaction(thr_id, threads, h_numValid[thr_id],
		d_nonces1, h_QuarkFalseFunction[thr_id], startNounce, inpHashes, d_validNonceTable);

	// Busy sync costs some CPU, but is probably the better choice here.
	cudaStreamSynchronize(NULL);
	*nrm1 = (size_t)h_numValid[thr_id][0];
}
@ -0,0 +1,182 @@
@@ -0,0 +1,182 @@
|
||||
#include <cuda.h> |
||||
#include "cuda_runtime.h" |
||||
#include "device_launch_parameters.h" |
||||
|
||||
#include <stdio.h> |
||||
#include <memory.h> |
||||
|
||||
// Folgende Definitionen später durch header ersetzen |
||||
typedef unsigned char uint8_t; |
||||
typedef unsigned int uint32_t; |
||||
typedef unsigned long long uint64_t; |
||||
|
||||
// aus heavy.cu |
||||
extern cudaError_t MyStreamSynchronize(cudaStream_t stream, int situation, int thr_id); |
||||
|
||||
#include "cuda_helper.h" |
||||
|
||||
#define U32TO64_LE(p) \ |
||||
(((uint64_t)(*p)) | (((uint64_t)(*(p + 1))) << 32)) |
||||
|
||||
#define U64TO32_LE(p, v) \ |
||||
*p = (uint32_t)((v)); *(p+1) = (uint32_t)((v) >> 32); |
||||
|
||||
static const uint64_t host_keccak_round_constants[24] = { |
||||
0x0000000000000001ull, 0x0000000000008082ull, |
||||
0x800000000000808aull, 0x8000000080008000ull, |
||||
0x000000000000808bull, 0x0000000080000001ull, |
||||
0x8000000080008081ull, 0x8000000000008009ull, |
||||
0x000000000000008aull, 0x0000000000000088ull, |
||||
0x0000000080008009ull, 0x000000008000000aull, |
||||
0x000000008000808bull, 0x800000000000008bull, |
||||
0x8000000000008089ull, 0x8000000000008003ull, |
||||
0x8000000000008002ull, 0x8000000000000080ull, |
||||
0x000000000000800aull, 0x800000008000000aull, |
||||
0x8000000080008081ull, 0x8000000000008080ull, |
||||
0x0000000080000001ull, 0x8000000080008008ull |
||||
}; |
||||
|
||||
__constant__ uint64_t c_keccak_round_constants[24]; |
||||
|
||||
// One Keccak-f[1600] invocation: absorbs 72 bytes (the rate for
// Keccak-512) of `in` into state `s` (25 x 64-bit lanes), then runs all
// 24 rounds of the permutation. The round constants are passed in so the
// device can use the __constant__ copy.
static __device__ __forceinline__ void
keccak_block(uint64_t *s, const uint32_t *in, const uint64_t *keccak_round_constants) {
	size_t i;
	uint64_t t[5], u[5], v, w;

	/* absorb input: 9 lanes, each assembled from two little-endian words */
#pragma unroll 9
	for (i = 0; i < 72 / 8; i++, in += 2)
		s[i] ^= U32TO64_LE(in);

	for (i = 0; i < 24; i++) {
		/* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */
		t[0] = s[0] ^ s[5] ^ s[10] ^ s[15] ^ s[20];
		t[1] = s[1] ^ s[6] ^ s[11] ^ s[16] ^ s[21];
		t[2] = s[2] ^ s[7] ^ s[12] ^ s[17] ^ s[22];
		t[3] = s[3] ^ s[8] ^ s[13] ^ s[18] ^ s[23];
		t[4] = s[4] ^ s[9] ^ s[14] ^ s[19] ^ s[24];

		/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
		u[0] = t[4] ^ ROTL64(t[1], 1);
		u[1] = t[0] ^ ROTL64(t[2], 1);
		u[2] = t[1] ^ ROTL64(t[3], 1);
		u[3] = t[2] ^ ROTL64(t[4], 1);
		u[4] = t[3] ^ ROTL64(t[0], 1);

		/* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */
		s[0] ^= u[0]; s[5] ^= u[0]; s[10] ^= u[0]; s[15] ^= u[0]; s[20] ^= u[0];
		s[1] ^= u[1]; s[6] ^= u[1]; s[11] ^= u[1]; s[16] ^= u[1]; s[21] ^= u[1];
		s[2] ^= u[2]; s[7] ^= u[2]; s[12] ^= u[2]; s[17] ^= u[2]; s[22] ^= u[2];
		s[3] ^= u[3]; s[8] ^= u[3]; s[13] ^= u[3]; s[18] ^= u[3]; s[23] ^= u[3];
		s[4] ^= u[4]; s[9] ^= u[4]; s[14] ^= u[4]; s[19] ^= u[4]; s[24] ^= u[4];

		/* rho pi: b[..] = rotl(a[..], ..) */
		v = s[ 1];
		s[ 1] = ROTL64(s[ 6], 44);
		s[ 6] = ROTL64(s[ 9], 20);
		s[ 9] = ROTL64(s[22], 61);
		s[22] = ROTL64(s[14], 39);
		s[14] = ROTL64(s[20], 18);
		s[20] = ROTL64(s[ 2], 62);
		s[ 2] = ROTL64(s[12], 43);
		s[12] = ROTL64(s[13], 25);
		s[13] = ROTL64(s[19], 8);
		s[19] = ROTL64(s[23], 56);
		s[23] = ROTL64(s[15], 41);
		s[15] = ROTL64(s[ 4], 27);
		s[ 4] = ROTL64(s[24], 14);
		s[24] = ROTL64(s[21], 2);
		s[21] = ROTL64(s[ 8], 55);
		s[ 8] = ROTL64(s[16], 45);
		s[16] = ROTL64(s[ 5], 36);
		s[ 5] = ROTL64(s[ 3], 28);
		s[ 3] = ROTL64(s[18], 21);
		s[18] = ROTL64(s[17], 15);
		s[17] = ROTL64(s[11], 10);
		s[11] = ROTL64(s[ 7], 6);
		s[ 7] = ROTL64(s[10], 3);
		s[10] = ROTL64( v, 1);

		/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
		v = s[ 0]; w = s[ 1]; s[ 0] ^= (~w) & s[ 2]; s[ 1] ^= (~s[ 2]) & s[ 3]; s[ 2] ^= (~s[ 3]) & s[ 4]; s[ 3] ^= (~s[ 4]) & v; s[ 4] ^= (~v) & w;
		v = s[ 5]; w = s[ 6]; s[ 5] ^= (~w) & s[ 7]; s[ 6] ^= (~s[ 7]) & s[ 8]; s[ 7] ^= (~s[ 8]) & s[ 9]; s[ 8] ^= (~s[ 9]) & v; s[ 9] ^= (~v) & w;
		v = s[10]; w = s[11]; s[10] ^= (~w) & s[12]; s[11] ^= (~s[12]) & s[13]; s[12] ^= (~s[13]) & s[14]; s[13] ^= (~s[14]) & v; s[14] ^= (~v) & w;
		v = s[15]; w = s[16]; s[15] ^= (~w) & s[17]; s[16] ^= (~s[17]) & s[18]; s[17] ^= (~s[18]) & s[19]; s[18] ^= (~s[19]) & v; s[19] ^= (~v) & w;
		v = s[20]; w = s[21]; s[20] ^= (~w) & s[22]; s[21] ^= (~s[22]) & s[23]; s[22] ^= (~s[23]) & s[24]; s[23] ^= (~s[24]) & v; s[24] ^= (~v) & w;

		/* iota: a[0,0] ^= round constant */
		s[0] ^= keccak_round_constants[i];
	}
}
||||
|
||||
// One Keccak-512 hash per thread over a 64-byte input hash, written back
// in place. When g_nonceVector is non-NULL it lists the (compacted)
// nonces to process; otherwise nonces are startNounce + thread.
__global__ void quark_keccak512_gpu_hash_64(int threads, uint32_t startNounce, uint64_t *g_hash, uint32_t *g_nonceVector)
{
	int thread = (blockDim.x * blockIdx.x + threadIdx.x);
	if (thread < threads)
	{
		uint32_t nounce = (g_nonceVector != NULL) ? g_nonceVector[thread] : (startNounce + thread);

		int hashPosition = nounce - startNounce;
		uint32_t *inpHash = (uint32_t*)&g_hash[8 * hashPosition];

		// copy the 64-byte message (16 words) plus room for the padding
		uint32_t message[18];
#pragma unroll 16
		for(int i=0;i<16;i++)
			message[i] = inpHash[i];

		// Keccak padding for a 64-byte message in the 72-byte rate
		message[16] = 0x01;
		message[17] = 0x80000000;

		// initialize the state to all-zero lanes
		uint64_t keccak_gpu_state[25];
#pragma unroll 25
		for (int i=0; i<25; i++)
			keccak_gpu_state[i] = 0;

		// absorb the single block and permute
		keccak_block(keccak_gpu_state, message, c_keccak_round_constants);

		// squeeze the 64-byte digest (little-endian words)
		uint32_t hash[16];

#pragma unroll 8
		for (size_t i = 0; i < 64; i += 8) {
			U64TO32_LE((&hash[i/4]), keccak_gpu_state[i / 8]);
		}

		// done — write the result back over the input hash
		uint32_t *outpHash = (uint32_t*)&g_hash[8 * hashPosition];

#pragma unroll 16
		for(int i=0;i<16;i++)
			outpHash[i] = hash[i];
	}
}
||||
|
||||
// Setup: upload the 24 Keccak round constants into device constant memory.
__host__ void quark_keccak512_cpu_init(int thr_id, int threads)
{
	cudaMemcpyToSymbol(c_keccak_round_constants, host_keccak_round_constants,
		sizeof(host_keccak_round_constants), 0, cudaMemcpyHostToDevice);
}
||||
|
||||
// Host launcher for the 64-byte Keccak-512 kernel. When d_nonceVector is
// non-NULL it lists the (compacted) nonces to process; otherwise all
// `threads` consecutive nonces starting at startNounce are hashed.
__host__ void quark_keccak512_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order)
{
	// One thread per hash, fixed block size of 256 threads.
	const int tpb = 256;
	dim3 block(tpb);
	dim3 grid((threads + tpb - 1) / tpb);

	// This kernel uses no dynamic shared memory.
	quark_keccak512_gpu_hash_64<<<grid, block, 0>>>(threads, startNounce, (uint64_t*)d_hash, d_nonceVector);
	MyStreamSynchronize(NULL, order, thr_id);
}
@ -0,0 +1,274 @@
@@ -0,0 +1,274 @@
|
||||
|
||||
extern "C" |
||||
{ |
||||
#include "sph/sph_blake.h" |
||||
#include "sph/sph_bmw.h" |
||||
#include "sph/sph_groestl.h" |
||||
#include "sph/sph_skein.h" |
||||
#include "sph/sph_jh.h" |
||||
#include "sph/sph_keccak.h" |
||||
#include "miner.h" |
||||
} |
||||
|
||||
#include <stdint.h> |
||||
|
||||
// aus cpu-miner.c |
||||
extern int device_map[8]; |
||||
|
||||
// Speicher für Input/Output der verketteten Hashfunktionen |
||||
static uint32_t *d_hash[8]; |
||||
|
||||
// Speicher zur Generierung der Noncevektoren für die bedingten Hashes |
||||
static uint32_t *d_quarkNonces[8]; |
||||
static uint32_t *d_branch1Nonces[8]; |
||||
static uint32_t *d_branch2Nonces[8]; |
||||
static uint32_t *d_branch3Nonces[8]; |
||||
|
||||
extern void quark_blake512_cpu_init(int thr_id, int threads); |
||||
extern void quark_blake512_cpu_setBlock_80(void *pdata); |
||||
extern void quark_blake512_cpu_hash_80(int thr_id, int threads, uint32_t startNounce, uint32_t *d_hash, int order); |
||||
extern void quark_blake512_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); |
||||
|
||||
extern void quark_bmw512_cpu_init(int thr_id, int threads); |
||||
extern void quark_bmw512_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); |
||||
|
||||
extern void quark_groestl512_cpu_init(int thr_id, int threads); |
||||
extern void quark_groestl512_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); |
||||
extern void quark_doublegroestl512_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); |
||||
|
||||
extern void quark_skein512_cpu_init(int thr_id, int threads); |
||||
extern void quark_skein512_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); |
||||
|
||||
extern void quark_keccak512_cpu_init(int thr_id, int threads); |
||||
extern void quark_keccak512_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); |
||||
|
||||
extern void quark_jh512_cpu_init(int thr_id, int threads); |
||||
extern void quark_jh512_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); |
||||
|
||||
extern void quark_check_cpu_init(int thr_id, int threads); |
||||
extern void quark_check_cpu_setTarget(const void *ptarget); |
||||
extern uint32_t quark_check_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_inputHash, int order); |
||||
|
||||
extern void quark_compactTest_cpu_init(int thr_id, int threads); |
||||
extern void quark_compactTest_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *inpHashes, uint32_t *d_validNonceTable, |
||||
uint32_t *d_nonces1, size_t *nrm1, |
||||
uint32_t *d_nonces2, size_t *nrm2, |
||||
int order); |
||||
extern void quark_compactTest_single_false_cpu_hash_64(int thr_id, int threads, uint32_t startNounce, uint32_t *inpHashes, uint32_t *d_validNonceTable, |
||||
uint32_t *d_nonces1, size_t *nrm1, |
||||
int order); |
||||
|
||||
// Original Quark hash function, taken from a reference miner source —
// used on the CPU to validate candidate nonces found by the GPU.
// Chains: blake -> bmw -> (groestl|skein) -> groestl -> jh ->
// (blake|bmw) -> keccak -> skein -> (keccak|jh); each conditional step
// picks its algorithm from bit 3 of the current hash's first byte.
// Writes the first 32 bytes of the final digest to `state`.
inline void quarkhash(void *state, const void *input)
{
	unsigned char hash[64];

	sph_blake512_context ctx_blake;
	sph_bmw512_context ctx_bmw;
	sph_groestl512_context ctx_groestl;
	sph_jh512_context ctx_jh;
	sph_keccak512_context ctx_keccak;
	sph_skein512_context ctx_skein;

	// round 1: blake512 over the 80-byte block header
	sph_blake512_init(&ctx_blake);
	sph_blake512(&ctx_blake, input, 80);
	sph_blake512_close(&ctx_blake, hash);

	// round 2: bmw512
	sph_bmw512_init(&ctx_bmw);
	sph_bmw512(&ctx_bmw, hash, 64);
	sph_bmw512_close(&ctx_bmw, hash);

	// round 3: groestl512 or skein512, chosen by bit 3
	if (hash[0] & 0x8) {
		sph_groestl512_init(&ctx_groestl);
		sph_groestl512(&ctx_groestl, hash, 64);
		sph_groestl512_close(&ctx_groestl, hash);
	} else {
		sph_skein512_init(&ctx_skein);
		sph_skein512(&ctx_skein, hash, 64);
		sph_skein512_close(&ctx_skein, hash);
	}

	// round 4: groestl512 (unconditional)
	sph_groestl512_init(&ctx_groestl);
	sph_groestl512(&ctx_groestl, hash, 64);
	sph_groestl512_close(&ctx_groestl, hash);

	// round 5: jh512
	sph_jh512_init(&ctx_jh);
	sph_jh512(&ctx_jh, hash, 64);
	sph_jh512_close(&ctx_jh, hash);

	// round 6: blake512 or bmw512, chosen by bit 3
	if (hash[0] & 0x8) {
		sph_blake512_init(&ctx_blake);
		sph_blake512(&ctx_blake, hash, 64);
		sph_blake512_close(&ctx_blake, hash);
	} else {
		sph_bmw512_init(&ctx_bmw);
		sph_bmw512(&ctx_bmw, hash, 64);
		sph_bmw512_close(&ctx_bmw, hash);
	}

	// round 7: keccak512
	sph_keccak512_init(&ctx_keccak);
	sph_keccak512(&ctx_keccak, hash, 64);
	sph_keccak512_close(&ctx_keccak, hash);

	// round 8: skein512
	sph_skein512_init(&ctx_skein);
	sph_skein512(&ctx_skein, hash, 64);
	sph_skein512_close(&ctx_skein, hash);

	// round 9: keccak512 or jh512, chosen by bit 3
	if (hash[0] & 0x8) {
		sph_keccak512_init(&ctx_keccak);
		sph_keccak512(&ctx_keccak, hash, 64);
		sph_keccak512_close(&ctx_keccak, hash);
	} else {
		sph_jh512_init(&ctx_jh);
		sph_jh512(&ctx_jh, hash, 64);
		sph_jh512_close(&ctx_jh, hash);
	}

	// result: the first 256 bits of the final digest
	memcpy(state, hash, 32);
}
||||
|
||||
|
||||
extern bool opt_benchmark; |
||||
|
||||
// GPU scan loop for the Quark algorithm. Each iteration hashes
// `throughput` nonces through the chained-hash pipeline; the
// data-dependent branch points are handled by compacting the nonce list
// so that each branch kernel only runs on the nonces that take it.
// Returns 1 with pdata[19] = winning nonce when a hash below the target
// is found and CPU-verified; 0 when the nonce space is exhausted or a
// work restart is requested.
extern "C" int scanhash_quark(int thr_id, uint32_t *pdata,
	const uint32_t *ptarget, uint32_t max_nonce,
	unsigned long *hashes_done)
{
	const uint32_t first_nonce = pdata[19];

	// TODO: remove for a release! Testing/benchmark aid only.
	if (opt_benchmark)
		((uint32_t*)ptarget)[7] = 0x0000ff;

	const uint32_t Htarg = ptarget[7];

	const int throughput = 256*4096; // 100;

	static bool init[8] = {0,0,0,0,0,0,0,0};
	if (!init[thr_id])
	{
		cudaSetDevice(device_map[thr_id]);

		// copy constants, allocate device memory (16 words per hash)
		cudaMalloc(&d_hash[thr_id], 16 * sizeof(uint32_t) * throughput);
		quark_blake512_cpu_init(thr_id, throughput);
		quark_groestl512_cpu_init(thr_id, throughput);
		quark_skein512_cpu_init(thr_id, throughput);
		quark_bmw512_cpu_init(thr_id, throughput);
		quark_keccak512_cpu_init(thr_id, throughput);
		quark_jh512_cpu_init(thr_id, throughput);
		quark_check_cpu_init(thr_id, throughput);
		quark_compactTest_cpu_init(thr_id, throughput);
		cudaMalloc(&d_quarkNonces[thr_id], sizeof(uint32_t)*throughput);
		cudaMalloc(&d_branch1Nonces[thr_id], sizeof(uint32_t)*throughput);
		cudaMalloc(&d_branch2Nonces[thr_id], sizeof(uint32_t)*throughput);
		cudaMalloc(&d_branch3Nonces[thr_id], sizeof(uint32_t)*throughput);
		init[thr_id] = true;
	}

	// byte-swap the 80-byte header into big-endian form for hashing
	uint32_t endiandata[20];
	for (int k=0; k < 20; k++)
		be32enc(&endiandata[k], ((uint32_t*)pdata)[k]);

	quark_blake512_cpu_setBlock_80((void*)endiandata);
	quark_check_cpu_setTarget(ptarget);

	do {
		int order = 0;
		size_t nrm1=0, nrm2=0, nrm3=0;

		// first blake512 hash with CUDA (80-byte header input)
		quark_blake512_cpu_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id], order++);

		// this is the unconditional branch for BMW512
		quark_bmw512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++);

		// compact with the "false" predicate (hash bit 3 clear) into
		// branch3; nrm3 nonces survive
		quark_compactTest_single_false_cpu_hash_64(thr_id, throughput, pdata[19], d_hash[thr_id], NULL,
			d_branch3Nonces[thr_id], &nrm3,
			order++);

		// only the skein branch is pursued further here
		// NOTE(review): the groestl-branch nonces are not processed at
		// this split — confirm this is the intended trade-off
		quark_skein512_cpu_hash_64(thr_id, nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id], order++);

		// this is the unconditional branch for Groestl512
		quark_groestl512_cpu_hash_64(thr_id, nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id], order++);

		// this is the unconditional branch for JH512
		quark_jh512_cpu_hash_64(thr_id, nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id], order++);

		// split the remaining nonces into branch1 and branch2
		// according to (hash[0] & 0x8)
		quark_compactTest_cpu_hash_64(thr_id, nrm3, pdata[19], d_hash[thr_id], d_branch3Nonces[thr_id],
			d_branch1Nonces[thr_id], &nrm1,
			d_branch2Nonces[thr_id], &nrm2,
			order++);

		// this is the conditional branch for Blake512
		quark_blake512_cpu_hash_64(thr_id, nrm1, pdata[19], d_branch1Nonces[thr_id], d_hash[thr_id], order++);

		// this is the conditional branch for Bmw512
		quark_bmw512_cpu_hash_64(thr_id, nrm2, pdata[19], d_branch2Nonces[thr_id], d_hash[thr_id], order++);

		// this is the unconditional branch for Keccak512
		quark_keccak512_cpu_hash_64(thr_id, nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id], order++);

		// this is the unconditional branch for Skein512
		quark_skein512_cpu_hash_64(thr_id, nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id], order++);

		// split again into branch1 and branch2 according to (hash[0] & 0x8)
		quark_compactTest_cpu_hash_64(thr_id, nrm3, pdata[19], d_hash[thr_id], d_branch3Nonces[thr_id],
			d_branch1Nonces[thr_id], &nrm1,
			d_branch2Nonces[thr_id], &nrm2,
			order++);

		// this is the conditional branch for Keccak512
		quark_keccak512_cpu_hash_64(thr_id, nrm1, pdata[19], d_branch1Nonces[thr_id], d_hash[thr_id], order++);

		// this is the conditional branch for JH512
		quark_jh512_cpu_hash_64(thr_id, nrm2, pdata[19], d_branch2Nonces[thr_id], d_hash[thr_id], order++);

		// scan for winning hashes on the GPU
		uint32_t foundNonce = quark_check_cpu_hash_64(thr_id, nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id], order++);
		if (foundNonce != 0xffffffff)
		{
			// re-hash the candidate on the CPU and verify against the
			// full 256-bit target before reporting it
			uint32_t vhash64[8];
			be32enc(&endiandata[19], foundNonce);
			quarkhash(vhash64, endiandata);

			if ((vhash64[7]<=Htarg) && fulltest(vhash64, ptarget)) {

				pdata[19] = foundNonce;
				*hashes_done = (foundNonce - first_nonce + 1)/2;
				return 1;
			} else {
				applog(LOG_INFO, "GPU #%d: result for nonce $%08X does not validate on CPU!", thr_id, foundNonce);
			}
		}

		pdata[19] += throughput;

	} while (pdata[19] < max_nonce && !work_restart[thr_id].restart);

	*hashes_done = (pdata[19] - first_nonce + 1)/2;
	return 0;
}
Loading…
Reference in new issue