
whirlpoolx: fix for multi-gpu support

add also cputest entry and do some cleanup...
branch: master
author: Tanguy Pruvot, 9 years ago
commit: f8f46bfe5b
Changed files (5):
  1. ccminer.cpp             (10 lines changed)
  2. miner.h                  (1 line changed)
  3. util.cpp                 (4 lines changed)
  4. x15/cuda_whirlpoolx.cu  (65 lines changed)
  5. x15/whirlpoolx.cu       (41 lines changed)

ccminer.cpp  (10 lines changed)

@@ -100,8 +100,8 @@ enum sha_algos {
     ALGO_QUARK,
     ALGO_QUBIT,
     ALGO_S3,
-    ALGO_WHC,
-    ALGO_WHPX,
+    ALGO_WHIRLCOIN,
+    ALGO_WHIRLPOOLX,
     ALGO_X11,
     ALGO_X13,
     ALGO_X14,

@@ -1001,7 +1001,7 @@ static void stratum_gen_work(struct stratum_ctx *sctx, struct work *work)
     case ALGO_GROESTL:
     case ALGO_KECCAK:
     case ALGO_BLAKECOIN:
-    case ALGO_WHC:
+    case ALGO_WHIRLCOIN:
         SHA256((uchar*)sctx->job.coinbase, sctx->job.coinbase_size, (uchar*)merkle_root);
         break;
     default:

@@ -1401,12 +1401,12 @@ static void *miner_thread(void *userdata)
             max_nonce, &hashes_done);
         break;

-    case ALGO_WHC:
+    case ALGO_WHIRLCOIN:
         rc = scanhash_whc(thr_id, work.data, work.target,
             max_nonce, &hashes_done);
         break;

-    case ALGO_WHPX:
+    case ALGO_WHIRLPOOLX:
         rc = scanhash_whirlpoolx(thr_id, work.data, work.target,
             max_nonce, &hashes_done);
         break;

miner.h  (1 line changed)

@@ -667,6 +667,7 @@ void quarkhash(void *state, const void *input);
 void qubithash(void *state, const void *input);
 void s3hash(void *output, const void *input);
 void wcoinhash(void *state, const void *input);
+void whirlxHash(void *state, const void *input);
 void x11hash(void *output, const void *input);
 void x13hash(void *output, const void *input);
 void x14hash(void *output, const void *input);

util.cpp  (4 lines changed)

@@ -1760,6 +1760,10 @@ void print_hash_tests(void)
     wcoinhash(&hash[0], &buf[0]);
     printpfx("whirl", hash);

+    memset(hash, 0, sizeof hash);
+    whirlxHash(&hash[0], &buf[0]);
+    printpfx("whirlpoolx", hash);
+
     memset(hash, 0, sizeof hash);
     x11hash(&hash[0], &buf[0]);
     printpfx("X11", hash);

x15/cuda_whirlpoolx.cu  (65 lines changed)

@@ -155,7 +155,8 @@ uint64_t* d_xtra;
 uint64_t* d_tmp;

 __device__ __forceinline__
-static void getShared(uint64_t* sharedMemory){
+static void whirlpoolx_getShared(uint64_t* sharedMemory)
+{
     if (threadIdx.x < 256) {
         sharedMemory[threadIdx.x] = mixTob0Tox[threadIdx.x];
         sharedMemory[threadIdx.x+256] = ROTL64(sharedMemory[threadIdx.x], 8);

@@ -170,11 +171,12 @@ static void getShared(uint64_t* sharedMemory){
 }

-__global__ void precomputeX(int threads,uint64_t* d_xtra,uint64_t* d_tmp){
+__global__
+void whirlpoolx_gpu_precompute(int threads, uint64_t* d_xtra, uint64_t* d_tmp)
+{
     __shared__ uint64_t sharedMemory[2048];

-    getShared(sharedMemory);
+    whirlpoolx_getShared(sharedMemory);
     int thread = (blockDim.x * blockIdx.x + threadIdx.x);
     if (thread < threads)
     {

@@ -268,7 +270,7 @@ __global__ void precomputeX(int threads,uint64_t* d_xtra,uint64_t* d_tmp){
         n[1] ^= h[1];
         tmp2[1]^=sharedMemory[__byte_perm(n32[2], 0, 0x4440)];
         tmp2[2]^=sharedMemory[__byte_perm(n32[2], 0, 0x4441) + 256];
         tmp2[3]^=sharedMemory[__byte_perm(n32[2], 0, 0x4442) + 512];
         tmp2[4]^=sharedMemory[__byte_perm(n32[2], 0, 0x4443) + 768];
         d_tmp[threadIdx.x]=tmp2[threadIdx.x];

@@ -303,7 +305,7 @@ __global__ void precomputeX(int threads,uint64_t* d_xtra,uint64_t* d_tmp){
         tmp4[5]=(sharedMemory[__byte_perm(n32[ 8], 0, 0x4441) + 256] ^sharedMemory[__byte_perm(n32[ 6], 0, 0x4442) + 512]^
             sharedMemory[__byte_perm(n32[ 4], 0, 0x4443) + 768] ^sharedMemory[__byte_perm(n32[ 3], 0, 0x4440) + 1024]) ^tmp3[5];
         tmp4[6]=(sharedMemory[__byte_perm(n32[ 8], 0, 0x4442) + 512] ^sharedMemory[__byte_perm(n32[ 6], 0, 0x4443) + 768]^
             sharedMemory[__byte_perm(n32[ 5], 0, 0x4440) + 1024] ^sharedMemory[__byte_perm(n32[ 3], 0, 0x4441) + 1280]) ^tmp3[6];
         tmp4[7]=(sharedMemory[__byte_perm(n32[ 8], 0, 0x4443) + 768] ^sharedMemory[__byte_perm(n32[ 7], 0, 0x4440) + 1024]^

@@ -346,7 +348,7 @@ __global__ void precomputeX(int threads,uint64_t* d_xtra,uint64_t* d_tmp){
         tmp7[7] = ROUND_ELT(sharedMemory, tmp6, 7, 6, 5, 4, 3, 2, 1, 0);
         d_tmp[threadIdx.x+32]=tmp7[threadIdx.x];
-        //-------------------
         uint64_t tmp8[8];
         tmp8[0] = xor1(ROUND_ELT(sharedMemory, tmp7, 0, 7, 6, 5, 4, 3, 2, 1), InitVector_RC[5]);
         tmp8[1] = ROUND_ELT(sharedMemory, tmp7, 1, 0, 7, 6, 5, 4, 3, 2);

@@ -406,21 +408,19 @@ __global__ void precomputeX(int threads,uint64_t* d_xtra,uint64_t* d_tmp){
 }

 __global__ __launch_bounds__(threadsPerBlock,2)
-void whirlpoolx(uint32_t threads, uint32_t startNounce,uint32_t *resNounce){
+void whirlpoolx_gpu_hash(uint32_t threads, uint32_t startNounce, uint32_t *resNounce)
+{
     __shared__ uint64_t sharedMemory[2048];

-    getShared(sharedMemory);
+    whirlpoolx_getShared(sharedMemory);

     uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
-    if (thread < threads){
+    if (thread < threads)
+    {
         uint64_t n[8];
         uint64_t tmp[8];
         uint32_t nounce = startNounce + thread;

         n[1] = xor1(REPLACE_HIWORD(c_PaddedMessage80[9], cuda_swab32(nounce)),c_xtra[0]);
         uint32_t* n32 = (uint32_t*)&n[0];

@@ -527,48 +527,57 @@ void whirlpoolx(uint32_t threads, uint32_t startNounce,uint32_t *resNounce){
         tmp[6] = xor1(ROUND_ELT(sharedMemory, n, 6, 5, 4, 3, 2, 1, 0, 7), c_tmp[6+64]);
         tmp[7] = xor1(ROUND_ELT(sharedMemory, n, 7, 6, 5, 4, 3, 2, 1, 0), c_tmp[7+64]);

-        if (xor3(c_xtra[1],ROUND_ELT(sharedMemory, tmp, 3, 2, 1, 0, 7, 6, 5, 4),ROUND_ELT(sharedMemory, tmp, 5, 4, 3, 2, 1, 0, 7, 6)) <= pTarget[3])
-            atomicMin(&resNounce[0],nounce);
-    } // thread < threads
+        if (xor3(c_xtra[1], ROUND_ELT(sharedMemory, tmp, 3, 2, 1, 0, 7, 6, 5, 4), ROUND_ELT(sharedMemory, tmp, 5, 4, 3, 2, 1, 0, 7, 6)) <= pTarget[3]) {
+            atomicMin(&resNounce[0], nounce);
+        }
+    }
 }

-__host__ extern void whirlpoolx_cpu_init(int thr_id, int threads)
+__host__
+extern void whirlpoolx_cpu_init(int thr_id, int threads)
 {
     cudaMemcpyToSymbol(InitVector_RC, plain_RC, sizeof(plain_RC), 0, cudaMemcpyHostToDevice);
     cudaMemcpyToSymbol(mixTob0Tox, plain_T0, sizeof(plain_T0), 0, cudaMemcpyHostToDevice);
     cudaMalloc(&d_WXNonce[thr_id], sizeof(uint32_t));
     cudaMallocHost(&d_wxnounce[thr_id], sizeof(uint32_t));
-    cudaMalloc((void **)&d_xtra,8*sizeof(uint64_t));
-    cudaMalloc((void **)&d_tmp,8*9*sizeof(uint64_t));
+    cudaMalloc((void **)&d_xtra, 8 * sizeof(uint64_t));
+    CUDA_SAFE_CALL(cudaMalloc((void **)&d_tmp, 8 * 9 * sizeof(uint64_t)));
 }

-__host__ void whirlpoolx_setBlock_80(void *pdata, const void *ptarget)
+__host__
+void whirlpoolx_setBlock_80(void *pdata, const void *ptarget)
 {
     uint64_t PaddedMessage[16];
     memcpy(PaddedMessage, pdata, 80);
     memset((uint8_t*)&PaddedMessage+80, 0, 48);
     *(uint8_t*)(&PaddedMessage+80) = 0x80; /* ending */
     cudaMemcpyToSymbol(pTarget, ptarget, 4*sizeof(uint64_t), 0, cudaMemcpyHostToDevice);
-    cudaMemcpyToSymbol(c_PaddedMessage80, PaddedMessage, 16*sizeof(uint64_t), 0, cudaMemcpyHostToDevice);
+    cudaMemcpyToSymbol(c_PaddedMessage80, PaddedMessage, 16 * sizeof(uint64_t), 0, cudaMemcpyHostToDevice);
 }

-__host__ void whirlpoolx_precompute(){
+__host__
+void whirlpoolx_precompute()
+{
     dim3 grid(1);
     dim3 block(256);
-    precomputeX<<<grid, block>>>(8,&d_xtra[0],&d_tmp[0]);
+    whirlpoolx_gpu_precompute <<<grid, block>>>(8, &d_xtra[0], &d_tmp[0]);
     cudaThreadSynchronize();
-    cudaMemcpyToSymbol(c_xtra,d_xtra,8*sizeof(uint64_t),0,cudaMemcpyDeviceToDevice);
-    cudaMemcpyToSymbol(c_tmp,d_tmp,8*9*sizeof(uint64_t),0,cudaMemcpyDeviceToDevice);
+    cudaMemcpyToSymbol(c_xtra, d_xtra, 8 * sizeof(uint64_t), 0, cudaMemcpyDeviceToDevice);
+    cudaMemcpyToSymbol(c_tmp, d_tmp, 8 * 9 * sizeof(uint64_t), 0, cudaMemcpyDeviceToDevice);
 }

-__host__ extern uint32_t cpu_whirlpoolx(int thr_id, uint32_t threads, uint32_t startNounce)
+__host__
+uint32_t whirlpoolx_cpu_hash(int thr_id, uint32_t threads, uint32_t startNounce)
 {
     dim3 grid((threads + threadsPerBlock-1) / threadsPerBlock);
     dim3 block(threadsPerBlock);
     cudaMemset(d_WXNonce[thr_id], 0xff, sizeof(uint32_t));
-    whirlpoolx<<<grid, block>>>(threads, startNounce,d_WXNonce[thr_id]);
+    whirlpoolx_gpu_hash<<<grid, block>>>(threads, startNounce,d_WXNonce[thr_id]);
     cudaThreadSynchronize();
     cudaMemcpy(d_wxnounce[thr_id], d_WXNonce[thr_id], sizeof(uint32_t), cudaMemcpyDeviceToHost);
     return *d_wxnounce[thr_id];
 }
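The multi-GPU part of the fix comes down to the pattern visible above: the result nonce lives in per-thread arrays (d_WXNonce[thr_id] on the device, pinned d_wxnounce[thr_id] on the host) that are allocated after each scanning thread has bound its own device, so several GPUs can search disjoint nonce ranges without clobbering one another's result. Below is a self-contained sketch of that pattern with hypothetical names (per_gpu_init, per_gpu_scan, the MAX_GPUS value), not the ccminer code itself.

#include <cuda_runtime.h>
#include <stdint.h>

#define MAX_GPUS 8                          // assumption: stands in for ccminer's MAX_GPUS

static uint32_t *d_resNonce[MAX_GPUS];      // device-side "best nonce found", one per GPU
static uint32_t *h_resNonce[MAX_GPUS];      // pinned host mirror, one per GPU

void per_gpu_init(int thr_id, int device_id)
{
    cudaSetDevice(device_id);               // bind this mining thread to its own GPU first
    cudaMalloc(&d_resNonce[thr_id], sizeof(uint32_t));
    cudaMallocHost(&h_resNonce[thr_id], sizeof(uint32_t));
}

uint32_t per_gpu_scan(int thr_id)
{
    // 0xffffffff means "nothing found"; the kernel would lower it with atomicMin()
    cudaMemset(d_resNonce[thr_id], 0xff, sizeof(uint32_t));
    // whirlpoolx_gpu_hash<<<grid, block>>>(threads, startNounce, d_resNonce[thr_id]);
    cudaDeviceSynchronize();
    cudaMemcpy(h_resNonce[thr_id], d_resNonce[thr_id], sizeof(uint32_t), cudaMemcpyDeviceToHost);
    return *h_resNonce[thr_id];
}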

x15/whirlpoolx.cu  (41 lines changed)

@@ -2,8 +2,7 @@
  * whirlpool routine (djm)
  * whirlpoolx routine (provos alexis)
  */
-extern "C"
-{
+extern "C" {
 #include "sph/sph_whirlpool.h"
 #include "miner.h"
 }

@@ -14,13 +13,12 @@ static uint32_t *d_hash[MAX_GPUS];
 extern void whirlpoolx_cpu_init(int thr_id, int threads);
 extern void whirlpoolx_setBlock_80(void *pdata, const void *ptarget);
-extern uint32_t cpu_whirlpoolx(int thr_id, uint32_t threads, uint32_t startNounce);
+extern uint32_t whirlpoolx_cpu_hash(int thr_id, uint32_t threads, uint32_t startNounce);
 extern void whirlpoolx_precompute();

 // CPU Hash function
 extern "C" void whirlxHash(void *state, const void *input)
 {
     sph_whirlpool_context ctx_whirlpool;
     unsigned char hash[64];

@@ -41,10 +39,13 @@ extern "C" void whirlxHash(void *state, const void *input)
 static bool init[MAX_GPUS] = { 0 };

-extern "C" int scanhash_whirlpoolx(int thr_id, uint32_t *pdata,const uint32_t *ptarget, uint32_t max_nonce,unsigned long *hashes_done){
+extern "C" int scanhash_whirlpoolx(int thr_id, uint32_t *pdata, const uint32_t *ptarget,
+    uint32_t max_nonce, unsigned long *hashes_done)
+{
     const uint32_t first_nonce = pdata[19];
+    uint64_t n = first_nonce;
     uint32_t endiandata[20];
-    uint32_t throughput = pow(2,25);
+    uint32_t throughput = device_intensity(thr_id, __func__, 1U << 22);
     throughput = min(throughput, max_nonce - first_nonce);

     if (opt_benchmark)

@@ -52,28 +53,27 @@ extern "C" int scanhash_whirlpoolx(int thr_id, uint32_t *pdata,const uint32_t *p
     if (!init[thr_id]) {
         cudaSetDevice(device_map[thr_id]);
-        // Konstanten kopieren, Speicher belegen
-        cudaMalloc(&d_hash[thr_id], 16 * sizeof(uint32_t) * throughput);
+        cudaMalloc(&d_hash[thr_id], 64 * throughput);
         whirlpoolx_cpu_init(thr_id, throughput);
         init[thr_id] = true;
     }

     for (int k=0; k < 20; k++) {
-        be32enc(&endiandata[k], ((uint32_t*)pdata)[k]);
+        be32enc(&endiandata[k], pdata[k]);
     }

     whirlpoolx_setBlock_80((void*)endiandata, ptarget);
     whirlpoolx_precompute();
-    uint64_t n=pdata[19];
-    uint32_t foundNonce;

     do {
-        if(n+throughput>=max_nonce){
-            // applog(LOG_INFO, "GPU #%d: Preventing glitch.", thr_id);
-            throughput=max_nonce-n;
+        uint32_t foundNonce = UINT32_MAX;
+        if((n+throughput) >= max_nonce) {
+            // Preventing glitch
+            throughput = (uint32_t) (max_nonce-n);
         }
-        foundNonce = cpu_whirlpoolx(thr_id, throughput, n);
-        if (foundNonce != 0xffffffff)
+        foundNonce = whirlpoolx_cpu_hash(thr_id, throughput, (uint32_t) n);
+        if (foundNonce != UINT32_MAX)
         {
             const uint32_t Htarg = ptarget[7];
             uint32_t vhash64[8];

@@ -82,12 +82,7 @@ extern "C" int scanhash_whirlpoolx(int thr_id, uint32_t *pdata,const uint32_t *p
         if (vhash64[7] <= Htarg && fulltest(vhash64, ptarget)) {
             int res = 1;
-            // uint32_t secNonce = cuda_check_hash_suppl(thr_id, throughput, pdata[19], d_hash[thr_id], 1);
-            *hashes_done = n - first_nonce + throughput;
-            /* if (secNonce != 0) {
-                pdata[21] = secNonce;
-                res++;
-            }*/
+            *hashes_done = (unsigned long)(n - first_nonce + throughput);
             pdata[19] = foundNonce;
             return res;
         }

@@ -101,6 +96,6 @@ extern "C" int scanhash_whirlpoolx(int thr_id, uint32_t *pdata,const uint32_t *p
         n += throughput;
     } while (n < max_nonce && !work_restart[thr_id].restart);

-    *hashes_done = n - first_nonce;
+    *hashes_done = (unsigned long)(n - first_nonce);
     return 0;
 }
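Two details of the rewritten scan loop above do the heavy lifting: the running nonce n is 64-bit, so n + throughput cannot wrap before the comparison with max_nonce, and foundNonce is re-initialized to UINT32_MAX on every pass, so a hit from a previous iteration is never reported twice. A hedged skeleton of just that loop structure follows; scan_range is a hypothetical helper and the kernel call is stubbed out.

#include <stdint.h>

uint32_t scan_range(uint32_t first_nonce, uint32_t max_nonce, uint32_t throughput)
{
    uint64_t n = first_nonce;                       /* 64-bit on purpose: no wrap-around */
    while (n < max_nonce) {
        uint32_t foundNonce = UINT32_MAX;           /* reset every pass */
        if (n + throughput >= max_nonce)
            throughput = (uint32_t)(max_nonce - n); /* clamp the final pass */
        /* foundNonce = whirlpoolx_cpu_hash(thr_id, throughput, (uint32_t)n); */
        if (foundNonce != UINT32_MAX)
            return foundNonce;                      /* caller re-checks the hash on the CPU */
        n += throughput;
    }
    return UINT32_MAX;                              /* nothing found in the range */
}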
