Browse Source

update c11 like tribus + 2.2.1 readme

pull/2/head
Tanguy Pruvot 7 years ago
parent
commit
ebf055d585
  1. 4
      README.txt
  2. 2
      compat/ccminer-config.h
  3. 1
      tribus/tribus.cu
  4. 46
      x11/c11.cu

4
README.txt

@@ -1,5 +1,5 @@
ccminer 2.2 (August 2017) "Equihash, tribus and optimized skunk"
ccminer 2.2.1 (Sept. 2017) "optimized tribus kernel (Maxwell+)"
---------------------------------------------------------------
***************************************************************
@@ -277,6 +277,8 @@ so we can more efficiently implement new algorithms using the latest hardware
features.
>>> RELEASE HISTORY <<<
Sep. 01st 2017 v2.2.1
Improve tribus algo on recent cards (up to +10%)
Aug. 13th 2017 v2.2
New skunk algo, using the heavy streebog algorithm

2
compat/ccminer-config.h

@@ -164,7 +164,7 @@
#define PACKAGE_URL "http://github.com/tpruvot/ccminer"
/* Define to the version of this package. */
#define PACKAGE_VERSION "2.2"
#define PACKAGE_VERSION "2.2.1"
/* If using the C implementation of alloca, define if you know the
direction of stack growth for your system; otherwise it will be

1
tribus/tribus.cu

@@ -146,6 +146,7 @@ extern "C" int scanhash_tribus(int thr_id, struct work *work, uint32_t max_nonce
gpu_increment_reject(thr_id);
if (!opt_quiet)
gpulog(LOG_WARNING, thr_id, "result for %08x does not validate on CPU!", work->nonces[0]);
cudaMemset(d_resNonce[thr_id], 0xFF, 2 * sizeof(uint32_t));
pdata[19] = work->nonces[0] + 1;
continue;
}

46
x11/c11.cu

@@ -18,10 +18,13 @@ extern "C"
#include "cuda_helper.h"
#include "cuda_x11.h"
void tribus_echo512_final(int thr_id, uint32_t threads, uint32_t *d_hash, uint32_t *d_resNonce, const uint64_t target);
#include <stdio.h>
#include <memory.h>
static uint32_t *d_hash[MAX_GPUS];
static uint32_t *d_resNonce[MAX_GPUS];
// Flax/Chaincoin C11 CPU Hash
extern "C" void c11hash(void *output, const void *input)
@@ -103,6 +106,7 @@ extern "C" void c11hash(void *output, const void *input)
#endif
static bool init[MAX_GPUS] = { 0 };
static bool use_compat_kernels[MAX_GPUS] = { 0 };
extern "C" int scanhash_c11(int thr_id, struct work* work, uint32_t max_nonce, unsigned long *hashes_done)
{
@@ -118,7 +122,8 @@ extern "C" int scanhash_c11(int thr_id, struct work* work, uint32_t max_nonce, u
if (!init[thr_id])
{
cudaSetDevice(device_map[thr_id]);
int dev_id = device_map[thr_id];
cudaSetDevice(dev_id);
if (opt_cudaschedule == -1 && gpu_threads == 1) {
cudaDeviceReset();
// reduce cpu usage
@@ -127,6 +132,9 @@ extern "C" int scanhash_c11(int thr_id, struct work* work, uint32_t max_nonce, u
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
cuda_get_arch(thr_id);
use_compat_kernels[thr_id] = (cuda_arch[dev_id] < 500);
quark_blake512_cpu_init(thr_id, throughput);
quark_bmw512_cpu_init(thr_id, throughput);
quark_groestl512_cpu_init(thr_id, throughput);
@@ -135,11 +143,13 @@ extern "C" int scanhash_c11(int thr_id, struct work* work, uint32_t max_nonce, u
quark_jh512_cpu_init(thr_id, throughput);
x11_luffaCubehash512_cpu_init(thr_id, throughput);
x11_shavite512_cpu_init(thr_id, throughput);
x11_echo512_cpu_init(thr_id, throughput);
if (use_compat_kernels[thr_id])
x11_echo512_cpu_init(thr_id, throughput);
if (x11_simd512_cpu_init(thr_id, throughput) != 0) {
return 0;
}
CUDA_CALL_OR_RET_X(cudaMalloc(&d_hash[thr_id], 64 * throughput), 0); // why 64 ?
CUDA_CALL_OR_RET_X(cudaMalloc(&d_hash[thr_id], 64 * throughput), 0);
CUDA_SAFE_CALL(cudaMalloc(&d_resNonce[thr_id], 2 * sizeof(uint32_t)));
cuda_check_cpu_init(thr_id, throughput);
@@ -151,7 +161,10 @@ extern "C" int scanhash_c11(int thr_id, struct work* work, uint32_t max_nonce, u
be32enc(&endiandata[k], pdata[k]);
quark_blake512_cpu_setBlock_80(thr_id, endiandata);
cuda_check_cpu_setTarget(ptarget);
if (use_compat_kernels[thr_id])
cuda_check_cpu_setTarget(ptarget);
else
cudaMemset(d_resNonce[thr_id], 0xFF, 2 * sizeof(uint32_t));
do {
int order = 0;
@@ -175,24 +188,32 @@ extern "C" int scanhash_c11(int thr_id, struct work* work, uint32_t max_nonce, u
TRACE("shavite:");
x11_simd512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++);
TRACE("simd :");
x11_echo512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++);
TRACE("echo => ");
if (use_compat_kernels[thr_id]) {
x11_echo512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++);
work->nonces[0] = cuda_check_hash(thr_id, throughput, pdata[19], d_hash[thr_id]);
work->nonces[1] = UINT32_MAX;
} else {
tribus_echo512_final(thr_id, throughput, d_hash[thr_id], d_resNonce[thr_id], AS_U64(&ptarget[6]));
cudaMemcpy(&work->nonces[0], d_resNonce[thr_id], 2 * sizeof(uint32_t), cudaMemcpyDeviceToHost);
}
*hashes_done = pdata[19] - first_nonce + throughput;
work->nonces[0] = cuda_check_hash(thr_id, throughput, pdata[19], d_hash[thr_id]);
if (work->nonces[0] != UINT32_MAX)
{
const uint32_t Htarg = ptarget[7];
uint32_t _ALIGN(64) vhash[8];
const uint32_t Htarg = ptarget[7];
const uint32_t startNounce = pdata[19];
if (!use_compat_kernels[thr_id]) work->nonces[0] += startNounce;
be32enc(&endiandata[19], work->nonces[0]);
c11hash(vhash, endiandata);
if (vhash[7] <= Htarg && fulltest(vhash, ptarget)) {
work->valid_nonces = 1;
work_set_target_ratio(work, vhash);
work->nonces[1] = cuda_check_hash_suppl(thr_id, throughput, pdata[19], d_hash[thr_id], 1);
if (work->nonces[1] != 0) {
if (work->nonces[1] != UINT32_MAX) {
work->nonces[1] += startNounce;
be32enc(&endiandata[19], work->nonces[1]);
c11hash(vhash, endiandata);
bn_set_target_ratio(work, vhash, 1);
@@ -206,7 +227,8 @@ extern "C" int scanhash_c11(int thr_id, struct work* work, uint32_t max_nonce, u
else if (vhash[7] > Htarg) {
gpu_increment_reject(thr_id);
if (!opt_quiet)
gpulog(LOG_WARNING, thr_id, "result for %08x does not validate on CPU!", work->nonces[0]);
gpulog(LOG_WARNING, thr_id, "result for %08x does not validate on CPU!", work->nonces[0]);
cudaMemset(d_resNonce[thr_id], 0xFF, 2 * sizeof(uint32_t));
pdata[19] = work->nonces[0] + 1;
continue;
}
@@ -234,6 +256,8 @@ extern "C" void free_c11(int thr_id)
cudaThreadSynchronize();
cudaFree(d_hash[thr_id]);
cudaFree(d_resNonce[thr_id]);
quark_blake512_cpu_free(thr_id);
quark_groestl512_cpu_free(thr_id);
x11_simd512_cpu_free(thr_id);

Loading…
Cancel
Save