
Show intensity on init for all algos

Tanguy Pruvot, 8 years ago
commit 34e97bf3e6
Changed files (lines changed):

  1. Algo256/blake256.cu (1)
  2. Algo256/bmw.cu (6)
  3. Algo256/decred.cu (1)
  4. Algo256/keccak256.cu (10)
  5. Algo256/vanilla.cu (12)
  6. JHA/jackpotcoin.cu (1)
  7. ccminer.cpp (2)
  8. configure.ac (2)
  9. fuguecoin.cpp (7)
  10. groestlcoin.cpp (2)
  11. heavy/heavy.cu (1)
  12. lyra2/lyra2RE.cu (43)
  13. lyra2/lyra2REv2.cu (1)
  14. myriadgroestl.cpp (2)
  15. neoscrypt/neoscrypt.cpp (1)
  16. pentablake.cu (1)
  17. quark/quarkcoin.cu (1)
  18. qubit/deep.cu (3)
  19. qubit/luffa.cu (1)
  20. qubit/qubit.cu (1)
  21. scrypt-jane.cpp (3)
  22. scrypt.cpp (3)
  23. sia.cu (1)
  24. skein.cu (1)
  25. skein2.cpp (1)
  26. x11/c11.cu (1)
  27. x11/fresh.cu (6)
  28. x11/s3.cu (1)
  29. x11/sib.cu (1)
  30. x11/x11.cu (1)
  31. x11/x11evo.cu (1)
  32. x13/x13.cu (1)
  33. x15/whirlpool.cu (2)
  34. x15/whirlpoolx.cu (1)
  35. x15/x14.cu (1)
  36. x15/x15.cu (1)
  37. x17/x17.cu (1)
  38. zr5.cu (1)

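Note: the added log line formats the intensity with %g, so throughput2intensity() returns a floating-point intensity derived from the configured CUDA thread count. Below is a minimal sketch of that relationship, assuming intensity is the (possibly fractional) base-2 exponent of the thread count; sketch_throughput2intensity() is illustrative only, not the actual ccminer helper.

#include <cstdint>
#include <cstdio>

// Hypothetical sketch: intensity as the (possibly fractional) base-2 exponent
// of the CUDA thread count, so a throughput of 1 << 20 maps back to 20.
static double sketch_throughput2intensity(uint32_t throughput)
{
    uint32_t bits = 0, ws = throughput;
    while (ws > 1) { ws >>= 1; bits++; }        // integer part: floor(log2(throughput))
    double intensity = (double) bits;
    uint32_t base = 1u << bits;
    if (bits && throughput > base)              // fractional part for non powers of two
        intensity += (double)(throughput - base) / base;
    return intensity;
}

int main()
{
    uint32_t throughput = 1u << 20;             // e.g. a default -i 20 launch
    printf("Intensity set to %g, %u cuda threads\n",
        sketch_throughput2intensity(throughput), throughput);
    return 0;
}
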
Algo256/blake256.cu (1)

@@ -504,6 +504,7 @@ extern "C" int scanhash_blake256(int thr_id, struct work* work, uint32_t max_non
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
CUDA_CALL_OR_RET_X(cudaMalloc(&d_resNonce[thr_id], NBN * sizeof(uint32_t)), -1);
CUDA_CALL_OR_RET_X(cudaMallocHost(&h_resNonce[thr_id], NBN * sizeof(uint32_t)), -1);

Algo256/bmw.cu (6)

@@ -52,6 +52,12 @@ extern "C" int scanhash_bmw(int thr_id, struct work* work, uint32_t max_nonce, u
if (!init[thr_id]) {
cudaSetDevice(device_map[thr_id]);
if (opt_cudaschedule == -1 && gpu_threads == 1) {
cudaDeviceReset();
// reduce cpu usage
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
cuda_check_cpu_init(thr_id, throughput);
bmw256_midstate_init(thr_id, throughput);

Algo256/decred.cu (1)

@@ -376,6 +376,7 @@ extern "C" int scanhash_decred(int thr_id, struct work* work, uint32_t max_nonce
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
CUDA_CALL_OR_RET_X(cudaMalloc(&d_resNonce[thr_id], maxResults*sizeof(uint32_t)), -1);
CUDA_CALL_OR_RET_X(cudaMallocHost(&h_resNonce[thr_id], maxResults*sizeof(uint32_t)), -1);

Algo256/keccak256.cu (10)

@@ -48,8 +48,16 @@ extern "C" int scanhash_keccak256(int thr_id, struct work* work, uint32_t max_no
if (opt_benchmark)
ptarget[7] = 0x000f;
if (!init[thr_id]) {
if (!init[thr_id])
{
cudaSetDevice(device_map[thr_id]);
if (opt_cudaschedule == -1 && gpu_threads == 1) {
cudaDeviceReset();
// reduce cpu usage
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
CUDA_SAFE_CALL(cudaMalloc(&d_hash[thr_id], throughput * 64));
keccak256_cpu_init(thr_id, throughput);

Algo256/vanilla.cu (12)

@@ -378,6 +378,11 @@ extern "C" int scanhash_vanilla(int thr_id, struct work* work, uint32_t max_nonc
const uint32_t targetHigh = ptarget[6];
int dev_id = device_map[thr_id];
int intensity = (device_sm[dev_id] > 500 && !is_windows()) ? 30 : 24;
if (device_sm[dev_id] < 350) intensity = 22;
uint32_t throughput = cuda_default_throughput(thr_id, 1U << intensity);
if (init[thr_id]) throughput = min(throughput, max_nonce - first_nonce);
if (!init[thr_id]) {
cudaSetDevice(dev_id);
if (opt_cudaschedule == -1 && gpu_threads == 1) {
@@ -387,6 +392,8 @@ extern "C" int scanhash_vanilla(int thr_id, struct work* work, uint32_t max_nonc
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
CUDA_CALL_OR_RET_X(cudaMalloc(&d_resNonce[thr_id], NBN * sizeof(uint32_t)), -1);
CUDA_CALL_OR_RET_X(cudaMallocHost(&h_resNonce[thr_id], NBN * sizeof(uint32_t)), -1);
cudaStreamCreate(&streams[thr_id]);
@@ -402,11 +409,6 @@ extern "C" int scanhash_vanilla(int thr_id, struct work* work, uint32_t max_nonc
vanilla_cpu_setBlock_16(thr_id,endiandata,&pdata[16]);
int intensity = (device_sm[dev_id] > 500 && !is_windows()) ? 30 : 24;
if (device_sm[dev_id] < 350) intensity = 22;
uint32_t throughput = cuda_default_throughput(thr_id, 1U << intensity);
if (init[thr_id]) throughput = min(throughput, max_nonce - first_nonce);
const dim3 grid((throughput + (NPT*TPB)-1)/(NPT*TPB));
const dim3 block(TPB);
int rc = 0;

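The vanilla.cu hunks appear to hoist the intensity/throughput computation above the one-time init block (and drop the duplicate computation further down), so the value is available when the new log line runs. A self-contained sketch of that ordering follows; the *_stub helpers are hypothetical stand-ins for the ccminer ones, added only so the example compiles on its own.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Stubs standing in for ccminer helpers, only so the pattern compiles on its own.
static uint32_t cuda_default_throughput_stub(int /*thr_id*/, uint32_t def) { return def; }
static double throughput2intensity_stub(uint32_t t) { double i = 0; while (t > 1) { t >>= 1; i += 1; } return i; }

// Illustrative only: throughput is computed before the init block so the
// init-time log can report it, and it is reused for the launches afterwards.
static int scanhash_sketch(int thr_id, uint32_t first_nonce, uint32_t max_nonce)
{
    static bool init[8] = { false };
    int intensity = 24;                                  // placeholder default
    uint32_t throughput = cuda_default_throughput_stub(thr_id, 1U << intensity);
    if (init[thr_id]) throughput = std::min(throughput, max_nonce - first_nonce);

    if (!init[thr_id]) {
        // ... device selection and buffer allocation would go here ...
        printf("GPU T%d: Intensity set to %g, %u cuda threads\n",
            thr_id, throughput2intensity_stub(throughput), throughput);
        init[thr_id] = true;
    }
    // ... kernel launches sized by throughput would follow ...
    return 0;
}

int main() { return scanhash_sketch(0, 0, 0xFFFFFFFFu); }
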
JHA/jackpotcoin.cu (1)

@@ -105,6 +105,7 @@ extern "C" int scanhash_jackpot(int thr_id, struct work *work, uint32_t max_nonc
gpulog(LOG_ERR, thr_id, "Sorry, This algo is not supported by this GPU arch (SM 3.0 required)");
proper_exit(EXIT_CODE_CUDA_ERROR);
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
CUDA_SAFE_CALL(cudaMalloc(&d_hash[thr_id], (size_t) 64 * throughput));

ccminer.cpp (2)

@@ -2981,8 +2981,6 @@ void parse_arg(int key, char *arg)
}
else if (gpus_intensity[n] != (1 << v)) {
gpus_intensity[n] = (1 << v);
applog(LOG_INFO, "Intensity set to %u, %u cuda threads",
v, gpus_intensity[n]);
}
}
last = gpus_intensity[n];

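Per the -2981,8 +2981,6 header, the two-line applog call in this hunk is what the commit removes: it only fired while parsing -i, and each algo now reports intensity at init instead. For reference, the mapping visible in the surrounding parse_arg() context is 2^v threads for an integer intensity v; a tiny sketch is below (gpus_intensity_sketch is a hypothetical name, not the real ccminer array).

#include <cstdint>
#include <cstdio>

int main()
{
    // Sketch of the -i mapping shown in the parse_arg() context above:
    // an integer intensity v selects 2^v CUDA threads per scan call.
    uint32_t gpus_intensity_sketch[1] = { 0 };
    int v = 19;                                  // e.g. "-i 19"
    gpus_intensity_sketch[0] = 1u << v;
    printf("-i %d -> %u cuda threads\n", v, gpus_intensity_sketch[0]);
    return 0;
}
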
configure.ac (2)

@@ -1,4 +1,4 @@
AC_INIT([ccminer], [1.8.2], [], [ccminer], [http://github.com/tpruvot/ccminer])
AC_INIT([ccminer], [1.8.3], [], [ccminer], [http://github.com/tpruvot/ccminer])
AC_PREREQ([2.59c])
AC_CANONICAL_SYSTEM

fuguecoin.cpp (7)

@@ -40,6 +40,13 @@ int scanhash_fugue256(int thr_id, struct work* work, uint32_t max_nonce, unsigne
if(!init[thr_id])
{
cudaSetDevice(device_map[thr_id]);
if (opt_cudaschedule == -1 && gpu_threads == 1) {
cudaDeviceReset();
// reduce cpu usage
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
fugue256_cpu_init(thr_id, throughput);
init[thr_id] = true;

groestlcoin.cpp (2)

@@ -48,6 +48,8 @@ int scanhash_groestlcoin(int thr_id, struct work *work, uint32_t max_nonce, unsi
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
CUDA_LOG_ERROR();
groestlcoin_cpu_init(thr_id, throughput);
init[thr_id] = true;

heavy/heavy.cu (1)

@@ -178,6 +178,7 @@ int scanhash_heavy(int thr_id, struct work *work, uint32_t max_nonce, unsigned l
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
hefty_cpu_init(thr_id, throughput);
sha256_cpu_init(thr_id, throughput);

lyra2/lyra2RE.cu (43)

@@ -79,7 +79,7 @@ extern "C" void lyra2re_hash(void *state, const void *input)
}
static bool init[MAX_GPUS] = { 0 };
static uint32_t throughput[MAX_GPUS] = { 0 };
static __thread uint32_t throughput = 0;
extern "C" int scanhash_lyra2(int thr_id, struct work* work, uint32_t max_nonce, unsigned long *hashes_done)
{
@@ -99,9 +99,8 @@ extern "C" int scanhash_lyra2(int thr_id, struct work* work, uint32_t max_nonce,
int intensity = (device_sm[dev_id] >= 500 && !is_windows()) ? 17 : 16;
if (device_sm[device_map[thr_id]] == 500) intensity = 15;
int temp = intensity;
throughput[thr_id] = cuda_default_throughput(thr_id, 1U << intensity); // 18=256*256*4;
if (init[thr_id]) throughput[thr_id] = min(throughput[thr_id], max_nonce - first_nonce);
throughput = cuda_default_throughput(thr_id, 1U << intensity); // 18=256*256*4;
if (init[thr_id]) throughput = min(throughput, max_nonce - first_nonce);
cudaDeviceProp props;
cudaGetDeviceProperties(&props, dev_id);
@@ -109,25 +108,23 @@ extern "C" int scanhash_lyra2(int thr_id, struct work* work, uint32_t max_nonce,
if (strstr(props.name, "750 Ti")) gtx750ti = true;
else gtx750ti = false;
blake256_cpu_init(thr_id, throughput[thr_id]);
keccak256_cpu_init(thr_id, throughput[thr_id]);
skein256_cpu_init(thr_id, throughput[thr_id]);
groestl256_cpu_init(thr_id, throughput[thr_id]);
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
blake256_cpu_init(thr_id, throughput);
keccak256_cpu_init(thr_id, throughput);
skein256_cpu_init(thr_id, throughput);
groestl256_cpu_init(thr_id, throughput);
if (device_sm[dev_id] >= 500)
{
size_t matrix_sz = device_sm[dev_id] > 500 ? sizeof(uint64_t) * 4 * 4 : sizeof(uint64_t) * 8 * 8 * 3 * 4;
CUDA_SAFE_CALL(cudaMalloc(&d_matrix[thr_id], matrix_sz * throughput[thr_id]));
lyra2_cpu_init(thr_id, throughput[thr_id], d_matrix[thr_id]);
CUDA_SAFE_CALL(cudaMalloc(&d_matrix[thr_id], matrix_sz * throughput));
lyra2_cpu_init(thr_id, throughput, d_matrix[thr_id]);
}
CUDA_SAFE_CALL(cudaMalloc(&d_hash[thr_id], (size_t)32 * throughput[thr_id]));
CUDA_SAFE_CALL(cudaMalloc(&d_hash[thr_id], (size_t)32 * throughput));
init[thr_id] = true;
if (temp != intensity){
gpulog(LOG_INFO, thr_id, "Intensity set to %u, %u cuda threads",
intensity, throughput[thr_id]);
}
}
uint32_t _ALIGN(128) endiandata[20];
@@ -141,15 +138,15 @@ extern "C" int scanhash_lyra2(int thr_id, struct work* work, uint32_t max_nonce,
int order = 0;
uint32_t foundNonce;
blake256_cpu_hash_80(thr_id, throughput[thr_id], pdata[19], d_hash[thr_id], order++);
keccak256_cpu_hash_32(thr_id, throughput[thr_id], pdata[19], d_hash[thr_id], order++);
lyra2_cpu_hash_32(thr_id, throughput[thr_id], pdata[19], d_hash[thr_id], gtx750ti);
skein256_cpu_hash_32(thr_id, throughput[thr_id], pdata[19], d_hash[thr_id], order++);
blake256_cpu_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id], order++);
keccak256_cpu_hash_32(thr_id, throughput, pdata[19], d_hash[thr_id], order++);
lyra2_cpu_hash_32(thr_id, throughput, pdata[19], d_hash[thr_id], gtx750ti);
skein256_cpu_hash_32(thr_id, throughput, pdata[19], d_hash[thr_id], order++);
TRACE("S")
*hashes_done = pdata[19] - first_nonce + throughput[thr_id];
*hashes_done = pdata[19] - first_nonce + throughput;
foundNonce = groestl256_cpu_hash_32(thr_id, throughput[thr_id], pdata[19], d_hash[thr_id], order++);
foundNonce = groestl256_cpu_hash_32(thr_id, throughput, pdata[19], d_hash[thr_id], order++);
if (foundNonce != UINT32_MAX)
{
uint32_t _ALIGN(64) vhash64[8];
@@ -181,11 +178,11 @@ extern "C" int scanhash_lyra2(int thr_id, struct work* work, uint32_t max_nonce,
}
}
if ((uint64_t)throughput[thr_id] + pdata[19] >= max_nonce) {
if ((uint64_t)throughput + pdata[19] >= max_nonce) {
pdata[19] = max_nonce;
break;
}
pdata[19] += throughput[thr_id];
pdata[19] += throughput;
} while (!work_restart[thr_id].restart);

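Besides the new log line, the lyra2RE.cu hunks replace the per-GPU array static uint32_t throughput[MAX_GPUS] with a single static __thread uint32_t throughput, so each mining thread keeps its own copy without indexing by thr_id. A minimal sketch of the two storage patterns is below; the names and values are illustrative, and standard C++ thread_local is used here in place of the GCC __thread extension used by the commit.

#include <cstdint>
#include <cstdio>
#include <thread>

#define MAX_GPUS_SKETCH 4

// Old pattern: one slot per mining thread, indexed explicitly by thr_id.
static uint32_t throughput_per_gpu[MAX_GPUS_SKETCH] = { 0 };

// New pattern: one thread-local copy per mining thread, no index needed.
static thread_local uint32_t throughput_tls = 0;

static void miner_thread(int thr_id)
{
    throughput_per_gpu[thr_id] = 1u << (16 + thr_id); // old: shared array slot
    throughput_tls = 1u << (16 + thr_id);             // new: this thread's own copy
    printf("thr %d: array=%u tls=%u\n", thr_id,
        throughput_per_gpu[thr_id], throughput_tls);
}

int main()
{
    std::thread a(miner_thread, 0), b(miner_thread, 1);
    a.join(); b.join();
    return 0;
}
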
lyra2/lyra2REv2.cu (1)

@@ -113,6 +113,7 @@ extern "C" int scanhash_lyra2v2(int thr_id, struct work* work, uint32_t max_nonc
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
blake256_cpu_init(thr_id, throughput);
keccak256_cpu_init(thr_id,throughput);

myriadgroestl.cpp (2)

@@ -55,6 +55,8 @@ int scanhash_myriad(int thr_id, struct work *work, uint32_t max_nonce, unsigned
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
myriadgroestl_cpu_init(thr_id, throughput);
init[thr_id] = true;
}

neoscrypt/neoscrypt.cpp (1)

@@ -40,6 +40,7 @@ int scanhash_neoscrypt(int thr_id, struct work* work, uint32_t max_nonce, unsign
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
cudaGetLastError(); // reset errors if device is not "reset"
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g (+5), %u cuda threads", throughput2intensity(throughput), throughput);
if (device_sm[dev_id] <= 300) {
gpulog(LOG_ERR, thr_id, "Sorry neoscrypt is not supported on SM 3.0 devices");

pentablake.cu (1)

@@ -69,6 +69,7 @@ extern "C" int scanhash_pentablake(int thr_id, struct work *work, uint32_t max_n
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
CUDA_SAFE_CALL(cudaMalloc(&d_hash[thr_id], (size_t) 64 * throughput));

quark/quarkcoin.cu (1)

@@ -145,6 +145,7 @@ extern "C" int scanhash_quark(int thr_id, struct work* work, uint32_t max_nonce,
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
cudaGetLastError();
CUDA_SAFE_CALL(cudaMalloc(&d_hash[thr_id], (size_t) 64 * throughput));

qubit/deep.cu (3)

@@ -66,8 +66,9 @@ extern "C" int scanhash_deep(int thr_id, struct work* work, uint32_t max_nonce,
cudaDeviceReset();
// reduce cpu usage
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
}
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
CUDA_SAFE_CALL(cudaMalloc(&d_hash[thr_id], (size_t) 64 * throughput));

qubit/luffa.cu (1)

@@ -51,6 +51,7 @@ extern "C" int scanhash_luffa(int thr_id, struct work* work, uint32_t max_nonce,
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
CUDA_SAFE_CALL(cudaMalloc(&d_hash[thr_id], (size_t) 64 * throughput));

qubit/qubit.cu (1)

@@ -79,6 +79,7 @@ extern "C" int scanhash_qubit(int thr_id, struct work* work, uint32_t max_nonce,
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
qubit_luffa512_cpu_init(thr_id, throughput);
x11_cubehash512_cpu_init(thr_id, throughput);

scrypt-jane.cpp (3)

@@ -489,8 +489,9 @@ int scanhash_scrypt_jane(int thr_id, struct work *work, uint32_t max_nonce, unsi
cudaDeviceSynchronize();
cudaDeviceReset();
cudaSetDevice(dev_id);
throughput = cuda_throughput(thr_id);
applog(LOG_INFO, "GPU #%d: cuda throughput is %d", dev_id, throughput);
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
init[thr_id] = true;
}

scrypt.cpp (3)

@@ -721,8 +721,9 @@ int scanhash_scrypt(int thr_id, struct work *work, uint32_t max_nonce, unsigned
cudaDeviceSynchronize();
cudaDeviceReset();
cudaSetDevice(dev_id);
throughput = cuda_throughput(thr_id);
applog(LOG_INFO, "GPU #%d: cuda throughput is %d", dev_id, throughput);
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
init[thr_id] = true;
}

sia.cu (1)

@@ -214,6 +214,7 @@ int scanhash_sia(int thr_id, struct work *work, uint32_t max_nonce, unsigned lon
//cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
CUDA_CALL_OR_RET_X(cudaMalloc(&d_resNonces[thr_id], NBN * sizeof(uint32_t)), -1);
init[thr_id] = true;

skein.cu (1)

@@ -378,6 +378,7 @@ extern "C" int scanhash_skeincoin(int thr_id, struct work* work, uint32_t max_no
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
if (sm5) {
skeincoin_init(thr_id);

skein2.cpp (1)

@@ -62,6 +62,7 @@ int scanhash_skein2(int thr_id, struct work* work, uint32_t max_nonce, unsigned
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
cudaMalloc(&d_hash[thr_id], (size_t) 64 * throughput);

x11/c11.cu (1)

@@ -125,6 +125,7 @@ extern "C" int scanhash_c11(int thr_id, struct work* work, uint32_t max_nonce, u
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
quark_blake512_cpu_init(thr_id, throughput);
quark_bmw512_cpu_init(thr_id, throughput);

x11/fresh.cu (6)

@@ -83,7 +83,13 @@ extern "C" int scanhash_fresh(int thr_id, struct work* work, uint32_t max_nonce,
if (!init[thr_id])
{
cudaSetDevice(device_map[thr_id]);
if (opt_cudaschedule == -1 && gpu_threads == 1) {
cudaDeviceReset();
// reduce cpu usage
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
CUDA_CALL_OR_RET_X(cudaMalloc(&d_hash[thr_id], (size_t)64 * throughput + 4), -1);

x11/s3.cu (1)

@@ -86,6 +86,7 @@ extern "C" int scanhash_s3(int thr_id, struct work* work, uint32_t max_nonce, un
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
CUDA_SAFE_CALL(cudaMalloc(&d_hash[thr_id], (size_t) 64 * throughput));

x11/sib.cu (1)

@@ -120,6 +120,7 @@ extern "C" int scanhash_sib(int thr_id, struct work* work, uint32_t max_nonce, u
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
quark_blake512_cpu_init(thr_id, throughput);
quark_bmw512_cpu_init(thr_id, throughput);

x11/x11.cu (1)

@@ -114,6 +114,7 @@ extern "C" int scanhash_x11(int thr_id, struct work* work, uint32_t max_nonce, u
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
quark_blake512_cpu_init(thr_id, throughput);
quark_bmw512_cpu_init(thr_id, throughput);

x11/x11evo.cu (1)

@@ -257,6 +257,7 @@ extern "C" int scanhash_x11evo(int thr_id, struct work* work, uint32_t max_nonce
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
quark_blake512_cpu_init(thr_id, throughput);
quark_bmw512_cpu_init(thr_id, throughput);

x13/x13.cu (1)

@@ -133,6 +133,7 @@ extern "C" int scanhash_x13(int thr_id, struct work* work, uint32_t max_nonce, u
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
quark_blake512_cpu_init(thr_id, throughput);
quark_groestl512_cpu_init(thr_id, throughput);

x15/whirlpool.cu (2)

@@ -86,6 +86,8 @@ extern "C" int scanhash_whirl(int thr_id, struct work* work, uint32_t max_nonce,
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
CUDA_SAFE_CALL(cudaMalloc(&d_hash[thr_id], (size_t) 64 * throughput));
x15_whirlpool_cpu_init(thr_id, throughput, 1 /* old whirlpool */);

x15/whirlpoolx.cu (1)

@@ -59,6 +59,7 @@ extern "C" int scanhash_whirlx(int thr_id, struct work* work, uint32_t max_nonc
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
CUDA_CALL_OR_RET_X(cudaMalloc(&d_hash[thr_id], (size_t) 64 * throughput), -1);

x15/x14.cu (1)

@@ -147,6 +147,7 @@ extern "C" int scanhash_x14(int thr_id, struct work* work, uint32_t max_nonce,
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
quark_blake512_cpu_init(thr_id, throughput);
quark_groestl512_cpu_init(thr_id, throughput);

x15/x15.cu (1)

@@ -157,6 +157,7 @@ extern "C" int scanhash_x15(int thr_id, struct work* work, uint32_t max_nonce,
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
quark_blake512_cpu_init(thr_id, throughput);
quark_groestl512_cpu_init(thr_id, throughput);

x17/x17.cu (1)

@@ -172,6 +172,7 @@ extern "C" int scanhash_x17(int thr_id, struct work* work, uint32_t max_nonce, u
// reduce cpu usage
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
quark_blake512_cpu_init(thr_id, throughput);
quark_groestl512_cpu_init(thr_id, throughput);

zr5.cu (1)

@@ -357,6 +357,7 @@ extern "C" int scanhash_zr5(int thr_id, struct work *work,
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
// constants
cudaMemcpyToSymbol(c_permut, permut, 24*4, 0, cudaMemcpyHostToDevice);
