
Release 1.4.1, with blake cache (220MH/s)

master
Tanguy Pruvot 10 years ago
parent
commit
9140e7f8ad
  1. blake32.cu (151 lines changed)
  2. cpu-miner.c (12 lines changed)
  3. cpuminer-config.h (6 lines changed)
  4. util.c (6 lines changed)

blake32.cu (151 lines changed)

@@ -41,7 +41,7 @@ extern bool opt_n_threads;
extern bool opt_benchmark;
extern int device_map[8];
extern cudaError_t MyStreamSynchronize(cudaStream_t stream, int situation, int thr_id);
uint32_t crc32(const uint32_t *buf, size_t size);
__constant__
static uint32_t __align__(32) c_Target[8];
@@ -51,6 +51,15 @@ static uint32_t __align__(32) c_data[20];
static uint32_t *d_resNounce[8];
static uint32_t *h_resNounce[8];
static uint32_t extra_results[2] = { MAXU, MAXU };
#define USE_CACHE 1
#if USE_CACHE
__device__
static uint32_t cache[8];
__device__
static uint32_t prevsum = 0;
#endif
/* prefer uint32_t to prevent size conversions = speed +5/10 % */
__constant__
@@ -191,7 +200,7 @@ void blake256_compress(uint32_t *h, const uint32_t *block, const uint32_t T0, in
}
__global__
void blake256_gpu_hash_80(uint32_t threads, uint32_t startNounce, uint32_t *resNounce, const int blakerounds)
void blake256_gpu_hash_80(uint32_t threads, uint32_t startNounce, uint32_t *resNounce, const int blakerounds, const int crcsum)
{
uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
if (thread < threads)
@@ -200,11 +209,27 @@ void blake256_gpu_hash_80(uint32_t threads, uint32_t startNounce, uint32_t *resN
uint32_t h[8];
#pragma unroll
for(int i=0; i<8; i++)
for(int i=0; i<8; i++) {
h[i] = c_IV256[i];
}
#if !USE_CACHE
blake256_compress(h, c_data, 512, blakerounds);
#else
if (crcsum != prevsum) {
prevsum = crcsum;
blake256_compress(h, c_data, 512, blakerounds);
#pragma unroll
for(int i=0; i<8; i++) {
cache[i] = h[i];
}
} else {
#pragma unroll
for(int i=0; i<8; i++) {
h[i] = cache[i];
}
}
#endif
// ------ Close: Bytes 64 to 80 ------
uint32_t ending[4];
@@ -225,14 +250,18 @@ void blake256_gpu_hash_80(uint32_t threads, uint32_t startNounce, uint32_t *resN
}
}
/* keep the smallest nounce, hmm... */
if(resNounce[0] > nounce)
/* keep the smallest nounce, + extra one if found */
if (resNounce[0] > nounce) {
resNounce[1] = resNounce[0];
resNounce[0] = nounce;
}
else
resNounce[1] = nounce;
}
}
__host__
uint32_t blake256_cpu_hash_80(int thr_id, uint32_t threads, uint32_t startNounce, const int blakerounds)
uint32_t blake256_cpu_hash_80(int thr_id, uint32_t threads, uint32_t startNounce, const int blakerounds, const uint32_t crcsum)
{
const int threadsperblock = TPB;
uint32_t result = MAXU;
@@ -242,14 +271,15 @@ uint32_t blake256_cpu_hash_80(int thr_id, uint32_t threads, uint32_t startNounce
size_t shared_size = 0;
/* Check error on Ctrl+C or kill to prevent segfaults on exit */
if (cudaMemset(d_resNounce[thr_id], 0xff, sizeof(uint32_t)) != cudaSuccess)
if (cudaMemset(d_resNounce[thr_id], 0xff, 2*sizeof(uint32_t)) != cudaSuccess)
return result;
blake256_gpu_hash_80<<<grid, block, shared_size>>>(threads, startNounce, d_resNounce[thr_id], blakerounds);
blake256_gpu_hash_80<<<grid, block, shared_size>>>(threads, startNounce, d_resNounce[thr_id], blakerounds, crcsum);
cudaDeviceSynchronize();
if (cudaSuccess == cudaMemcpy(h_resNounce[thr_id], d_resNounce[thr_id], sizeof(uint32_t), cudaMemcpyDeviceToHost)) {
if (cudaSuccess == cudaMemcpy(h_resNounce[thr_id], d_resNounce[thr_id], 2*sizeof(uint32_t), cudaMemcpyDeviceToHost)) {
cudaThreadSynchronize();
result = *h_resNounce[thr_id];
result = h_resNounce[thr_id][0];
extra_results[0] = h_resNounce[thr_id][1];
}
return result;
}
@@ -269,9 +299,21 @@ extern "C" int scanhash_blake256(int thr_id, uint32_t *pdata, const uint32_t *pt
{
const uint32_t first_nonce = pdata[19];
static bool init[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
uint32_t throughput = min(TPB * 2048, max_nonce - first_nonce);
uint32_t throughput = min(TPB * 4096, max_nonce - first_nonce);
uint32_t crcsum = MAXU;
int rc = 0;
if (extra_results[0] != MAXU) {
// possible extra result found in previous call
if (first_nonce <= extra_results[0] && max_nonce >= extra_results[0]) {
pdata[19] = extra_results[0];
*hashes_done = pdata[19] - first_nonce + 1;
extra_results[0] = MAXU;
rc = 1;
goto exit_scan;
}
}
if (opt_benchmark)
((uint32_t*)ptarget)[7] = 0x00000f;
@@ -279,19 +321,22 @@ extern "C" int scanhash_blake256(int thr_id, uint32_t *pdata, const uint32_t *pt
if (opt_n_threads > 1) {
CUDA_SAFE_CALL(cudaSetDevice(device_map[thr_id]));
}
CUDA_SAFE_CALL(cudaMallocHost(&h_resNounce[thr_id], sizeof(uint32_t)));
CUDA_SAFE_CALL(cudaMalloc(&d_resNounce[thr_id], sizeof(uint32_t)));
CUDA_SAFE_CALL(cudaMallocHost(&h_resNounce[thr_id], 2*sizeof(uint32_t)));
CUDA_SAFE_CALL(cudaMalloc(&d_resNounce[thr_id], 2*sizeof(uint32_t)));
init[thr_id] = true;
}
if (throughput < (TPB * 2048))
applog(LOG_WARNING, "throughput=%u, start=%x, max=%x", throughput, first_nonce, max_nonce);
if (opt_debug && throughput < (TPB * 4096))
applog(LOG_DEBUG, "throughput=%u, start=%x, max=%x", throughput, first_nonce, max_nonce);
blake256_cpu_setBlock_80(pdata, ptarget);
#if USE_CACHE
crcsum = crc32(pdata, 64);
#endif
do {
// GPU HASH
uint32_t foundNonce = blake256_cpu_hash_80(thr_id, throughput, pdata[19], blakerounds);
uint32_t foundNonce = blake256_cpu_hash_80(thr_id, throughput, pdata[19], blakerounds, crcsum);
if (foundNonce != MAXU)
{
uint32_t endiandata[20];
@@ -309,6 +354,18 @@ extern "C" int scanhash_blake256(int thr_id, uint32_t *pdata, const uint32_t *pt
{
pdata[19] = foundNonce;
rc = 1;
if (extra_results[0] != MAXU) {
// Rare but possible if the throughput is big
be32enc(&endiandata[19], extra_results[0]);
blake256hash(vhashcpu, endiandata, blakerounds);
if (vhashcpu[7] <= Htarg && fulltest(vhashcpu, ptarget)) {
applog(LOG_NOTICE, "GPU found more than one result yippee!");
} else {
extra_results[0] = MAXU;
}
}
goto exit_scan;
}
else if (vhashcpu[7] > Htarg) {
@@ -340,3 +397,63 @@ exit_scan:
cudaDeviceSynchronize();
return rc;
}
static uint32_t crc32_tab[] = {
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
};
uint32_t crc32(const uint32_t *buf, size_t size)
{
const uint8_t *p;
uint32_t crc = 0;
p = (uint8_t *) buf;
crc = crc ^ ~0U;
while (size--)
crc = crc32_tab[(crc ^ *p++) & 0xFF] ^ (crc >> 8);
return crc ^ ~0U;
}
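
Context for the cache added in this file: the nonce occupies bytes 76..79 of the 80-byte block header, so the first blake256_compress call over bytes 0..63 produces the same midstate for every nonce in a work unit. The kernel keys that midstate on a CRC32 of the header data (crcsum) and only recomputes it when the checksum changes. A minimal host-side sketch of the same idea follows; blake256_compress_cpu is a hypothetical stand-in for one BLAKE-256 compression and is not part of this commit, while crc32 is the table-driven routine added above.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Assumed helper (hypothetical name): one BLAKE-256 compression over a 64-byte block. */
extern void blake256_compress_cpu(uint32_t h[8], const uint32_t block[16], uint32_t t0);
/* Same prototype as the crc32 added to blake32.cu above. */
extern uint32_t crc32(const uint32_t *buf, size_t size);

static uint32_t midstate[8];  /* cached chaining value after compressing bytes 0..63 */
static uint32_t midstate_crc; /* checksum of the header those values belong to */

/* Return the midstate for the constant 64-byte header prefix, recomputing it
 * only when the header (and therefore its CRC32) has changed. */
static const uint32_t *get_midstate(const uint32_t iv[8], const uint32_t header[16])
{
    uint32_t crc = crc32(header, 64);
    if (crc != midstate_crc) {
        memcpy(midstate, iv, 8 * sizeof(uint32_t));
        blake256_compress_cpu(midstate, header, 512); /* 512 bits consumed so far */
        midstate_crc = crc;
    }
    return midstate;
}

In the kernel the equivalent test runs per thread against the __device__ globals cache and prevsum; every thread of a launch receives the same crcsum and would store identical values, and candidate nonces are still re-hashed and full-tested on the CPU before submission.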

cpu-miner.c (12 lines changed)

@@ -582,8 +582,8 @@ static bool get_upstream_work(CURL *curl, struct work *work)
if (opt_debug && rc) {
timeval_subtract(&diff, &tv_end, &tv_start);
applog(LOG_DEBUG, "DEBUG: got new work in %d ms",
diff.tv_sec * 1000 + diff.tv_usec / 1000);
applog(LOG_DEBUG, "DEBUG: got new work in %u µs",
diff.tv_sec * 1000000 + diff.tv_usec);
}
json_decref(val);
@@ -1005,7 +1005,7 @@ static void *miner_thread(void *userdata)
work_restart[thr_id].restart = 1;
hashlog_purge_old();
// wait a bit for a new job...
usleep(1500*1000);
sleep(1);
(*nonceptr) = end_nonce + 1;
work_done = true;
continue;
@@ -1345,12 +1345,12 @@ static void *stratum_thread(void *userdata)
pthread_mutex_unlock(&g_work_lock);
if (stratum.job.clean) {
if (!opt_quiet)
applog(LOG_BLUE, "%s requested %s job %d restart, block %d", short_url, algo_names[opt_algo],
strtoul(stratum.job.job_id, NULL, 16), stratum.bloc_height);
applog(LOG_BLUE, "%s send a new %s block %d", short_url, algo_names[opt_algo],
stratum.bloc_height);
restart_threads();
hashlog_purge_old();
} else if (!opt_quiet) {
applog(LOG_BLUE, "%s send %s job %d, block %d", short_url, algo_names[opt_algo],
applog(LOG_BLUE, "%s send job %d for block %d", short_url,
strtoul(stratum.job.job_id, NULL, 16), stratum.bloc_height);
}
}

cpuminer-config.h (6 lines changed)

@@ -156,7 +156,7 @@
#define PACKAGE_NAME "ccminer"
/* Define to the full name and version of this package. */
#define PACKAGE_STRING "ccminer 2014.09.01"
#define PACKAGE_STRING "ccminer 2014.09.06"
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "ccminer"
@@ -165,7 +165,7 @@
#define PACKAGE_URL ""
/* Define to the version of this package. */
#define PACKAGE_VERSION "2014.09.01"
#define PACKAGE_VERSION "2014.09.06"
/* If using the C implementation of alloca, define if you know the
direction of stack growth for your system; otherwise it will be
@@ -188,7 +188,7 @@
#define USE_XOP 1
/* Version number of package */
#define VERSION "2014.09.01"
#define VERSION "2014.09.06"
/* Define curl_free() as free() if our version of curl lacks curl_free. */
/* #undef curl_free */

util.c (6 lines changed)

@@ -557,6 +557,9 @@ bool fulltest(const uint32_t *hash, const uint32_t *target)
rc = true;
break;
}
if (hash[1] == target[1]) {
applog(LOG_NOTICE, "We found a close match!");
}
}
if (!rc && opt_debug) {
@@ -1122,8 +1125,7 @@ static bool stratum_set_difficulty(struct stratum_ctx *sctx, json_t *params)
sctx->next_diff = diff;
pthread_mutex_unlock(&sctx->work_lock);
if (opt_debug)
applog(LOG_DEBUG, "Stratum difficulty set to %g", diff);
applog(LOG_INFO, "Stratum difficulty set to %g", diff);
return true;
}
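
For context on the fulltest hunk above: fulltest compares the candidate hash and the target as arrays of eight uint32_t words, starting from the most significant word (index 7) and walking down; the added hash[1] == target[1] test simply logs a "close match" whenever word 1 of the hash equals word 1 of the target. A rough sketch of that comparison, reconstructed from the hunk and the stock cpuminer fulltest rather than copied verbatim:

#include <stdbool.h>
#include <stdint.h>

/* Sketch of a fulltest-style check: hash and target are eight 32-bit words,
 * most significant word at index 7; accept when the hash is numerically <= target. */
static bool fulltest_sketch(const uint32_t *hash, const uint32_t *target)
{
    bool rc = true;
    for (int i = 7; i >= 0; i--) {
        if (hash[i] > target[i]) { rc = false; break; } /* hash above target: reject */
        if (hash[i] < target[i]) { rc = true;  break; } /* hash below target: accept */
        /* equal words: fall through and compare the next, less significant word */
    }
    return rc;
}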
