
Merge remote-tracking branch 'troky/nscrypt' into nfactor-troky

Branch: nfactor-troky
Noel Maersk, 10 years ago
commit c3ae56a505
Changed files:
  1. driver-opencl.c (57)
  2. kernel/nscrypt.cl (982)
  3. kernels.h (6)
  4. miner.h (3)
  5. ocl.c (39)
  6. scrypt.c (39)
  7. sgminer.c (4)
  8. util.c (42)
  9. util.h (2)
  10. winbuild/dist/include/config.h (6)
  11. winbuild/sgminer.vcxproj (1)
  12. winbuild/sgminer.vcxproj.filters (3)

driver-opencl.c (57)

@@ -206,6 +206,8 @@ static enum cl_kernels select_kernel(char *arg)
return KL_ZUIKKIS;
if (!strcmp(arg, PSW_KERNNAME))
return KL_PSW;
if (!strcmp(arg, NSCRYPT_KERNNAME))
return KL_NSCRYPT;
return KL_NONE;
}
@@ -1019,6 +1021,13 @@ static cl_int queue_scrypt_kernel(_clState *clState, dev_blk_ctx *blk, __maybe_u
unsigned int num = 0;
cl_uint le_target;
cl_int status = 0;
uint32_t timestamp;
cl_uint nfactor = 10; // scrypt default
if (use_nscrypt) {
timestamp = bswap_32(*((uint32_t *)(blk->work->data + 17*4)));
nfactor = vert_GetNfactor(timestamp) + 1;
}
le_target = *(cl_uint *)(blk->work->device_target + 28);
clState->cldata = blk->work->data;
@@ -1030,27 +1039,41 @@ static cl_int queue_scrypt_kernel(_clState *clState, dev_blk_ctx *blk, __maybe_u
CL_SET_VARG(4, &midstate[0]);
CL_SET_VARG(4, &midstate[16]);
CL_SET_ARG(le_target);
if (use_nscrypt) {
CL_SET_ARG(nfactor);
}
return status;
}
static void set_threads_hashes(unsigned int vectors, unsigned int compute_shaders, int64_t *hashes, size_t *globalThreads,
static void set_threads_hashes(unsigned int vectors, size_t shaders, int64_t *hashes, size_t *globalThreads,
unsigned int minthreads, __maybe_unused int *intensity, __maybe_unused int *xintensity, __maybe_unused int *rawintensity)
{
unsigned int threads = 0;
while (threads < minthreads) {
if (*rawintensity > 0) {
threads = *rawintensity;
} else if (*xintensity > 0) {
threads = compute_shaders * *xintensity;
} else {
threads = 1 << *intensity;
}
if (threads < minthreads) {
if (likely(*intensity < MAX_INTENSITY))
(*intensity)++;
else
threads = minthreads;
if (use_nscrypt && shaders) {
// new intensity calculation based on shader count
threads = (shaders * minthreads << (MAX_INTENSITY-19)) >> (MAX_INTENSITY - *intensity);
if (threads < minthreads)
threads = minthreads;
else if (threads % minthreads)
threads += minthreads - (threads % minthreads);
}
else {
while (threads < minthreads) {
if (*rawintensity > 0) {
threads = *rawintensity;
} else if (*xintensity > 0) {
threads = shaders * *xintensity;
} else {
threads = 1 << *intensity;
}
if (threads < minthreads) {
if (likely(*intensity < MAX_INTENSITY))
(*intensity)++;
else
threads = minthreads;
}
}
}
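
For reference, the new use_nscrypt branch in set_threads_hashes() above derives the global thread count from the shader count instead of stepping intensity until 2^intensity is large enough, then rounds the result up to a multiple of the minimum (work-group) thread count. A minimal standalone sketch of that arithmetic, using an assumed MAX_INTENSITY and made-up shader / work-group numbers (none of these values come from this commit):

#include <stdio.h>
#include <stddef.h>

#define MAX_INTENSITY 31 /* assumed value for illustration; the real constant lives in miner.h */

static size_t nscrypt_threads(size_t shaders, size_t minthreads, int intensity)
{
    size_t threads = (shaders * minthreads << (MAX_INTENSITY - 19)) >> (MAX_INTENSITY - intensity);
    if (threads < minthreads)
        threads = minthreads;                               /* never schedule less than one work-group */
    else if (threads % minthreads)
        threads += minthreads - (threads % minthreads);     /* round up to a work-group multiple */
    return threads;
}

int main(void)
{
    /* hypothetical device: 2048 shaders (32 CUs x 64), work-group size 256 */
    for (int i = 13; i <= 20; i++)
        printf("intensity %2d -> %zu threads\n", i, nscrypt_threads(2048, 256, i));
    return 0;
}
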
@@ -1320,6 +1343,9 @@ static bool opencl_thread_prepare(struct thr_info *thr)
case KL_PSW:
cgpu->kname = PSW_KERNNAME;
break;
case KL_NSCRYPT:
cgpu->kname = NSCRYPT_KERNNAME;
break;
default:
break;
}
@@ -1353,6 +1379,7 @@ static bool opencl_thread_init(struct thr_info *thr)
case KL_CKOLIVAS:
case KL_PSW:
case KL_ZUIKKIS:
case KL_NSCRYPT:
thrdata->queue_kernel_parameters = &queue_scrypt_kernel;
break;
default:
@@ -1426,7 +1453,7 @@ static int64_t opencl_scanhash(struct thr_info *thr, struct work *work,
gpu->intervals = 0;
}
set_threads_hashes(clState->vwidth, clState->compute_shaders, &hashes, globalThreads, localThreads[0],
set_threads_hashes(clState->vwidth, gpu->shaders ? gpu->shaders : clState->compute_shaders, &hashes, globalThreads, localThreads[0],
&gpu->intensity, &gpu->xintensity, &gpu->rawintensity);
if (hashes > gpu->max_hashes)
gpu->max_hashes = hashes;
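
The queue_scrypt_kernel() hunk above pulls the block timestamp out of the work data (offset 17*4 bytes), byte-swaps it, and turns it into the nFactor argument that is appended after the target only when the nscrypt kernel is in use. A stripped-down sketch of that host-side step; vert_GetNfactor() is the helper added in util.c further down, and the +1 appears to convert Vertcoin's N-factor convention (N = 2^(nFactor+1)) into the log2(N) value the kernel expects, so plain scrypt's N = 1024 keeps the default of 10. The sketch is meant to be linked against util.c and is not a drop-in replacement for the queue function:

#include <stdint.h>
#include <string.h>

unsigned char vert_GetNfactor(const long int nTimestamp);   /* the helper from util.c below */

static uint32_t bswap32(uint32_t x)                         /* stand-in for bswap_32 in the diff */
{
    return (x >> 24) | ((x >> 8) & 0x0000ff00U) | ((x << 8) & 0x00ff0000U) | (x << 24);
}

/* work_data stands in for blk->work->data; use_nscrypt mirrors the global flag */
uint32_t pick_nfactor(const unsigned char *work_data, int use_nscrypt)
{
    uint32_t nfactor = 10;                                  /* scrypt default, N = 1024 */
    if (use_nscrypt) {
        uint32_t ts;
        memcpy(&ts, work_data + 17 * 4, sizeof(ts));        /* timestamp field of the header */
        nfactor = vert_GetNfactor(bswap32(ts)) + 1;         /* log2(N) handed to the kernel */
    }
    return nfactor;                                         /* passed on via CL_SET_ARG(nfactor) */
}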

kernel/nscrypt.cl (982)

@@ -0,0 +1,982 @@
/*-
* Copyright 2009 Colin Percival, 2011 ArtForz, 2011 pooler, 2012 mtrlt,
* 2012-2013 Con Kolivas.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* This file was originally written by Colin Percival as part of the Tarsnap
* online backup system.
*/
__constant uint N[] ={
0x00000000U,
0x00000001U,
0x00000003U,
0x00000007U,
0x0000000FU,
0x0000001FU,
0x0000003FU,
0x0000007FU,
0x000000FFU,
0x000001FFU,
0x000003FFU, //2^10 - 1 -> nFactor 1024
0x000007FFU,
0x00000FFFU,
0x00001FFFU,
0x00003FFFU,
0x00007FFFU,
0x0000FFFFU,
0x0001FFFFU,
0x0003FFFFU,
0x0000FFFFU,
0x000FFFFFU
};
__constant uint K[] = {
0x428a2f98U,
0x71374491U,
0xb5c0fbcfU,
0xe9b5dba5U,
0x3956c25bU,
0x59f111f1U,
0x923f82a4U,
0xab1c5ed5U,
0xd807aa98U,
0x12835b01U,
0x243185beU, // 10
0x550c7dc3U,
0x72be5d74U,
0x80deb1feU,
0x9bdc06a7U,
0xe49b69c1U,
0xefbe4786U,
0x0fc19dc6U,
0x240ca1ccU,
0x2de92c6fU,
0x4a7484aaU, // 20
0x5cb0a9dcU,
0x76f988daU,
0x983e5152U,
0xa831c66dU,
0xb00327c8U,
0xbf597fc7U,
0xc6e00bf3U,
0xd5a79147U,
0x06ca6351U,
0x14292967U, // 30
0x27b70a85U,
0x2e1b2138U,
0x4d2c6dfcU,
0x53380d13U,
0x650a7354U,
0x766a0abbU,
0x81c2c92eU,
0x92722c85U,
0xa2bfe8a1U,
0xa81a664bU, // 40
0xc24b8b70U,
0xc76c51a3U,
0xd192e819U,
0xd6990624U,
0xf40e3585U,
0x106aa070U,
0x19a4c116U,
0x1e376c08U,
0x2748774cU,
0x34b0bcb5U, // 50
0x391c0cb3U,
0x4ed8aa4aU,
0x5b9cca4fU,
0x682e6ff3U,
0x748f82eeU,
0x78a5636fU,
0x84c87814U,
0x8cc70208U,
0x90befffaU,
0xa4506cebU, // 60
0xbef9a3f7U,
0xc67178f2U,
0x98c7e2a2U,
0xfc08884dU,
0xcd2a11aeU,
0x510e527fU,
0x9b05688cU,
0xC3910C8EU,
0xfb6feee7U,
0x2a01a605U, // 70
0x0c2e12e0U,
0x4498517BU,
0x6a09e667U,
0xa4ce148bU,
0x95F61999U,
0xc19bf174U,
0xBB67AE85U,
0x3C6EF372U,
0xA54FF53AU,
0x1F83D9ABU, // 80
0x5BE0CD19U,
0x5C5C5C5CU,
0x36363636U,
0x80000000U,
0x000007FFU,
0x00000280U,
0x000004a0U,
0x00000300U
};
#define rotl(x,y) rotate(x,y)
#define Ch(x,y,z) bitselect(z,y,x)
#define Maj(x,y,z) Ch((x^z),y,z)
#define mod2(x,y) (x & (y-1))
#define mod4(x) (x & 3)
#define EndianSwap(n) (rotl(n&0x00FF00FF,24U)|rotl(n&0xFF00FF00,8U))
#define Tr2(x) (rotl(x, 30U) ^ rotl(x, 19U) ^ rotl(x, 10U))
#define Tr1(x) (rotl(x, 26U) ^ rotl(x, 21U) ^ rotl(x, 7U))
#define Wr2(x) (rotl(x, 25U) ^ rotl(x, 14U) ^ (x>>3U))
#define Wr1(x) (rotl(x, 15U) ^ rotl(x, 13U) ^ (x>>10U))
#define RND(a, b, c, d, e, f, g, h, k) \
h += Tr1(e) + Ch(e, f, g) + k; \
d += h; \
h += Tr2(a) + Maj(a, b, c);
#define WUpdate(i) { \
uint4 tmp1 = (uint4) (W[mod4(i)].y, W[mod4(i)].z, W[mod4(i)].w, W[mod4(i+1)].x); \
uint4 tmp2 = (uint4) (W[mod4(i+2)].y, W[mod4(i+2)].z, W[mod4(i+2)].w, W[mod4(i+3)].x); \
uint4 tmp3 = (uint4) (W[mod4(i+3)].z, W[mod4(i+3)].w, 0, 0); \
W[mod4(i)] += tmp2 + Wr2(tmp1) + Wr1(tmp3); \
W[mod4(i)] += Wr1((uint4) (0, 0, W[mod4(i)].x, W[mod4(i)].y)); \
}
void SHA256(uint4*restrict state0,uint4*restrict state1, const uint4 block0, const uint4 block1, const uint4 block2, const uint4 block3)
{
uint4 S0 = *state0;
uint4 S1 = *state1;
#define A S0.x
#define B S0.y
#define C S0.z
#define D S0.w
#define E S1.x
#define F S1.y
#define G S1.z
#define H S1.w
uint4 W[4] = {block0, block1, block2, block3};
RND(A,B,C,D,E,F,G,H, W[0].x+ K[0]);
RND(H,A,B,C,D,E,F,G, W[0].y+ K[1]);
RND(G,H,A,B,C,D,E,F, W[0].z+ K[2]);
RND(F,G,H,A,B,C,D,E, W[0].w+ K[3]);
RND(E,F,G,H,A,B,C,D, W[1].x+ K[4]);
RND(D,E,F,G,H,A,B,C, W[1].y+ K[5]);
RND(C,D,E,F,G,H,A,B, W[1].z+ K[6]);
RND(B,C,D,E,F,G,H,A, W[1].w+ K[7]);
RND(A,B,C,D,E,F,G,H, W[2].x+ K[8]);
RND(H,A,B,C,D,E,F,G, W[2].y+ K[9]);
RND(G,H,A,B,C,D,E,F, W[2].z+ K[10]);
RND(F,G,H,A,B,C,D,E, W[2].w+ K[11]);
RND(E,F,G,H,A,B,C,D, W[3].x+ K[12]);
RND(D,E,F,G,H,A,B,C, W[3].y+ K[13]);
RND(C,D,E,F,G,H,A,B, W[3].z+ K[14]);
RND(B,C,D,E,F,G,H,A, W[3].w+ K[76]);
WUpdate (0);
RND(A,B,C,D,E,F,G,H, W[0].x+ K[15]);
RND(H,A,B,C,D,E,F,G, W[0].y+ K[16]);
RND(G,H,A,B,C,D,E,F, W[0].z+ K[17]);
RND(F,G,H,A,B,C,D,E, W[0].w+ K[18]);
WUpdate (1);
RND(E,F,G,H,A,B,C,D, W[1].x+ K[19]);
RND(D,E,F,G,H,A,B,C, W[1].y+ K[20]);
RND(C,D,E,F,G,H,A,B, W[1].z+ K[21]);
RND(B,C,D,E,F,G,H,A, W[1].w+ K[22]);
WUpdate (2);
RND(A,B,C,D,E,F,G,H, W[2].x+ K[23]);
RND(H,A,B,C,D,E,F,G, W[2].y+ K[24]);
RND(G,H,A,B,C,D,E,F, W[2].z+ K[25]);
RND(F,G,H,A,B,C,D,E, W[2].w+ K[26]);
WUpdate (3);
RND(E,F,G,H,A,B,C,D, W[3].x+ K[27]);
RND(D,E,F,G,H,A,B,C, W[3].y+ K[28]);
RND(C,D,E,F,G,H,A,B, W[3].z+ K[29]);
RND(B,C,D,E,F,G,H,A, W[3].w+ K[30]);
WUpdate (0);
RND(A,B,C,D,E,F,G,H, W[0].x+ K[31]);
RND(H,A,B,C,D,E,F,G, W[0].y+ K[32]);
RND(G,H,A,B,C,D,E,F, W[0].z+ K[33]);
RND(F,G,H,A,B,C,D,E, W[0].w+ K[34]);
WUpdate (1);
RND(E,F,G,H,A,B,C,D, W[1].x+ K[35]);
RND(D,E,F,G,H,A,B,C, W[1].y+ K[36]);
RND(C,D,E,F,G,H,A,B, W[1].z+ K[37]);
RND(B,C,D,E,F,G,H,A, W[1].w+ K[38]);
WUpdate (2);
RND(A,B,C,D,E,F,G,H, W[2].x+ K[39]);
RND(H,A,B,C,D,E,F,G, W[2].y+ K[40]);
RND(G,H,A,B,C,D,E,F, W[2].z+ K[41]);
RND(F,G,H,A,B,C,D,E, W[2].w+ K[42]);
WUpdate (3);
RND(E,F,G,H,A,B,C,D, W[3].x+ K[43]);
RND(D,E,F,G,H,A,B,C, W[3].y+ K[44]);
RND(C,D,E,F,G,H,A,B, W[3].z+ K[45]);
RND(B,C,D,E,F,G,H,A, W[3].w+ K[46]);
WUpdate (0);
RND(A,B,C,D,E,F,G,H, W[0].x+ K[47]);
RND(H,A,B,C,D,E,F,G, W[0].y+ K[48]);
RND(G,H,A,B,C,D,E,F, W[0].z+ K[49]);
RND(F,G,H,A,B,C,D,E, W[0].w+ K[50]);
WUpdate (1);
RND(E,F,G,H,A,B,C,D, W[1].x+ K[51]);
RND(D,E,F,G,H,A,B,C, W[1].y+ K[52]);
RND(C,D,E,F,G,H,A,B, W[1].z+ K[53]);
RND(B,C,D,E,F,G,H,A, W[1].w+ K[54]);
WUpdate (2);
RND(A,B,C,D,E,F,G,H, W[2].x+ K[55]);
RND(H,A,B,C,D,E,F,G, W[2].y+ K[56]);
RND(G,H,A,B,C,D,E,F, W[2].z+ K[57]);
RND(F,G,H,A,B,C,D,E, W[2].w+ K[58]);
WUpdate (3);
RND(E,F,G,H,A,B,C,D, W[3].x+ K[59]);
RND(D,E,F,G,H,A,B,C, W[3].y+ K[60]);
RND(C,D,E,F,G,H,A,B, W[3].z+ K[61]);
RND(B,C,D,E,F,G,H,A, W[3].w+ K[62]);
#undef A
#undef B
#undef C
#undef D
#undef E
#undef F
#undef G
#undef H
*state0 += S0;
*state1 += S1;
}
void SHA256_fresh(uint4*restrict state0,uint4*restrict state1, const uint4 block0, const uint4 block1, const uint4 block2, const uint4 block3)
{
#define A (*state0).x
#define B (*state0).y
#define C (*state0).z
#define D (*state0).w
#define E (*state1).x
#define F (*state1).y
#define G (*state1).z
#define H (*state1).w
uint4 W[4] = {block0, block1, block2, block3};
D= K[63] +W[0].x;
H= K[64] +W[0].x;
C= K[65] +Tr1(D)+Ch(D, K[66], K[67])+W[0].y;
G= K[68] +C+Tr2(H)+Ch(H, K[69] ,K[70]);
B= K[71] +Tr1(C)+Ch(C,D,K[66])+W[0].z;
F= K[72] +B+Tr2(G)+Maj(G,H, K[73]);
A= K[74] +Tr1(B)+Ch(B,C,D)+W[0].w;
E= K[75] +A+Tr2(F)+Maj(F,G,H);
RND(E,F,G,H,A,B,C,D, W[1].x+ K[4]);
RND(D,E,F,G,H,A,B,C, W[1].y+ K[5]);
RND(C,D,E,F,G,H,A,B, W[1].z+ K[6]);
RND(B,C,D,E,F,G,H,A, W[1].w+ K[7]);
RND(A,B,C,D,E,F,G,H, W[2].x+ K[8]);
RND(H,A,B,C,D,E,F,G, W[2].y+ K[9]);
RND(G,H,A,B,C,D,E,F, W[2].z+ K[10]);
RND(F,G,H,A,B,C,D,E, W[2].w+ K[11]);
RND(E,F,G,H,A,B,C,D, W[3].x+ K[12]);
RND(D,E,F,G,H,A,B,C, W[3].y+ K[13]);
RND(C,D,E,F,G,H,A,B, W[3].z+ K[14]);
RND(B,C,D,E,F,G,H,A, W[3].w+ K[76]);
WUpdate (0);
RND(A,B,C,D,E,F,G,H, W[0].x+ K[15]);
RND(H,A,B,C,D,E,F,G, W[0].y+ K[16]);
RND(G,H,A,B,C,D,E,F, W[0].z+ K[17]);
RND(F,G,H,A,B,C,D,E, W[0].w+ K[18]);
WUpdate (1);
RND(E,F,G,H,A,B,C,D, W[1].x+ K[19]);
RND(D,E,F,G,H,A,B,C, W[1].y+ K[20]);
RND(C,D,E,F,G,H,A,B, W[1].z+ K[21]);
RND(B,C,D,E,F,G,H,A, W[1].w+ K[22]);
WUpdate (2);
RND(A,B,C,D,E,F,G,H, W[2].x+ K[23]);
RND(H,A,B,C,D,E,F,G, W[2].y+ K[24]);
RND(G,H,A,B,C,D,E,F, W[2].z+ K[25]);
RND(F,G,H,A,B,C,D,E, W[2].w+ K[26]);
WUpdate (3);
RND(E,F,G,H,A,B,C,D, W[3].x+ K[27]);
RND(D,E,F,G,H,A,B,C, W[3].y+ K[28]);
RND(C,D,E,F,G,H,A,B, W[3].z+ K[29]);
RND(B,C,D,E,F,G,H,A, W[3].w+ K[30]);
WUpdate (0);
RND(A,B,C,D,E,F,G,H, W[0].x+ K[31]);
RND(H,A,B,C,D,E,F,G, W[0].y+ K[32]);
RND(G,H,A,B,C,D,E,F, W[0].z+ K[33]);
RND(F,G,H,A,B,C,D,E, W[0].w+ K[34]);
WUpdate (1);
RND(E,F,G,H,A,B,C,D, W[1].x+ K[35]);
RND(D,E,F,G,H,A,B,C, W[1].y+ K[36]);
RND(C,D,E,F,G,H,A,B, W[1].z+ K[37]);
RND(B,C,D,E,F,G,H,A, W[1].w+ K[38]);
WUpdate (2);
RND(A,B,C,D,E,F,G,H, W[2].x+ K[39]);
RND(H,A,B,C,D,E,F,G, W[2].y+ K[40]);
RND(G,H,A,B,C,D,E,F, W[2].z+ K[41]);
RND(F,G,H,A,B,C,D,E, W[2].w+ K[42]);
WUpdate (3);
RND(E,F,G,H,A,B,C,D, W[3].x+ K[43]);
RND(D,E,F,G,H,A,B,C, W[3].y+ K[44]);
RND(C,D,E,F,G,H,A,B, W[3].z+ K[45]);
RND(B,C,D,E,F,G,H,A, W[3].w+ K[46]);
WUpdate (0);
RND(A,B,C,D,E,F,G,H, W[0].x+ K[47]);
RND(H,A,B,C,D,E,F,G, W[0].y+ K[48]);
RND(G,H,A,B,C,D,E,F, W[0].z+ K[49]);
RND(F,G,H,A,B,C,D,E, W[0].w+ K[50]);
WUpdate (1);
RND(E,F,G,H,A,B,C,D, W[1].x+ K[51]);
RND(D,E,F,G,H,A,B,C, W[1].y+ K[52]);
RND(C,D,E,F,G,H,A,B, W[1].z+ K[53]);
RND(B,C,D,E,F,G,H,A, W[1].w+ K[54]);
WUpdate (2);
RND(A,B,C,D,E,F,G,H, W[2].x+ K[55]);
RND(H,A,B,C,D,E,F,G, W[2].y+ K[56]);
RND(G,H,A,B,C,D,E,F, W[2].z+ K[57]);
RND(F,G,H,A,B,C,D,E, W[2].w+ K[58]);
WUpdate (3);
RND(E,F,G,H,A,B,C,D, W[3].x+ K[59]);
RND(D,E,F,G,H,A,B,C, W[3].y+ K[60]);
RND(C,D,E,F,G,H,A,B, W[3].z+ K[61]);
RND(B,C,D,E,F,G,H,A, W[3].w+ K[62]);
/*
W[0].x += Wr1(W[3].z) + W[2].y + Wr2(W[0].y);
RND(A,B,C,D,E,F,G,H, W[0].x+ K[15]);
W[0].y += Wr1(W[3].w) + W[2].z + Wr2(W[0].z);
RND(H,A,B,C,D,E,F,G, W[0].y+ K[16]);
W[0].z += Wr1(W[0].x) + W[2].w + Wr2(W[0].w);
RND(G,H,A,B,C,D,E,F, W[0].z+ K[17]);
W[0].w += Wr1(W[0].y) + W[3].x + Wr2(W[1].x);
RND(F,G,H,A,B,C,D,E, W[0].w+ K[18]);
W[1].x += Wr1(W[0].z) + W[3].y + Wr2(W[1].y);
RND(E,F,G,H,A,B,C,D, W[1].x+ K[19]);
W[1].y += Wr1(W[0].w) + W[3].z + Wr2(W[1].z);
RND(D,E,F,G,H,A,B,C, W[1].y+ K[20]);
W[1].z += Wr1(W[1].x) + W[3].w + Wr2(W[1].w);
RND(C,D,E,F,G,H,A,B, W[1].z+ K[21]);
W[1].w += Wr1(W[1].y) + W[0].x + Wr2(W[2].x);
RND(B,C,D,E,F,G,H,A, W[1].w+ K[22]);
W[2].x += Wr1(W[1].z) + W[0].y + Wr2(W[2].y);
RND(A,B,C,D,E,F,G,H, W[2].x+ K[23]);
W[2].y += Wr1(W[1].w) + W[0].z + Wr2(W[2].z);
RND(H,A,B,C,D,E,F,G, W[2].y+ K[24]);
W[2].z += Wr1(W[2].x) + W[0].w + Wr2(W[2].w);
RND(G,H,A,B,C,D,E,F, W[2].z+ K[25]);
W[2].w += Wr1(W[2].y) + W[1].x + Wr2(W[3].x);
RND(F,G,H,A,B,C,D,E, W[2].w+ K[26]);
W[3].x += Wr1(W[2].z) + W[1].y + Wr2(W[3].y);
RND(E,F,G,H,A,B,C,D, W[3].x+ K[27]);
W[3].y += Wr1(W[2].w) + W[1].z + Wr2(W[3].z);
RND(D,E,F,G,H,A,B,C, W[3].y+ K[28]);
W[3].z += Wr1(W[3].x) + W[1].w + Wr2(W[3].w);
RND(C,D,E,F,G,H,A,B, W[3].z+ K[29]);
W[3].w += Wr1(W[3].y) + W[2].x + Wr2(W[0].x);
RND(B,C,D,E,F,G,H,A, W[3].w+ K[30]);
W[0].x += Wr1(W[3].z) + W[2].y + Wr2(W[0].y);
RND(A,B,C,D,E,F,G,H, W[0].x+ K[31]);
W[0].y += Wr1(W[3].w) + W[2].z + Wr2(W[0].z);
RND(H,A,B,C,D,E,F,G, W[0].y+ K[32]);
W[0].z += Wr1(W[0].x) + W[2].w + Wr2(W[0].w);
RND(G,H,A,B,C,D,E,F, W[0].z+ K[33]);
W[0].w += Wr1(W[0].y) + W[3].x + Wr2(W[1].x);
RND(F,G,H,A,B,C,D,E, W[0].w+ K[34]);
W[1].x += Wr1(W[0].z) + W[3].y + Wr2(W[1].y);
RND(E,F,G,H,A,B,C,D, W[1].x+ K[35]);
W[1].y += Wr1(W[0].w) + W[3].z + Wr2(W[1].z);
RND(D,E,F,G,H,A,B,C, W[1].y+ K[36]);
W[1].z += Wr1(W[1].x) + W[3].w + Wr2(W[1].w);
RND(C,D,E,F,G,H,A,B, W[1].z+ K[37]);
W[1].w += Wr1(W[1].y) + W[0].x + Wr2(W[2].x);
RND(B,C,D,E,F,G,H,A, W[1].w+ K[38]);
W[2].x += Wr1(W[1].z) + W[0].y + Wr2(W[2].y);
RND(A,B,C,D,E,F,G,H, W[2].x+ K[39]);
W[2].y += Wr1(W[1].w) + W[0].z + Wr2(W[2].z);
RND(H,A,B,C,D,E,F,G, W[2].y+ K[40]);
W[2].z += Wr1(W[2].x) + W[0].w + Wr2(W[2].w);
RND(G,H,A,B,C,D,E,F, W[2].z+ K[41]);
W[2].w += Wr1(W[2].y) + W[1].x + Wr2(W[3].x);
RND(F,G,H,A,B,C,D,E, W[2].w+ K[42]);
W[3].x += Wr1(W[2].z) + W[1].y + Wr2(W[3].y);
RND(E,F,G,H,A,B,C,D, W[3].x+ K[43]);
W[3].y += Wr1(W[2].w) + W[1].z + Wr2(W[3].z);
RND(D,E,F,G,H,A,B,C, W[3].y+ K[44]);
W[3].z += Wr1(W[3].x) + W[1].w + Wr2(W[3].w);
RND(C,D,E,F,G,H,A,B, W[3].z+ K[45]);
W[3].w += Wr1(W[3].y) + W[2].x + Wr2(W[0].x);
RND(B,C,D,E,F,G,H,A, W[3].w+ K[46]);
W[0].x += Wr1(W[3].z) + W[2].y + Wr2(W[0].y);
RND(A,B,C,D,E,F,G,H, W[0].x+ K[47]);
W[0].y += Wr1(W[3].w) + W[2].z + Wr2(W[0].z);
RND(H,A,B,C,D,E,F,G, W[0].y+ K[48]);
W[0].z += Wr1(W[0].x) + W[2].w + Wr2(W[0].w);
RND(G,H,A,B,C,D,E,F, W[0].z+ K[49]);
W[0].w += Wr1(W[0].y) + W[3].x + Wr2(W[1].x);
RND(F,G,H,A,B,C,D,E, W[0].w+ K[50]);
W[1].x += Wr1(W[0].z) + W[3].y + Wr2(W[1].y);
RND(E,F,G,H,A,B,C,D, W[1].x+ K[51]);
W[1].y += Wr1(W[0].w) + W[3].z + Wr2(W[1].z);
RND(D,E,F,G,H,A,B,C, W[1].y+ K[52]);
W[1].z += Wr1(W[1].x) + W[3].w + Wr2(W[1].w);
RND(C,D,E,F,G,H,A,B, W[1].z+ K[53]);
W[1].w += Wr1(W[1].y) + W[0].x + Wr2(W[2].x);
RND(B,C,D,E,F,G,H,A, W[1].w+ K[54]);
W[2].x += Wr1(W[1].z) + W[0].y + Wr2(W[2].y);
RND(A,B,C,D,E,F,G,H, W[2].x+ K[55]);
W[2].y += Wr1(W[1].w) + W[0].z + Wr2(W[2].z);
RND(H,A,B,C,D,E,F,G, W[2].y+ K[56]);
W[2].z += Wr1(W[2].x) + W[0].w + Wr2(W[2].w);
RND(G,H,A,B,C,D,E,F, W[2].z+ K[57]);
W[2].w += Wr1(W[2].y) + W[1].x + Wr2(W[3].x);
RND(F,G,H,A,B,C,D,E, W[2].w+ K[58]);
W[3].x += Wr1(W[2].z) + W[1].y + Wr2(W[3].y);
RND(E,F,G,H,A,B,C,D, W[3].x+ K[59]);
W[3].y += Wr1(W[2].w) + W[1].z + Wr2(W[3].z);
RND(D,E,F,G,H,A,B,C, W[3].y+ K[60]);
W[3].z += Wr1(W[3].x) + W[1].w + Wr2(W[3].w);
RND(C,D,E,F,G,H,A,B, W[3].z+ K[61]);
W[3].w += Wr1(W[3].y) + W[2].x + Wr2(W[0].x);
RND(B,C,D,E,F,G,H,A, W[3].w+ K[62]);
*/
#undef A
#undef B
#undef C
#undef D
#undef E
#undef F
#undef G
#undef H
*state0 += (uint4)(K[73], K[77], K[78], K[79]);
*state1 += (uint4)(K[66], K[67], K[80], K[81]);
}
__constant uint fixedW[64] =
{
0x428a2f99,0xf1374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf794,
0xf59b89c2,0x73924787,0x23c6886e,0xa42ca65c,0x15ed3627,0x4d6edcbf,0xe28217fc,0xef02488f,
0xb707775c,0x0468c23f,0xe7e72b4c,0x49e1f1a2,0x4b99c816,0x926d1570,0xaa0fc072,0xadb36e2c,
0xad87a3ea,0xbcb1d3a3,0x7b993186,0x562b9420,0xbff3ca0c,0xda4b0c23,0x6cd8711a,0x8f337caa,
0xc91b1417,0xc359dce1,0xa83253a7,0x3b13c12d,0x9d3d725d,0xd9031a84,0xb1a03340,0x16f58012,
0xe64fb6a2,0xe84d923a,0xe93a5730,0x09837686,0x078ff753,0x29833341,0xd5de0b7e,0x6948ccf4,
0xe0a1adbe,0x7c728e11,0x511c78e4,0x315b45bd,0xfca71413,0xea28f96a,0x79703128,0x4e1ef848,
};
void SHA256_fixed(uint4*restrict state0,uint4*restrict state1)
{
uint4 S0 = *state0;
uint4 S1 = *state1;
#define A S0.x
#define B S0.y
#define C S0.z
#define D S0.w
#define E S1.x
#define F S1.y
#define G S1.z
#define H S1.w
RND(A,B,C,D,E,F,G,H, fixedW[0]);
RND(H,A,B,C,D,E,F,G, fixedW[1]);
RND(G,H,A,B,C,D,E,F, fixedW[2]);
RND(F,G,H,A,B,C,D,E, fixedW[3]);
RND(E,F,G,H,A,B,C,D, fixedW[4]);
RND(D,E,F,G,H,A,B,C, fixedW[5]);
RND(C,D,E,F,G,H,A,B, fixedW[6]);
RND(B,C,D,E,F,G,H,A, fixedW[7]);
RND(A,B,C,D,E,F,G,H, fixedW[8]);
RND(H,A,B,C,D,E,F,G, fixedW[9]);
RND(G,H,A,B,C,D,E,F, fixedW[10]);
RND(F,G,H,A,B,C,D,E, fixedW[11]);
RND(E,F,G,H,A,B,C,D, fixedW[12]);
RND(D,E,F,G,H,A,B,C, fixedW[13]);
RND(C,D,E,F,G,H,A,B, fixedW[14]);
RND(B,C,D,E,F,G,H,A, fixedW[15]);
RND(A,B,C,D,E,F,G,H, fixedW[16]);
RND(H,A,B,C,D,E,F,G, fixedW[17]);
RND(G,H,A,B,C,D,E,F, fixedW[18]);
RND(F,G,H,A,B,C,D,E, fixedW[19]);
RND(E,F,G,H,A,B,C,D, fixedW[20]);
RND(D,E,F,G,H,A,B,C, fixedW[21]);
RND(C,D,E,F,G,H,A,B, fixedW[22]);
RND(B,C,D,E,F,G,H,A, fixedW[23]);
RND(A,B,C,D,E,F,G,H, fixedW[24]);
RND(H,A,B,C,D,E,F,G, fixedW[25]);
RND(G,H,A,B,C,D,E,F, fixedW[26]);
RND(F,G,H,A,B,C,D,E, fixedW[27]);
RND(E,F,G,H,A,B,C,D, fixedW[28]);
RND(D,E,F,G,H,A,B,C, fixedW[29]);
RND(C,D,E,F,G,H,A,B, fixedW[30]);
RND(B,C,D,E,F,G,H,A, fixedW[31]);
RND(A,B,C,D,E,F,G,H, fixedW[32]);
RND(H,A,B,C,D,E,F,G, fixedW[33]);
RND(G,H,A,B,C,D,E,F, fixedW[34]);
RND(F,G,H,A,B,C,D,E, fixedW[35]);
RND(E,F,G,H,A,B,C,D, fixedW[36]);
RND(D,E,F,G,H,A,B,C, fixedW[37]);
RND(C,D,E,F,G,H,A,B, fixedW[38]);
RND(B,C,D,E,F,G,H,A, fixedW[39]);
RND(A,B,C,D,E,F,G,H, fixedW[40]);
RND(H,A,B,C,D,E,F,G, fixedW[41]);
RND(G,H,A,B,C,D,E,F, fixedW[42]);
RND(F,G,H,A,B,C,D,E, fixedW[43]);
RND(E,F,G,H,A,B,C,D, fixedW[44]);
RND(D,E,F,G,H,A,B,C, fixedW[45]);
RND(C,D,E,F,G,H,A,B, fixedW[46]);
RND(B,C,D,E,F,G,H,A, fixedW[47]);
RND(A,B,C,D,E,F,G,H, fixedW[48]);
RND(H,A,B,C,D,E,F,G, fixedW[49]);
RND(G,H,A,B,C,D,E,F, fixedW[50]);
RND(F,G,H,A,B,C,D,E, fixedW[51]);
RND(E,F,G,H,A,B,C,D, fixedW[52]);
RND(D,E,F,G,H,A,B,C, fixedW[53]);
RND(C,D,E,F,G,H,A,B, fixedW[54]);
RND(B,C,D,E,F,G,H,A, fixedW[55]);
RND(A,B,C,D,E,F,G,H, fixedW[56]);
RND(H,A,B,C,D,E,F,G, fixedW[57]);
RND(G,H,A,B,C,D,E,F, fixedW[58]);
RND(F,G,H,A,B,C,D,E, fixedW[59]);
RND(E,F,G,H,A,B,C,D, fixedW[60]);
RND(D,E,F,G,H,A,B,C, fixedW[61]);
RND(C,D,E,F,G,H,A,B, fixedW[62]);
RND(B,C,D,E,F,G,H,A, fixedW[63]);
#undef A
#undef B
#undef C
#undef D
#undef E
#undef F
#undef G
#undef H
*state0 += S0;
*state1 += S1;
}
void shittify(uint4 B[8])
{
uint4 tmp[8];
tmp[0] = (uint4)(B[1].x,B[2].y,B[3].z,B[0].w);
tmp[1] = (uint4)(B[2].x,B[3].y,B[0].z,B[1].w);
tmp[2] = (uint4)(B[3].x,B[0].y,B[1].z,B[2].w);
tmp[3] = (uint4)(B[0].x,B[1].y,B[2].z,B[3].w);
tmp[4] = (uint4)(B[5].x,B[6].y,B[7].z,B[4].w);
tmp[5] = (uint4)(B[6].x,B[7].y,B[4].z,B[5].w);
tmp[6] = (uint4)(B[7].x,B[4].y,B[5].z,B[6].w);
tmp[7] = (uint4)(B[4].x,B[5].y,B[6].z,B[7].w);
#pragma unroll 8
for(uint i=0; i<8; ++i)
B[i] = EndianSwap(tmp[i]);
}
void unshittify(uint4 B[8])
{
uint4 tmp[8];
tmp[0] = (uint4)(B[3].x,B[2].y,B[1].z,B[0].w);
tmp[1] = (uint4)(B[0].x,B[3].y,B[2].z,B[1].w);
tmp[2] = (uint4)(B[1].x,B[0].y,B[3].z,B[2].w);
tmp[3] = (uint4)(B[2].x,B[1].y,B[0].z,B[3].w);
tmp[4] = (uint4)(B[7].x,B[6].y,B[5].z,B[4].w);
tmp[5] = (uint4)(B[4].x,B[7].y,B[6].z,B[5].w);
tmp[6] = (uint4)(B[5].x,B[4].y,B[7].z,B[6].w);
tmp[7] = (uint4)(B[6].x,B[5].y,B[4].z,B[7].w);
#pragma unroll 8
for(uint i=0; i<8; ++i)
B[i] = EndianSwap(tmp[i]);
}
void salsa(uint4 B[8])
{
uint i;
uint4 w[4];
#pragma unroll 4
for(i=0; i<4; ++i)
w[i] = (B[i]^=B[i+4]);
#pragma unroll 4
for(i=0; i<4; ++i)
{
w[0] ^= rotl(w[3] +w[2] , 7U);
w[1] ^= rotl(w[0] +w[3] , 9U);
w[2] ^= rotl(w[1] +w[0] ,13U);
w[3] ^= rotl(w[2] +w[1] ,18U);
w[2] ^= rotl(w[3].wxyz+w[0].zwxy, 7U);
w[1] ^= rotl(w[2].wxyz+w[3].zwxy, 9U);
w[0] ^= rotl(w[1].wxyz+w[2].zwxy,13U);
w[3] ^= rotl(w[0].wxyz+w[1].zwxy,18U);
}
#pragma unroll 4
for(i=0; i<4; ++i)
w[i] = (B[i+4]^=(B[i]+=w[i]));
#pragma unroll 4
for(i=0; i<4; ++i)
{
w[0] ^= rotl(w[3] +w[2] , 7U);
w[1] ^= rotl(w[0] +w[3] , 9U);
w[2] ^= rotl(w[1] +w[0] ,13U);
w[3] ^= rotl(w[2] +w[1] ,18U);
w[2] ^= rotl(w[3].wxyz+w[0].zwxy, 7U);
w[1] ^= rotl(w[2].wxyz+w[3].zwxy, 9U);
w[0] ^= rotl(w[1].wxyz+w[2].zwxy,13U);
w[3] ^= rotl(w[0].wxyz+w[1].zwxy,18U);
}
#pragma unroll 4
for(i=0; i<4; ++i)
B[i+4] += w[i];
}
void salsa_double(uint4 B[8])
{
uint i;
uint4 w[4];
#pragma unroll 4
for(i=0; i<4; ++i)
w[i] = (B[i]^=B[i+4]);
#pragma unroll 4
for(i=0; i<4; ++i)
{
w[0] ^= rotl(w[3] +w[2] , 7U);
w[1] ^= rotl(w[0] +w[3] , 9U);
w[2] ^= rotl(w[1] +w[0] ,13U);
w[3] ^= rotl(w[2] +w[1] ,18U);
w[2] ^= rotl(w[3].wxyz+w[0].zwxy, 7U);
w[1] ^= rotl(w[2].wxyz+w[3].zwxy, 9U);
w[0] ^= rotl(w[1].wxyz+w[2].zwxy,13U);
w[3] ^= rotl(w[0].wxyz+w[1].zwxy,18U);
}
#pragma unroll 4
for(i=0; i<4; ++i)
w[i] = (B[i+4]^=(B[i]+=w[i]));
#pragma unroll 4
for(i=0; i<4; ++i)
{
w[0] ^= rotl(w[3] +w[2] , 7U);
w[1] ^= rotl(w[0] +w[3] , 9U);
w[2] ^= rotl(w[1] +w[0] ,13U);
w[3] ^= rotl(w[2] +w[1] ,18U);
w[2] ^= rotl(w[3].wxyz+w[0].zwxy, 7U);
w[1] ^= rotl(w[2].wxyz+w[3].zwxy, 9U);
w[0] ^= rotl(w[1].wxyz+w[2].zwxy,13U);
w[3] ^= rotl(w[0].wxyz+w[1].zwxy,18U);
}
#pragma unroll 4
for(i=0; i<4; ++i)
B[i+4] += w[i];
#pragma unroll 4
for(i=0; i<4; ++i)
w[i] = (B[i]^=B[i+4]);
#pragma unroll 4
for(i=0; i<4; ++i)
{
w[0] ^= rotl(w[3] +w[2] , 7U);
w[1] ^= rotl(w[0] +w[3] , 9U);
w[2] ^= rotl(w[1] +w[0] ,13U);
w[3] ^= rotl(w[2] +w[1] ,18U);
w[2] ^= rotl(w[3].wxyz+w[0].zwxy, 7U);
w[1] ^= rotl(w[2].wxyz+w[3].zwxy, 9U);
w[0] ^= rotl(w[1].wxyz+w[2].zwxy,13U);
w[3] ^= rotl(w[0].wxyz+w[1].zwxy,18U);
}
#pragma unroll 4
for(i=0; i<4; ++i)
w[i] = (B[i+4]^=(B[i]+=w[i]));
#pragma unroll 4
for(i=0; i<4; ++i)
{
w[0] ^= rotl(w[3] +w[2] , 7U);
w[1] ^= rotl(w[0] +w[3] , 9U);
w[2] ^= rotl(w[1] +w[0] ,13U);
w[3] ^= rotl(w[2] +w[1] ,18U);
w[2] ^= rotl(w[3].wxyz+w[0].zwxy, 7U);
w[1] ^= rotl(w[2].wxyz+w[3].zwxy, 9U);
w[0] ^= rotl(w[1].wxyz+w[2].zwxy,13U);
w[3] ^= rotl(w[0].wxyz+w[1].zwxy,18U);
}
#pragma unroll 4
for(i=0; i<4; ++i)
B[i+4] += w[i];
}
//#define Coord(x,y,z) z+x*(z ## SIZE)+y*(x ## SIZE)*(z ## SIZE)
//#define CO Coord(x,y,z)
// z + x * (z ## SIZE) + y * (x ## SIZE) * (z ## SIZE)
//#define Coord(y,z) z + ((coor ## SIZE).w << (coor ## SIZE).z) + y * ((coor ## SIZE).x << (coor ## SIZE).z)
//#define CO Coord(y,z)
void scrypt_core(const uint gid, uint4 X[8], __global uint4*restrict lookup, const uint n)
{
const uint4 coorSIZE = (uint4)(CONCURRENT_THREADS, ((N[n]+1)/LOOKUP_GAP+((N[n]+1)%LOOKUP_GAP>0)), 3, (gid%CONCURRENT_THREADS));
uint4 V[8];
uint i=0, y=0, z=0;
uint COx=coorSIZE.w<<3U;
uint COy=coorSIZE.x<<3U;
uint COz=0;
#if (LOOKUP_GAP > 2)
uint j = 0;
#endif
shittify(X);
// write lookup table to memory
do {
COz = COx;
#pragma unroll 8
for(z=0; z<(1<<coorSIZE.z); ++z, ++COz)
lookup[COz] = X[z];
i = 0;
#if ((LOOKUP_GAP == 2) || (LOOKUP_GAP == 4) || (LOOKUP_GAP == 8))
do {
salsa_double(X);
} while (++i < (LOOKUP_GAP>>1));
#else
do {
salsa(&X);
} while (++i < LOOKUP_GAP);
#endif
COx += COy;
} while (++y < coorSIZE.y);
COy = coorSIZE.w<<3U;
// write something more
#if (LOOKUP_GAP != 1) && (LOOKUP_GAP != 2) && (LOOKUP_GAP != 4) && (LOOKUP_GAP != 8)
{
y = ((N[n]+1)/LOOKUP_GAP);
COx = COy + (coorSIZE.w*y)<<3U;
#pragma unroll 8
for(z=0; z<(1<<coorSIZE.z); ++z, ++COx)
lookup[COx] = X[z];
#pragma unroll
for(i=0; i<(N[n]+1)%LOOKUP_GAP; ++i)
salsa(X);
}
#endif
// read lookup table from memory
i = 0;
do {
#if ((LOOKUP_GAP == 2) || (LOOKUP_GAP == 4) || (LOOKUP_GAP == 8))
y = ((X[7].x & N[n]) >> (LOOKUP_GAP>>1));
#else
y = ((X[7].x & N[n])/LOOKUP_GAP);
#endif
COz = COy + ((coorSIZE.x*y) << 3U);
#pragma unroll 8
for(z=0; z<(1<<coorSIZE.z); ++z, ++COz)
V[z] = lookup[COz];
#if (LOOKUP_GAP == 2)
if (X[7].x&1)
salsa(V);
#elif ((LOOKUP_GAP == 4) || (LOOKUP_GAP == 8))
j = 0;
while (j++ < mod2(X[7].x, LOOKUP_GAP)) {
salsa(V);
}
#else
j = 0;
while (j++ < (X[7].x & N[n])%LOOKUP_GAP) {
salsa(V);
}
#endif
#pragma unroll 8
for(z=0; z<(1<<coorSIZE.z); ++z)
X[z] ^= V[z];
salsa(X);
} while (++i <= N[n]);
unshittify(X);
}
#define SCRYPT_FOUND (0xFF)
#define SETFOUND(Xnonce) output[output[SCRYPT_FOUND]++] = Xnonce
__attribute__((reqd_work_group_size(WORKSIZE, 1, 1)))
__kernel void search(__global const uint4 * restrict input,
volatile __global uint*restrict output, __global uint4*restrict padcache,
const uint4 midstate0, const uint4 midstate16, const uint target, const uint nFactor)
{
uint gid = get_global_id(0);
uint4 X[8];
uint4 tstate0, tstate1, ostate0, ostate1, tmp0, tmp1;
uint4 data = (uint4)(input[4].x, input[4].y, input[4].z, gid);
uint4 pad0 = midstate0, pad1 = midstate16;
SHA256(&pad0, &pad1, data, (uint4)(K[84],0,0,0), (uint4)(0,0,0,0), (uint4)(0,0,0, K[86]));
SHA256_fresh(&ostate0, &ostate1, pad0^ K[82], pad1^ K[82], K[82], K[82]);
SHA256_fresh(&tstate0, &tstate1, pad0^ K[83], pad1^ K[83], K[83], K[83]);
tmp0 = tstate0;
tmp1 = tstate1;
SHA256(&tstate0, &tstate1, input[0],input[1],input[2],input[3]);
#pragma unroll 4
for (uint i=0; i<4; i++)
{
pad0 = tstate0;
pad1 = tstate1;
X[(i<<1) ] = ostate0;
X[(i<<1)+1] = ostate1;
SHA256(&pad0, &pad1, data, (uint4)(i+1,K[84],0,0), (uint4)(0,0,0,0), (uint4)(0,0,0, K[87]));
SHA256(X+(i<<1), X+(i<<1)+1, pad0, pad1, (uint4)(K[84], 0U, 0U, 0U), (uint4)(0U, 0U, 0U, K[88]));
}
// workaround for fast computation
switch (nFactor) {
case 10:
scrypt_core(gid, X, padcache, 10);
break;
case 11:
scrypt_core(gid, X, padcache, 11);
break;
case 12:
scrypt_core(gid, X, padcache, 12);
break;
}
SHA256(&tmp0,&tmp1, X[0], X[1], X[2], X[3]);
SHA256(&tmp0,&tmp1, X[4], X[5], X[6], X[7]);
SHA256_fixed(&tmp0, &tmp1);
SHA256(&ostate0, &ostate1, tmp0, tmp1, (uint4)(K[84], 0U, 0U, 0U), (uint4)(0U, 0U, 0U, K[88]));
bool result = (EndianSwap(ostate1.w) <= target);
if (EndianSwap(ostate1.w) <= target)
SETFOUND(gid);
}
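
The kernel above receives nFactor as its last argument, maps it to the bitmask table N[] (N[n] = 2^n - 1), and routes it through the switch at the end of search() so that only nFactor 10, 11 and 12 reach scrypt_core() as compile-time constants. A rough host-side sketch of the per-thread scratch footprint this implies, mirroring the ((N[n]+1)/LOOKUP_GAP + remainder) slot count and the 8 * sizeof(uint4) = 128 bytes stored per slot (all values illustrative):

#include <stdio.h>

/* bytes of padcache one thread touches for a given nfactor and lookup gap */
static unsigned long nscrypt_thread_bytes(unsigned nfactor, unsigned lookup_gap)
{
    unsigned long n_states = 1UL << nfactor;                 /* N[nfactor] + 1 */
    unsigned long slots = n_states / lookup_gap
                        + (n_states % lookup_gap ? 1 : 0);   /* rounded up, as in scrypt_core() */
    return slots * 128;                                      /* 8 uint4 per stored state */
}

int main(void)
{
    for (unsigned nf = 10; nf <= 12; nf++)                   /* the cases handled by the switch */
        printf("nFactor %u, LOOKUP_GAP 2: %lu KiB per thread\n",
               nf, nscrypt_thread_bytes(nf, 2) / 1024);
    return 0;
}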

kernels.h (6)

@@ -0,0 +1,6 @@
#define ALEXKARNEW_KERNNAME "alexkarnew"
#define ALEXKAROLD_KERNNAME "alexkarold"
#define CKOLIVAS_KERNNAME "ckolivas"
#define ZUIKKIS_KERNNAME "zuikkis"
#define PSW_KERNNAME "psw"
#define NSCRYPT_KERNNAME "nscrypt"

miner.h (3)

@@ -2,6 +2,7 @@
#define __MINER_H__
#include "config.h"
#include "kernels.h"
#include <stdbool.h>
#include <stdint.h>
@@ -383,6 +384,7 @@ enum cl_kernels {
KL_CKOLIVAS,
KL_PSW,
KL_ZUIKKIS,
KL_NSCRYPT,
};
enum dev_reason {
@@ -986,6 +988,7 @@ extern bool opt_restart;
extern bool opt_worktime;
extern int swork_id;
extern int opt_tcp_keepalive;
extern bool use_nscrypt;
#if LOCK_TRACKING
extern pthread_mutex_t lockstat_lock;

ocl.c (39)

@@ -383,9 +383,12 @@ _clState *initCl(unsigned int gpu, char *name, size_t nameSize)
applog(LOG_ERR, "Error %d: Failed to clGetDeviceInfo when trying to get CL_DEVICE_MAX_COMPUTE_UNITS", status);
return NULL;
}
// AMD architechture got 64 compute shaders per compute unit.
// Source: http://www.amd.com/us/Documents/GCN_Architecture_whitepaper.pdf
clState->compute_shaders = compute_units * 64;
if (!cgpu->shaders) {
// AMD architechture got 64 compute shaders per compute unit.
// Source: http://www.amd.com/us/Documents/GCN_Architecture_whitepaper.pdf
clState->compute_shaders = compute_units * 64;
}
applog(LOG_DEBUG, "Max shaders calculated %d", (int)(clState->compute_shaders));
status = clGetDeviceInfo(devices[gpu], CL_DEVICE_MAX_MEM_ALLOC_SIZE , sizeof(cl_ulong), (void *)&cgpu->max_alloc, NULL);
@@ -454,6 +457,12 @@ _clState *initCl(unsigned int gpu, char *name, size_t nameSize)
/* Kernel only supports worksize 256 */
cgpu->work_size = 256;
break;
case KL_NSCRYPT:
applog(LOG_WARNING, "Kernel nscrypt is experimental.");
strcpy(filename, NSCRYPT_KERNNAME".cl");
strcpy(binaryfilename, NSCRYPT_KERNNAME);
use_nscrypt = true;
break;
case KL_NONE: /* Shouldn't happen */
break;
}
@@ -478,20 +487,28 @@ _clState *initCl(unsigned int gpu, char *name, size_t nameSize)
} else
cgpu->lookup_gap = cgpu->opt_lg;
// TODO: check if this params can be the same for both scrypt and nscrypt
unsigned int sixtyfours = use_nscrypt ? ((cgpu->max_alloc*cgpu->lookup_gap) / (2048*128) / 64 - 1) : (cgpu->max_alloc / 131072 / 64 - 1);
if (!cgpu->opt_tc) {
unsigned int sixtyfours;
sixtyfours = cgpu->max_alloc / 131072 / 64 - 1;
cgpu->thread_concurrency = sixtyfours * 64;
if (cgpu->shaders && cgpu->thread_concurrency > cgpu->shaders) {
cgpu->thread_concurrency -= cgpu->thread_concurrency % cgpu->shaders;
if (cgpu->thread_concurrency > cgpu->shaders * 5)
cgpu->thread_concurrency = cgpu->shaders * 5;
size_t tc_limit = cgpu->shaders * (use_nscrypt ? 11 : 5);
if (cgpu->thread_concurrency > tc_limit)
cgpu->thread_concurrency = tc_limit;
}
applog(LOG_DEBUG, "GPU %d: selecting thread concurrency of %d", gpu, (int)(cgpu->thread_concurrency));
} else
} else {
cgpu->thread_concurrency = cgpu->opt_tc;
}
// TODO: check if this works with standard scrypt, too
if (use_nscrypt) {
if (((cgpu->thread_concurrency * (2048*128)) / cgpu->lookup_gap) > cgpu->max_alloc) {
cgpu->thread_concurrency = sixtyfours * 64;
applog(LOG_INFO, "GPU %d: thread concurrency too high, set to %d", gpu, (int)(cgpu->thread_concurrency));
}
}
FILE *binaryfile;
size_t *binary_sizes;
@@ -777,7 +794,9 @@ built:
return NULL;
}
size_t ipt = (1024 / cgpu->lookup_gap + (1024 % cgpu->lookup_gap > 0));
cl_uint bsize = use_nscrypt ? 2048 : 1024;
size_t ipt = (bsize / cgpu->lookup_gap + (bsize % cgpu->lookup_gap > 0));
size_t bufsize = 128 * ipt * cgpu->thread_concurrency;
/* Use the max alloc value which has been rounded to a power of
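
The sizing hunk above replaces the hard-coded 1024 states per thread with 2048 when the nscrypt kernel is selected, divides by the lookup gap, and allocates 128 bytes per stored state. A small sketch of that padcache arithmetic with made-up thread-concurrency and lookup-gap values:

#include <stdio.h>
#include <stddef.h>

static size_t padcache_bytes(int use_nscrypt, unsigned lookup_gap, size_t thread_concurrency)
{
    unsigned bsize = use_nscrypt ? 2048 : 1024;              /* per-thread state count */
    size_t ipt = bsize / lookup_gap + (bsize % lookup_gap > 0);
    return 128 * ipt * thread_concurrency;                   /* bufsize in the diff above */
}

int main(void)
{
    /* hypothetical settings: thread concurrency 8192, lookup gap 2 */
    printf("scrypt : %zu MiB\n", padcache_bytes(0, 2, 8192) >> 20);
    printf("nscrypt: %zu MiB\n", padcache_bytes(1, 2, 8192) >> 20);
    return 0;
}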

scrypt.c (39)

@@ -356,7 +356,8 @@ salsa20_8(uint32_t B[16], const uint32_t Bx[16])
/* cpu and memory intensive function to transform a 80 byte buffer into a 32 byte output
scratchpad size needs to be at least 63 + (128 * r * p) + (256 * r + 64) + (128 * r * N) bytes
*/
static void scrypt_1024_1_1_256_sp(const uint32_t* input, char* scratchpad, uint32_t *ostate)
static void scrypt_n_1_1_256_sp(const uint32_t* input, char* scratchpad, uint32_t *ostate, const cl_uint n)
{
uint32_t * V;
uint32_t X[32];
@@ -370,7 +371,7 @@ static void scrypt_1024_1_1_256_sp(const uint32_t* input, char* scratchpad, uint
PBKDF2_SHA256_80_128(input, X);
for (i = 0; i < 1024; i += 2) {
for (i = 0; i < n; i += 2) {
memcpy(&V[i * 32], X, 128);
salsa20_8(&X[0], &X[16]);
@@ -381,8 +382,8 @@ static void scrypt_1024_1_1_256_sp(const uint32_t* input, char* scratchpad, uint
salsa20_8(&X[0], &X[16]);
salsa20_8(&X[16], &X[0]);
}
for (i = 0; i < 1024; i += 2) {
j = X[16] & 1023;
for (i = 0; i < n; i += 2) {
j = X[16] & (n-1);
p2 = (uint64_t *)(&V[j * 32]);
for(k = 0; k < 16; k++)
p1[k] ^= p2[k];
@@ -390,7 +391,7 @@ static void scrypt_1024_1_1_256_sp(const uint32_t* input, char* scratchpad, uint
salsa20_8(&X[0], &X[16]);
salsa20_8(&X[16], &X[0]);
j = X[16] & 1023;
j = X[16] & (n-1);
p2 = (uint64_t *)(&V[j * 32]);
for(k = 0; k < 16; k++)
p1[k] ^= p2[k];
@@ -402,20 +403,25 @@ static void scrypt_1024_1_1_256_sp(const uint32_t* input, char* scratchpad, uint
PBKDF2_SHA256_80_128_32(input, X, ostate);
}
/* 131583 rounded up to 4 byte alignment */
#define SCRATCHBUF_SIZE (131584)
void scrypt_regenhash(struct work *work)
{
uint32_t data[20];
char *scratchbuf;
cl_uint nfactor = 10; // scrypt default
uint32_t *nonce = (uint32_t *)(work->data + 76);
uint32_t *ohash = (uint32_t *)(work->hash);
if (use_nscrypt) {
uint32_t timestamp = bswap_32(*((uint32_t *)(work->data + 17*4)));
nfactor = vert_GetNfactor(timestamp) + 1;
}
be32enc_vect(data, (const uint32_t *)work->data, 19);
data[19] = htobe32(*nonce);
scratchbuf = (char *)alloca(SCRATCHBUF_SIZE);
scrypt_1024_1_1_256_sp(data, scratchbuf, ohash);
scratchbuf = (char *)alloca((1 << nfactor) * 128 + 512);
scrypt_n_1_1_256_sp(data, scratchbuf, ohash, (1 << nfactor));
flip32(ohash, ohash);
}
@@ -427,11 +433,13 @@ int scrypt_test(unsigned char *pdata, const unsigned char *ptarget, uint32_t non
uint32_t tmp_hash7, Htarg = le32toh(((const uint32_t *)ptarget)[7]);
uint32_t data[20], ohash[8];
char *scratchbuf;
cl_uint nfactor = 10; // scrypt default
be32enc_vect(data, (const uint32_t *)pdata, 19);
data[19] = htobe32(nonce);
scratchbuf = (char *)alloca(SCRATCHBUF_SIZE);
scrypt_1024_1_1_256_sp(data, scratchbuf, ohash);
scratchbuf = (char *)alloca((1 << nfactor) * 128 + 512);
scrypt_n_1_1_256_sp(data, scratchbuf, ohash, (1 << nfactor));
tmp_hash7 = be32toh(ohash[7]);
applog(LOG_DEBUG, "htarget %08lx diff1 %08lx hash %08lx",
@@ -456,10 +464,11 @@ bool scanhash_scrypt(struct thr_info *thr, const unsigned char __maybe_unused *p
uint32_t tmp_hash7;
uint32_t Htarg = le32toh(((const uint32_t *)ptarget)[7]);
bool ret = false;
cl_uint nfactor = 10; // scrypt default
be32enc_vect(data, (const uint32_t *)pdata, 19);
scratchbuf = (char *)malloc(SCRATCHBUF_SIZE);
scratchbuf = (char *)alloca((1 << nfactor) * 128 + 512);
if (unlikely(!scratchbuf)) {
applog(LOG_ERR, "Failed to malloc scratchbuf in scanhash_scrypt");
return ret;
@@ -470,7 +479,7 @@ bool scanhash_scrypt(struct thr_info *thr, const unsigned char __maybe_unused *p
*nonce = ++n;
data[19] = htobe32(n);
scrypt_1024_1_1_256_sp(data, scratchbuf, ostate);
scrypt_n_1_1_256_sp(data, scratchbuf, ostate, (1 << nfactor));
tmp_hash7 = be32toh(ostate[7]);
if (unlikely(tmp_hash7 <= Htarg)) {
@@ -486,6 +495,6 @@ bool scanhash_scrypt(struct thr_info *thr, const unsigned char __maybe_unused *p
}
}
free(scratchbuf);;
free(scratchbuf);
return ret;
}
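
The CPU-side changes above generalize scrypt_1024_1_1_256_sp() to scrypt_n_1_1_256_sp(): the loop bound and the index mask both come from N = 1 << nfactor, so N has to stay a power of two, and the scratchpad is sized as N * 128 + 512 bytes (which reproduces the old fixed SCRATCHBUF_SIZE of 131584 for nfactor 10). A quick sketch of those numbers:

#include <stdio.h>

int main(void)
{
    for (unsigned nfactor = 10; nfactor <= 12; nfactor++) {
        unsigned long n = 1UL << nfactor;                    /* passed to scrypt_n_1_1_256_sp() */
        unsigned long bytes = n * 128 + 512;                 /* alloca'd scratchpad size */
        printf("nfactor %u: N = %lu, scratchpad = %lu bytes, index mask = 0x%lx\n",
               nfactor, n, bytes, n - 1);                    /* j = X[16] & (n-1) needs a power of two */
    }
    return 0;
}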

sgminer.c (4)

@@ -102,6 +102,7 @@ int opt_dynamic_interval = 7;
int opt_g_threads = -1;
int gpu_threads;
bool opt_restart = true;
bool use_nscrypt = false;
struct list_head scan_devices;
static bool devices_enabled[MAX_DEVICES];
@@ -4218,6 +4219,9 @@ void write_config(FILE *fcfg)
case KL_ZUIKKIS:
fprintf(fcfg, ZUIKKIS_KERNNAME);
break;
case KL_NSCRYPT:
fprintf(fcfg, NSCRYPT_KERNNAME);
break;
}
}

util.c (42)

@@ -2782,3 +2782,45 @@ bool cg_completion_timeout(void *fn, void *fnarg, int timeout)
pthread_cancel(pthread);
return !ret;
}
const unsigned char minNfactor = 10;
const unsigned char maxNfactor = 30;
const unsigned int vert_nChainStartTime = 1389306217;
unsigned char vert_GetNfactor(const long int nTimestamp) {
int l, n;
long int s;
l = 0;
if (nTimestamp <= vert_nChainStartTime) {
return minNfactor;
}
s = nTimestamp - vert_nChainStartTime;
while ((s >> 1) > 3) {
l += 1;
s >>= 1;
}
s &= 3;
n = (l * 158 + s * 28 - 2670) / 100;
if (n < 0)
n = 0;
if (n > 255)
printf( "GetNfactor(%ld) - something wrong(n == %d)\n", nTimestamp, n );
unsigned char N = ((unsigned char) n);
//printf("GetNfactor: %d -> %d %d : %d / %d\n", nTimestamp - nChainStartTime, l, s, n, min(max(N, minNfactor), maxNfactor));
if (N < minNfactor) {
return minNfactor;
} else if (N > maxNfactor) {
return maxNfactor;
}
return N;
//return min(max(N, minNfactor), maxNfactor);
}
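
To see how the new vert_GetNfactor() behaves over time, here is a worked example: a condensed copy of the helper above plus a small driver printing the N-factor, and the resulting N = 2^(nFactor+1) as the callers above use it, for a few arbitrary sample timestamps:

#include <stdio.h>

static unsigned char nfactor_at(long nTimestamp)            /* condensed copy of vert_GetNfactor() */
{
    const unsigned char minNfactor = 10, maxNfactor = 30;
    const long nChainStartTime = 1389306217;                 /* same start time as above */
    if (nTimestamp <= nChainStartTime)
        return minNfactor;
    long s = nTimestamp - nChainStartTime;
    int l = 0;
    while ((s >> 1) > 3) { l++; s >>= 1; }
    s &= 3;
    int n = (l * 158 + (int)s * 28 - 2670) / 100;
    if (n < 0) n = 0;
    unsigned char N = (unsigned char)n;
    if (N < minNfactor) return minNfactor;
    if (N > maxNfactor) return maxNfactor;
    return N;
}

int main(void)
{
    long samples[] = { 1389306217L,                          /* chain start (Jan 2014)    */
                       1389306217L + 86400L,                 /* one day later             */
                       1420070400L,                          /* roughly the start of 2015 */
                       1577836800L };                        /* roughly the start of 2020 */
    for (int i = 0; i < 4; i++) {
        unsigned nf = nfactor_at(samples[i]);
        printf("timestamp %ld -> N-factor %u (N = %lu)\n", samples[i], nf, 1UL << (nf + 1));
    }
    return 0;
}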

util.h (2)

@@ -152,6 +152,8 @@ void cgsem_reset(cgsem_t *cgsem);
void cgsem_destroy(cgsem_t *cgsem);
bool cg_completion_timeout(void *fn, void *fnarg, int timeout);
unsigned char vert_GetNfactor(const long int nTimestamp);
#define cgsem_init(_sem) _cgsem_init(_sem, __FILE__, __func__, __LINE__)
#define cgsem_post(_sem) _cgsem_post(_sem, __FILE__, __func__, __LINE__)
#define cgsem_wait(_sem) _cgsem_wait(_sem, __FILE__, __func__, __LINE__)

winbuild/dist/include/config.h (6)

@@ -3,12 +3,6 @@
#define HAVE_STDINT_H
#define ALEXKARNEW_KERNNAME "alexkarnew"
#define ALEXKAROLD_KERNNAME "alexkarold"
#define CKOLIVAS_KERNNAME "ckolivas"
#define ZUIKKIS_KERNNAME "zuikkis"
#define PSW_KERNNAME "psw"
#if defined(_MSC_VER)
#define HAVE_LIBCURL 1

winbuild/sgminer.vcxproj (1)

@@ -280,6 +280,7 @@ exit 0</Command>
<ClInclude Include="..\driver-opencl.h" />
<ClInclude Include="..\elist.h" />
<ClInclude Include="..\findnonce.h" />
<ClInclude Include="..\kernels.h" />
<ClInclude Include="..\logging.h" />
<ClInclude Include="..\miner.h" />
<ClInclude Include="..\ocl.h" />

winbuild/sgminer.vcxproj.filters (3)

@@ -163,6 +163,9 @@
<ClInclude Include="dist\include\winbuild.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\kernels.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<None Include="README.txt" />
