
lyra2v2: SM3.0 compat too

and strip most of the unused vector defines...
Tanguy Pruvot committed 9 years ago (commit 877f25aee5)
3 changed files:

  1. lyra2/cuda_lyra2_vectors.h (614 changed lines)
  2. lyra2/cuda_lyra2v2.cu (22 changed lines)
  3. lyra2/lyra2REv2.cu (4 changed lines)
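Note on the SM 3.0 support: it hinges on the small guard added near the top of lyra2/cuda_lyra2_vectors.h (below SM 3.2 the read-only-cache PTX loads, ld.global.nc, are unavailable, so __ldg4 degrades to a plain dereference), plus the matching arch guard in cuda_lyra2v2.cu dropping from 320 to 300. A minimal sketch of that pattern, assuming a 64-bit build (the "l" pointer constraint); the copy_rows kernel is hypothetical and not part of the commit:

#include <cuda_runtime.h>

#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 320
/* SM 3.2+: non-coherent (read-only cache) vector load, as in the header */
static __device__ __inline__ ulonglong4 __ldg4(const ulonglong4 *ptr)
{
	ulonglong4 ret;
	asm("ld.global.nc.v2.u64 {%0,%1}, [%2];"    : "=l"(ret.x), "=l"(ret.y) : "l"(ptr));
	asm("ld.global.nc.v2.u64 {%0,%1}, [%2+16];" : "=l"(ret.z), "=l"(ret.w) : "l"(ptr));
	return ret;
}
#else
/* SM 3.0 fallback added by this commit: a plain global load */
#define __ldg4(x) (*(x))
#endif

/* hypothetical kernel, only to show both arch paths sharing one call site */
__global__ void copy_rows(const ulonglong4 *in, ulonglong4 *out)
{
	out[threadIdx.x] = __ldg4(&in[threadIdx.x]);
}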

lyra2/cuda_lyra2_vectors.h (614 changed lines)

@@ -1,9 +1,8 @@
/* DJM CRAP */
/* DJM CRAP to strip (again) made for SM 3.2+ */
#ifndef CUDA_LYRA_VECTOR_H
#define CUDA_LYRA_VECTOR_H
///////////////////////////////////////////////////////////////////////////////////
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__)
#define __LDG_PTR "l"
@@ -13,52 +12,39 @@
#include "cuda_helper.h"
//typedef __device_builtin__ struct ulong16 ulong16;
#if __CUDA_ARCH__ < 320 && !defined(__ldg4)
#define __ldg4(x) (*(x))
#endif
typedef struct __align__(32) uint8
{
typedef struct __align__(32) uint8 {
unsigned int s0, s1, s2, s3, s4, s5, s6, s7;
} uint8;
typedef struct __align__(64) uint2_8
{
typedef struct __align__(64) uint2_8 {
uint2 s0, s1, s2, s3, s4, s5, s6, s7;
} uint2_8;
typedef struct __align__(64) ulonglong2to8
{
ulonglong2 l0,l1,l2,l3;
typedef struct __align__(64) ulonglong2to8 {
ulonglong2 l0,l1,l2,l3;
} ulonglong2to8;
typedef struct __align__(128) ulonglong8to16
{
typedef struct __align__(128) ulonglong8to16 {
ulonglong2to8 lo, hi;
} ulonglong8to16;
typedef struct __align__(256) ulonglong16to32
{
typedef struct __align__(256) ulonglong16to32 {
ulonglong8to16 lo, hi;
} ulonglong16to32;
typedef struct __align__(512) ulonglong32to64
{
typedef struct __align__(512) ulonglong32to64 {
ulonglong16to32 lo, hi;
} ulonglong32to64;
typedef struct __align__(128) ulonglonglong
{
typedef struct __align__(128) ulonglonglong {
ulonglong2 s0,s1,s2,s3,s4,s5,s6,s7;
} ulonglonglong;
typedef struct __align__(64) uint16
{
typedef struct __align__(64) uint16 {
union {
struct {unsigned int s0, s1, s2, s3, s4, s5, s6, s7;};
uint8 lo;
@@ -69,8 +55,7 @@ typedef struct __align__(64) uint16
};
} uint16;
typedef struct __align__(128) uint2_16
{
typedef struct __align__(128) uint2_16 {
union {
struct { uint2 s0, s1, s2, s3, s4, s5, s6, s7; };
uint2_8 lo;
@@ -81,101 +66,35 @@ typedef struct __align__(128) uint2_16
};
} uint2_16;
typedef struct __align__(128) uint32
{
uint16 lo,hi;
typedef struct __align__(128) uint32 {
uint16 lo,hi;
} uint32;
struct __align__(128) ulong8
{
struct __align__(128) ulong8 {
ulonglong4 s0, s1, s2, s3;
};
typedef __device_builtin__ struct ulong8 ulong8;
/*
typedef struct __align__(256) ulonglong16
{
ulonglong2 s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sa, sb, sc, sd, se, sf;
} ulonglong16;
*/
typedef struct __align__(256) ulonglong16
{
typedef struct __align__(256) ulonglong16 {
ulonglong4 s0, s1, s2, s3, s4, s5, s6, s7;
} ulonglong16;
//typedef struct __align__(32) uint48
//{
// uint4 s0, s1;
//
//} uint48;
typedef struct __align__(16) uint28
{
typedef struct __align__(16) uint28 {
uint2 x, y, z, w;
} uint28;
/*
typedef struct __builtin_align__(32) uint48
{
union {
uint4 s0;
struct { uint2 x, y;};
};
union {
uint4 s1;
struct { uint2 z, w; };
};
} uint48;
*/
typedef struct __builtin_align__(32) uint48
{
typedef struct __builtin_align__(32) uint48 {
uint4 s0,s1;
} uint48;
typedef struct __align__(64) uint816
{
uint48 s0, s1;
} uint816;
typedef struct __align__(128) uint1632
{
uint816 s0, s1;
} uint1632;
typedef struct __align__(256) uintx64
{
uint1632 s0, s1;
} uintx64;
typedef struct __builtin_align__(256) uintx64bis
{
uint28 s0, s1, s2, s3, s4, s5, s6, s7;
} uintx64bis;
typedef struct __align__(256) uint4x16
{
typedef struct __align__(256) uint4x16 {
uint4 s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15;
} uint4x16;
static __inline__ __device__ ulonglong2to8 make_ulonglong2to8(ulonglong2 s0, ulonglong2 s1, ulonglong2 s2, ulonglong2 s3)
{
ulonglong2to8 t; t.l0=s0; t.l1=s1; t.l2=s2; t.l3=s3;
return t;
ulonglong2to8 t; t.l0=s0; t.l1=s1; t.l2=s2; t.l3=s3;
return t;
}
static __inline__ __device__ ulonglong8to16 make_ulonglong8to16(const ulonglong2to8 &s0, const ulonglong2to8 &s1)
@@ -196,7 +115,6 @@ static __inline__ __device__ ulonglong32to64 make_ulonglong32to64(const ulonglon
return t;
}
static __inline__ __host__ __device__ ulonglonglong make_ulonglonglong(
const ulonglong2 &s0, const ulonglong2 &s1, const ulonglong2 &s2, const ulonglong2 &s3,
const ulonglong2 &s4, const ulonglong2 &s5)
@@ -205,63 +123,18 @@ static __inline__ __host__ __device__ ulonglonglong make_ulonglonglong(
return t;
}
static __inline__ __device__ uint48 make_uint48(uint4 s0, uint4 s1)
{
uint48 t; t.s0 = s0; t.s1 = s1;
return t;
}
/*
static __inline__ __device__ uint48 make_uint48(uint2 s0, uint2 s1, uint2 s2, uint2 s3)
{
uint48 t; t.x = s0; t.y = s1; t.z = s2; t.w = s3;
return t;
}
static __inline__ __device__ uint48 make_uint48(const uint28 &s0)
{
uint48 t; t.x = s0.x; t.y = s0.y; t.z = s0.z; t.w = s0.w;
return t;
}
*/
static __inline__ __device__ uint28 make_uint28(uint2 s0, uint2 s1, uint2 s2, uint2 s3)
{
uint28 t; t.x = s0; t.y = s1; t.z = s2; t.w = s3;
return t;
}
static __inline__ __device__ uint816 make_uint816(const uint48 &s0, const uint48 &s1)
{
uint816 t; t.s0 = s0; t.s1 = s1;
return t;
}
static __inline__ __device__ uint1632 make_uint1632(const uint816 &s0, const uint816 &s1)
{
uint1632 t; t.s0 = s0; t.s1 = s1;
return t;
}
static __inline__ __device__ uintx64 make_uintx64(const uint1632 &s0, const uint1632 &s1)
{
uintx64 t; t.s0 = s0; t.s1 = s1;
return t;
}
static __inline__ __device__ uintx64bis make_uintx64bis(
const uint28 &s0, const uint28 &s1, const uint28 &s2, const uint28 &s3,
const uint28 &s4, const uint28 &s5, const uint28 &s6, const uint28 &s7
)
{
uintx64bis t; t.s0 = s0; t.s1 = s1; t.s2 = s2; t.s3 = s3; t.s4 = s4; t.s5 = s5; t.s6 = s6; t.s7 = s7;
return t;
}
static __inline__ __host__ __device__ uint4x16 make_uint4x16(
uint4 s0, uint4 s1, uint4 s2, uint4 s3, uint4 s4, uint4 s5, uint4 s6, uint4 s7,
uint4 s8, uint4 s9, uint4 sa, uint4 sb, uint4 sc, uint4 sd, uint4 se, uint4 sf)
@@ -271,7 +144,6 @@ static __inline__ __host__ __device__ uint4x16 make_uint4x16(
return t;
}
static __inline__ __device__ uint2_16 make_uint2_16(
uint2 s0, uint2 s1, uint2 s2, uint2 s3, uint2 s4, uint2 s5, uint2 s6, uint2 s7,
uint2 s8, uint2 s9, uint2 sa, uint2 sb, uint2 sc, uint2 sd, uint2 se, uint2 sf)
@@ -281,7 +153,6 @@ static __inline__ __device__ uint2_16 make_uint2_16(
return t;
}
static __inline__ __host__ __device__ uint16 make_uint16(
unsigned int s0, unsigned int s1, unsigned int s2, unsigned int s3, unsigned int s4, unsigned int s5, unsigned int s6, unsigned int s7,
unsigned int s8, unsigned int s9, unsigned int sa, unsigned int sb, unsigned int sc, unsigned int sd, unsigned int se, unsigned int sf)
@@ -316,7 +187,6 @@ static __inline__ __host__ __device__ uint2_8 make_uint2_8(
return t;
}
static __inline__ __host__ __device__ ulonglong16 make_ulonglong16(const ulonglong4 &s0, const ulonglong4 &s1,
const ulonglong4 &s2, const ulonglong4 &s3, const ulonglong4 &s4, const ulonglong4 &s5, const ulonglong4 &s6, const ulonglong4 &s7)
{
@@ -324,9 +194,6 @@ static __inline__ __host__ __device__ ulonglong16 make_ulonglong16(const ulonglo
return t;
}
static __inline__ __host__ __device__ ulong8 make_ulong8(
ulonglong4 s0, ulonglong4 s1, ulonglong4 s2, ulonglong4 s3)
{
@@ -338,14 +205,8 @@ static __inline__ __host__ __device__ ulong8 make_ulong8(
static __forceinline__ __device__ uchar4 operator^ (uchar4 a, uchar4 b) { return make_uchar4(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z, a.w ^ b.w); }
static __forceinline__ __device__ uchar4 operator+ (uchar4 a, uchar4 b) { return make_uchar4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); }
//static __forceinline__ __device__ uint4 operator^ (uint4 a, uint4 b) { return make_uint4(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z, a.w ^ b.w); }
static __forceinline__ __device__ uint4 operator+ (uint4 a, uint4 b) { return make_uint4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); }
static __forceinline__ __device__ ulonglong4 operator^ (ulonglong4 a, ulonglong4 b) { return make_ulonglong4(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z, a.w ^ b.w); }
static __forceinline__ __device__ ulonglong4 operator+ (ulonglong4 a, ulonglong4 b) { return make_ulonglong4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); }
static __forceinline__ __device__ ulonglong2 operator^ (ulonglong2 a, ulonglong2 b) { return make_ulonglong2(a.x ^ b.x, a.y ^ b.y); }
@@ -353,12 +214,11 @@ static __forceinline__ __device__ ulonglong2 operator+ (ulonglong2 a, ulonglong2
static __forceinline__ __device__ ulong8 operator^ (const ulong8 &a, const ulong8 &b) {
return make_ulong8(a.s0 ^ b.s0, a.s1 ^ b.s1, a.s2 ^ b.s2, a.s3 ^ b.s3);
} //, a.s4 ^ b.s4, a.s5 ^ b.s5, a.s6 ^ b.s6, a.s7 ^ b.s7); }
}
static __forceinline__ __device__ ulong8 operator+ (const ulong8 &a, const ulong8 &b) {
return make_ulong8(a.s0 + b.s0, a.s1 + b.s1, a.s2 + b.s2, a.s3 + b.s3);
} //, a.s4 + b.s4, a.s5 + b.s5, a.s6 + b.s6, a.s7 + b.s7); }
}
static __forceinline__ __device__ __host__ uint8 operator^ (const uint8 &a, const uint8 &b) { return make_uint8(a.s0 ^ b.s0, a.s1 ^ b.s1, a.s2 ^ b.s2, a.s3 ^ b.s3, a.s4 ^ b.s4, a.s5 ^ b.s5, a.s6 ^ b.s6, a.s7 ^ b.s7); }
@@ -371,10 +231,6 @@ static __forceinline__ __device__ uint2_8 operator+ (const uint2_8 &a, const u
////////////// mess++ //////
//static __forceinline__ __device__ uint48 operator^ (const uint48 &a, const uint48 &b) {
// return make_uint48(a.s0 ^ b.s0, a.s1 ^ b.s1);
//}
static __forceinline__ __device__ uint28 operator^ (const uint28 &a, const uint28 &b) {
return make_uint28(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z, a.w ^ b.w);
}
@@ -386,29 +242,6 @@ static __forceinline__ __device__ uint28 operator+ (const uint28 &a, const uint
static __forceinline__ __device__ uint48 operator+ (const uint48 &a, const uint48 &b) {
return make_uint48(a.s0 + b.s0, a.s1 + b.s1);
}
/*
static __forceinline__ __device__ uint48 operator+ (const uint48 &a, const uint48 &b) {
return make_uint48(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
*/
//static __forceinline__ __device__ uint816 operator^ (const uint816 &a, const uint816 &b) {
// return make_uint816(a.s0 ^ b.s0, a.s1 ^ b.s1);
//}
static __forceinline__ __device__ uint816 operator+ (const uint816 &a, const uint816 &b) {
return make_uint816(a.s0 + b.s0, a.s1 + b.s1);
}
//static __forceinline__ __device__ uint1632 operator^ (const uint1632 &a, const uint1632 &b) {
// return make_uint1632(a.s0 ^ b.s0, a.s1 ^ b.s1);
//}
//static __forceinline__ __device__ uintx64 operator^ (const uintx64 &a, const uintx64 &b) {
// return make_uintx64(a.s0 ^ b.s0, a.s1 ^ b.s1);
//}
/////////////////////////
@@ -432,15 +265,6 @@ static __forceinline__ __device__ uint2_16 operator+ (const uint2_16 &a, const
a.s8 + b.s8, a.s9 + b.s9, a.sa + b.sa, a.sb + b.sb, a.sc + b.sc, a.sd + b.sd, a.se + b.se, a.sf + b.sf);
}
static __forceinline__ __device__ uintx64bis operator^ (const uintx64bis &a, const uintx64bis &b) {
return make_uintx64bis(a.s0 ^ b.s0, a.s1 ^ b.s1, a.s2 ^ b.s2, a.s3 ^ b.s3, a.s4 ^ b.s4, a.s5 ^ b.s5, a.s6 ^ b.s6, a.s7 ^ b.s7);
}
static __forceinline__ __device__ uintx64bis operator+ (const uintx64bis &a, const uintx64bis &b) {
return make_uintx64bis(a.s0 + b.s0, a.s1 + b.s1, a.s2 + b.s2, a.s3 + b.s3, a.s4 + b.s4, a.s5 + b.s5, a.s6 + b.s6, a.s7 + b.s7);
}
static __forceinline__ __device__ uint32 operator^ (const uint32 &a, const uint32 &b) {
return make_uint32(a.lo ^ b.lo, a.hi ^ b.hi);
}
@@ -449,7 +273,6 @@ static __forceinline__ __device__ uint32 operator+ (const uint32 &a, const uint
return make_uint32(a.lo + b.lo, a.hi + b.hi);
}
static __forceinline__ __device__ ulonglong16 operator^ (const ulonglong16 &a, const ulonglong16 &b) {
return make_ulonglong16(a.s0 ^ b.s0, a.s1 ^ b.s1, a.s2 ^ b.s2, a.s3 ^ b.s3, a.s4 ^ b.s4, a.s5 ^ b.s5, a.s6 ^ b.s6, a.s7 ^ b.s7);
}
@@ -459,35 +282,18 @@ static __forceinline__ __device__ ulonglong16 operator+ (const ulonglong16 &a, c
}
static __forceinline__ __device__ void operator^= (ulong8 &a, const ulong8 &b) { a = a ^ b; }
//static __forceinline__ __device__ void operator^= (uintx64 &a, const uintx64 &b) { a = a ^ b; }
static __forceinline__ __device__ void operator^= (uintx64bis &a, const uintx64bis &b) { a = a ^ b; }
//static __forceinline__ __device__ void operator^= (uint816 &a, const uint816 &b) { a = a ^ b; }
static __forceinline__ __device__ void operator+= (uint816 &a, const uint816 &b) { a = a + b; }
//static __forceinline__ __device__ void operator^= (uint48 &a, const uint48 &b) { a = a ^ b; }
//static __forceinline__ __device__ void operator+= (uint48 &a, const uint48 &b) { a = a + b; }
static __forceinline__ __device__ void operator^= (uint28 &a, const uint28 &b) { a = a ^ b; }
static __forceinline__ __device__ void operator+= (uint28 &a, const uint28 &b) { a = a + b; }
static __forceinline__ __device__ void operator^= (uint2_8 &a, const uint2_8 &b) { a = a ^ b; }
static __forceinline__ __device__ void operator+= (uint2_8 &a, const uint2_8 &b) { a = a + b; }
static __forceinline__ __device__ void operator^= (uint32 &a, const uint32 &b) { a = a ^ b; }
static __forceinline__ __device__ void operator+= (uint32 &a, const uint32 &b) { a = a + b; }
//static __forceinline__ __device__ void operator^= (uint4 &a, uint4 b) { a = a ^ b; }
static __forceinline__ __device__ void operator^= (uchar4 &a, uchar4 b) { a = a ^ b; }
static __forceinline__ __device__ __host__ void operator^= (uint8 &a, const uint8 &b) { a = a ^ b; }
static __forceinline__ __device__ __host__ void operator^= (uint16 &a, const uint16 &b) { a = a ^ b; }
@@ -509,7 +315,6 @@ ulonglong2to8 operator+ (const ulonglong2to8 &a, const ulonglong2to8 &b)
return make_ulonglong2to8(a.l0 + b.l0, a.l1 + b.l1, a.l2 + b.l2, a.l3 + b.l3);
}
static __forceinline__ __device__
ulonglong8to16 operator^ (const ulonglong8to16 &a, const ulonglong8to16 &b)
{
@@ -546,7 +351,6 @@ ulonglong32to64 operator+ (const ulonglong32to64 &a, const ulonglong32to64 &b)
return make_ulonglong32to64(a.lo + b.lo, a.hi + b.hi);
}
static __forceinline__ __device__ ulonglonglong operator^ (const ulonglonglong &a, const ulonglonglong &b) {
return make_ulonglonglong(a.s0 ^ b.s0, a.s1 ^ b.s1, a.s2 ^ b.s2, a.s3 ^ b.s3, a.s4 ^ b.s4, a.s5 ^ b.s5);
}
@@ -555,10 +359,8 @@ static __forceinline__ __device__ ulonglonglong operator+ (const ulonglonglong &
return make_ulonglonglong(a.s0 + b.s0, a.s1 + b.s1, a.s2 + b.s2, a.s3 + b.s3, a.s4 + b.s4, a.s5 + b.s5);
}
static __forceinline__ __device__ void operator^= (ulonglong2to8 &a, const ulonglong2to8 &b) { a = a ^ b; }
static __forceinline__ __device__ void operator+= (uint4 &a, uint4 b) { a = a + b; }
static __forceinline__ __device__ void operator+= (uchar4 &a, uchar4 b) { a = a + b; }
static __forceinline__ __device__ __host__ void operator+= (uint8 &a, const uint8 &b) { a = a + b; }
@@ -577,7 +379,6 @@ static __forceinline__ __device__ void operator^= (ulonglong16to32 &a, const ulo
static __forceinline__ __device__ void operator+= (ulonglong32to64 &a, const ulonglong32to64 &b) { a = a + b; }
static __forceinline__ __device__ void operator^= (ulonglong32to64 &a, const ulonglong32to64 &b) { a = a ^ b; }
static __forceinline__ __device__ void operator+= (ulonglonglong &a, const ulonglonglong &b) { a = a + b; }
static __forceinline__ __device__ void operator^= (ulonglonglong &a, const ulonglonglong &b) { a = a ^ b; }
@@ -615,7 +416,6 @@ static __forceinline__ __device__ uint32_t rotate(uint32_t vec4, uint32_t shift)
return ret;
}
static __forceinline__ __device__ uint32_t rotateR(uint32_t vec4, uint32_t shift)
{
uint32_t ret;
@@ -623,245 +423,6 @@ static __forceinline__ __device__ uint32_t rotateR(uint32_t vec4, uint32_t shift
return ret;
}
static __device__ __inline__ uint8 __ldg8(const uint8_t *ptr)
{
uint8 test;
asm volatile ("ld.global.nc.v4.u32 {%0,%1,%2,%3},[%4];" : "=r"(test.s0), "=r"(test.s1), "=r"(test.s2), "=r"(test.s3) : __LDG_PTR(ptr));
asm volatile ("ld.global.nc.v4.u32 {%0,%1,%2,%3},[%4+16];" : "=r"(test.s4), "=r"(test.s5), "=r"(test.s6), "=r"(test.s7) : __LDG_PTR(ptr));
return (test);
}
static __device__ __inline__ uint32_t __ldgtoint(const uint8_t *ptr)
{
uint32_t test;
asm volatile ("ld.global.nc.u32 {%0},[%1];" : "=r"(test) : __LDG_PTR(ptr));
return (test);
}
static __device__ __inline__ uint32_t __ldgtoint64(const uint8_t *ptr)
{
uint64_t test;
asm volatile ("ld.global.nc.u64 {%0},[%1];" : "=l"(test) : __LDG_PTR(ptr));
return (test);
}
static __device__ __inline__ uint32_t __ldgtoint_unaligned(const uint8_t *ptr)
{
uint32_t test;
asm volatile ("{\n\t"
".reg .u8 a,b,c,d; \n\t"
"ld.global.nc.u8 a,[%1]; \n\t"
"ld.global.nc.u8 b,[%1+1]; \n\t"
"ld.global.nc.u8 c,[%1+2]; \n\t"
"ld.global.nc.u8 d,[%1+3]; \n\t"
"mov.b32 %0,{a,b,c,d}; }\n\t"
: "=r"(test) : __LDG_PTR(ptr));
return (test);
}
static __device__ __inline__ uint64_t __ldgtoint64_unaligned(const uint8_t *ptr)
{
uint64_t test;
asm volatile ("{\n\t"
".reg .u8 a,b,c,d,e,f,g,h; \n\t"
".reg .u32 i,j; \n\t"
"ld.global.nc.u8 a,[%1]; \n\t"
"ld.global.nc.u8 b,[%1+1]; \n\t"
"ld.global.nc.u8 c,[%1+2]; \n\t"
"ld.global.nc.u8 d,[%1+3]; \n\t"
"ld.global.nc.u8 e,[%1+4]; \n\t"
"ld.global.nc.u8 f,[%1+5]; \n\t"
"ld.global.nc.u8 g,[%1+6]; \n\t"
"ld.global.nc.u8 h,[%1+7]; \n\t"
"mov.b32 i,{a,b,c,d}; \n\t"
"mov.b32 j,{e,f,g,h}; \n\t"
"mov.b64 %0,{i,j}; }\n\t"
: "=l"(test) : __LDG_PTR(ptr));
return (test);
}
static __device__ __inline__ uint64_t __ldgtoint64_trunc(const uint8_t *ptr)
{
uint32_t zero = 0;
uint64_t test;
asm volatile ("{\n\t"
".reg .u8 a,b,c,d; \n\t"
".reg .u32 i; \n\t"
"ld.global.nc.u8 a,[%1]; \n\t"
"ld.global.nc.u8 b,[%1+1]; \n\t"
"ld.global.nc.u8 c,[%1+2]; \n\t"
"ld.global.nc.u8 d,[%1+3]; \n\t"
"mov.b32 i,{a,b,c,d}; \n\t"
"mov.b64 %0,{i,%1}; }\n\t"
: "=l"(test) : __LDG_PTR(ptr), "r"(zero));
return (test);
}
static __device__ __inline__ uint32_t __ldgtoint_unaligned2(const uint8_t *ptr)
{
uint32_t test;
asm("{\n\t"
".reg .u8 e,b,c,d; \n\t"
"ld.global.nc.u8 e,[%1]; \n\t"
"ld.global.nc.u8 b,[%1+1]; \n\t"
"ld.global.nc.u8 c,[%1+2]; \n\t"
"ld.global.nc.u8 d,[%1+3]; \n\t"
"mov.b32 %0,{e,b,c,d}; }\n\t"
: "=r"(test) : __LDG_PTR(ptr));
return (test);
}
#endif /* __CUDA_ARCH__ < 320 */
static __forceinline__ __device__ void shift256R2(uint32_t * ret, const uint8 &vec4, const uint32_t shift)
{
uint32_t truc = 0, truc2 = cuda_swab32(vec4.s7), truc3 = 0;
asm("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc3), "r"(truc2), "r"(shift));
ret[8] = cuda_swab32(truc);
truc3 = cuda_swab32(vec4.s6);
asm("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc2), "r"(truc3), "r"(shift));
ret[7] = cuda_swab32(truc);
truc2 = cuda_swab32(vec4.s5);
asm("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc3), "r"(truc2), "r"(shift));
ret[6] = cuda_swab32(truc);
truc3 = cuda_swab32(vec4.s4);
asm("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc2), "r"(truc3), "r"(shift));
ret[5] = cuda_swab32(truc);
truc2 = cuda_swab32(vec4.s3);
asm("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc3), "r"(truc2), "r"(shift));
ret[4] = cuda_swab32(truc);
truc3 = cuda_swab32(vec4.s2);
asm("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc2), "r"(truc3), "r"(shift));
ret[3] = cuda_swab32(truc);
truc2 = cuda_swab32(vec4.s1);
asm("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc3), "r"(truc2), "r"(shift));
ret[2] = cuda_swab32(truc);
truc3 = cuda_swab32(vec4.s0);
asm("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc2), "r"(truc3), "r"(shift));
ret[1] = cuda_swab32(truc);
asm("shr.b32 %0, %1, %2;" : "=r"(truc) : "r"(truc3), "r"(shift));
ret[0] = cuda_swab32(truc);
}
#define shift256R3(ret,vec4, shift) \
{ \
\
uint32_t truc=0,truc2=cuda_swab32(vec4.s7),truc3=0; \
asm volatile ("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc3), "r"(truc2), "r"(shift)); \
ret[8] = cuda_swab32(truc); \
truc2=cuda_swab32(vec4.s6);truc3=cuda_swab32(vec4.s7); \
asm volatile ("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc3), "r"(truc2), "r"(shift)); \
ret[7] = cuda_swab32(truc); \
truc2=cuda_swab32(vec4.s5);truc3=cuda_swab32(vec4.s6); \
asm volatile ("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc3), "r"(truc2), "r"(shift)); \
ret[6] = cuda_swab32(truc); \
truc2 = cuda_swab32(vec4.s4); truc3 = cuda_swab32(vec4.s5); \
asm volatile ("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc3), "r"(truc2), "r"(shift)); \
ret[5] = cuda_swab32(truc); \
truc2 = cuda_swab32(vec4.s3); truc3 = cuda_swab32(vec4.s4); \
asm volatile ("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc3), "r"(truc2), "r"(shift)); \
ret[4] = cuda_swab32(truc); \
truc2 = cuda_swab32(vec4.s2); truc3 = cuda_swab32(vec4.s3); \
asm volatile ("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc3), "r"(truc2), "r"(shift)); \
ret[3] = cuda_swab32(truc); \
truc2 = cuda_swab32(vec4.s1); truc3 = cuda_swab32(vec4.s2); \
asm volatile ("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc3), "r"(truc2), "r"(shift)); \
ret[2] = cuda_swab32(truc); \
truc2 = cuda_swab32(vec4.s0); truc3 = cuda_swab32(vec4.s1); \
asm volatile ("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc3), "r"(truc2), "r"(shift)); \
ret[1] = cuda_swab32(truc); \
truc3 = cuda_swab32(vec4.s0); \
asm volatile ("shr.b32 %0, %1, %2;" : "=r"(truc) : "r"(truc3), "r"(shift)); \
ret[0] = cuda_swab32(truc); \
\
\
}
static __device__ __inline__ uint32 __ldg32b(const uint32 *ptr)
{
uint32 ret;
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.lo.s0), "=r"(ret.lo.s1), "=r"(ret.lo.s2), "=r"(ret.lo.s3) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+16];" : "=r"(ret.lo.s4), "=r"(ret.lo.s5), "=r"(ret.lo.s6), "=r"(ret.lo.s7) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+32];" : "=r"(ret.lo.s8), "=r"(ret.lo.s9), "=r"(ret.lo.sa), "=r"(ret.lo.sb) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+48];" : "=r"(ret.lo.sc), "=r"(ret.lo.sd), "=r"(ret.lo.se), "=r"(ret.lo.sf) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+64];" : "=r"(ret.hi.s0), "=r"(ret.hi.s1), "=r"(ret.hi.s2), "=r"(ret.hi.s3) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+80];" : "=r"(ret.hi.s4), "=r"(ret.hi.s5), "=r"(ret.hi.s6), "=r"(ret.hi.s7) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+96];" : "=r"(ret.hi.s8), "=r"(ret.hi.s9), "=r"(ret.hi.sa), "=r"(ret.hi.sb) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+112];" : "=r"(ret.hi.sc), "=r"(ret.hi.sd), "=r"(ret.hi.se), "=r"(ret.hi.sf) : __LDG_PTR(ptr));
return ret;
}
static __device__ __inline__ uint16 __ldg16b(const uint16 *ptr)
{
uint16 ret;
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.s0), "=r"(ret.s1), "=r"(ret.s2), "=r"(ret.s3) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+16];" : "=r"(ret.s4), "=r"(ret.s5), "=r"(ret.s6), "=r"(ret.s7) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+32];" : "=r"(ret.s8), "=r"(ret.s9), "=r"(ret.sa), "=r"(ret.sb) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+48];" : "=r"(ret.sc), "=r"(ret.sd), "=r"(ret.se), "=r"(ret.sf) : __LDG_PTR(ptr));
return ret;
}
static __device__ __inline__ uintx64 __ldg32(const uint4 *ptr)
{
uintx64 ret;
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.s0.s0.s0.s0.x), "=r"(ret.s0.s0.s0.s0.y), "=r"(ret.s0.s0.s0.s0.z), "=r"(ret.s0.s0.s0.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+16];" : "=r"(ret.s0.s0.s0.s1.x), "=r"(ret.s0.s0.s0.s1.y), "=r"(ret.s0.s0.s0.s1.z), "=r"(ret.s0.s0.s0.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+32];" : "=r"(ret.s0.s0.s1.s0.x), "=r"(ret.s0.s0.s1.s0.y), "=r"(ret.s0.s0.s1.s0.z), "=r"(ret.s0.s0.s1.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+48];" : "=r"(ret.s0.s0.s1.s1.x), "=r"(ret.s0.s0.s1.s1.y), "=r"(ret.s0.s0.s1.s1.z), "=r"(ret.s0.s0.s1.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+64];" : "=r"(ret.s0.s1.s0.s0.x), "=r"(ret.s0.s1.s0.s0.y), "=r"(ret.s0.s1.s0.s0.z), "=r"(ret.s0.s1.s0.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+80];" : "=r"(ret.s0.s1.s0.s1.x), "=r"(ret.s0.s1.s0.s1.y), "=r"(ret.s0.s1.s0.s1.z), "=r"(ret.s0.s1.s0.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+96];" : "=r"(ret.s0.s1.s1.s0.x), "=r"(ret.s0.s1.s1.s0.y), "=r"(ret.s0.s1.s1.s0.z), "=r"(ret.s0.s1.s1.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+112];" : "=r"(ret.s0.s1.s1.s1.x), "=r"(ret.s0.s1.s1.s1.y), "=r"(ret.s0.s1.s1.s1.z), "=r"(ret.s0.s1.s1.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+128];" : "=r"(ret.s1.s0.s0.s0.x), "=r"(ret.s1.s0.s0.s0.y), "=r"(ret.s1.s0.s0.s0.z), "=r"(ret.s1.s0.s0.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+144];" : "=r"(ret.s1.s0.s0.s1.x), "=r"(ret.s1.s0.s0.s1.y), "=r"(ret.s1.s0.s0.s1.z), "=r"(ret.s1.s0.s0.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+160];" : "=r"(ret.s1.s0.s1.s0.x), "=r"(ret.s1.s0.s1.s0.y), "=r"(ret.s1.s0.s1.s0.z), "=r"(ret.s1.s0.s1.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+176];" : "=r"(ret.s1.s0.s1.s1.x), "=r"(ret.s1.s0.s1.s1.y), "=r"(ret.s1.s0.s1.s1.z), "=r"(ret.s1.s0.s1.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+192];" : "=r"(ret.s1.s1.s0.s0.x), "=r"(ret.s1.s1.s0.s0.y), "=r"(ret.s1.s1.s0.s0.z), "=r"(ret.s1.s1.s0.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+208];" : "=r"(ret.s1.s1.s0.s1.x), "=r"(ret.s1.s1.s0.s1.y), "=r"(ret.s1.s1.s0.s1.z), "=r"(ret.s1.s1.s0.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+224];" : "=r"(ret.s1.s1.s1.s0.x), "=r"(ret.s1.s1.s1.s0.y), "=r"(ret.s1.s1.s1.s0.z), "=r"(ret.s1.s1.s1.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+240];" : "=r"(ret.s1.s1.s1.s1.x), "=r"(ret.s1.s1.s1.s1.y), "=r"(ret.s1.s1.s1.s1.z), "=r"(ret.s1.s1.s1.s1.w) : __LDG_PTR(ptr));
return ret;
}
static __device__ __inline__ uintx64 __ldg32c(const uintx64 *ptr)
{
uintx64 ret;
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.s0.s0.s0.s0.x), "=r"(ret.s0.s0.s0.s0.y), "=r"(ret.s0.s0.s0.s0.z), "=r"(ret.s0.s0.s0.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+16];" : "=r"(ret.s0.s0.s0.s1.x), "=r"(ret.s0.s0.s0.s1.y), "=r"(ret.s0.s0.s0.s1.z), "=r"(ret.s0.s0.s0.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+32];" : "=r"(ret.s0.s0.s1.s0.x), "=r"(ret.s0.s0.s1.s0.y), "=r"(ret.s0.s0.s1.s0.z), "=r"(ret.s0.s0.s1.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+48];" : "=r"(ret.s0.s0.s1.s1.x), "=r"(ret.s0.s0.s1.s1.y), "=r"(ret.s0.s0.s1.s1.z), "=r"(ret.s0.s0.s1.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+64];" : "=r"(ret.s0.s1.s0.s0.x), "=r"(ret.s0.s1.s0.s0.y), "=r"(ret.s0.s1.s0.s0.z), "=r"(ret.s0.s1.s0.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+80];" : "=r"(ret.s0.s1.s0.s1.x), "=r"(ret.s0.s1.s0.s1.y), "=r"(ret.s0.s1.s0.s1.z), "=r"(ret.s0.s1.s0.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+96];" : "=r"(ret.s0.s1.s1.s0.x), "=r"(ret.s0.s1.s1.s0.y), "=r"(ret.s0.s1.s1.s0.z), "=r"(ret.s0.s1.s1.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+112];" : "=r"(ret.s0.s1.s1.s1.x), "=r"(ret.s0.s1.s1.s1.y), "=r"(ret.s0.s1.s1.s1.z), "=r"(ret.s0.s1.s1.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+128];" : "=r"(ret.s1.s0.s0.s0.x), "=r"(ret.s1.s0.s0.s0.y), "=r"(ret.s1.s0.s0.s0.z), "=r"(ret.s1.s0.s0.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+144];" : "=r"(ret.s1.s0.s0.s1.x), "=r"(ret.s1.s0.s0.s1.y), "=r"(ret.s1.s0.s0.s1.z), "=r"(ret.s1.s0.s0.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+160];" : "=r"(ret.s1.s0.s1.s0.x), "=r"(ret.s1.s0.s1.s0.y), "=r"(ret.s1.s0.s1.s0.z), "=r"(ret.s1.s0.s1.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+176];" : "=r"(ret.s1.s0.s1.s1.x), "=r"(ret.s1.s0.s1.s1.y), "=r"(ret.s1.s0.s1.s1.z), "=r"(ret.s1.s0.s1.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+192];" : "=r"(ret.s1.s1.s0.s0.x), "=r"(ret.s1.s1.s0.s0.y), "=r"(ret.s1.s1.s0.s0.z), "=r"(ret.s1.s1.s0.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+208];" : "=r"(ret.s1.s1.s0.s1.x), "=r"(ret.s1.s1.s0.s1.y), "=r"(ret.s1.s1.s0.s1.z), "=r"(ret.s1.s1.s0.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+224];" : "=r"(ret.s1.s1.s1.s0.x), "=r"(ret.s1.s1.s1.s0.y), "=r"(ret.s1.s1.s1.s0.z), "=r"(ret.s1.s1.s1.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+240];" : "=r"(ret.s1.s1.s1.s1.x), "=r"(ret.s1.s1.s1.s1.y), "=r"(ret.s1.s1.s1.s1.z), "=r"(ret.s1.s1.s1.s1.w) : __LDG_PTR(ptr));
return ret;
}
static __device__ __inline__ ulonglong2 __ldg2(const ulonglong2 *ptr)
{
ulonglong2 ret;
asm("ld.global.nc.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR(ptr));
return ret;
}
static __device__ __inline__ ulonglong4 __ldg4(const ulonglong4 *ptr)
{
ulonglong4 ret;
@@ -869,6 +430,7 @@ static __device__ __inline__ ulonglong4 __ldg4(const ulonglong4 *ptr)
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+16];" : "=l"(ret.z), "=l"(ret.w) : __LDG_PTR(ptr));
return ret;
}
static __device__ __inline__ void ldg4(const ulonglong4 *ptr,ulonglong4 *ret)
{
asm("ld.global.nc.v2.u64 {%0,%1}, [%2];" : "=l"(ret[0].x), "=l"(ret[0].y) : __LDG_PTR(ptr));
@@ -878,20 +440,6 @@ static __device__ __inline__ void ldg4(const ulonglong4 *ptr,ulonglong4 *ret)
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+64];" : "=l"(ret[2].x), "=l"(ret[2].y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+80];" : "=l"(ret[2].z), "=l"(ret[2].w) : __LDG_PTR(ptr));
}
static __device__ __inline__ void ldg4xor(const ulonglong4 *ptr, ulonglong4 *ret, ulonglong4 *state)
{
asm("ld.global.nc.v2.u64 {%0,%1}, [%2];" : "=l"(ret[0].x), "=l"(ret[0].y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+16];" : "=l"(ret[0].z), "=l"(ret[0].w) : __LDG_PTR(ptr));
state[0] ^= ret[0];
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+32];" : "=l"(ret[1].x), "=l"(ret[1].y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+48];" : "=l"(ret[1].z), "=l"(ret[1].w) : __LDG_PTR(ptr));
state[1] ^= ret[1];
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+64];" : "=l"(ret[2].x), "=l"(ret[2].y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+80];" : "=l"(ret[2].z), "=l"(ret[2].w) : __LDG_PTR(ptr));
state[2] ^= ret[2];
}
static __device__ __inline__ uint28 __ldg4(const uint28 *ptr)
{
@@ -909,7 +457,6 @@ static __device__ __inline__ uint48 __ldg4(const uint48 *ptr)
return ret;
}
static __device__ __inline__ void ldg4(const uint28 *ptr, uint28 *ret)
{
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret[0].x.x), "=r"(ret[0].x.y), "=r"(ret[0].y.x), "=r"(ret[0].y.y) : __LDG_PTR(ptr));
@@ -919,109 +466,8 @@ static __device__ __inline__ void ldg4(const uint28 *ptr, uint28 *ret)
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+64];" : "=r"(ret[2].x.x), "=r"(ret[2].x.y), "=r"(ret[2].y.x), "=r"(ret[2].y.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+80];" : "=r"(ret[2].z.x), "=r"(ret[2].z.y), "=r"(ret[2].w.x), "=r"(ret[2].w.y) : __LDG_PTR(ptr));
}
static __device__ __inline__ void ldg4xor(const uint28 *ptr, uint28 *ret,uint28* state)
{
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret[0].x.x), "=r"(ret[0].x.y), "=r"(ret[0].y.x), "=r"(ret[0].y.y) : __LDG_PTR(ptr));
state[0].x ^= ret[0].x; state[0].y ^= ret[0].y;
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+16];" : "=r"(ret[0].z.x), "=r"(ret[0].z.y), "=r"(ret[0].w.x), "=r"(ret[0].w.y) : __LDG_PTR(ptr));
state[0].z ^= ret[0].z; state[0].w ^= ret[0].w;
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+32];" : "=r"(ret[1].x.x), "=r"(ret[1].x.y), "=r"(ret[1].y.x), "=r"(ret[1].y.y) : __LDG_PTR(ptr));
state[1].x ^= ret[1].x; state[1].y ^= ret[1].y;
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+48];" : "=r"(ret[1].z.x), "=r"(ret[1].z.y), "=r"(ret[1].w.x), "=r"(ret[1].w.y) : __LDG_PTR(ptr));
state[1].z ^= ret[1].z; state[1].w ^= ret[1].w;
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+64];" : "=r"(ret[2].x.x), "=r"(ret[2].x.y), "=r"(ret[2].y.x), "=r"(ret[2].y.y) : __LDG_PTR(ptr));
state[2].x ^= ret[2].x; state[2].y ^= ret[2].y;
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+80];" : "=r"(ret[2].z.x), "=r"(ret[2].z.y), "=r"(ret[2].w.x), "=r"(ret[2].w.y) : __LDG_PTR(ptr));
state[2].z ^= ret[2].z; state[2].w ^= ret[2].w;
}
static __device__ __inline__ ulonglong2to8 __ldg2to8(const ulonglong2to8 *ptr)
{
ulonglong2to8 ret;
asm("ld.global.nc.v2.u64 {%0,%1}, [%2];" : "=l"(ret.l0.x), "=l"(ret.l0.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+16];" : "=l"(ret.l1.x), "=l"(ret.l1.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+32];" : "=l"(ret.l2.x), "=l"(ret.l2.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+48];" : "=l"(ret.l3.x), "=l"(ret.l3.y) : __LDG_PTR(ptr));
return ret;
}
static __device__ __inline__ ulonglong8to16 __ldg8to16(const ulonglong8to16 *ptr)
{
ulonglong8to16 ret;
asm("ld.global.nc.v2.u64 {%0,%1}, [%2];" : "=l"(ret.lo.l0.x), "=l"(ret.lo.l0.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+16];" : "=l"(ret.lo.l1.x), "=l"(ret.lo.l1.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+32];" : "=l"(ret.lo.l2.x), "=l"(ret.lo.l2.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+48];" : "=l"(ret.lo.l3.x), "=l"(ret.lo.l3.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+64];" : "=l"(ret.hi.l0.x), "=l"(ret.hi.l0.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+80];" : "=l"(ret.hi.l1.x), "=l"(ret.hi.l1.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+96];" : "=l"(ret.hi.l2.x), "=l"(ret.hi.l2.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+112];" : "=l"(ret.hi.l3.x), "=l"(ret.hi.l3.y) : __LDG_PTR(ptr));
return ret;
}
static __device__ __inline__ ulonglonglong __ldgxtralong(const ulonglonglong *ptr)
{
ulonglonglong ret;
asm("ld.global.nc.v2.u64 {%0,%1}, [%2];" : "=l"(ret.s0.x), "=l"(ret.s0.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+16];" : "=l"(ret.s1.x), "=l"(ret.s1.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+32];" : "=l"(ret.s2.x), "=l"(ret.s2.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+48];" : "=l"(ret.s3.x), "=l"(ret.s3.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+64];" : "=l"(ret.s4.x), "=l"(ret.s4.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+80];" : "=l"(ret.s5.x), "=l"(ret.s5.y) : __LDG_PTR(ptr));
return ret;
}
static __device__ __inline__ uint8 ldg8bis(const uint8 *ptr)
{
uint8 test;
asm volatile ("ld.global.nc.v4.u32 {%0,%1,%2,%3},[%4];" : "=r"(test.s0), "=r"(test.s1), "=r"(test.s2), "=r"(test.s3) : __LDG_PTR(ptr));
asm volatile ("ld.global.nc.v4.u32 {%0,%1,%2,%3},[%4+16];" : "=r"(test.s4), "=r"(test.s5), "=r"(test.s6), "=r"(test.s7) : __LDG_PTR(ptr));
return (test);
}
static __device__ __inline__ ulonglong16 __ldg32(const ulonglong4 *ptr)
{
ulonglong16 ret;
asm("ld.global.nc.v2.u64 {%0,%1}, [%2];" : "=l"(ret.s0.x), "=l"(ret.s0.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+16];" : "=l"(ret.s0.z), "=l"(ret.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+32];" : "=l"(ret.s1.x), "=l"(ret.s1.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+48];" : "=l"(ret.s1.z), "=l"(ret.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+64];" : "=l"(ret.s2.x), "=l"(ret.s2.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+80];" : "=l"(ret.s2.z), "=l"(ret.s2.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+96];" : "=l"(ret.s3.x), "=l"(ret.s3.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+112];" : "=l"(ret.s3.z), "=l"(ret.s3.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+128];" : "=l"(ret.s4.x), "=l"(ret.s4.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+144];" : "=l"(ret.s4.z), "=l"(ret.s4.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+160];" : "=l"(ret.s5.x), "=l"(ret.s5.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+176];" : "=l"(ret.s5.z), "=l"(ret.s5.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+192];" : "=l"(ret.s6.x), "=l"(ret.s6.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+208];" : "=l"(ret.s6.z), "=l"(ret.s6.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+224];" : "=l"(ret.s7.x), "=l"(ret.s7.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v2.u64 {%0,%1}, [%2+240];" : "=l"(ret.s7.z), "=l"(ret.s7.w) : __LDG_PTR(ptr));
return ret;
}
static __device__ __inline__ uintx64bis __ldg32(const uint28 *ptr)
{
uintx64bis ret;
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.s0.x.x), "=r"(ret.s0.x.y), "=r"(ret.s0.y.x), "=r"(ret.s0.y.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+16];" : "=r"(ret.s0.z.x), "=r"(ret.s0.z.y), "=r"(ret.s0.w.x), "=r"(ret.s0.w.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+32];" : "=r"(ret.s1.x.x), "=r"(ret.s1.x.y), "=r"(ret.s1.y.x), "=r"(ret.s1.y.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+48];" : "=r"(ret.s1.z.x), "=r"(ret.s1.z.y), "=r"(ret.s1.w.x), "=r"(ret.s1.w.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+64];" : "=r"(ret.s2.x.x), "=r"(ret.s2.x.y), "=r"(ret.s2.y.x), "=r"(ret.s2.y.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+80];" : "=r"(ret.s2.z.x), "=r"(ret.s2.z.y), "=r"(ret.s2.w.x), "=r"(ret.s2.w.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+96];" : "=r"(ret.s3.x.x), "=r"(ret.s3.x.y), "=r"(ret.s3.y.x), "=r"(ret.s3.y.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+112];" : "=r"(ret.s3.z.x), "=r"(ret.s3.z.y), "=r"(ret.s3.w.x), "=r"(ret.s3.w.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+128];" : "=r"(ret.s4.x.x), "=r"(ret.s4.x.y), "=r"(ret.s4.y.x), "=r"(ret.s4.y.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+144];" : "=r"(ret.s4.z.x), "=r"(ret.s4.z.y), "=r"(ret.s4.w.x), "=r"(ret.s4.w.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+160];" : "=r"(ret.s5.x.x), "=r"(ret.s5.x.y), "=r"(ret.s5.y.x), "=r"(ret.s5.y.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+176];" : "=r"(ret.s5.z.x), "=r"(ret.s5.z.y), "=r"(ret.s5.w.x), "=r"(ret.s5.w.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+192];" : "=r"(ret.s6.x.x), "=r"(ret.s6.x.y), "=r"(ret.s6.y.x), "=r"(ret.s6.y.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+208];" : "=r"(ret.s6.z.x), "=r"(ret.s6.z.y), "=r"(ret.s6.w.x), "=r"(ret.s6.w.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+224];" : "=r"(ret.s7.x.x), "=r"(ret.s7.x.y), "=r"(ret.s7.y.x), "=r"(ret.s7.y.y) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+240];" : "=r"(ret.s7.z.x), "=r"(ret.s7.z.y), "=r"(ret.s7.w.x), "=r"(ret.s7.w.y) : __LDG_PTR(ptr));
return ret;
}
#endif /* __CUDA_ARCH__ < 320 */
static __forceinline__ __device__ uint8 swapvec(const uint8 &buf)
@@ -1038,7 +484,6 @@ static __forceinline__ __device__ uint8 swapvec(const uint8 &buf)
return vec;
}
static __forceinline__ __device__ uint8 swapvec(const uint8 *buf)
{
uint8 vec;
@@ -1111,7 +556,6 @@ static __device__ __forceinline__ uint28 shuffle4(const uint28 &var, int lane)
return res;
}
static __device__ __forceinline__ ulonglong4 shuffle4(ulonglong4 var, int lane)
{
ulonglong4 res;
@@ -1135,4 +579,4 @@ static __device__ __forceinline__ ulonglong4 shuffle4(ulonglong4 var, int lane)
return res;
}
#endif // #ifndef CUDA_VECTOR_H
#endif // #ifndef CUDA_LYRA_VECTOR_H

lyra2/cuda_lyra2v2.cu (22 changed lines)

@@ -2,6 +2,7 @@
#include <memory.h>
#ifdef __INTELLISENSE__
/* just for vstudio code colors */
#define __CUDA_ARCH__ 500
#endif
@@ -28,7 +29,7 @@
__device__ vectype *DMatrix;
#if __CUDA_ARCH__ >= 320
#if __CUDA_ARCH__ >= 300
#if __CUDA_ARCH__ >= 500
static __device__ __forceinline__
@@ -338,7 +339,7 @@ void lyra2v2_gpu_hash_32_v3(uint32_t threads, uint32_t startNounce, uint2 *outpu
);
}
#if __CUDA_ARCH__ == 350
#if __CUDA_ARCH__ <= 350
if (thread < threads)
#endif
{
@@ -427,7 +428,7 @@ void lyra2v2_gpu_hash_32(uint32_t threads, uint32_t startNounce, uint2 *outputHa
);
}
#if __CUDA_ARCH__ == 350
#if __CUDA_ARCH__ <= 350
if (thread < threads)
#endif
{
@@ -483,15 +484,14 @@ void lyra2v2_gpu_hash_32(uint32_t threads, uint32_t startNounce, uint2 *outputHa
for (int i = 0; i < 12; i++)
round_lyra_v35(state);
outputHash[thread]= ((uint2*)state)[0];
outputHash[thread + threads] = ((uint2*)state)[1];
outputHash[thread] = ((uint2*)state)[0];
outputHash[thread + threads] = ((uint2*)state)[1];
outputHash[thread + 2 * threads] = ((uint2*)state)[2];
outputHash[thread + 3 * threads] = ((uint2*)state)[3];
// ((vectype*)outputHash)[thread] = state[0];
} //thread
}
}
#else /*__CUDA_ARCH__ >= 320 */
#else
/* if __CUDA_ARCH__ < 300 .. */
__global__ void lyra2v2_gpu_hash_32(uint32_t threads, uint32_t startNounce, uint2 *outputHash) {}
__global__ void lyra2v2_gpu_hash_32_v3(uint32_t threads, uint32_t startNounce, uint2 *outputHash) {}
#endif
@@ -506,7 +506,7 @@ __host__
void lyra2v2_cpu_hash_32(int thr_id, uint32_t threads, uint32_t startNounce, uint64_t *d_outputHash, int order)
{
uint32_t tpb;
if (device_sm[device_map[thr_id]] < 500)
if (device_sm[device_map[thr_id]] == 350)
tpb = 64;
else if (device_sm[device_map[thr_id]] == 500)
tpb = 32;
@@ -517,7 +517,7 @@ void lyra2v2_cpu_hash_32(int thr_id, uint32_t threads, uint32_t startNounce, uin
dim3 block(tpb);
if (device_sm[device_map[thr_id]] >= 500)
lyra2v2_gpu_hash_32 <<<grid, block>>> (threads, startNounce, (uint2*)d_outputHash);
lyra2v2_gpu_hash_32 <<<grid, block>>> (threads, startNounce, (uint2*)d_outputHash);
else
lyra2v2_gpu_hash_32_v3 <<<grid, block>>> (threads, startNounce, (uint2*)d_outputHash);
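The threads-per-block choice above is now keyed on exact SM versions rather than the old < 500 range, so SM 3.0 devices no longer inherit the SM 3.5 value. A minimal host-side sketch of that selection; lyra2v2_tpb and TPB_DEFAULT are illustrative names, and the real default value lies outside the visible hunk:

#include <stdint.h>

#define TPB_DEFAULT 8 /* placeholder: the actual default is not shown in the hunk */

/* sm is device_sm[device_map[thr_id]] in ccminer */
static uint32_t lyra2v2_tpb(int sm)
{
	if (sm == 350) return 64; /* only SM 3.5 keeps the larger blocks */
	if (sm == 500) return 32; /* first-generation Maxwell */
	return TPB_DEFAULT;       /* SM 3.0 and other archs use the default */
}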

lyra2/lyra2REv2.cu (4 changed lines)

@@ -94,8 +94,8 @@ extern "C" int scanhash_lyra2v2(int thr_id, uint32_t *pdata,
skein256_cpu_init(thr_id, throughput);
bmw256_cpu_init(thr_id, throughput);
if (device_sm[device_map[thr_id]] < 320) {
applog(LOG_ERR, "Device SM 3.2 required!");
if (device_sm[device_map[thr_id]] < 300) {
applog(LOG_ERR, "Device SM 3.0 or more recent required!");
proper_exit(1);
return -1;
}
