diff --git a/OpenCL/m08900-pure.cl b/OpenCL/m08900-pure.cl
index b74b3d9c6..a16e80f9a 100644
--- a/OpenCL/m08900-pure.cl
+++ b/OpenCL/m08900-pure.cl
@@ -55,146 +55,146 @@ DECLSPEC uint4 hc_swap32_4 (uint4 v)
 #define STATE_CNT GET_STATE_CNT (SCRYPT_R)
 #define STATE_CNT4 (STATE_CNT / 4)

-#define ADD_ROTATE_XOR(r,i1,i2,s) (r) ^= rotate ((i1) + (i2), (s));
-
-#if defined IS_CUDA || defined IS_HIP
-
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = make_uint4 (X1.w, X1.x, X1.y, X1.z); \
-  X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \
-  X3 = make_uint4 (X3.y, X3.z, X3.w, X3.x); \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = make_uint4 (X1.y, X1.z, X1.w, X1.x); \
-  X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \
-  X3 = make_uint4 (X3.w, X3.x, X3.y, X3.z); \
-}
-#elif defined IS_METAL
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = X1.wxyz; \
-  X2 = X2.zwxy; \
-  X3 = X3.yzwx; \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = X1.yzwx; \
-  X2 = X2.zwxy; \
-  X3 = X3.wxyz; \
-}
-#else
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = X1.s3012; \
-  X2 = X2.s2301; \
-  X3 = X3.s1230; \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = X1.s1230; \
-  X2 = X2.s2301; \
-  X3 = X3.s3012; \
-}
-#endif
-
 #define Coord(xd4,y,z) (((xd4) * ySIZE * zSIZE) + ((y) * zSIZE) + (z))
 #define CO Coord(xd4,y,z)

-DECLSPEC void salsa_r (PRIVATE_AS uint4 *TI)
+DECLSPEC void salsa_r (PRIVATE_AS u32 *TI)
 {
+  u32 x[16];
+
+  for (int j = 0; j < 16; j++) x[j] = TI[STATE_CNT - 16 + j];
+
+  for (int i = 0; i < STATE_CNT; i += 16)
+  {
+    for (int j = 0; j < 16; j++)
+    {
+      x[j] ^= TI[i + j];
+    }
+
+    for (int j = 0; j < 16; j++)
+    {
+      TI[i + j] = x[j];
+    }
+
+    for (int r = 0; r < 4; r++)
+    {
+      u32 t0, t1, t2, t3;
+
+      t0 = x[ 0] + x[12];
+      t1 = x[ 1] + x[13];
+      t2 = x[ 2] + x[14];
+      t3 = x[ 3] + x[15];
+      x[ 4] ^= hc_rotl32_S (t0, 7);
+      x[ 5] ^= hc_rotl32_S (t1, 7);
+      x[ 6] ^= hc_rotl32_S (t2, 7);
+      x[ 7] ^= hc_rotl32_S (t3, 7);
+
+      t0 = x[ 4] + x[ 0];
+      t1 = x[ 5] + x[ 1];
+      t2 = x[ 6] + x[ 2];
+      t3 = x[ 7] + x[ 3];
+      x[ 8] ^= hc_rotl32_S (t0, 9);
+      x[ 9] ^= hc_rotl32_S (t1, 9);
+      x[10] ^= hc_rotl32_S (t2, 9);
+      x[11] ^= hc_rotl32_S (t3, 9);
+
+      t0 = x[ 8] + x[ 4];
+      t1 = x[ 9] + x[ 5];
+      t2 = x[10] + x[ 6];
+      t3 = x[11] + x[ 7];
+      x[12] ^= hc_rotl32_S (t0, 13);
+      x[13] ^= hc_rotl32_S (t1, 13);
+      x[14] ^= hc_rotl32_S (t2, 13);
+      x[15] ^= hc_rotl32_S (t3, 13);
+
+      t0 = x[12] + x[ 8];
+      t1 = x[13] + x[ 9];
+      t2 = x[14] + x[10];
+      t3 = x[15] + x[11];
+      x[ 0] ^= hc_rotl32_S (t0, 18);
+      x[ 1] ^= hc_rotl32_S (t1, 18);
+      x[ 2] ^= hc_rotl32_S (t2, 18);
+      x[ 3] ^= hc_rotl32_S (t3, 18);
+
+      t0 = x[ 4]; x[ 4] = x[ 7]; x[ 7] = x[ 6]; x[ 6] = x[ 5]; x[ 5] = t0;
+      t0 = x[ 8]; x[ 8] = x[10]; x[10] = t0;
+      t0 = x[ 9]; x[ 9] = x[11]; x[11] = t0;
+      t0 = x[12]; x[12] = x[13]; x[13] = x[14]; x[14] = x[15]; x[15] = t0;
+
+      t0 = x[ 0] + x[ 4];
+      t1 = x[ 1] + x[ 5];
+      t2 = x[ 2] + x[ 6];
+      t3 = x[ 3] + x[ 7];
+      x[12] ^= hc_rotl32_S (t0, 7);
+      x[13] ^= hc_rotl32_S (t1, 7);
+      x[14] ^= hc_rotl32_S (t2, 7);
+      x[15] ^= hc_rotl32_S (t3, 7);
+
+      t0 = x[12] + x[ 0];
+      t1 = x[13] + x[ 1];
+      t2 = x[14] + x[ 2];
+      t3 = x[15] + x[ 3];
+      x[ 8] ^= hc_rotl32_S (t0, 9);
+      x[ 9] ^= hc_rotl32_S (t1, 9);
+      x[10] ^= hc_rotl32_S (t2, 9);
+      x[11] ^= hc_rotl32_S (t3, 9);
+
+      t0 = x[ 8] + x[12];
+      t1 = x[ 9] + x[13];
+      t2 = x[10] + x[14];
+      t3 = x[11] + x[15];
+      x[ 4] ^= hc_rotl32_S (t0, 13);
+      x[ 5] ^= hc_rotl32_S (t1, 13);
+      x[ 6] ^= hc_rotl32_S (t2, 13);
+      x[ 7] ^= hc_rotl32_S (t3, 13);
+
+      t0 = x[ 4] + x[ 8];
+      t1 = x[ 5] + x[ 9];
+      t2 = x[ 6] + x[10];
+      t3 = x[ 7] + x[11];
+      x[ 0] ^= hc_rotl32_S (t0, 18);
+      x[ 1] ^= hc_rotl32_S (t1, 18);
+      x[ 2] ^= hc_rotl32_S (t2, 18);
+      x[ 3] ^= hc_rotl32_S (t3, 18);
+
+      t0 = x[ 4]; x[ 4] = x[ 5]; x[ 5] = x[ 6]; x[ 6] = x[ 7]; x[ 7] = t0;
+      t0 = x[ 8]; x[ 8] = x[10]; x[10] = t0;
+      t0 = x[ 9]; x[ 9] = x[11]; x[11] = t0;
+      t0 = x[15]; x[15] = x[14]; x[14] = x[13]; x[13] = x[12]; x[12] = t0;
+    }
+
+    for (int j = 0; j < 16; j++)
+    {
+      x[j] += TI[i + j];
+    }
+
+    for (int j = 0; j < 16; j++)
+    {
+      TI[i + j] = x[j];
+    }
+  }
+
   #if SCRYPT_R > 1

-  uint4 TT[STATE_CNT4 / 2];
+  u32 TT[STATE_CNT / 2];

-  for (int dst_off = 0, src_off = 4; src_off < STATE_CNT4; dst_off += 4, src_off += 8)
+  for (int dst_off = 0, src_off = 16; src_off < STATE_CNT; dst_off += 16, src_off += 32)
   {
-    TT[dst_off + 0] = TI[src_off + 0];
-    TT[dst_off + 1] = TI[src_off + 1];
-    TT[dst_off + 2] = TI[src_off + 2];
-    TT[dst_off + 3] = TI[src_off + 3];
+    for (int j = 0; j < 16; j++) TT[dst_off + j] = TI[src_off + j];
   }

-  for (int dst_off = 4, src_off = 8; src_off < STATE_CNT4; dst_off += 4, src_off += 8)
+  for (int dst_off = 16, src_off = 32; src_off < STATE_CNT; dst_off += 16, src_off += 32)
   {
-    TI[dst_off + 0] = TI[src_off + 0];
-    TI[dst_off + 1] = TI[src_off + 1];
-    TI[dst_off + 2] = TI[src_off + 2];
-    TI[dst_off + 3] = TI[src_off + 3];
+    for (int j = 0; j < 16; j++) TI[dst_off + j] = TI[src_off + j];
   }

-  for (int dst_off = STATE_CNT4 / 2, src_off = 0; dst_off < STATE_CNT4; dst_off += 4, src_off += 4)
+  for (int dst_off = STATE_CNT / 2, src_off = 0; dst_off < STATE_CNT; dst_off += 16, src_off += 16)
   {
-    TI[dst_off + 0] = TT[src_off + 0];
-    TI[dst_off + 1] = TT[src_off + 1];
-    TI[dst_off + 2] = TT[src_off + 2];
-    TI[dst_off + 3] = TT[src_off + 3];
+    for (int j = 0; j < 16; j++) TI[dst_off + j] = TT[src_off + j];
   }

   #endif

-  uint4 R0 = TI[STATE_CNT4 - 4];
-  uint4 R1 = TI[STATE_CNT4 - 3];
-  uint4 R2 = TI[STATE_CNT4 - 2];
-  uint4 R3 = TI[STATE_CNT4 - 1];
-
-  for (int i = 0; i < STATE_CNT4; i += 4)
-  {
-    uint4 Y0 = TI[i + 0];
-    uint4 Y1 = TI[i + 1];
-    uint4 Y2 = TI[i + 2];
-    uint4 Y3 = TI[i + 3];
-
-    R0 = R0 ^ Y0;
-    R1 = R1 ^ Y1;
-    R2 = R2 ^ Y2;
-    R3 = R3 ^ Y3;
-
-    uint4 X0 = R0;
-    uint4 X1 = R1;
-    uint4 X2 = R2;
-    uint4 X3 = R3;
-
-    SALSA20_2R ();
-    SALSA20_2R ();
-    SALSA20_2R ();
-    SALSA20_2R ();
-
-    R0 = R0 + X0;
-    R1 = R1 + X1;
-    R2 = R2 + X2;
-    R3 = R3 + X3;
-
-    TI[i + 0] = R0;
-    TI[i + 1] = R1;
-    TI[i + 2] = R2;
-    TI[i + 3] = R3;
-  }
 }

 DECLSPEC void scrypt_smix_init (PRIVATE_AS uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3, const u64 gid)
@@ -217,35 +217,11 @@ DECLSPEC void scrypt_smix_init (PRIVATE_AS uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL
     case 3: V = V3; break;
   }

-  #if SCRYPT_R > 1
-
-  uint4 TT[STATE_CNT4];
-
-  for (int z = 0; z < zSIZE; z++) TT[z] = X[z];
-
-  for (int dst_off = 8, src_off = 4; dst_off < zSIZE; dst_off += 8, src_off += 4)
-  {
-    X[dst_off + 0] = TT[src_off + 0];
-    X[dst_off + 1] = TT[src_off + 1];
-    X[dst_off + 2] = TT[src_off + 2];
-    X[dst_off + 3] = TT[src_off + 3];
-  }
-
-  for (int dst_off = 4, src_off = zSIZE / 2; dst_off < zSIZE; dst_off += 8, src_off += 4)
-  {
-    X[dst_off + 0] = TT[src_off + 0];
-    X[dst_off + 1] = TT[src_off + 1];
-    X[dst_off + 2] = TT[src_off + 2];
-    X[dst_off + 3] = TT[src_off + 3];
-  }
-
-  #endif
-
   for (u32 y = 0; y < ySIZE; y++)
   {
     for (u32 z = 0; z < zSIZE; z++) V[CO] = X[z];

-    for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r (X);
+    for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r ((u32 *) X);
   }
 }

@@ -283,11 +259,11 @@ DECLSPEC void scrypt_smix_loop (PRIVATE_AS uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL

     for (u32 z = 0; z < zSIZE; z++) T[z] = V[CO];

-    for (u32 i = 0; i < km; i++) salsa_r (T);
+    for (u32 i = 0; i < km; i++) salsa_r ((u32 *) T);

     for (u32 z = 0; z < zSIZE; z++) X[z] ^= T[z];

-    salsa_r (X);
+    salsa_r ((u32 *) X);
   }
 }

@@ -483,14 +459,10 @@ KERNEL_FQ void m08900_comp (KERN_ATTR_TMPS (scrypt_tmp_t))

   sha256_hmac_init_global_swap (&ctx, pws[gid].i, pws[gid].pw_len);

-  for (u32 i = 0; i < SCRYPT_CNT4; i += STATE_CNT4)
+  for (u32 l = 0; l < SCRYPT_CNT4; l += 4)
   {
-    for (u32 j = 0; j < (STATE_CNT4 * 2); j += 8)
-    {
     uint4 X[4];

-    const u32 l = i + j + ((j >= STATE_CNT4) ? (4 - STATE_CNT4) : 0);
-
     X[0] = tmps[gid].P[l + 0];
     X[1] = tmps[gid].P[l + 1];
     X[2] = tmps[gid].P[l + 2];
@@ -538,7 +510,6 @@ KERNEL_FQ void m08900_comp (KERN_ATTR_TMPS (scrypt_tmp_t))
     w3[3] = T[3].w;

     sha256_hmac_update_64 (&ctx, w0, w1, w2, w3, 64);
-    }
   }

   w0[0] = 1;
diff --git a/OpenCL/m15700-pure.cl b/OpenCL/m15700-pure.cl
index d435883ce..277b3d729 100644
--- a/OpenCL/m15700-pure.cl
+++ b/OpenCL/m15700-pure.cl
@@ -62,145 +62,145 @@ DECLSPEC uint4 hc_swap32_4 (uint4 v)
 #define STATE_CNT GET_STATE_CNT (SCRYPT_R)
 #define STATE_CNT4 (STATE_CNT / 4)

-#define ADD_ROTATE_XOR(r,i1,i2,s) (r) ^= rotate ((i1) + (i2), (s));
-
-#if defined IS_CUDA || defined IS_HIP
-
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = make_uint4 (X1.w, X1.x, X1.y, X1.z); \
-  X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \
-  X3 = make_uint4 (X3.y, X3.z, X3.w, X3.x); \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = make_uint4 (X1.y, X1.z, X1.w, X1.x); \
-  X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \
-  X3 = make_uint4 (X3.w, X3.x, X3.y, X3.z); \
-}
-#elif defined IS_METAL
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = X1.wxyz; \
-  X2 = X2.zwxy; \
-  X3 = X3.yzwx; \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = X1.yzwx; \
-  X2 = X2.zwxy; \
-  X3 = X3.wxyz; \
-}
-#else
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = X1.s3012; \
-  X2 = X2.s2301; \
-  X3 = X3.s1230; \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = X1.s1230; \
-  X2 = X2.s2301; \
-  X3 = X3.s3012; \
-}
-#endif
-
 #define Coord(xd4,y,z) (((xd4) * ySIZE * zSIZE) + ((y) * zSIZE) + (z))
 #define CO Coord(xd4,y,z)

-DECLSPEC void salsa_r (PRIVATE_AS uint4 *TI)
+
+DECLSPEC void salsa_r (PRIVATE_AS u32 *TI)
 {
   #if SCRYPT_R > 1

-  uint4 TT[STATE_CNT4 / 2];
+  u32 TT[STATE_CNT / 2];

-  for (int dst_off = 0, src_off = 4; src_off < STATE_CNT4; dst_off += 4, src_off += 8)
+  for (int dst_off = 0, src_off = 16; src_off < STATE_CNT; dst_off += 16, src_off += 32)
   {
-    TT[dst_off + 0] = TI[src_off + 0];
-    TT[dst_off + 1] = TI[src_off + 1];
-    TT[dst_off + 2] = TI[src_off + 2];
-    TT[dst_off + 3] = TI[src_off + 3];
+    for (int j = 0; j < 16; j++) TT[dst_off + j] = TI[src_off + j];
   }

-  for (int dst_off = 4, src_off = 8; src_off < STATE_CNT4; dst_off += 4, src_off += 8)
+  for (int dst_off = 16, src_off = 32; src_off < STATE_CNT; dst_off += 16, src_off += 32)
   {
-    TI[dst_off + 0] = TI[src_off + 0];
-    TI[dst_off + 1] = TI[src_off + 1];
-    TI[dst_off + 2] = TI[src_off + 2];
-    TI[dst_off + 3] = TI[src_off + 3];
+    for (int j = 0; j < 16; j++) TI[dst_off + j] = TI[src_off + j];
   }

-  for (int dst_off = STATE_CNT4 / 2, src_off = 0; dst_off < STATE_CNT4; dst_off += 4, src_off += 4)
+  for (int dst_off = STATE_CNT / 2, src_off = 0; dst_off < STATE_CNT; dst_off += 16, src_off += 16)
   {
-    TI[dst_off + 0] = TT[src_off + 0];
-    TI[dst_off + 1] = TT[src_off + 1];
-    TI[dst_off + 2] = TT[src_off + 2];
-    TI[dst_off + 3] = TT[src_off + 3];
+    for (int j = 0; j < 16; j++) TI[dst_off + j] = TT[src_off + j];
   }

   #endif

-  uint4 R0 = TI[STATE_CNT4 - 4];
-  uint4 R1 = TI[STATE_CNT4 - 3];
-  uint4 R2 = TI[STATE_CNT4 - 2];
-  uint4 R3 = TI[STATE_CNT4 - 1];
+  u32 x[16];

-  for (int i = 0; i < STATE_CNT4; i += 4)
+  for (int j = 0; j < 16; j++) x[j] = TI[STATE_CNT - 16 + j];
+
+  for (int i = 0; i < STATE_CNT; i += 16)
   {
-    uint4 Y0 = TI[i + 0];
-    uint4 Y1 = TI[i + 1];
-    uint4 Y2 = TI[i + 2];
-    uint4 Y3 = TI[i + 3];
+    for (int j = 0; j < 16; j++)
+    {
+      x[j] ^= TI[i + j];
+    }

-    R0 = R0 ^ Y0;
-    R1 = R1 ^ Y1;
-    R2 = R2 ^ Y2;
-    R3 = R3 ^ Y3;
+    for (int j = 0; j < 16; j++)
+    {
+      TI[i + j] = x[j];
+    }

-    uint4 X0 = R0;
-    uint4 X1 = R1;
-    uint4 X2 = R2;
-    uint4 X3 = R3;
+    for (int r = 0; r < 4; r++)
+    {
+      u32 t0, t1, t2, t3;

-    SALSA20_2R ();
-    SALSA20_2R ();
-    SALSA20_2R ();
-    SALSA20_2R ();
+      t0 = x[ 0] + x[12];
+      t1 = x[ 1] + x[13];
+      t2 = x[ 2] + x[14];
+      t3 = x[ 3] + x[15];
+      x[ 4] ^= hc_rotl32_S (t0, 7);
+      x[ 5] ^= hc_rotl32_S (t1, 7);
+      x[ 6] ^= hc_rotl32_S (t2, 7);
+      x[ 7] ^= hc_rotl32_S (t3, 7);

-    R0 = R0 + X0;
-    R1 = R1 + X1;
-    R2 = R2 + X2;
-    R3 = R3 + X3;
+      t0 = x[ 4] + x[ 0];
+      t1 = x[ 5] + x[ 1];
+      t2 = x[ 6] + x[ 2];
+      t3 = x[ 7] + x[ 3];
+      x[ 8] ^= hc_rotl32_S (t0, 9);
+      x[ 9] ^= hc_rotl32_S (t1, 9);
+      x[10] ^= hc_rotl32_S (t2, 9);
+      x[11] ^= hc_rotl32_S (t3, 9);

-    TI[i + 0] = R0;
-    TI[i + 1] = R1;
-    TI[i + 2] = R2;
-    TI[i + 3] = R3;
+      t0 = x[ 8] + x[ 4];
+      t1 = x[ 9] + x[ 5];
+      t2 = x[10] + x[ 6];
+      t3 = x[11] + x[ 7];
+      x[12] ^= hc_rotl32_S (t0, 13);
+      x[13] ^= hc_rotl32_S (t1, 13);
+      x[14] ^= hc_rotl32_S (t2, 13);
+      x[15] ^= hc_rotl32_S (t3, 13);
+
+      t0 = x[12] + x[ 8];
+      t1 = x[13] + x[ 9];
+      t2 = x[14] + x[10];
+      t3 = x[15] + x[11];
+      x[ 0] ^= hc_rotl32_S (t0, 18);
+      x[ 1] ^= hc_rotl32_S (t1, 18);
+      x[ 2] ^= hc_rotl32_S (t2, 18);
+      x[ 3] ^= hc_rotl32_S (t3, 18);
+
+      t0 = x[ 4]; x[ 4] = x[ 7]; x[ 7] = x[ 6]; x[ 6] = x[ 5]; x[ 5] = t0;
+      t0 = x[ 8]; x[ 8] = x[10]; x[10] = t0;
+      t0 = x[ 9]; x[ 9] = x[11]; x[11] = t0;
+      t0 = x[12]; x[12] = x[13]; x[13] = x[14]; x[14] = x[15]; x[15] = t0;
+
+      t0 = x[ 0] + x[ 4];
+      t1 = x[ 1] + x[ 5];
+      t2 = x[ 2] + x[ 6];
+      t3 = x[ 3] + x[ 7];
+      x[12] ^= hc_rotl32_S (t0, 7);
+      x[13] ^= hc_rotl32_S (t1, 7);
+      x[14] ^= hc_rotl32_S (t2, 7);
+      x[15] ^= hc_rotl32_S (t3, 7);
+
+      t0 = x[12] + x[ 0];
+      t1 = x[13] + x[ 1];
+      t2 = x[14] + x[ 2];
+      t3 = x[15] + x[ 3];
+      x[ 8] ^= hc_rotl32_S (t0, 9);
+      x[ 9] ^= hc_rotl32_S (t1, 9);
+      x[10] ^= hc_rotl32_S (t2, 9);
+      x[11] ^= hc_rotl32_S (t3, 9);
+
+      t0 = x[ 8] + x[12];
+      t1 = x[ 9] + x[13];
+      t2 = x[10] + x[14];
+      t3 = x[11] + x[15];
+      x[ 4] ^= hc_rotl32_S (t0, 13);
+      x[ 5] ^= hc_rotl32_S (t1, 13);
+      x[ 6] ^= hc_rotl32_S (t2, 13);
+      x[ 7] ^= hc_rotl32_S (t3, 13);
+
+      t0 = x[ 4] + x[ 8];
+      t1 = x[ 5] + x[ 9];
+      t2 = x[ 6] + x[10];
+      t3 = x[ 7] + x[11];
+      x[ 0] ^= hc_rotl32_S (t0, 18);
+      x[ 1] ^= hc_rotl32_S (t1, 18);
+      x[ 2] ^= hc_rotl32_S (t2, 18);
+      x[ 3] ^= hc_rotl32_S (t3, 18);
+
+      t0 = x[ 4]; x[ 4] = x[ 5]; x[ 5] = x[ 6]; x[ 6] = x[ 7]; x[ 7] = t0;
+      t0 = x[ 8]; x[ 8] = x[10]; x[10] = t0;
+      t0 = x[ 9]; x[ 9] = x[11]; x[11] = t0;
+      t0 = x[15]; x[15] = x[14]; x[14] = x[13]; x[13] = x[12]; x[12] = t0;
+    }
+
+    for (int j = 0; j < 16; j++)
+    {
+      x[j] += TI[i + j];
+    }
+
+    for (int j = 0; j < 16; j++)
+    {
+      TI[i + j] = x[j];
+    }
   }
 }

@@ -252,7 +252,7 @@ DECLSPEC void scrypt_smix_init (PRIVATE_AS uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL
   {
     for (u32 z = 0; z < zSIZE; z++) V[CO] = X[z];

-    for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r (X);
+    for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r ((u32 *) X);
   }
 }

@@ -290,11 +290,11 @@ DECLSPEC void scrypt_smix_loop (PRIVATE_AS uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL

     for (u32 z = 0; z < zSIZE; z++) T[z] = V[CO];

-    for (u32 i = 0; i < km; i++) salsa_r (T);
+    for (u32 i = 0; i < km; i++) salsa_r ((u32 *) T);

     for (u32 z = 0; z < zSIZE; z++) X[z] ^= T[z];

-    salsa_r (X);
+    salsa_r ((u32 *) X);
   }
 }

@@ -619,15 +619,10 @@ KERNEL_FQ void m15700_comp (KERN_ATTR_TMPS_ESALT (scrypt_tmp_t, ethereum_scrypt_

   sha256_hmac_init_global_swap (&ctx, pws[gid].i, pws[gid].pw_len);

-
-  for (u32 i = 0; i < SCRYPT_CNT4; i += STATE_CNT4)
+  for (u32 l = 0; l < SCRYPT_CNT4; l += 4)
   {
-    for (u32 j = 0; j < (STATE_CNT4 * 2); j += 8)
-    {
     uint4 X[4];

-    const u32 l = i + j + ((j >= STATE_CNT4) ? (4 - STATE_CNT4) : 0);
-
     X[0] = tmps[gid].P[l + 0];
     X[1] = tmps[gid].P[l + 1];
     X[2] = tmps[gid].P[l + 2];
@@ -675,7 +670,6 @@ KERNEL_FQ void m15700_comp (KERN_ATTR_TMPS_ESALT (scrypt_tmp_t, ethereum_scrypt_
     w3[3] = T[3].w;

     sha256_hmac_update_64 (&ctx, w0, w1, w2, w3, 64);
-    }
   }

   w0[0] = 1;
diff --git a/OpenCL/m22700-pure.cl b/OpenCL/m22700-pure.cl
index 303e5e334..291e44acc 100644
--- a/OpenCL/m22700-pure.cl
+++ b/OpenCL/m22700-pure.cl
@@ -103,146 +103,146 @@ DECLSPEC uint4 hc_swap32_4 (uint4 v)
 #define STATE_CNT GET_STATE_CNT (SCRYPT_R)
 #define STATE_CNT4 (STATE_CNT / 4)

-#define ADD_ROTATE_XOR(r,i1,i2,s) (r) ^= rotate ((i1) + (i2), (s));
-
-#if defined IS_CUDA || defined IS_HIP
-
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = make_uint4 (X1.w, X1.x, X1.y, X1.z); \
-  X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \
-  X3 = make_uint4 (X3.y, X3.z, X3.w, X3.x); \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = make_uint4 (X1.y, X1.z, X1.w, X1.x); \
-  X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \
-  X3 = make_uint4 (X3.w, X3.x, X3.y, X3.z); \
-}
-#elif defined IS_METAL
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = X1.wxyz; \
-  X2 = X2.zwxy; \
-  X3 = X3.yzwx; \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = X1.yzwx; \
-  X2 = X2.zwxy; \
-  X3 = X3.wxyz; \
-}
-#else
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = X1.s3012; \
-  X2 = X2.s2301; \
-  X3 = X3.s1230; \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = X1.s1230; \
-  X2 = X2.s2301; \
-  X3 = X3.s3012; \
-}
-#endif
-
 #define Coord(xd4,y,z) (((xd4) * ySIZE * zSIZE) + ((y) * zSIZE) + (z))
 #define CO Coord(xd4,y,z)

-DECLSPEC void salsa_r (PRIVATE_AS uint4 *TI)
+DECLSPEC void salsa_r (PRIVATE_AS u32 *TI)
 {
+  u32 x[16];
+
+  for (int j = 0; j < 16; j++) x[j] = TI[STATE_CNT - 16 + j];
+
+  for (int i = 0; i < STATE_CNT; i += 16)
+  {
+    for (int j = 0; j < 16; j++)
+    {
+      x[j] ^= TI[i + j];
+    }
+
+    for (int j = 0; j < 16; j++)
+    {
+      TI[i + j] = x[j];
+    }
+
+    for (int r = 0; r < 4; r++)
+    {
+      u32 t0, t1, t2, t3;
+
+      t0 = x[ 0] + x[12];
+      t1 = x[ 1] + x[13];
+      t2 = x[ 2] + x[14];
+      t3 = x[ 3] + x[15];
+      x[ 4] ^= hc_rotl32_S (t0, 7);
+      x[ 5] ^= hc_rotl32_S (t1, 7);
+      x[ 6] ^= hc_rotl32_S (t2, 7);
+      x[ 7] ^= hc_rotl32_S (t3, 7);
+
+      t0 = x[ 4] + x[ 0];
+      t1 = x[ 5] + x[ 1];
+      t2 = x[ 6] + x[ 2];
+      t3 = x[ 7] + x[ 3];
+      x[ 8] ^= hc_rotl32_S (t0, 9);
+      x[ 9] ^= hc_rotl32_S (t1, 9);
+      x[10] ^= hc_rotl32_S (t2, 9);
+      x[11] ^= hc_rotl32_S (t3, 9);
+
+      t0 = x[ 8] + x[ 4];
+      t1 = x[ 9] + x[ 5];
+      t2 = x[10] + x[ 6];
+      t3 = x[11] + x[ 7];
+      x[12] ^= hc_rotl32_S (t0, 13);
+      x[13] ^= hc_rotl32_S (t1, 13);
+      x[14] ^= hc_rotl32_S (t2, 13);
+      x[15] ^= hc_rotl32_S (t3, 13);
+
+      t0 = x[12] + x[ 8];
+      t1 = x[13] + x[ 9];
+      t2 = x[14] + x[10];
+      t3 = x[15] + x[11];
+      x[ 0] ^= hc_rotl32_S (t0, 18);
+      x[ 1] ^= hc_rotl32_S (t1, 18);
+      x[ 2] ^= hc_rotl32_S (t2, 18);
+      x[ 3] ^= hc_rotl32_S (t3, 18);
+
+      t0 = x[ 4]; x[ 4] = x[ 7]; x[ 7] = x[ 6]; x[ 6] = x[ 5]; x[ 5] = t0;
+      t0 = x[ 8]; x[ 8] = x[10]; x[10] = t0;
+      t0 = x[ 9]; x[ 9] = x[11]; x[11] = t0;
+      t0 = x[12]; x[12] = x[13]; x[13] = x[14]; x[14] = x[15]; x[15] = t0;
+
+      t0 = x[ 0] + x[ 4];
+      t1 = x[ 1] + x[ 5];
+      t2 = x[ 2] + x[ 6];
+      t3 = x[ 3] + x[ 7];
+      x[12] ^= hc_rotl32_S (t0, 7);
+      x[13] ^= hc_rotl32_S (t1, 7);
+      x[14] ^= hc_rotl32_S (t2, 7);
+      x[15] ^= hc_rotl32_S (t3, 7);
+
+      t0 = x[12] + x[ 0];
+      t1 = x[13] + x[ 1];
+      t2 = x[14] + x[ 2];
+      t3 = x[15] + x[ 3];
+      x[ 8] ^= hc_rotl32_S (t0, 9);
+      x[ 9] ^= hc_rotl32_S (t1, 9);
+      x[10] ^= hc_rotl32_S (t2, 9);
+      x[11] ^= hc_rotl32_S (t3, 9);
+
+      t0 = x[ 8] + x[12];
+      t1 = x[ 9] + x[13];
+      t2 = x[10] + x[14];
+      t3 = x[11] + x[15];
+      x[ 4] ^= hc_rotl32_S (t0, 13);
+      x[ 5] ^= hc_rotl32_S (t1, 13);
+      x[ 6] ^= hc_rotl32_S (t2, 13);
+      x[ 7] ^= hc_rotl32_S (t3, 13);
+
+      t0 = x[ 4] + x[ 8];
+      t1 = x[ 5] + x[ 9];
+      t2 = x[ 6] + x[10];
+      t3 = x[ 7] + x[11];
+      x[ 0] ^= hc_rotl32_S (t0, 18);
+      x[ 1] ^= hc_rotl32_S (t1, 18);
+      x[ 2] ^= hc_rotl32_S (t2, 18);
+      x[ 3] ^= hc_rotl32_S (t3, 18);
+
+      t0 = x[ 4]; x[ 4] = x[ 5]; x[ 5] = x[ 6]; x[ 6] = x[ 7]; x[ 7] = t0;
+      t0 = x[ 8]; x[ 8] = x[10]; x[10] = t0;
+      t0 = x[ 9]; x[ 9] = x[11]; x[11] = t0;
+      t0 = x[15]; x[15] = x[14]; x[14] = x[13]; x[13] = x[12]; x[12] = t0;
+    }
+
+    for (int j = 0; j < 16; j++)
+    {
+      x[j] += TI[i + j];
+    }
+
+    for (int j = 0; j < 16; j++)
+    {
+      TI[i + j] = x[j];
+    }
+  }
+
   #if SCRYPT_R > 1

-  uint4 TT[STATE_CNT4 / 2];
+  u32 TT[STATE_CNT / 2];

-  for (int dst_off = 0, src_off = 4; src_off < STATE_CNT4; dst_off += 4, src_off += 8)
+  for (int dst_off = 0, src_off = 16; src_off < STATE_CNT; dst_off += 16, src_off += 32)
   {
-    TT[dst_off + 0] = TI[src_off + 0];
-    TT[dst_off + 1] = TI[src_off + 1];
-    TT[dst_off + 2] = TI[src_off + 2];
-    TT[dst_off + 3] = TI[src_off + 3];
+    for (int j = 0; j < 16; j++) TT[dst_off + j] = TI[src_off + j];
   }

-  for (int dst_off = 4, src_off = 8; src_off < STATE_CNT4; dst_off += 4, src_off += 8)
+  for (int dst_off = 16, src_off = 32; src_off < STATE_CNT; dst_off += 16, src_off += 32)
   {
-    TI[dst_off + 0] = TI[src_off + 0];
-    TI[dst_off + 1] = TI[src_off + 1];
-    TI[dst_off + 2] = TI[src_off + 2];
-    TI[dst_off + 3] = TI[src_off + 3];
+    for (int j = 0; j < 16; j++) TI[dst_off + j] = TI[src_off + j];
   }

-  for (int dst_off = STATE_CNT4 / 2, src_off = 0; dst_off < STATE_CNT4; dst_off += 4, src_off += 4)
+  for (int dst_off = STATE_CNT / 2, src_off = 0; dst_off < STATE_CNT; dst_off += 16, src_off += 16)
   {
-    TI[dst_off + 0] = TT[src_off + 0];
-    TI[dst_off + 1] = TT[src_off + 1];
-    TI[dst_off + 2] = TT[src_off + 2];
-    TI[dst_off + 3] = TT[src_off + 3];
+    for (int j = 0; j < 16; j++) TI[dst_off + j] = TT[src_off + j];
   }

   #endif

-  uint4 R0 = TI[STATE_CNT4 - 4];
-  uint4 R1 = TI[STATE_CNT4 - 3];
-  uint4 R2 = TI[STATE_CNT4 - 2];
-  uint4 R3 = TI[STATE_CNT4 - 1];
-
-  for (int i = 0; i < STATE_CNT4; i += 4)
-  {
-    uint4 Y0 = TI[i + 0];
-    uint4 Y1 = TI[i + 1];
-    uint4 Y2 = TI[i + 2];
-    uint4 Y3 = TI[i + 3];
-
-    R0 = R0 ^ Y0;
-    R1 = R1 ^ Y1;
-    R2 = R2 ^ Y2;
-    R3 = R3 ^ Y3;
-
-    uint4 X0 = R0;
-    uint4 X1 = R1;
-    uint4 X2 = R2;
-    uint4 X3 = R3;
-
-    SALSA20_2R ();
-    SALSA20_2R ();
-    SALSA20_2R ();
-    SALSA20_2R ();
-
-    R0 = R0 + X0;
-    R1 = R1 + X1;
-    R2 = R2 + X2;
-    R3 = R3 + X3;
-
-    TI[i + 0] = R0;
-    TI[i + 1] = R1;
-    TI[i + 2] = R2;
-    TI[i + 3] = R3;
-  }
 }

 DECLSPEC void scrypt_smix_init (PRIVATE_AS uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3, const u64 gid)
@@ -265,35 +265,11 @@ DECLSPEC void scrypt_smix_init (PRIVATE_AS uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL
     case 3: V = V3; break;
   }

-  #if SCRYPT_R > 1
-
-  uint4 TT[STATE_CNT4];
-
-  for (int z = 0; z < zSIZE; z++) TT[z] = X[z];
-
-  for (int dst_off = 8, src_off = 4; dst_off < zSIZE; dst_off += 8, src_off += 4)
-  {
-    X[dst_off + 0] = TT[src_off + 0];
-    X[dst_off + 1] = TT[src_off + 1];
-    X[dst_off + 2] = TT[src_off + 2];
-    X[dst_off + 3] = TT[src_off + 3];
-  }
-
-  for (int dst_off = 4, src_off = zSIZE / 2; dst_off < zSIZE; dst_off += 8, src_off += 4)
-  {
-    X[dst_off + 0] = TT[src_off + 0];
-    X[dst_off + 1] = TT[src_off + 1];
-    X[dst_off + 2] = TT[src_off + 2];
-    X[dst_off + 3] = TT[src_off + 3];
-  }
-
-  #endif
-
   for (u32 y = 0; y < ySIZE; y++)
   {
     for (u32 z = 0; z < zSIZE; z++) V[CO] = X[z];

-    for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r (X);
+    for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r ((u32 *) X);
   }
 }

@@ -331,11 +307,11 @@ DECLSPEC void scrypt_smix_loop (PRIVATE_AS uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL

     for (u32 z = 0; z < zSIZE; z++) T[z] = V[CO];

-    for (u32 i = 0; i < km; i++) salsa_r (T);
+    for (u32 i = 0; i < km; i++) salsa_r ((u32 *) T);

     for (u32 z = 0; z < zSIZE; z++) X[z] ^= T[z];

-    salsa_r (X);
+    salsa_r ((u32 *) X);
   }
 }

@@ -621,14 +597,10 @@ KERNEL_FQ void m22700_comp (KERN_ATTR_TMPS (scrypt_tmp_t))
   u32 w2[4];
   u32 w3[4];

-  for (u32 i = 0; i < SCRYPT_CNT4; i += STATE_CNT4)
+  for (u32 l = 0; l < SCRYPT_CNT4; l += 4)
   {
-    for (u32 j = 0; j < (STATE_CNT4 * 2); j += 8)
-    {
     uint4 X[4];

-    const u32 l = i + j + ((j >= STATE_CNT4) ? (4 - STATE_CNT4) : 0);
-
     X[0] = tmps[gid].P[l + 0];
     X[1] = tmps[gid].P[l + 1];
     X[2] = tmps[gid].P[l + 2];
@@ -676,7 +648,6 @@ KERNEL_FQ void m22700_comp (KERN_ATTR_TMPS (scrypt_tmp_t))
     w3[3] = T[3].w;

     sha256_hmac_update_64 (&ctx, w0, w1, w2, w3, 64);
-    }
   }

   w0[0] = 1;
diff --git a/OpenCL/m24000-pure.cl b/OpenCL/m24000-pure.cl
index 37e219e2c..77afe5ef5 100644
--- a/OpenCL/m24000-pure.cl
+++ b/OpenCL/m24000-pure.cl
@@ -64,147 +64,142 @@ DECLSPEC uint4 hc_swap32_4 (uint4 v)
 #define STATE_CNT GET_STATE_CNT (SCRYPT_R)
 #define STATE_CNT4 (STATE_CNT / 4)

-#define ADD_ROTATE_XOR(r,i1,i2,s) (r) ^= rotate ((i1) + (i2), (s));
-
-#if defined IS_CUDA || defined IS_HIP
-
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = make_uint4 (X1.w, X1.x, X1.y, X1.z); \
-  X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \
-  X3 = make_uint4 (X3.y, X3.z, X3.w, X3.x); \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = make_uint4 (X1.y, X1.z, X1.w, X1.x); \
-  X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \
-  X3 = make_uint4 (X3.w, X3.x, X3.y, X3.z); \
-}
-#elif defined IS_METAL
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = X1.wxyz; \
-  X2 = X2.zwxy; \
-  X3 = X3.yzwx; \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = X1.yzwx; \
-  X2 = X2.zwxy; \
-  X3 = X3.wxyz; \
-}
-#else
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = X1.s3012; \
-  X2 = X2.s2301; \
-  X3 = X3.s1230; \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = X1.s1230; \
-  X2 = X2.s2301; \
-  X3 = X3.s3012; \
-}
-#endif
-
-#define SALSA20_8_XOR() \
-{ \
-  R0 = R0 ^ Y0; \
-  R1 = R1 ^ Y1; \
-  R2 = R2 ^ Y2; \
-  R3 = R3 ^ Y3; \
-  \
-  uint4 X0 = R0; \
-  uint4 X1 = R1; \
-  uint4 X2 = R2; \
-  uint4 X3 = R3; \
-  \
-  SALSA20_2R (); \
-  SALSA20_2R (); \
-  SALSA20_2R (); \
-  SALSA20_2R (); \
-  \
-  R0 = R0 + X0; \
-  R1 = R1 + X1; \
-  R2 = R2 + X2; \
-  R3 = R3 + X3; \
-}
-
-DECLSPEC void salsa_r (PRIVATE_AS uint4 *TI)
+DECLSPEC void salsa_r (PRIVATE_AS u32 *TI)
 {
-  uint4 R0 = TI[STATE_CNT4 - 4];
-  uint4 R1 = TI[STATE_CNT4 - 3];
-  uint4 R2 = TI[STATE_CNT4 - 2];
-  uint4 R3 = TI[STATE_CNT4 - 1];
+  u32 x[16];

-  uint4 TO[STATE_CNT4];
+  for (int j = 0; j < 16; j++) x[j] = TI[STATE_CNT - 16 + j];

-  int idx_y  = 0;
-  int idx_r1 = 0;
-  int idx_r2 = SCRYPT_R * 4;
-
-  for (int i = 0; i < SCRYPT_R; i++)
+  for (int i = 0; i < STATE_CNT; i += 16)
   {
-    uint4 Y0;
-    uint4 Y1;
-    uint4 Y2;
-    uint4 Y3;
+    for (int j = 0; j < 16; j++)
+    {
+      x[j] ^= TI[i + j];
+    }

-    Y0 = TI[idx_y++];
-    Y1 = TI[idx_y++];
-    Y2 = TI[idx_y++];
-    Y3 = TI[idx_y++];
+    for (int j = 0; j < 16; j++)
+    {
+      TI[i + j] = x[j];
+    }

-    SALSA20_8_XOR ();
+    for (int r = 0; r < 4; r++)
+    {
+      u32 t0, t1, t2, t3;

-    TO[idx_r1++] = R0;
-    TO[idx_r1++] = R1;
-    TO[idx_r1++] = R2;
-    TO[idx_r1++] = R3;
+      t0 = x[ 0] + x[12];
+      t1 = x[ 1] + x[13];
+      t2 = x[ 2] + x[14];
+      t3 = x[ 3] + x[15];
+      x[ 4] ^= hc_rotl32_S (t0, 7);
+      x[ 5] ^= hc_rotl32_S (t1, 7);
+      x[ 6] ^= hc_rotl32_S (t2, 7);
+      x[ 7] ^= hc_rotl32_S (t3, 7);

-    Y0 = TI[idx_y++];
-    Y1 = TI[idx_y++];
-    Y2 = TI[idx_y++];
-    Y3 = TI[idx_y++];
+      t0 = x[ 4] + x[ 0];
+      t1 = x[ 5] + x[ 1];
+      t2 = x[ 6] + x[ 2];
+      t3 = x[ 7] + x[ 3];
+      x[ 8] ^= hc_rotl32_S (t0, 9);
+      x[ 9] ^= hc_rotl32_S (t1, 9);
+      x[10] ^= hc_rotl32_S (t2, 9);
+      x[11] ^= hc_rotl32_S (t3, 9);

-    SALSA20_8_XOR ();
+      t0 = x[ 8] + x[ 4];
+      t1 = x[ 9] + x[ 5];
+      t2 = x[10] + x[ 6];
+      t3 = x[11] + x[ 7];
+      x[12] ^= hc_rotl32_S (t0, 13);
+      x[13] ^= hc_rotl32_S (t1, 13);
+      x[14] ^= hc_rotl32_S (t2, 13);
+      x[15] ^= hc_rotl32_S (t3, 13);

-    TO[idx_r2++] = R0;
-    TO[idx_r2++] = R1;
-    TO[idx_r2++] = R2;
-    TO[idx_r2++] = R3;
+      t0 = x[12] + x[ 8];
+      t1 = x[13] + x[ 9];
+      t2 = x[14] + x[10];
+      t3 = x[15] + x[11];
+      x[ 0] ^= hc_rotl32_S (t0, 18);
+      x[ 1] ^= hc_rotl32_S (t1, 18);
+      x[ 2] ^= hc_rotl32_S (t2, 18);
+      x[ 3] ^= hc_rotl32_S (t3, 18);
+
+      t0 = x[ 4]; x[ 4] = x[ 7]; x[ 7] = x[ 6]; x[ 6] = x[ 5]; x[ 5] = t0;
+      t0 = x[ 8]; x[ 8] = x[10]; x[10] = t0;
+      t0 = x[ 9]; x[ 9] = x[11]; x[11] = t0;
+      t0 = x[12]; x[12] = x[13]; x[13] = x[14]; x[14] = x[15]; x[15] = t0;
+
+      t0 = x[ 0] + x[ 4];
+      t1 = x[ 1] + x[ 5];
+      t2 = x[ 2] + x[ 6];
+      t3 = x[ 3] + x[ 7];
+      x[12] ^= hc_rotl32_S (t0, 7);
+      x[13] ^= hc_rotl32_S (t1, 7);
+      x[14] ^= hc_rotl32_S (t2, 7);
+      x[15] ^= hc_rotl32_S (t3, 7);
+
+      t0 = x[12] + x[ 0];
+      t1 = x[13] + x[ 1];
+      t2 = x[14] + x[ 2];
+      t3 = x[15] + x[ 3];
+      x[ 8] ^= hc_rotl32_S (t0, 9);
+      x[ 9] ^= hc_rotl32_S (t1, 9);
+      x[10] ^= hc_rotl32_S (t2, 9);
+      x[11] ^= hc_rotl32_S (t3, 9);
+
+      t0 = x[ 8] + x[12];
+      t1 = x[ 9] + x[13];
+      t2 = x[10] + x[14];
+      t3 = x[11] + x[15];
+      x[ 4] ^= hc_rotl32_S (t0, 13);
+      x[ 5] ^= hc_rotl32_S (t1, 13);
+      x[ 6] ^= hc_rotl32_S (t2, 13);
+      x[ 7] ^= hc_rotl32_S (t3, 13);
+
+      t0 = x[ 4] + x[ 8];
+      t1 = x[ 5] + x[ 9];
+      t2 = x[ 6] + x[10];
+      t3 = x[ 7] + x[11];
+      x[ 0] ^= hc_rotl32_S (t0, 18);
+      x[ 1] ^= hc_rotl32_S (t1, 18);
+      x[ 2] ^= hc_rotl32_S (t2, 18);
+      x[ 3] ^= hc_rotl32_S (t3, 18);
+
+      t0 = x[ 4]; x[ 4] = x[ 5]; x[ 5] = x[ 6]; x[ 6] = x[ 7]; x[ 7] = t0;
+      t0 = x[ 8]; x[ 8] = x[10]; x[10] = t0;
+      t0 = x[ 9]; x[ 9] = x[11]; x[11] = t0;
+      t0 = x[15]; x[15] = x[14]; x[14] = x[13]; x[13] = x[12]; x[12] = t0;
+    }
+
+    for (int j = 0; j < 16; j++)
+    {
+      x[j] += TI[i + j];
+    }
+
+    for (int j = 0; j < 16; j++)
+    {
+      TI[i + j] = x[j];
+    }
   }

-  #pragma unroll
-  for (int i = 0; i < STATE_CNT4; i++)
+  #if SCRYPT_R > 1
+
+  u32 TT[STATE_CNT / 2];
+
+  for (int dst_off = 0, src_off = 16; src_off < STATE_CNT; dst_off += 16, src_off += 32)
   {
-    TI[i] = TO[i];
+    for (int j = 0; j < 16; j++) TT[dst_off + j] = TI[src_off + j];
   }
+
+  for (int dst_off = 16, src_off = 32; src_off < STATE_CNT; dst_off += 16, src_off += 32)
+  {
+    for (int j = 0; j < 16; j++) TI[dst_off + j] = TI[src_off + j];
+  }
+
+  for (int dst_off = STATE_CNT / 2, src_off = 0; dst_off < STATE_CNT; dst_off += 16, src_off += 16)
+  {
+    for (int j = 0; j < 16; j++) TI[dst_off + j] = TT[src_off + j];
+  }
+
+  #endif
 }

 DECLSPEC void scrypt_smix (PRIVATE_AS uint4 *X, PRIVATE_AS uint4 *T, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3, const u64 gid)
@@ -262,7 +257,7 @@ DECLSPEC void scrypt_smix (PRIVATE_AS uint4 *X, PRIVATE_AS uint4 *T, GLOBAL_AS u
   {
     for (u32 z = 0; z < zSIZE; z++) V[CO] = X[z];

-    for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r (X);
+    for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r ((u32 *) X);
   }

   for (u32 i = 0; i < SCRYPT_N; i++)
@@ -275,11 +270,11 @@ DECLSPEC void scrypt_smix (PRIVATE_AS uint4 *X, PRIVATE_AS uint4 *T, GLOBAL_AS u

     for (u32 z = 0; z < zSIZE; z++) T[z] = V[CO];

-    for (u32 i = 0; i < km; i++) salsa_r (T);
+    for (u32 i = 0; i < km; i++) salsa_r ((u32 *) T);

     for (u32 z = 0; z < zSIZE; z++) X[z] ^= T[z];

-    salsa_r (X);
+    salsa_r ((u32 *) X);
   }

 #ifdef _unroll
diff --git a/OpenCL/m27700-pure.cl b/OpenCL/m27700-pure.cl
index d9bf11510..42ee0974d 100644
--- a/OpenCL/m27700-pure.cl
+++ b/OpenCL/m27700-pure.cl
@@ -53,146 +53,146 @@ DECLSPEC uint4 hc_swap32_4 (uint4 v)
 #define STATE_CNT GET_STATE_CNT (SCRYPT_R)
 #define STATE_CNT4 (STATE_CNT / 4)

-#define ADD_ROTATE_XOR(r,i1,i2,s) (r) ^= rotate ((i1) + (i2), (s));
-
-#if defined IS_CUDA || defined IS_HIP
-
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = make_uint4 (X1.w, X1.x, X1.y, X1.z); \
-  X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \
-  X3 = make_uint4 (X3.y, X3.z, X3.w, X3.x); \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = make_uint4 (X1.y, X1.z, X1.w, X1.x); \
-  X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \
-  X3 = make_uint4 (X3.w, X3.x, X3.y, X3.z); \
-}
-#elif defined IS_METAL
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = X1.wxyz; \
-  X2 = X2.zwxy; \
-  X3 = X3.yzwx; \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = X1.yzwx; \
-  X2 = X2.zwxy; \
-  X3 = X3.wxyz; \
-}
-#else
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = X1.s3012; \
-  X2 = X2.s2301; \
-  X3 = X3.s1230; \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = X1.s1230; \
-  X2 = X2.s2301; \
-  X3 = X3.s3012; \
-}
-#endif
-
 #define Coord(xd4,y,z) (((xd4) * ySIZE * zSIZE) + ((y) * zSIZE) + (z))
 #define CO Coord(xd4,y,z)

-DECLSPEC void salsa_r (PRIVATE_AS uint4 *TI)
+DECLSPEC void salsa_r (PRIVATE_AS u32 *TI)
 {
+  u32 x[16];
+
+  for (int j = 0; j < 16; j++) x[j] = TI[STATE_CNT - 16 + j];
+
+  for (int i = 0; i < STATE_CNT; i += 16)
+  {
+    for (int j = 0; j < 16; j++)
+    {
+      x[j] ^= TI[i + j];
+    }
+
+    for (int j = 0; j < 16; j++)
+    {
+      TI[i + j] = x[j];
+    }
+
+    for (int r = 0; r < 4; r++)
+    {
+      u32 t0, t1, t2, t3;
+
+      t0 = x[ 0] + x[12];
+      t1 = x[ 1] + x[13];
+      t2 = x[ 2] + x[14];
+      t3 = x[ 3] + x[15];
+      x[ 4] ^= hc_rotl32_S (t0, 7);
+      x[ 5] ^= hc_rotl32_S (t1, 7);
+      x[ 6] ^= hc_rotl32_S (t2, 7);
+      x[ 7] ^= hc_rotl32_S (t3, 7);
+
+      t0 = x[ 4] + x[ 0];
+      t1 = x[ 5] + x[ 1];
+      t2 = x[ 6] + x[ 2];
+      t3 = x[ 7] + x[ 3];
+      x[ 8] ^= hc_rotl32_S (t0, 9);
+      x[ 9] ^= hc_rotl32_S (t1, 9);
+      x[10] ^= hc_rotl32_S (t2, 9);
+      x[11] ^= hc_rotl32_S (t3, 9);
+
+      t0 = x[ 8] + x[ 4];
+      t1 = x[ 9] + x[ 5];
+      t2 = x[10] + x[ 6];
+      t3 = x[11] + x[ 7];
+      x[12] ^= hc_rotl32_S (t0, 13);
+      x[13] ^= hc_rotl32_S (t1, 13);
+      x[14] ^= hc_rotl32_S (t2, 13);
+      x[15] ^= hc_rotl32_S (t3, 13);
+
+      t0 = x[12] + x[ 8];
+      t1 = x[13] + x[ 9];
+      t2 = x[14] + x[10];
+      t3 = x[15] + x[11];
+      x[ 0] ^= hc_rotl32_S (t0, 18);
+      x[ 1] ^= hc_rotl32_S (t1, 18);
+      x[ 2] ^= hc_rotl32_S (t2, 18);
+      x[ 3] ^= hc_rotl32_S (t3, 18);
+
+      t0 = x[ 4]; x[ 4] = x[ 7]; x[ 7] = x[ 6]; x[ 6] = x[ 5]; x[ 5] = t0;
+      t0 = x[ 8]; x[ 8] = x[10]; x[10] = t0;
+      t0 = x[ 9]; x[ 9] = x[11]; x[11] = t0;
+      t0 = x[12]; x[12] = x[13]; x[13] = x[14]; x[14] = x[15]; x[15] = t0;
+
+      t0 = x[ 0] + x[ 4];
+      t1 = x[ 1] + x[ 5];
+      t2 = x[ 2] + x[ 6];
+      t3 = x[ 3] + x[ 7];
+      x[12] ^= hc_rotl32_S (t0, 7);
+      x[13] ^= hc_rotl32_S (t1, 7);
+      x[14] ^= hc_rotl32_S (t2, 7);
+      x[15] ^= hc_rotl32_S (t3, 7);
+
+      t0 = x[12] + x[ 0];
+      t1 = x[13] + x[ 1];
+      t2 = x[14] + x[ 2];
+      t3 = x[15] + x[ 3];
+      x[ 8] ^= hc_rotl32_S (t0, 9);
+      x[ 9] ^= hc_rotl32_S (t1, 9);
+      x[10] ^= hc_rotl32_S (t2, 9);
+      x[11] ^= hc_rotl32_S (t3, 9);
+
+      t0 = x[ 8] + x[12];
+      t1 = x[ 9] + x[13];
+      t2 = x[10] + x[14];
+      t3 = x[11] + x[15];
+      x[ 4] ^= hc_rotl32_S (t0, 13);
+      x[ 5] ^= hc_rotl32_S (t1, 13);
+      x[ 6] ^= hc_rotl32_S (t2, 13);
+      x[ 7] ^= hc_rotl32_S (t3, 13);
+
+      t0 = x[ 4] + x[ 8];
+      t1 = x[ 5] + x[ 9];
+      t2 = x[ 6] + x[10];
+      t3 = x[ 7] + x[11];
+      x[ 0] ^= hc_rotl32_S (t0, 18);
+      x[ 1] ^= hc_rotl32_S (t1, 18);
+      x[ 2] ^= hc_rotl32_S (t2, 18);
+      x[ 3] ^= hc_rotl32_S (t3, 18);
+
+      t0 = x[ 4]; x[ 4] = x[ 5]; x[ 5] = x[ 6]; x[ 6] = x[ 7]; x[ 7] = t0;
+      t0 = x[ 8]; x[ 8] = x[10]; x[10] = t0;
+      t0 = x[ 9]; x[ 9] = x[11]; x[11] = t0;
+      t0 = x[15]; x[15] = x[14]; x[14] = x[13]; x[13] = x[12]; x[12] = t0;
+    }
+
+    for (int j = 0; j < 16; j++)
+    {
+      x[j] += TI[i + j];
+    }
+
+    for (int j = 0; j < 16; j++)
+    {
+      TI[i + j] = x[j];
+    }
+  }
+
   #if SCRYPT_R > 1

-  uint4 TT[STATE_CNT4 / 2];
+  u32 TT[STATE_CNT / 2];

-  for (int dst_off = 0, src_off = 4; src_off < STATE_CNT4; dst_off += 4, src_off += 8)
+  for (int dst_off = 0, src_off = 16; src_off < STATE_CNT; dst_off += 16, src_off += 32)
   {
-    TT[dst_off + 0] = TI[src_off + 0];
-    TT[dst_off + 1] = TI[src_off + 1];
-    TT[dst_off + 2] = TI[src_off + 2];
-    TT[dst_off + 3] = TI[src_off + 3];
+    for (int j = 0; j < 16; j++) TT[dst_off + j] = TI[src_off + j];
   }

-  for (int dst_off = 4, src_off = 8; src_off < STATE_CNT4; dst_off += 4, src_off += 8)
+  for (int dst_off = 16, src_off = 32; src_off < STATE_CNT; dst_off += 16, src_off += 32)
   {
-    TI[dst_off + 0] = TI[src_off + 0];
-    TI[dst_off + 1] = TI[src_off + 1];
-    TI[dst_off + 2] = TI[src_off + 2];
-    TI[dst_off + 3] = TI[src_off + 3];
+    for (int j = 0; j < 16; j++) TI[dst_off + j] = TI[src_off + j];
   }

-  for (int dst_off = STATE_CNT4 / 2, src_off = 0; dst_off < STATE_CNT4; dst_off += 4, src_off += 4)
+  for (int dst_off = STATE_CNT / 2, src_off = 0; dst_off < STATE_CNT; dst_off += 16, src_off += 16)
   {
-    TI[dst_off + 0] = TT[src_off + 0];
-    TI[dst_off + 1] = TT[src_off + 1];
-    TI[dst_off + 2] = TT[src_off + 2];
-    TI[dst_off + 3] = TT[src_off + 3];
+    for (int j = 0; j < 16; j++) TI[dst_off + j] = TT[src_off + j];
   }

   #endif

-  uint4 R0 = TI[STATE_CNT4 - 4];
-  uint4 R1 = TI[STATE_CNT4 - 3];
-  uint4 R2 = TI[STATE_CNT4 - 2];
-  uint4 R3 = TI[STATE_CNT4 - 1];
-
-  for (int i = 0; i < STATE_CNT4; i += 4)
-  {
-    uint4 Y0 = TI[i + 0];
-    uint4 Y1 = TI[i + 1];
-    uint4 Y2 = TI[i + 2];
-    uint4 Y3 = TI[i + 3];
-
-    R0 = R0 ^ Y0;
-    R1 = R1 ^ Y1;
-    R2 = R2 ^ Y2;
-    R3 = R3 ^ Y3;
-
-    uint4 X0 = R0;
-    uint4 X1 = R1;
-    uint4 X2 = R2;
-    uint4 X3 = R3;
-
-    SALSA20_2R ();
-    SALSA20_2R ();
-    SALSA20_2R ();
-    SALSA20_2R ();
-
-    R0 = R0 + X0;
-    R1 = R1 + X1;
-    R2 = R2 + X2;
-    R3 = R3 + X3;
-
-    TI[i + 0] = R0;
-    TI[i + 1] = R1;
-    TI[i + 2] = R2;
-    TI[i + 3] = R3;
-  }
 }

 DECLSPEC void scrypt_smix_init (PRIVATE_AS uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3, const u64 gid)
@@ -215,35 +215,11 @@ DECLSPEC void scrypt_smix_init (PRIVATE_AS uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL
     case 3: V = V3; break;
   }

-  #if SCRYPT_R > 1
-
-  uint4 TT[STATE_CNT4];
-
-  for (int z = 0; z < zSIZE; z++) TT[z] = X[z];
-
-  for (int dst_off = 8, src_off = 4; dst_off < zSIZE; dst_off += 8, src_off += 4)
-  {
-    X[dst_off + 0] = TT[src_off + 0];
-    X[dst_off + 1] = TT[src_off + 1];
-    X[dst_off + 2] = TT[src_off + 2];
-    X[dst_off + 3] = TT[src_off + 3];
-  }
-
-  for (int dst_off = 4, src_off = zSIZE / 2; dst_off < zSIZE; dst_off += 8, src_off += 4)
-  {
-    X[dst_off + 0] = TT[src_off + 0];
-    X[dst_off + 1] = TT[src_off + 1];
-    X[dst_off + 2] = TT[src_off + 2];
-    X[dst_off + 3] = TT[src_off + 3];
-  }
-
-  #endif
-
   for (u32 y = 0; y < ySIZE; y++)
   {
     for (u32 z = 0; z < zSIZE; z++) V[CO] = X[z];

-    for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r (X);
+    for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r ((u32 *) X);
   }
 }

@@ -281,11 +257,11 @@ DECLSPEC void scrypt_smix_loop (PRIVATE_AS uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL

     for (u32 z = 0; z < zSIZE; z++) T[z] = V[CO];

-    for (u32 i = 0; i < km; i++) salsa_r (T);
+    for (u32 i = 0; i < km; i++) salsa_r ((u32 *) T);

     for (u32 z = 0; z < zSIZE; z++) X[z] ^= T[z];

-    salsa_r (X);
+    salsa_r ((u32 *) X);
   }
 }

@@ -573,14 +549,10 @@ KERNEL_FQ void m27700_comp (KERN_ATTR_TMPS (scrypt_tmp_t))
   u32 w2[4];
   u32 w3[4];

-  for (u32 i = 0; i < SCRYPT_CNT4; i += STATE_CNT4)
+  for (u32 l = 0; l < SCRYPT_CNT4; l += 4)
   {
-    for (u32 j = 0; j < (STATE_CNT4 * 2); j += 8)
-    {
     uint4 X[4];

-    const u32 l = i + j + ((j >= STATE_CNT4) ? (4 - STATE_CNT4) : 0);
-
     X[0] = tmps[gid].P[l + 0];
     X[1] = tmps[gid].P[l + 1];
     X[2] = tmps[gid].P[l + 2];
@@ -628,7 +600,6 @@ KERNEL_FQ void m27700_comp (KERN_ATTR_TMPS (scrypt_tmp_t))
     w3[3] = T[3].w;

     sha256_hmac_update_64 (&ctx, w0, w1, w2, w3, 64);
-    }
   }

   w0[0] = 1;
diff --git a/OpenCL/m28200-pure.cl b/OpenCL/m28200-pure.cl
index 58106a007..49774658c 100644
--- a/OpenCL/m28200-pure.cl
+++ b/OpenCL/m28200-pure.cl
@@ -65,149 +65,148 @@ DECLSPEC uint4 hc_swap32_4 (uint4 v)
 #define STATE_CNT GET_STATE_CNT (SCRYPT_R)
 #define STATE_CNT4 (STATE_CNT / 4)

-#define ADD_ROTATE_XOR(r,i1,i2,s) (r) ^= rotate ((i1) + (i2), (s));
-
-#if defined IS_CUDA || defined IS_HIP
-
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = make_uint4 (X1.w, X1.x, X1.y, X1.z); \
-  X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \
-  X3 = make_uint4 (X3.y, X3.z, X3.w, X3.x); \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = make_uint4 (X1.y, X1.z, X1.w, X1.x); \
-  X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \
-  X3 = make_uint4 (X3.w, X3.x, X3.y, X3.z); \
-}
-#elif defined IS_METAL
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = X1.wxyz; \
-  X2 = X2.zwxy; \
-  X3 = X3.yzwx; \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = X1.yzwx; \
-  X2 = X2.zwxy; \
-  X3 = X3.wxyz; \
-}
-#else
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = X1.s3012; \
-  X2 = X2.s2301; \
-  X3 = X3.s1230; \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = X1.s1230; \
-  X2 = X2.s2301; \
-  X3 = X3.s3012; \
-}
-#endif
-
 #define Coord(xd4,y,z) (((xd4) * ySIZE * zSIZE) + ((y) * zSIZE) + (z))
 #define CO Coord(xd4,y,z)

-DECLSPEC void salsa_r (PRIVATE_AS uint4 *TI)
+DECLSPEC void salsa_r (PRIVATE_AS u32 *TI)
 {
+  u32 x[16];
+
+  for (int j = 0; j < 16; j++) x[j] = TI[STATE_CNT - 16 + j];
+
+  for (int i = 0; i < STATE_CNT; i += 16)
+  {
+    for (int j = 0; j < 16; j++)
+    {
+      x[j] ^= TI[i + j];
+    }
+
+    for (int j = 0; j < 16; j++)
+    {
+      TI[i + j] = x[j];
+    }
+
+    for (int r = 0; r < 4; r++)
+    {
+      u32 t0, t1, t2, t3;
+
+      t0 = x[ 0] + x[12];
+      t1 = x[ 1] + x[13];
+      t2 = x[ 2] + x[14];
+      t3 = x[ 3] + x[15];
+      x[ 4] ^= hc_rotl32_S (t0, 7);
+      x[ 5] ^= hc_rotl32_S (t1, 7);
+      x[ 6] ^= hc_rotl32_S (t2, 7);
+      x[ 7] ^= hc_rotl32_S (t3, 7);
+
+      t0 = x[ 4] + x[ 0];
+      t1 = x[ 5] + x[ 1];
+      t2 = x[ 6] + x[ 2];
+      t3 = x[ 7] + x[ 3];
+      x[ 8] ^= hc_rotl32_S (t0, 9);
+      x[ 9] ^= hc_rotl32_S (t1, 9);
+      x[10] ^= hc_rotl32_S (t2, 9);
+      x[11] ^= hc_rotl32_S (t3, 9);
+
+      t0 = x[ 8] + x[ 4];
+      t1 = x[ 9] + x[ 5];
+      t2 = x[10] + x[ 6];
+      t3 = x[11] + x[ 7];
+      x[12] ^= hc_rotl32_S (t0, 13);
+      x[13] ^= hc_rotl32_S (t1, 13);
+      x[14] ^= hc_rotl32_S (t2, 13);
+      x[15] ^= hc_rotl32_S (t3, 13);
+
+      t0 = x[12] + x[ 8];
+      t1 = x[13] + x[ 9];
+      t2 = x[14] + x[10];
+      t3 = x[15] + x[11];
+      x[ 0] ^= hc_rotl32_S (t0, 18);
+      x[ 1] ^= hc_rotl32_S (t1, 18);
+      x[ 2] ^= hc_rotl32_S (t2, 18);
+      x[ 3] ^= hc_rotl32_S (t3, 18);
+
+      t0 = x[ 4]; x[ 4] = x[ 7]; x[ 7] = x[ 6]; x[ 6] = x[ 5]; x[ 5] = t0;
+      t0 = x[ 8]; x[ 8] = x[10]; x[10] = t0;
+      t0 = x[ 9]; x[ 9] = x[11]; x[11] = t0;
+      t0 = x[12]; x[12] = x[13]; x[13] = x[14]; x[14] = x[15]; x[15] = t0;
+
+      t0 = x[ 0] + x[ 4];
+      t1 = x[ 1] + x[ 5];
+      t2 = x[ 2] + x[ 6];
+      t3 = x[ 3] + x[ 7];
+      x[12] ^= hc_rotl32_S (t0, 7);
+      x[13] ^= hc_rotl32_S (t1, 7);
+      x[14] ^= hc_rotl32_S (t2, 7);
+      x[15] ^= hc_rotl32_S (t3, 7);
+
+      t0 = x[12] + x[ 0];
+      t1 = x[13] + x[ 1];
+      t2 = x[14] + x[ 2];
+      t3 = x[15] + x[ 3];
+      x[ 8] ^= hc_rotl32_S (t0, 9);
+      x[ 9] ^= hc_rotl32_S (t1, 9);
+      x[10] ^= hc_rotl32_S (t2, 9);
+      x[11] ^= hc_rotl32_S (t3, 9);
+
+      t0 = x[ 8] + x[12];
+      t1 = x[ 9] + x[13];
+      t2 = x[10] + x[14];
+      t3 = x[11] + x[15];
+      x[ 4] ^= hc_rotl32_S (t0, 13);
+      x[ 5] ^= hc_rotl32_S (t1, 13);
+      x[ 6] ^= hc_rotl32_S (t2, 13);
+      x[ 7] ^= hc_rotl32_S (t3, 13);
+
+      t0 = x[ 4] + x[ 8];
+      t1 = x[ 5] + x[ 9];
+      t2 = x[ 6] + x[10];
+      t3 = x[ 7] + x[11];
+      x[ 0] ^= hc_rotl32_S (t0, 18);
+      x[ 1] ^= hc_rotl32_S (t1, 18);
+      x[ 2] ^= hc_rotl32_S (t2, 18);
+      x[ 3] ^= hc_rotl32_S (t3, 18);
+
+      t0 = x[ 4]; x[ 4] = x[ 5]; x[ 5] = x[ 6]; x[ 6] = x[ 7]; x[ 7] = t0;
+      t0 = x[ 8]; x[ 8] = x[10]; x[10] = t0;
+      t0 = x[ 9]; x[ 9] = x[11]; x[11] = t0;
+      t0 = x[15]; x[15] = x[14]; x[14] = x[13]; x[13] = x[12]; x[12] = t0;
+    }
+
+    for (int j = 0; j < 16; j++)
+    {
+      x[j] += TI[i + j];
+    }
+
+    for (int j = 0; j < 16; j++)
+    {
+      TI[i + j] = x[j];
+    }
+  }
+
   #if SCRYPT_R > 1

-  uint4 TT[STATE_CNT4 / 2];
+  u32 TT[STATE_CNT / 2];

-  for (int dst_off = 0, src_off = 4; src_off < STATE_CNT4; dst_off += 4, src_off += 8)
+  for (int dst_off = 0, src_off = 16; src_off < STATE_CNT; dst_off += 16, src_off += 32)
   {
-    TT[dst_off + 0] = TI[src_off + 0];
-    TT[dst_off + 1] = TI[src_off + 1];
-    TT[dst_off + 2] = TI[src_off + 2];
-    TT[dst_off + 3] = TI[src_off + 3];
+    for (int j = 0; j < 16; j++) TT[dst_off + j] = TI[src_off + j];
   }

-  for (int dst_off = 4, src_off = 8; src_off < STATE_CNT4; dst_off += 4, src_off += 8)
+  for (int dst_off = 16, src_off = 32; src_off < STATE_CNT; dst_off += 16, src_off += 32)
   {
-    TI[dst_off + 0] = TI[src_off + 0];
-    TI[dst_off + 1] = TI[src_off + 1];
-    TI[dst_off + 2] = TI[src_off + 2];
-    TI[dst_off + 3] = TI[src_off + 3];
+    for (int j = 0; j < 16; j++) TI[dst_off + j] = TI[src_off + j];
   }

-  for (int dst_off = STATE_CNT4 / 2, src_off = 0; dst_off < STATE_CNT4; dst_off += 4, src_off += 4)
+  for (int dst_off = STATE_CNT / 2, src_off = 0; dst_off < STATE_CNT; dst_off += 16, src_off += 16)
   {
-    TI[dst_off + 0] = TT[src_off + 0];
-    TI[dst_off + 1] = TT[src_off + 1];
-    TI[dst_off + 2] = TT[src_off + 2];
-    TI[dst_off + 3] = TT[src_off + 3];
+    for (int j = 0; j < 16; j++) TI[dst_off + j] = TT[src_off + j];
   }

   #endif

-  uint4 R0 = TI[STATE_CNT4 - 4];
-  uint4 R1 = TI[STATE_CNT4 - 3];
-  uint4 R2 = TI[STATE_CNT4 - 2];
-  uint4 R3 = TI[STATE_CNT4 - 1];
-
-  for (int i = 0; i < STATE_CNT4; i += 4)
-  {
-    uint4 Y0 = TI[i + 0];
-    uint4 Y1 = TI[i + 1];
-    uint4 Y2 = TI[i + 2];
-    uint4 Y3 = TI[i + 3];
-
-    R0 = R0 ^ Y0;
-    R1 = R1 ^ Y1;
-    R2 = R2 ^ Y2;
-    R3 = R3 ^ Y3;
-
-    uint4 X0 = R0;
-    uint4 X1 = R1;
-    uint4 X2 = R2;
-    uint4 X3 = R3;
-
-    SALSA20_2R ();
-    SALSA20_2R ();
-    SALSA20_2R ();
-    SALSA20_2R ();
-
-    R0 = R0 + X0;
-    R1 = R1 + X1;
-    R2 = R2 + X2;
-    R3 = R3 + X3;
-
-    TI[i + 0] = R0;
-    TI[i + 1] = R1;
-    TI[i + 2] = R2;
-    TI[i + 3] = R3;
-  }
 }

-
 DECLSPEC void scrypt_smix_init (PRIVATE_AS uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3, const u64 gid)
 {
   const u32 ySIZE = SCRYPT_N / SCRYPT_TMTO;
@@ -228,35 +227,11 @@ DECLSPEC void scrypt_smix_init (PRIVATE_AS uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL
     case 3: V = V3; break;
   }

-  #if SCRYPT_R > 1
-
-  uint4 TT[STATE_CNT4];
-
-  for (int z = 0; z < zSIZE; z++) TT[z] = X[z];
-
-  for (int dst_off = 8, src_off = 4; dst_off < zSIZE; dst_off += 8, src_off += 4)
-  {
-    X[dst_off + 0] = TT[src_off + 0];
-    X[dst_off + 1] = TT[src_off + 1];
-    X[dst_off + 2] = TT[src_off + 2];
-    X[dst_off + 3] = TT[src_off + 3];
-  }
-
-  for (int dst_off = 4, src_off = zSIZE / 2; dst_off < zSIZE; dst_off += 8, src_off += 4)
-  {
-    X[dst_off + 0] = TT[src_off + 0];
-    X[dst_off + 1] = TT[src_off + 1];
-    X[dst_off + 2] = TT[src_off + 2];
-    X[dst_off + 3] = TT[src_off + 3];
-  }
-
-  #endif
-
   for (u32 y = 0; y < ySIZE; y++)
   {
     for (u32 z = 0; z < zSIZE; z++) V[CO] = X[z];

-    for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r (X);
+    for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r ((u32 *) X);
   }
 }

@@ -294,11 +269,11 @@ DECLSPEC void scrypt_smix_loop (PRIVATE_AS uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL

     for (u32 z = 0; z < zSIZE; z++) T[z] = V[CO];

-    for (u32 i = 0; i < km; i++) salsa_r (T);
+    for (u32 i = 0; i < km; i++) salsa_r ((u32 *) T);

     for (u32 z = 0; z < zSIZE; z++) X[z] ^= T[z];

-    salsa_r (X);
+    salsa_r ((u32 *) X);
   }
 }

@@ -542,14 +517,10 @@ KERNEL_FQ void m28200_comp (KERN_ATTR_TMPS_ESALT (exodus_tmp_t, exodus_t))

   sha256_hmac_init_global_swap (&ctx, pws[gid].i, pws[gid].pw_len);

-  for (u32 i = 0; i < SCRYPT_CNT4; i += STATE_CNT4)
+  for (u32 l = 0; l < SCRYPT_CNT4; l += 4)
   {
-    for (u32 j = 0; j < (STATE_CNT4 * 2); j += 8)
-    {
     uint4 X[4];

-    const u32 l = i + j + ((j >= STATE_CNT4) ? (4 - STATE_CNT4) : 0);
-
     X[0] = tmps[gid].P[l + 0];
     X[1] = tmps[gid].P[l + 1];
     X[2] = tmps[gid].P[l + 2];
@@ -597,7 +568,6 @@ KERNEL_FQ void m28200_comp (KERN_ATTR_TMPS_ESALT (exodus_tmp_t, exodus_t))
     w3[3] = T[3].w;

     sha256_hmac_update_64 (&ctx, w0, w1, w2, w3, 64);
-    }
   }

   w0[0] = 1;
diff --git a/OpenCL/m29800-pure.cl b/OpenCL/m29800-pure.cl
index c72f5c267..114c97a82 100644
--- a/OpenCL/m29800-pure.cl
+++ b/OpenCL/m29800-pure.cl
@@ -53,146 +53,146 @@ DECLSPEC uint4 hc_swap32_4 (uint4 v)
 #define STATE_CNT GET_STATE_CNT (SCRYPT_R)
 #define STATE_CNT4 (STATE_CNT / 4)

-#define ADD_ROTATE_XOR(r,i1,i2,s) (r) ^= rotate ((i1) + (i2), (s));
-
-#if defined IS_CUDA || defined IS_HIP
-
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = make_uint4 (X1.w, X1.x, X1.y, X1.z); \
-  X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \
-  X3 = make_uint4 (X3.y, X3.z, X3.w, X3.x); \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = make_uint4 (X1.y, X1.z, X1.w, X1.x); \
-  X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \
-  X3 = make_uint4 (X3.w, X3.x, X3.y, X3.z); \
-}
-#elif defined IS_METAL
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = X1.wxyz; \
-  X2 = X2.zwxy; \
-  X3 = X3.yzwx; \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = X1.yzwx; \
-  X2 = X2.zwxy; \
-  X3 = X3.wxyz; \
-}
-#else
-#define SALSA20_2R() \
-{ \
-  ADD_ROTATE_XOR (X1, X0, X3, 7); \
-  ADD_ROTATE_XOR (X2, X1, X0, 9); \
-  ADD_ROTATE_XOR (X3, X2, X1, 13); \
-  ADD_ROTATE_XOR (X0, X3, X2, 18); \
-  \
-  X1 = X1.s3012; \
-  X2 = X2.s2301; \
-  X3 = X3.s1230; \
-  \
-  ADD_ROTATE_XOR (X3, X0, X1, 7); \
-  ADD_ROTATE_XOR (X2, X3, X0, 9); \
-  ADD_ROTATE_XOR (X1, X2, X3, 13); \
-  ADD_ROTATE_XOR (X0, X1, X2, 18); \
-  \
-  X1 = X1.s1230; \
-  X2 = X2.s2301; \
-  X3 = X3.s3012; \
-}
-#endif
-
 #define Coord(xd4,y,z) (((xd4) * ySIZE * zSIZE) + ((y) * zSIZE) + (z))
 #define CO Coord(xd4,y,z)

-DECLSPEC void salsa_r (PRIVATE_AS uint4 *TI)
+DECLSPEC void salsa_r (PRIVATE_AS u32 *TI)
 {
-  uint4 R0 = TI[STATE_CNT4 - 4];
-  uint4 R1 = TI[STATE_CNT4 - 3];
-  uint4 R2 = TI[STATE_CNT4 - 2];
-  uint4 R3 = TI[STATE_CNT4 - 1];
+  u32 x[16];

-  for (int i = 0; i < STATE_CNT4; i += 4)
+  for (int j = 0; j < 16; j++) x[j] = TI[STATE_CNT - 16 + j];
+
+  for (int i = 0; i < STATE_CNT; i += 16)
   {
-    uint4 Y0 = TI[i + 0];
-    uint4 Y1 = TI[i + 1];
-    uint4 Y2 = TI[i + 2];
-    uint4 Y3 = TI[i + 3];
+    for (int j = 0; j < 16; j++)
+    {
+      x[j] ^= TI[i + j];
+    }

-    R0 = R0 ^ Y0;
-    R1 = R1 ^ Y1;
-    R2 = R2 ^ Y2;
-    R3 = R3 ^ Y3;
+    for (int j = 0; j < 16; j++)
+    {
+      TI[i + j] = x[j];
+    }

-    uint4 X0 = R0;
-    uint4 X1 = R1;
-    uint4 X2 = R2;
-    uint4 X3 = R3;
+    for (int r = 0; r < 4; r++)
+    {
+      u32 t0, t1, t2, t3;

-    SALSA20_2R ();
-    SALSA20_2R ();
-    SALSA20_2R ();
-    SALSA20_2R ();
+      t0 = x[ 0] + x[12];
+      t1 = x[ 1] + x[13];
+      t2 = x[ 2] + x[14];
+      t3 = x[ 3] + x[15];
+      x[ 4] ^= hc_rotl32_S (t0, 7);
+      x[ 5] ^= hc_rotl32_S (t1, 7);
+      x[ 6] ^= hc_rotl32_S (t2, 7);
+      x[ 7] ^= hc_rotl32_S (t3, 7);

-    R0 = R0 + X0;
-    R1 = R1 + X1;
-    R2 = R2 + X2;
-    R3 = R3 + X3;
+      t0 = x[ 4] + x[ 0];
+      t1 = x[ 5] + x[ 1];
+      t2 = x[ 6] + x[ 2];
+      t3 = x[ 7] + x[ 3];
+      x[ 8] ^= hc_rotl32_S (t0, 9);
+      x[ 9] ^= hc_rotl32_S (t1, 9);
+      x[10] ^= hc_rotl32_S (t2, 9);
+      x[11] ^= hc_rotl32_S (t3, 9);

-    TI[i + 0] = R0;
-    TI[i + 1] = R1;
-    TI[i + 2] = R2;
-    TI[i + 3] = R3;
+      t0 = x[ 8] + x[ 4];
+      t1 = x[ 9] + x[ 5];
+      t2 = x[10] + x[ 6];
+      t3 = x[11] + x[ 7];
+      x[12] ^= hc_rotl32_S (t0, 13);
+      x[13] ^= hc_rotl32_S (t1, 13);
+      x[14] ^= hc_rotl32_S (t2, 13);
+      x[15] ^= hc_rotl32_S (t3, 13);
+
+      t0 = x[12] + x[ 8];
+      t1 = x[13] + x[ 9];
+      t2 = x[14] + x[10];
+      t3 = x[15] + x[11];
+      x[ 0] ^= hc_rotl32_S (t0, 18);
+      x[ 1] ^= hc_rotl32_S (t1, 18);
+      x[ 2] ^= hc_rotl32_S (t2, 18);
+      x[ 3] ^= hc_rotl32_S (t3, 18);
+
+      t0 = x[ 4]; x[ 4] = x[ 7]; x[ 7] = x[ 6]; x[ 6] = x[ 5]; x[ 5] = t0;
+      t0 = x[ 8]; x[ 8] = x[10]; x[10] = t0;
+      t0 = x[ 9]; x[ 9] = x[11]; x[11] = t0;
+      t0 = x[12]; x[12] = x[13]; x[13] = x[14]; x[14] = x[15]; x[15] = t0;
+
+      t0 = x[ 0] + x[ 4];
+      t1 = x[ 1] + x[ 5];
+      t2 = x[ 2] + x[ 6];
+      t3 = x[ 3] + x[ 7];
+      x[12] ^= hc_rotl32_S (t0, 7);
+      x[13] ^= hc_rotl32_S (t1, 7);
+      x[14] ^= hc_rotl32_S (t2, 7);
+      x[15] ^= hc_rotl32_S (t3, 7);
+
+      t0 = x[12] + x[ 0];
+      t1 = x[13] + x[ 1];
+      t2 = x[14] + x[ 2];
+      t3 = x[15] + x[ 3];
+      x[ 8] ^= hc_rotl32_S (t0, 9);
+      x[ 9] ^= hc_rotl32_S (t1, 9);
+      x[10] ^= hc_rotl32_S (t2, 9);
+      x[11] ^= hc_rotl32_S (t3, 9);
+
+      t0 = x[ 8] + x[12];
+      t1 = x[ 9] + x[13];
+      t2 = x[10] + x[14];
+      t3 = x[11] + x[15];
+      x[ 4] ^= hc_rotl32_S (t0, 13);
+      x[ 5] ^= hc_rotl32_S (t1, 13);
+      x[ 6] ^= hc_rotl32_S (t2, 13);
+      x[ 7] ^= hc_rotl32_S (t3, 13);
+
+      t0 = x[ 4] + x[ 8];
+      t1 = x[ 5] + x[ 9];
+      t2 = x[ 6] + x[10];
+      t3 = x[ 7] + x[11];
+      x[ 0] ^= hc_rotl32_S (t0, 18);
+      x[ 1] ^= hc_rotl32_S (t1, 18);
+      x[ 2] ^= hc_rotl32_S (t2, 18);
+      x[ 3] ^= hc_rotl32_S (t3, 18);
+
+      t0 = x[ 4]; x[ 4] = x[ 5]; x[ 5] = x[ 6]; x[ 6] = x[ 7]; x[ 7] = t0;
+      t0 = x[ 8]; x[ 8] = x[10]; x[10] = t0;
+      t0 = x[ 9]; x[ 9] = x[11]; x[11] = t0;
+      t0 = x[15]; x[15] = x[14]; x[14] = x[13]; x[13] = x[12]; x[12] = t0;
+    }
+
+    for (int j = 0; j < 16; j++)
+    {
+      x[j] += TI[i + j];
+    }
+
+    for (int j = 0; j < 16; j++)
+    {
+      TI[i + j] = x[j];
+    }
   }

   #if SCRYPT_R > 1

-  uint4 TT[STATE_CNT4 / 2];
+  u32 TT[STATE_CNT / 2];

-  for (int dst_off = 0, src_off = 4; src_off < STATE_CNT4; dst_off += 4, src_off += 8)
+  for (int dst_off = 0, src_off = 16; src_off < STATE_CNT; dst_off += 16, src_off += 32)
   {
-    TT[dst_off + 0] = TI[src_off + 0];
-    TT[dst_off + 1] = TI[src_off + 1];
-    TT[dst_off + 2] = TI[src_off + 2];
-    TT[dst_off + 3] = TI[src_off + 3];
+    for (int j = 0; j < 16; j++) TT[dst_off + j] = TI[src_off + j];
   }

-  for (int dst_off = 4, src_off = 8; src_off < STATE_CNT4; dst_off += 4, src_off += 8)
+  for (int dst_off = 16, src_off = 32; src_off < STATE_CNT; dst_off += 16, src_off += 32)
   {
-    TI[dst_off + 0] = TI[src_off + 0];
-    TI[dst_off + 1] = TI[src_off + 1];
-    TI[dst_off + 2] = TI[src_off + 2];
-    TI[dst_off + 3] = TI[src_off + 3];
+    for (int j = 0; j < 16; j++) TI[dst_off + j] = TI[src_off + j];
   }

-  for (int dst_off = STATE_CNT4 / 2, src_off = 0; dst_off < STATE_CNT4; dst_off += 4, src_off += 4)
+  for (int dst_off = STATE_CNT / 2, src_off = 0; dst_off < STATE_CNT; dst_off += 16, src_off += 16)
   {
-    TI[dst_off + 0] = TT[src_off + 0];
-    TI[dst_off + 1] = TT[src_off + 1];
-    TI[dst_off + 2] = TT[src_off + 2];
-    TI[dst_off + 3] = TT[src_off + 3];
+    for (int j = 0; j < 16; j++) TI[dst_off + j] = TT[src_off + j];
   }

   #endif
+
 }

 DECLSPEC void scrypt_smix_init (PRIVATE_AS uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3, const u64 gid)
@@ -219,7 +219,7 @@ DECLSPEC void scrypt_smix_init (PRIVATE_AS uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL
   {
     for (u32 z = 0; z < zSIZE; z++) V[CO] = X[z];

-    for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r (X);
+    for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r ((u32 *) X);
   }
 }

@@ -257,11 +257,11 @@ DECLSPEC void scrypt_smix_loop (PRIVATE_AS uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL

     for (u32 z = 0; z < zSIZE; z++) T[z] = V[CO];

-    for (u32 i = 0; i < km; i++) salsa_r (T);
+    for (u32 i = 0; i < km; i++) salsa_r ((u32 *) T);

     for (u32 z = 0; z < zSIZE; z++) X[z] ^= T[z];

-    salsa_r (X);
+    salsa_r ((u32 *) X);
  }
 }
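
/*
 * Reviewer note, not part of the patch: a minimal host-side reference for
 * the Salsa20/8 core that the new salsa_r unrolls, in the textbook form of
 * Percival's scrypt paper. rotl32 and salsa20_8 are illustrative names, not
 * hashcat identifiers. The kernel code works in the lane-rotated state
 * layout that SIMD scrypt implementations use (each row pre-rotated so all
 * four quarter-rounds of a stanza are lane-parallel across x[4i..4i+3]);
 * the x[ 4]/x[ 8]/x[ 9]/x[12..15] swap lines advance that rotation between
 * half-rounds. Up to that fixed permutation of the sixteen words, four
 * passes of the kernel's r-loop compute the same eight rounds as below.
 */

#include <stdint.h>

static uint32_t rotl32 (uint32_t x, int n)
{
  return (x << n) | (x >> (32 - n));
}

/* B <- Salsa20/8 (B), where B is 16 little-endian u32 words (one 64-byte block) */
static void salsa20_8 (uint32_t B[16])
{
  uint32_t x[16];

  for (int i = 0; i < 16; i++) x[i] = B[i];

  for (int r = 0; r < 8; r += 2)
  {
    /* column round */
    x[ 4] ^= rotl32 (x[ 0] + x[12],  7);  x[ 8] ^= rotl32 (x[ 4] + x[ 0],  9);
    x[12] ^= rotl32 (x[ 8] + x[ 4], 13);  x[ 0] ^= rotl32 (x[12] + x[ 8], 18);
    x[ 9] ^= rotl32 (x[ 5] + x[ 1],  7);  x[13] ^= rotl32 (x[ 9] + x[ 5],  9);
    x[ 1] ^= rotl32 (x[13] + x[ 9], 13);  x[ 5] ^= rotl32 (x[ 1] + x[13], 18);
    x[14] ^= rotl32 (x[10] + x[ 6],  7);  x[ 2] ^= rotl32 (x[14] + x[10],  9);
    x[ 6] ^= rotl32 (x[ 2] + x[14], 13);  x[10] ^= rotl32 (x[ 6] + x[ 2], 18);
    x[ 3] ^= rotl32 (x[15] + x[11],  7);  x[ 7] ^= rotl32 (x[ 3] + x[15],  9);
    x[11] ^= rotl32 (x[ 7] + x[ 3], 13);  x[15] ^= rotl32 (x[11] + x[ 7], 18);

    /* row round */
    x[ 1] ^= rotl32 (x[ 0] + x[ 3],  7);  x[ 2] ^= rotl32 (x[ 1] + x[ 0],  9);
    x[ 3] ^= rotl32 (x[ 2] + x[ 1], 13);  x[ 0] ^= rotl32 (x[ 3] + x[ 2], 18);
    x[ 6] ^= rotl32 (x[ 5] + x[ 4],  7);  x[ 7] ^= rotl32 (x[ 6] + x[ 5],  9);
    x[ 4] ^= rotl32 (x[ 7] + x[ 6], 13);  x[ 5] ^= rotl32 (x[ 4] + x[ 7], 18);
    x[11] ^= rotl32 (x[10] + x[ 9],  7);  x[ 8] ^= rotl32 (x[11] + x[10],  9);
    x[ 9] ^= rotl32 (x[ 8] + x[11], 13);  x[10] ^= rotl32 (x[ 9] + x[ 8], 18);
    x[12] ^= rotl32 (x[15] + x[14],  7);  x[13] ^= rotl32 (x[12] + x[15],  9);
    x[14] ^= rotl32 (x[13] + x[12], 13);  x[15] ^= rotl32 (x[14] + x[13], 18);
  }

  /* feed-forward, matching the kernel's x[j] += TI[i + j] step */
  for (int i = 0; i < 16; i++) B[i] += x[i];
}

/* feeding the same block through both versions (with the layout permutation
   applied) and diffing the outputs is a quick equivalence check */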
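
/*
 * A second sketch, also outside the patch: salsa_r as a whole is scrypt's
 * BlockMix. STATE_CNT here spans the 2*r Salsa blocks of 16 u32 words, so
 * the i-loop runs X ^= B[i]; X = Salsa20/8 (X); B[i] = X with X seeded from
 * the last block, and the three copy loops under #if SCRYPT_R > 1 gather
 * even-indexed blocks into the first half and odd-indexed blocks into the
 * second. The reference formulation below (hypothetical names, full-size
 * scratch Y as in the paper) shows the permutation the in-place version
 * reproduces with only half a state of scratch: odd blocks parked in TT,
 * even blocks compacted downward, TT appended.
 */

/* BlockMix_{Salsa20/8, r}: B and Y each hold 2*r blocks of 16 u32 words */
static void blockmix_salsa8 (uint32_t *B, uint32_t *Y, int r)
{
  uint32_t X[16];

  /* X <- B[2r-1], the last 64-byte block */
  for (int j = 0; j < 16; j++) X[j] = B[(2 * r - 1) * 16 + j];

  for (int i = 0; i < 2 * r; i++)
  {
    for (int j = 0; j < 16; j++) X[j] ^= B[i * 16 + j];

    salsa20_8 (X);

    for (int j = 0; j < 16; j++) Y[i * 16 + j] = X[j];
  }

  /* output order: Y0, Y2, ..., Y{2r-2}, then Y1, Y3, ..., Y{2r-1} */
  for (int i = 0; i < r; i++)
    for (int j = 0; j < 16; j++)
      B[i * 16 + j] = Y[(2 * i) * 16 + j];

  for (int i = 0; i < r; i++)
    for (int j = 0; j < 16; j++)
      B[(r + i) * 16 + j] = Y[(2 * i + 1) * 16 + j];
}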