Mirror of https://github.com/hashcat/hashcat.git (synced 2025-02-20 11:32:12 +00:00)
Removed unnecessary swaps in SCRYPT based algorithms
This commit is contained in:
parent 81a76e363b
commit 1dac869cb7
@@ -170,22 +170,18 @@ DECLSPEC void salsa_r (uint4 *TI)
     TT[idx_r2++] = R3;
   }
 
-  idx_r1 = 0;
-  idx_r2 = SCRYPT_R * 4;
+  idx_r2 = 0;
 
-  #ifdef _unroll
-  #pragma unroll
-  #endif
   for (int i = 0; i < SCRYPT_R; i++)
   {
-    TI[idx_r2++] = TT[idx_r1++];
-    TI[idx_r2++] = TT[idx_r1++];
-    TI[idx_r2++] = TT[idx_r1++];
-    TI[idx_r2++] = TT[idx_r1++];
+    TI[idx_r1++] = TT[idx_r2++];
+    TI[idx_r1++] = TT[idx_r2++];
+    TI[idx_r1++] = TT[idx_r2++];
+    TI[idx_r1++] = TT[idx_r2++];
   }
 }
 
-DECLSPEC void scrypt_smix_init (uint4 *X, uint4 *T, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3)
+DECLSPEC void scrypt_smix_init (uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3)
 {
   #define Coord(xd4,y,z) (((xd4) * ySIZE * zSIZE) + ((y) * zSIZE) + (z))
   #define CO Coord(xd4,y,z)
@@ -208,55 +204,15 @@ DECLSPEC void scrypt_smix_init (uint4 *X, uint4 *T, GLOBAL_AS uint4 *V0, GLOBAL_
     case 3: V = V3; break;
   }
 
-  for (u32 i = 0; i < STATE_CNT4; i += 4)
-  {
-    #ifdef IS_CUDA
-    T[0] = make_uint4 (X[i + 0].x, X[i + 1].y, X[i + 2].z, X[i + 3].w);
-    T[1] = make_uint4 (X[i + 1].x, X[i + 2].y, X[i + 3].z, X[i + 0].w);
-    T[2] = make_uint4 (X[i + 2].x, X[i + 3].y, X[i + 0].z, X[i + 1].w);
-    T[3] = make_uint4 (X[i + 3].x, X[i + 0].y, X[i + 1].z, X[i + 2].w);
-    #else
-    T[0] = (uint4) (X[i + 0].x, X[i + 1].y, X[i + 2].z, X[i + 3].w);
-    T[1] = (uint4) (X[i + 1].x, X[i + 2].y, X[i + 3].z, X[i + 0].w);
-    T[2] = (uint4) (X[i + 2].x, X[i + 3].y, X[i + 0].z, X[i + 1].w);
-    T[3] = (uint4) (X[i + 3].x, X[i + 0].y, X[i + 1].z, X[i + 2].w);
-    #endif
-
-    X[i + 0] = T[0];
-    X[i + 1] = T[1];
-    X[i + 2] = T[2];
-    X[i + 3] = T[3];
-  }
-
   for (u32 y = 0; y < ySIZE; y++)
   {
     for (u32 z = 0; z < zSIZE; z++) V[CO] = X[z];
 
     for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r (X);
   }
 
-  for (u32 i = 0; i < STATE_CNT4; i += 4)
-  {
-    #ifdef IS_CUDA
-    T[0] = make_uint4 (X[i + 0].x, X[i + 3].y, X[i + 2].z, X[i + 1].w);
-    T[1] = make_uint4 (X[i + 1].x, X[i + 0].y, X[i + 3].z, X[i + 2].w);
-    T[2] = make_uint4 (X[i + 2].x, X[i + 1].y, X[i + 0].z, X[i + 3].w);
-    T[3] = make_uint4 (X[i + 3].x, X[i + 2].y, X[i + 1].z, X[i + 0].w);
-    #else
-    T[0] = (uint4) (X[i + 0].x, X[i + 3].y, X[i + 2].z, X[i + 1].w);
-    T[1] = (uint4) (X[i + 1].x, X[i + 0].y, X[i + 3].z, X[i + 2].w);
-    T[2] = (uint4) (X[i + 2].x, X[i + 1].y, X[i + 0].z, X[i + 3].w);
-    T[3] = (uint4) (X[i + 3].x, X[i + 2].y, X[i + 1].z, X[i + 0].w);
-    #endif
-
-    X[i + 0] = T[0];
-    X[i + 1] = T[1];
-    X[i + 2] = T[2];
-    X[i + 3] = T[3];
-  }
 }
 
-DECLSPEC void scrypt_smix_loop (uint4 *X, uint4 *T, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3)
+DECLSPEC void scrypt_smix_loop (uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3)
 {
   #define Coord(xd4,y,z) (((xd4) * ySIZE * zSIZE) + ((y) * zSIZE) + (z))
   #define CO Coord(xd4,y,z)
@@ -279,26 +235,6 @@ DECLSPEC void scrypt_smix_loop (uint4 *X, uint4 *T, GLOBAL_AS uint4 *V0, GLOBAL_
     case 3: V = V3; break;
   }
 
-  for (u32 i = 0; i < STATE_CNT4; i += 4)
-  {
-    #ifdef IS_CUDA
-    T[0] = make_uint4 (X[i + 0].x, X[i + 1].y, X[i + 2].z, X[i + 3].w);
-    T[1] = make_uint4 (X[i + 1].x, X[i + 2].y, X[i + 3].z, X[i + 0].w);
-    T[2] = make_uint4 (X[i + 2].x, X[i + 3].y, X[i + 0].z, X[i + 1].w);
-    T[3] = make_uint4 (X[i + 3].x, X[i + 0].y, X[i + 1].z, X[i + 2].w);
-    #else
-    T[0] = (uint4) (X[i + 0].x, X[i + 1].y, X[i + 2].z, X[i + 3].w);
-    T[1] = (uint4) (X[i + 1].x, X[i + 2].y, X[i + 3].z, X[i + 0].w);
-    T[2] = (uint4) (X[i + 2].x, X[i + 3].y, X[i + 0].z, X[i + 1].w);
-    T[3] = (uint4) (X[i + 3].x, X[i + 0].y, X[i + 1].z, X[i + 2].w);
-    #endif
-
-    X[i + 0] = T[0];
-    X[i + 1] = T[1];
-    X[i + 2] = T[2];
-    X[i + 3] = T[3];
-  }
-
   for (u32 N_pos = 0; N_pos < 1024; N_pos++)
   {
     const u32 k = X[zSIZE - 4].x & (SCRYPT_N - 1);
@@ -307,6 +243,8 @@ DECLSPEC void scrypt_smix_loop (uint4 *X, uint4 *T, GLOBAL_AS uint4 *V0, GLOBAL_
 
     const u32 km = k - (y * SCRYPT_TMTO);
 
+    uint4 T[STATE_CNT4];
+
     for (u32 z = 0; z < zSIZE; z++) T[z] = V[CO];
 
     for (u32 i = 0; i < km; i++) salsa_r (T);
@@ -315,26 +253,6 @@ DECLSPEC void scrypt_smix_loop (uint4 *X, uint4 *T, GLOBAL_AS uint4 *V0, GLOBAL_
 
     salsa_r (X);
   }
 
-  for (u32 i = 0; i < STATE_CNT4; i += 4)
-  {
-    #ifdef IS_CUDA
-    T[0] = make_uint4 (X[i + 0].x, X[i + 3].y, X[i + 2].z, X[i + 1].w);
-    T[1] = make_uint4 (X[i + 1].x, X[i + 0].y, X[i + 3].z, X[i + 2].w);
-    T[2] = make_uint4 (X[i + 2].x, X[i + 1].y, X[i + 0].z, X[i + 3].w);
-    T[3] = make_uint4 (X[i + 3].x, X[i + 2].y, X[i + 1].z, X[i + 0].w);
-    #else
-    T[0] = (uint4) (X[i + 0].x, X[i + 3].y, X[i + 2].z, X[i + 1].w);
-    T[1] = (uint4) (X[i + 1].x, X[i + 0].y, X[i + 3].z, X[i + 2].w);
-    T[2] = (uint4) (X[i + 2].x, X[i + 1].y, X[i + 0].z, X[i + 3].w);
-    T[3] = (uint4) (X[i + 3].x, X[i + 2].y, X[i + 1].z, X[i + 0].w);
-    #endif
-
-    X[i + 0] = T[0];
-    X[i + 1] = T[1];
-    X[i + 2] = T[2];
-    X[i + 3] = T[3];
-  }
 }
 
 KERNEL_FQ void m08900_init (KERN_ATTR_TMPS (scrypt_tmp_t))
@@ -405,6 +323,40 @@ KERNEL_FQ void m08900_init (KERN_ATTR_TMPS (scrypt_tmp_t))
     tmps[gid].P[k + 0] = tmp0;
     tmps[gid].P[k + 1] = tmp1;
   }
+
+  for (u32 l = 0; l < SCRYPT_CNT4; l += 4)
+  {
+    uint4 T[4];
+
+    T[0] = tmps[gid].P[l + 0];
+    T[1] = tmps[gid].P[l + 1];
+    T[2] = tmps[gid].P[l + 2];
+    T[3] = tmps[gid].P[l + 3];
+
+    T[0] = hc_swap32_4 (T[0]);
+    T[1] = hc_swap32_4 (T[1]);
+    T[2] = hc_swap32_4 (T[2]);
+    T[3] = hc_swap32_4 (T[3]);
+
+    uint4 X[4];
+
+    #ifdef IS_CUDA
+    X[0] = make_uint4 (T[0].x, T[1].y, T[2].z, T[3].w);
+    X[1] = make_uint4 (T[1].x, T[2].y, T[3].z, T[0].w);
+    X[2] = make_uint4 (T[2].x, T[3].y, T[0].z, T[1].w);
+    X[3] = make_uint4 (T[3].x, T[0].y, T[1].z, T[2].w);
+    #else
+    X[0] = (uint4) (T[0].x, T[1].y, T[2].z, T[3].w);
+    X[1] = (uint4) (T[1].x, T[2].y, T[3].z, T[0].w);
+    X[2] = (uint4) (T[2].x, T[3].y, T[0].z, T[1].w);
+    X[3] = (uint4) (T[3].x, T[0].y, T[1].z, T[2].w);
+    #endif
+
+    tmps[gid].P[l + 0] = X[0];
+    tmps[gid].P[l + 1] = X[1];
+    tmps[gid].P[l + 2] = X[2];
+    tmps[gid].P[l + 3] = X[3];
+  }
 }
 
 KERNEL_FQ void m08900_loop_prepare (KERN_ATTR_TMPS (scrypt_tmp_t))
@@ -414,6 +366,7 @@ KERNEL_FQ void m08900_loop_prepare (KERN_ATTR_TMPS (scrypt_tmp_t))
   */
 
   const u64 gid = get_global_id (0);
+  const u64 lid = get_local_id (0);
 
   if (gid >= gid_max) return;
 
@@ -425,26 +378,20 @@ KERNEL_FQ void m08900_loop_prepare (KERN_ATTR_TMPS (scrypt_tmp_t))
   GLOBAL_AS uint4 *d_scrypt3_buf = (GLOBAL_AS uint4 *) d_extra3_buf;
 
   uint4 X[STATE_CNT4];
-  uint4 T[STATE_CNT4];
 
   const u32 P_offset = salt_repeat * STATE_CNT4;
 
-  #ifdef _unroll
-  #pragma unroll
-  #endif
-  for (int z = 0; z < STATE_CNT4; z++) X[z] = hc_swap32_4 (tmps[gid].P[P_offset + z]);
+  for (int z = 0; z < STATE_CNT4; z++) X[z] = tmps[gid].P[P_offset + z];
 
-  scrypt_smix_init (X, T, d_scrypt0_buf, d_scrypt1_buf, d_scrypt2_buf, d_scrypt3_buf);
+  scrypt_smix_init (X, d_scrypt0_buf, d_scrypt1_buf, d_scrypt2_buf, d_scrypt3_buf);
 
-  #ifdef _unroll
-  #pragma unroll
-  #endif
-  for (int z = 0; z < STATE_CNT4; z++) tmps[gid].P[P_offset + z] = hc_swap32_4 (X[z]);
+  for (int z = 0; z < STATE_CNT4; z++) tmps[gid].P[P_offset + z] = X[z];
 }
 
 KERNEL_FQ void m08900_loop (KERN_ATTR_TMPS (scrypt_tmp_t))
 {
   const u64 gid = get_global_id (0);
+  const u64 lid = get_local_id (0);
 
   if (gid >= gid_max) return;
 
@@ -454,21 +401,14 @@ KERNEL_FQ void m08900_loop (KERN_ATTR_TMPS (scrypt_tmp_t))
   GLOBAL_AS uint4 *d_scrypt3_buf = (GLOBAL_AS uint4 *) d_extra3_buf;
 
   uint4 X[STATE_CNT4];
-  uint4 T[STATE_CNT4];
 
   const u32 P_offset = salt_repeat * STATE_CNT4;
 
-  #ifdef _unroll
-  #pragma unroll
-  #endif
-  for (int z = 0; z < STATE_CNT4; z++) X[z] = hc_swap32_4 (tmps[gid].P[P_offset + z]);
+  for (int z = 0; z < STATE_CNT4; z++) X[z] = tmps[gid].P[P_offset + z];
 
-  scrypt_smix_loop (X, T, d_scrypt0_buf, d_scrypt1_buf, d_scrypt2_buf, d_scrypt3_buf);
+  scrypt_smix_loop (X, d_scrypt0_buf, d_scrypt1_buf, d_scrypt2_buf, d_scrypt3_buf);
 
-  #ifdef _unroll
-  #pragma unroll
-  #endif
-  for (int z = 0; z < STATE_CNT4; z++) tmps[gid].P[P_offset + z] = hc_swap32_4 (X[z]);
+  for (int z = 0; z < STATE_CNT4; z++) tmps[gid].P[P_offset + z] = X[z];
 }
 
 KERNEL_FQ void m08900_comp (KERN_ATTR_TMPS (scrypt_tmp_t))
@@ -497,35 +437,48 @@ KERNEL_FQ void m08900_comp (KERN_ATTR_TMPS (scrypt_tmp_t))
 
   for (u32 l = 0; l < SCRYPT_CNT4; l += 4)
   {
-    uint4 tmp;
-
-    tmp = tmps[gid].P[l + 0];
-
-    w0[0] = tmp.x;
-    w0[1] = tmp.y;
-    w0[2] = tmp.z;
-    w0[3] = tmp.w;
-
-    tmp = tmps[gid].P[l + 1];
-
-    w1[0] = tmp.x;
-    w1[1] = tmp.y;
-    w1[2] = tmp.z;
-    w1[3] = tmp.w;
-
-    tmp = tmps[gid].P[l + 2];
-
-    w2[0] = tmp.x;
-    w2[1] = tmp.y;
-    w2[2] = tmp.z;
-    w2[3] = tmp.w;
-
-    tmp = tmps[gid].P[l + 3];
-
-    w3[0] = tmp.x;
-    w3[1] = tmp.y;
-    w3[2] = tmp.z;
-    w3[3] = tmp.w;
+    uint4 X[4];
+
+    X[0] = tmps[gid].P[l + 0];
+    X[1] = tmps[gid].P[l + 1];
+    X[2] = tmps[gid].P[l + 2];
+    X[3] = tmps[gid].P[l + 3];
+
+    uint4 T[4];
+
+    #ifdef IS_CUDA
+    T[0] = make_uint4 (X[0].x, X[3].y, X[2].z, X[1].w);
+    T[1] = make_uint4 (X[1].x, X[0].y, X[3].z, X[2].w);
+    T[2] = make_uint4 (X[2].x, X[1].y, X[0].z, X[3].w);
+    T[3] = make_uint4 (X[3].x, X[2].y, X[1].z, X[0].w);
+    #else
+    T[0] = (uint4) (X[0].x, X[3].y, X[2].z, X[1].w);
+    T[1] = (uint4) (X[1].x, X[0].y, X[3].z, X[2].w);
+    T[2] = (uint4) (X[2].x, X[1].y, X[0].z, X[3].w);
+    T[3] = (uint4) (X[3].x, X[2].y, X[1].z, X[0].w);
+    #endif
+
+    T[0] = hc_swap32_4 (T[0]);
+    T[1] = hc_swap32_4 (T[1]);
+    T[2] = hc_swap32_4 (T[2]);
+    T[3] = hc_swap32_4 (T[3]);
+
+    w0[0] = T[0].x;
+    w0[1] = T[0].y;
+    w0[2] = T[0].z;
+    w0[3] = T[0].w;
+    w1[0] = T[1].x;
+    w1[1] = T[1].y;
+    w1[2] = T[1].z;
+    w1[3] = T[1].w;
+    w2[0] = T[2].x;
+    w2[1] = T[2].y;
+    w2[2] = T[2].z;
+    w2[3] = T[2].w;
+    w3[0] = T[3].x;
+    w3[1] = T[3].y;
+    w3[2] = T[3].z;
+    w3[3] = T[3].w;
 
     sha256_hmac_update_64 (&ctx, w0, w1, w2, w3, 64);
   }
@@ -177,22 +177,18 @@ DECLSPEC void salsa_r (uint4 *TI)
     TT[idx_r2++] = R3;
   }
 
-  idx_r1 = 0;
-  idx_r2 = SCRYPT_R * 4;
+  idx_r2 = 0;
 
-  #ifdef _unroll
-  #pragma unroll
-  #endif
   for (int i = 0; i < SCRYPT_R; i++)
   {
-    TI[idx_r2++] = TT[idx_r1++];
-    TI[idx_r2++] = TT[idx_r1++];
-    TI[idx_r2++] = TT[idx_r1++];
-    TI[idx_r2++] = TT[idx_r1++];
+    TI[idx_r1++] = TT[idx_r2++];
+    TI[idx_r1++] = TT[idx_r2++];
+    TI[idx_r1++] = TT[idx_r2++];
+    TI[idx_r1++] = TT[idx_r2++];
   }
 }
 
-DECLSPEC void scrypt_smix_init (uint4 *X, uint4 *T, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3)
+DECLSPEC void scrypt_smix_init (uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3)
 {
   #define Coord(xd4,y,z) (((xd4) * ySIZE * zSIZE) + ((y) * zSIZE) + (z))
   #define CO Coord(xd4,y,z)
@@ -215,55 +211,15 @@ DECLSPEC void scrypt_smix_init (uint4 *X, uint4 *T, GLOBAL_AS uint4 *V0, GLOBAL_
     case 3: V = V3; break;
   }
 
-  for (u32 i = 0; i < STATE_CNT4; i += 4)
-  {
-    #ifdef IS_CUDA
-    T[0] = make_uint4 (X[i + 0].x, X[i + 1].y, X[i + 2].z, X[i + 3].w);
-    T[1] = make_uint4 (X[i + 1].x, X[i + 2].y, X[i + 3].z, X[i + 0].w);
-    T[2] = make_uint4 (X[i + 2].x, X[i + 3].y, X[i + 0].z, X[i + 1].w);
-    T[3] = make_uint4 (X[i + 3].x, X[i + 0].y, X[i + 1].z, X[i + 2].w);
-    #else
-    T[0] = (uint4) (X[i + 0].x, X[i + 1].y, X[i + 2].z, X[i + 3].w);
-    T[1] = (uint4) (X[i + 1].x, X[i + 2].y, X[i + 3].z, X[i + 0].w);
-    T[2] = (uint4) (X[i + 2].x, X[i + 3].y, X[i + 0].z, X[i + 1].w);
-    T[3] = (uint4) (X[i + 3].x, X[i + 0].y, X[i + 1].z, X[i + 2].w);
-    #endif
-
-    X[i + 0] = T[0];
-    X[i + 1] = T[1];
-    X[i + 2] = T[2];
-    X[i + 3] = T[3];
-  }
-
   for (u32 y = 0; y < ySIZE; y++)
   {
     for (u32 z = 0; z < zSIZE; z++) V[CO] = X[z];
 
     for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r (X);
   }
 
-  for (u32 i = 0; i < STATE_CNT4; i += 4)
-  {
-    #ifdef IS_CUDA
-    T[0] = make_uint4 (X[i + 0].x, X[i + 3].y, X[i + 2].z, X[i + 1].w);
-    T[1] = make_uint4 (X[i + 1].x, X[i + 0].y, X[i + 3].z, X[i + 2].w);
-    T[2] = make_uint4 (X[i + 2].x, X[i + 1].y, X[i + 0].z, X[i + 3].w);
-    T[3] = make_uint4 (X[i + 3].x, X[i + 2].y, X[i + 1].z, X[i + 0].w);
-    #else
-    T[0] = (uint4) (X[i + 0].x, X[i + 3].y, X[i + 2].z, X[i + 1].w);
-    T[1] = (uint4) (X[i + 1].x, X[i + 0].y, X[i + 3].z, X[i + 2].w);
-    T[2] = (uint4) (X[i + 2].x, X[i + 1].y, X[i + 0].z, X[i + 3].w);
-    T[3] = (uint4) (X[i + 3].x, X[i + 2].y, X[i + 1].z, X[i + 0].w);
-    #endif
-
-    X[i + 0] = T[0];
-    X[i + 1] = T[1];
-    X[i + 2] = T[2];
-    X[i + 3] = T[3];
-  }
 }
 
-DECLSPEC void scrypt_smix_loop (uint4 *X, uint4 *T, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3)
+DECLSPEC void scrypt_smix_loop (uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3)
 {
   #define Coord(xd4,y,z) (((xd4) * ySIZE * zSIZE) + ((y) * zSIZE) + (z))
   #define CO Coord(xd4,y,z)
@@ -286,26 +242,6 @@ DECLSPEC void scrypt_smix_loop (uint4 *X, uint4 *T, GLOBAL_AS uint4 *V0, GLOBAL_
     case 3: V = V3; break;
   }
 
-  for (u32 i = 0; i < STATE_CNT4; i += 4)
-  {
-    #ifdef IS_CUDA
-    T[0] = make_uint4 (X[i + 0].x, X[i + 1].y, X[i + 2].z, X[i + 3].w);
-    T[1] = make_uint4 (X[i + 1].x, X[i + 2].y, X[i + 3].z, X[i + 0].w);
-    T[2] = make_uint4 (X[i + 2].x, X[i + 3].y, X[i + 0].z, X[i + 1].w);
-    T[3] = make_uint4 (X[i + 3].x, X[i + 0].y, X[i + 1].z, X[i + 2].w);
-    #else
-    T[0] = (uint4) (X[i + 0].x, X[i + 1].y, X[i + 2].z, X[i + 3].w);
-    T[1] = (uint4) (X[i + 1].x, X[i + 2].y, X[i + 3].z, X[i + 0].w);
-    T[2] = (uint4) (X[i + 2].x, X[i + 3].y, X[i + 0].z, X[i + 1].w);
-    T[3] = (uint4) (X[i + 3].x, X[i + 0].y, X[i + 1].z, X[i + 2].w);
-    #endif
-
-    X[i + 0] = T[0];
-    X[i + 1] = T[1];
-    X[i + 2] = T[2];
-    X[i + 3] = T[3];
-  }
-
   for (u32 N_pos = 0; N_pos < 1024; N_pos++)
   {
     const u32 k = X[zSIZE - 4].x & (SCRYPT_N - 1);
@@ -314,6 +250,8 @@ DECLSPEC void scrypt_smix_loop (uint4 *X, uint4 *T, GLOBAL_AS uint4 *V0, GLOBAL_
 
     const u32 km = k - (y * SCRYPT_TMTO);
 
+    uint4 T[STATE_CNT4];
+
     for (u32 z = 0; z < zSIZE; z++) T[z] = V[CO];
 
     for (u32 i = 0; i < km; i++) salsa_r (T);
@@ -322,26 +260,6 @@ DECLSPEC void scrypt_smix_loop (uint4 *X, uint4 *T, GLOBAL_AS uint4 *V0, GLOBAL_
 
     salsa_r (X);
   }
 
-  for (u32 i = 0; i < STATE_CNT4; i += 4)
-  {
-    #ifdef IS_CUDA
-    T[0] = make_uint4 (X[i + 0].x, X[i + 3].y, X[i + 2].z, X[i + 1].w);
-    T[1] = make_uint4 (X[i + 1].x, X[i + 0].y, X[i + 3].z, X[i + 2].w);
-    T[2] = make_uint4 (X[i + 2].x, X[i + 1].y, X[i + 0].z, X[i + 3].w);
-    T[3] = make_uint4 (X[i + 3].x, X[i + 2].y, X[i + 1].z, X[i + 0].w);
-    #else
-    T[0] = (uint4) (X[i + 0].x, X[i + 3].y, X[i + 2].z, X[i + 1].w);
-    T[1] = (uint4) (X[i + 1].x, X[i + 0].y, X[i + 3].z, X[i + 2].w);
-    T[2] = (uint4) (X[i + 2].x, X[i + 1].y, X[i + 0].z, X[i + 3].w);
-    T[3] = (uint4) (X[i + 3].x, X[i + 2].y, X[i + 1].z, X[i + 0].w);
-    #endif
-
-    X[i + 0] = T[0];
-    X[i + 1] = T[1];
-    X[i + 2] = T[2];
-    X[i + 3] = T[3];
-  }
 }
 
 #ifndef KECCAK_ROUNDS
@@ -541,15 +459,50 @@ KERNEL_FQ void m15700_init (KERN_ATTR_TMPS_ESALT (scrypt_tmp_t, ethereum_scrypt_
     tmps[gid].P[k + 0] = tmp0;
     tmps[gid].P[k + 1] = tmp1;
   }
+
+  for (u32 l = 0; l < SCRYPT_CNT4; l += 4)
+  {
+    uint4 T[4];
+
+    T[0] = tmps[gid].P[l + 0];
+    T[1] = tmps[gid].P[l + 1];
+    T[2] = tmps[gid].P[l + 2];
+    T[3] = tmps[gid].P[l + 3];
+
+    T[0] = hc_swap32_4 (T[0]);
+    T[1] = hc_swap32_4 (T[1]);
+    T[2] = hc_swap32_4 (T[2]);
+    T[3] = hc_swap32_4 (T[3]);
+
+    uint4 X[4];
+
+    #ifdef IS_CUDA
+    X[0] = make_uint4 (T[0].x, T[1].y, T[2].z, T[3].w);
+    X[1] = make_uint4 (T[1].x, T[2].y, T[3].z, T[0].w);
+    X[2] = make_uint4 (T[2].x, T[3].y, T[0].z, T[1].w);
+    X[3] = make_uint4 (T[3].x, T[0].y, T[1].z, T[2].w);
+    #else
+    X[0] = (uint4) (T[0].x, T[1].y, T[2].z, T[3].w);
+    X[1] = (uint4) (T[1].x, T[2].y, T[3].z, T[0].w);
+    X[2] = (uint4) (T[2].x, T[3].y, T[0].z, T[1].w);
+    X[3] = (uint4) (T[3].x, T[0].y, T[1].z, T[2].w);
+    #endif
+
+    tmps[gid].P[l + 0] = X[0];
+    tmps[gid].P[l + 1] = X[1];
+    tmps[gid].P[l + 2] = X[2];
+    tmps[gid].P[l + 3] = X[3];
+  }
 }
 
-KERNEL_FQ void m15700_loop_prepare (KERN_ATTR_TMPS_ESALT (scrypt_tmp_t, ethereum_scrypt_t))
+KERNEL_FQ void m15700_loop_prepare (KERN_ATTR_TMPS (scrypt_tmp_t))
 {
   /**
    * base
   */
 
   const u64 gid = get_global_id (0);
+  const u64 lid = get_local_id (0);
 
   if (gid >= gid_max) return;
 
@@ -561,26 +514,20 @@ KERNEL_FQ void m15700_loop_prepare (KERN_ATTR_TMPS_ESALT (scrypt_tmp_t, ethereum
   GLOBAL_AS uint4 *d_scrypt3_buf = (GLOBAL_AS uint4 *) d_extra3_buf;
 
   uint4 X[STATE_CNT4];
-  uint4 T[STATE_CNT4];
 
   const u32 P_offset = salt_repeat * STATE_CNT4;
 
-  #ifdef _unroll
-  #pragma unroll
-  #endif
-  for (int z = 0; z < STATE_CNT4; z++) X[z] = hc_swap32_4 (tmps[gid].P[P_offset + z]);
+  for (int z = 0; z < STATE_CNT4; z++) X[z] = tmps[gid].P[P_offset + z];
 
-  scrypt_smix_init (X, T, d_scrypt0_buf, d_scrypt1_buf, d_scrypt2_buf, d_scrypt3_buf);
+  scrypt_smix_init (X, d_scrypt0_buf, d_scrypt1_buf, d_scrypt2_buf, d_scrypt3_buf);
 
-  #ifdef _unroll
-  #pragma unroll
-  #endif
-  for (int z = 0; z < STATE_CNT4; z++) tmps[gid].P[P_offset + z] = hc_swap32_4 (X[z]);
+  for (int z = 0; z < STATE_CNT4; z++) tmps[gid].P[P_offset + z] = X[z];
 }
 
-KERNEL_FQ void m15700_loop (KERN_ATTR_TMPS_ESALT (scrypt_tmp_t, ethereum_scrypt_t))
+KERNEL_FQ void m15700_loop (KERN_ATTR_TMPS (scrypt_tmp_t))
 {
   const u64 gid = get_global_id (0);
+  const u64 lid = get_local_id (0);
 
   if (gid >= gid_max) return;
 
@@ -590,21 +537,14 @@ KERNEL_FQ void m15700_loop (KERN_ATTR_TMPS_ESALT (scrypt_tmp_t, ethereum_scrypt_
   GLOBAL_AS uint4 *d_scrypt3_buf = (GLOBAL_AS uint4 *) d_extra3_buf;
 
   uint4 X[STATE_CNT4];
-  uint4 T[STATE_CNT4];
 
   const u32 P_offset = salt_repeat * STATE_CNT4;
 
-  #ifdef _unroll
-  #pragma unroll
-  #endif
-  for (int z = 0; z < STATE_CNT4; z++) X[z] = hc_swap32_4 (tmps[gid].P[P_offset + z]);
+  for (int z = 0; z < STATE_CNT4; z++) X[z] = tmps[gid].P[P_offset + z];
 
-  scrypt_smix_loop (X, T, d_scrypt0_buf, d_scrypt1_buf, d_scrypt2_buf, d_scrypt3_buf);
+  scrypt_smix_loop (X, d_scrypt0_buf, d_scrypt1_buf, d_scrypt2_buf, d_scrypt3_buf);
 
-  #ifdef _unroll
-  #pragma unroll
-  #endif
-  for (int z = 0; z < STATE_CNT4; z++) tmps[gid].P[P_offset + z] = hc_swap32_4 (X[z]);
+  for (int z = 0; z < STATE_CNT4; z++) tmps[gid].P[P_offset + z] = X[z];
 }
 
 KERNEL_FQ void m15700_comp (KERN_ATTR_TMPS_ESALT (scrypt_tmp_t, ethereum_scrypt_t))
@@ -633,35 +573,48 @@ KERNEL_FQ void m15700_comp (KERN_ATTR_TMPS_ESALT (scrypt_tmp_t, ethereum_scrypt_
 
   for (u32 l = 0; l < SCRYPT_CNT4; l += 4)
   {
-    uint4 tmp;
-
-    tmp = tmps[gid].P[l + 0];
-
-    w0[0] = tmp.x;
-    w0[1] = tmp.y;
-    w0[2] = tmp.z;
-    w0[3] = tmp.w;
-
-    tmp = tmps[gid].P[l + 1];
-
-    w1[0] = tmp.x;
-    w1[1] = tmp.y;
-    w1[2] = tmp.z;
-    w1[3] = tmp.w;
-
-    tmp = tmps[gid].P[l + 2];
-
-    w2[0] = tmp.x;
-    w2[1] = tmp.y;
-    w2[2] = tmp.z;
-    w2[3] = tmp.w;
-
-    tmp = tmps[gid].P[l + 3];
-
-    w3[0] = tmp.x;
-    w3[1] = tmp.y;
-    w3[2] = tmp.z;
-    w3[3] = tmp.w;
+    uint4 X[4];
+
+    X[0] = tmps[gid].P[l + 0];
+    X[1] = tmps[gid].P[l + 1];
+    X[2] = tmps[gid].P[l + 2];
+    X[3] = tmps[gid].P[l + 3];
+
+    uint4 T[4];
+
+    #ifdef IS_CUDA
+    T[0] = make_uint4 (X[0].x, X[3].y, X[2].z, X[1].w);
+    T[1] = make_uint4 (X[1].x, X[0].y, X[3].z, X[2].w);
+    T[2] = make_uint4 (X[2].x, X[1].y, X[0].z, X[3].w);
+    T[3] = make_uint4 (X[3].x, X[2].y, X[1].z, X[0].w);
+    #else
+    T[0] = (uint4) (X[0].x, X[3].y, X[2].z, X[1].w);
+    T[1] = (uint4) (X[1].x, X[0].y, X[3].z, X[2].w);
+    T[2] = (uint4) (X[2].x, X[1].y, X[0].z, X[3].w);
+    T[3] = (uint4) (X[3].x, X[2].y, X[1].z, X[0].w);
+    #endif
+
+    T[0] = hc_swap32_4 (T[0]);
+    T[1] = hc_swap32_4 (T[1]);
+    T[2] = hc_swap32_4 (T[2]);
+    T[3] = hc_swap32_4 (T[3]);
+
+    w0[0] = T[0].x;
+    w0[1] = T[0].y;
+    w0[2] = T[0].z;
+    w0[3] = T[0].w;
+    w1[0] = T[1].x;
+    w1[1] = T[1].y;
+    w1[2] = T[1].z;
+    w1[3] = T[1].w;
+    w2[0] = T[2].x;
+    w2[1] = T[2].y;
+    w2[2] = T[2].z;
+    w2[3] = T[2].w;
+    w3[0] = T[3].x;
+    w3[1] = T[3].y;
+    w3[2] = T[3].z;
+    w3[3] = T[3].w;
 
     sha256_hmac_update_64 (&ctx, w0, w1, w2, w3, 64);
   }
|
@ -218,22 +218,18 @@ DECLSPEC void salsa_r (uint4 *TI)
|
|||||||
TT[idx_r2++] = R3;
|
TT[idx_r2++] = R3;
|
||||||
}
|
}
|
||||||
|
|
||||||
idx_r1 = 0;
|
idx_r2 = 0;
|
||||||
idx_r2 = SCRYPT_R * 4;
|
|
||||||
|
|
||||||
#ifdef _unroll
|
|
||||||
#pragma unroll
|
|
||||||
#endif
|
|
||||||
for (int i = 0; i < SCRYPT_R; i++)
|
for (int i = 0; i < SCRYPT_R; i++)
|
||||||
{
|
{
|
||||||
TI[idx_r2++] = TT[idx_r1++];
|
TI[idx_r1++] = TT[idx_r2++];
|
||||||
TI[idx_r2++] = TT[idx_r1++];
|
TI[idx_r1++] = TT[idx_r2++];
|
||||||
TI[idx_r2++] = TT[idx_r1++];
|
TI[idx_r1++] = TT[idx_r2++];
|
||||||
TI[idx_r2++] = TT[idx_r1++];
|
TI[idx_r1++] = TT[idx_r2++];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
DECLSPEC void scrypt_smix_init (uint4 *X, uint4 *T, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3)
|
DECLSPEC void scrypt_smix_init (uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3)
|
||||||
{
|
{
|
||||||
#define Coord(xd4,y,z) (((xd4) * ySIZE * zSIZE) + ((y) * zSIZE) + (z))
|
#define Coord(xd4,y,z) (((xd4) * ySIZE * zSIZE) + ((y) * zSIZE) + (z))
|
||||||
#define CO Coord(xd4,y,z)
|
#define CO Coord(xd4,y,z)
|
||||||
@@ -256,55 +252,15 @@ DECLSPEC void scrypt_smix_init (uint4 *X, uint4 *T, GLOBAL_AS uint4 *V0, GLOBAL_
     case 3: V = V3; break;
   }
 
-  for (u32 i = 0; i < STATE_CNT4; i += 4)
-  {
-    #ifdef IS_CUDA
-    T[0] = make_uint4 (X[i + 0].x, X[i + 1].y, X[i + 2].z, X[i + 3].w);
-    T[1] = make_uint4 (X[i + 1].x, X[i + 2].y, X[i + 3].z, X[i + 0].w);
-    T[2] = make_uint4 (X[i + 2].x, X[i + 3].y, X[i + 0].z, X[i + 1].w);
-    T[3] = make_uint4 (X[i + 3].x, X[i + 0].y, X[i + 1].z, X[i + 2].w);
-    #else
-    T[0] = (uint4) (X[i + 0].x, X[i + 1].y, X[i + 2].z, X[i + 3].w);
-    T[1] = (uint4) (X[i + 1].x, X[i + 2].y, X[i + 3].z, X[i + 0].w);
-    T[2] = (uint4) (X[i + 2].x, X[i + 3].y, X[i + 0].z, X[i + 1].w);
-    T[3] = (uint4) (X[i + 3].x, X[i + 0].y, X[i + 1].z, X[i + 2].w);
-    #endif
-
-    X[i + 0] = T[0];
-    X[i + 1] = T[1];
-    X[i + 2] = T[2];
-    X[i + 3] = T[3];
-  }
-
   for (u32 y = 0; y < ySIZE; y++)
   {
     for (u32 z = 0; z < zSIZE; z++) V[CO] = X[z];
 
     for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r (X);
   }
 
-  for (u32 i = 0; i < STATE_CNT4; i += 4)
-  {
-    #ifdef IS_CUDA
-    T[0] = make_uint4 (X[i + 0].x, X[i + 3].y, X[i + 2].z, X[i + 1].w);
-    T[1] = make_uint4 (X[i + 1].x, X[i + 0].y, X[i + 3].z, X[i + 2].w);
-    T[2] = make_uint4 (X[i + 2].x, X[i + 1].y, X[i + 0].z, X[i + 3].w);
-    T[3] = make_uint4 (X[i + 3].x, X[i + 2].y, X[i + 1].z, X[i + 0].w);
-    #else
-    T[0] = (uint4) (X[i + 0].x, X[i + 3].y, X[i + 2].z, X[i + 1].w);
-    T[1] = (uint4) (X[i + 1].x, X[i + 0].y, X[i + 3].z, X[i + 2].w);
-    T[2] = (uint4) (X[i + 2].x, X[i + 1].y, X[i + 0].z, X[i + 3].w);
-    T[3] = (uint4) (X[i + 3].x, X[i + 2].y, X[i + 1].z, X[i + 0].w);
-    #endif
-
-    X[i + 0] = T[0];
-    X[i + 1] = T[1];
-    X[i + 2] = T[2];
-    X[i + 3] = T[3];
-  }
 }
 
-DECLSPEC void scrypt_smix_loop (uint4 *X, uint4 *T, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3)
+DECLSPEC void scrypt_smix_loop (uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3)
 {
   #define Coord(xd4,y,z) (((xd4) * ySIZE * zSIZE) + ((y) * zSIZE) + (z))
   #define CO Coord(xd4,y,z)
@@ -327,26 +283,6 @@ DECLSPEC void scrypt_smix_loop (uint4 *X, uint4 *T, GLOBAL_AS uint4 *V0, GLOBAL_
     case 3: V = V3; break;
   }
 
-  for (u32 i = 0; i < STATE_CNT4; i += 4)
-  {
-    #ifdef IS_CUDA
-    T[0] = make_uint4 (X[i + 0].x, X[i + 1].y, X[i + 2].z, X[i + 3].w);
-    T[1] = make_uint4 (X[i + 1].x, X[i + 2].y, X[i + 3].z, X[i + 0].w);
-    T[2] = make_uint4 (X[i + 2].x, X[i + 3].y, X[i + 0].z, X[i + 1].w);
-    T[3] = make_uint4 (X[i + 3].x, X[i + 0].y, X[i + 1].z, X[i + 2].w);
-    #else
-    T[0] = (uint4) (X[i + 0].x, X[i + 1].y, X[i + 2].z, X[i + 3].w);
-    T[1] = (uint4) (X[i + 1].x, X[i + 2].y, X[i + 3].z, X[i + 0].w);
-    T[2] = (uint4) (X[i + 2].x, X[i + 3].y, X[i + 0].z, X[i + 1].w);
-    T[3] = (uint4) (X[i + 3].x, X[i + 0].y, X[i + 1].z, X[i + 2].w);
-    #endif
-
-    X[i + 0] = T[0];
-    X[i + 1] = T[1];
-    X[i + 2] = T[2];
-    X[i + 3] = T[3];
-  }
-
   for (u32 N_pos = 0; N_pos < 1024; N_pos++)
   {
     const u32 k = X[zSIZE - 4].x & (SCRYPT_N - 1);
@@ -355,6 +291,8 @@ DECLSPEC void scrypt_smix_loop (uint4 *X, uint4 *T, GLOBAL_AS uint4 *V0, GLOBAL_
 
     const u32 km = k - (y * SCRYPT_TMTO);
 
+    uint4 T[STATE_CNT4];
+
     for (u32 z = 0; z < zSIZE; z++) T[z] = V[CO];
 
     for (u32 i = 0; i < km; i++) salsa_r (T);
@@ -363,26 +301,6 @@ DECLSPEC void scrypt_smix_loop (uint4 *X, uint4 *T, GLOBAL_AS uint4 *V0, GLOBAL_
 
     salsa_r (X);
   }
 
-  for (u32 i = 0; i < STATE_CNT4; i += 4)
-  {
-    #ifdef IS_CUDA
-    T[0] = make_uint4 (X[i + 0].x, X[i + 3].y, X[i + 2].z, X[i + 1].w);
-    T[1] = make_uint4 (X[i + 1].x, X[i + 0].y, X[i + 3].z, X[i + 2].w);
-    T[2] = make_uint4 (X[i + 2].x, X[i + 1].y, X[i + 0].z, X[i + 3].w);
-    T[3] = make_uint4 (X[i + 3].x, X[i + 2].y, X[i + 1].z, X[i + 0].w);
-    #else
-    T[0] = (uint4) (X[i + 0].x, X[i + 3].y, X[i + 2].z, X[i + 1].w);
-    T[1] = (uint4) (X[i + 1].x, X[i + 0].y, X[i + 3].z, X[i + 2].w);
-    T[2] = (uint4) (X[i + 2].x, X[i + 1].y, X[i + 0].z, X[i + 3].w);
-    T[3] = (uint4) (X[i + 3].x, X[i + 2].y, X[i + 1].z, X[i + 0].w);
-    #endif
-
-    X[i + 0] = T[0];
-    X[i + 1] = T[1];
-    X[i + 2] = T[2];
-    X[i + 3] = T[3];
-  }
 }
 
 KERNEL_FQ void m22700_init (KERN_ATTR_TMPS (scrypt_tmp_t))
@@ -493,6 +411,40 @@ KERNEL_FQ void m22700_init (KERN_ATTR_TMPS (scrypt_tmp_t))
     tmps[gid].P[k + 0] = tmp0;
     tmps[gid].P[k + 1] = tmp1;
   }
+
+  for (u32 l = 0; l < SCRYPT_CNT4; l += 4)
+  {
+    uint4 T[4];
+
+    T[0] = tmps[gid].P[l + 0];
+    T[1] = tmps[gid].P[l + 1];
+    T[2] = tmps[gid].P[l + 2];
+    T[3] = tmps[gid].P[l + 3];
+
+    T[0] = hc_swap32_4 (T[0]);
+    T[1] = hc_swap32_4 (T[1]);
+    T[2] = hc_swap32_4 (T[2]);
+    T[3] = hc_swap32_4 (T[3]);
+
+    uint4 X[4];
+
+    #ifdef IS_CUDA
+    X[0] = make_uint4 (T[0].x, T[1].y, T[2].z, T[3].w);
+    X[1] = make_uint4 (T[1].x, T[2].y, T[3].z, T[0].w);
+    X[2] = make_uint4 (T[2].x, T[3].y, T[0].z, T[1].w);
+    X[3] = make_uint4 (T[3].x, T[0].y, T[1].z, T[2].w);
+    #else
+    X[0] = (uint4) (T[0].x, T[1].y, T[2].z, T[3].w);
+    X[1] = (uint4) (T[1].x, T[2].y, T[3].z, T[0].w);
+    X[2] = (uint4) (T[2].x, T[3].y, T[0].z, T[1].w);
+    X[3] = (uint4) (T[3].x, T[0].y, T[1].z, T[2].w);
+    #endif
+
+    tmps[gid].P[l + 0] = X[0];
+    tmps[gid].P[l + 1] = X[1];
+    tmps[gid].P[l + 2] = X[2];
+    tmps[gid].P[l + 3] = X[3];
+  }
 }
 
 KERNEL_FQ void m22700_loop_prepare (KERN_ATTR_TMPS (scrypt_tmp_t))
@@ -502,6 +454,7 @@ KERNEL_FQ void m22700_loop_prepare (KERN_ATTR_TMPS (scrypt_tmp_t))
   */
 
   const u64 gid = get_global_id (0);
+  const u64 lid = get_local_id (0);
 
   if (gid >= gid_max) return;
 
@@ -513,26 +466,20 @@ KERNEL_FQ void m22700_loop_prepare (KERN_ATTR_TMPS (scrypt_tmp_t))
   GLOBAL_AS uint4 *d_scrypt3_buf = (GLOBAL_AS uint4 *) d_extra3_buf;
 
   uint4 X[STATE_CNT4];
-  uint4 T[STATE_CNT4];
 
   const u32 P_offset = salt_repeat * STATE_CNT4;
 
-  #ifdef _unroll
-  #pragma unroll
-  #endif
-  for (int z = 0; z < STATE_CNT4; z++) X[z] = hc_swap32_4 (tmps[gid].P[P_offset + z]);
+  for (int z = 0; z < STATE_CNT4; z++) X[z] = tmps[gid].P[P_offset + z];
 
-  scrypt_smix_init (X, T, d_scrypt0_buf, d_scrypt1_buf, d_scrypt2_buf, d_scrypt3_buf);
+  scrypt_smix_init (X, d_scrypt0_buf, d_scrypt1_buf, d_scrypt2_buf, d_scrypt3_buf);
 
-  #ifdef _unroll
-  #pragma unroll
-  #endif
-  for (int z = 0; z < STATE_CNT4; z++) tmps[gid].P[P_offset + z] = hc_swap32_4 (X[z]);
+  for (int z = 0; z < STATE_CNT4; z++) tmps[gid].P[P_offset + z] = X[z];
 }
 
 KERNEL_FQ void m22700_loop (KERN_ATTR_TMPS (scrypt_tmp_t))
 {
   const u64 gid = get_global_id (0);
+  const u64 lid = get_local_id (0);
 
   if (gid >= gid_max) return;
 
@@ -542,21 +489,14 @@ KERNEL_FQ void m22700_loop (KERN_ATTR_TMPS (scrypt_tmp_t))
   GLOBAL_AS uint4 *d_scrypt3_buf = (GLOBAL_AS uint4 *) d_extra3_buf;
 
   uint4 X[STATE_CNT4];
-  uint4 T[STATE_CNT4];
 
   const u32 P_offset = salt_repeat * STATE_CNT4;
 
-  #ifdef _unroll
-  #pragma unroll
-  #endif
-  for (int z = 0; z < STATE_CNT4; z++) X[z] = hc_swap32_4 (tmps[gid].P[P_offset + z]);
+  for (int z = 0; z < STATE_CNT4; z++) X[z] = tmps[gid].P[P_offset + z];
 
-  scrypt_smix_loop (X, T, d_scrypt0_buf, d_scrypt1_buf, d_scrypt2_buf, d_scrypt3_buf);
+  scrypt_smix_loop (X, d_scrypt0_buf, d_scrypt1_buf, d_scrypt2_buf, d_scrypt3_buf);
 
-  #ifdef _unroll
-  #pragma unroll
-  #endif
-  for (int z = 0; z < STATE_CNT4; z++) tmps[gid].P[P_offset + z] = hc_swap32_4 (X[z]);
+  for (int z = 0; z < STATE_CNT4; z++) tmps[gid].P[P_offset + z] = X[z];
 }
 
 KERNEL_FQ void m22700_comp (KERN_ATTR_TMPS (scrypt_tmp_t))
@@ -665,35 +605,48 @@ KERNEL_FQ void m22700_comp (KERN_ATTR_TMPS (scrypt_tmp_t))
 
   for (u32 l = 0; l < SCRYPT_CNT4; l += 4)
   {
-    uint4 tmp;
-
-    tmp = tmps[gid].P[l + 0];
-
-    w0[0] = tmp.x;
-    w0[1] = tmp.y;
-    w0[2] = tmp.z;
-    w0[3] = tmp.w;
-
-    tmp = tmps[gid].P[l + 1];
-
-    w1[0] = tmp.x;
-    w1[1] = tmp.y;
-    w1[2] = tmp.z;
-    w1[3] = tmp.w;
-
-    tmp = tmps[gid].P[l + 2];
-
-    w2[0] = tmp.x;
-    w2[1] = tmp.y;
-    w2[2] = tmp.z;
-    w2[3] = tmp.w;
-
-    tmp = tmps[gid].P[l + 3];
-
-    w3[0] = tmp.x;
-    w3[1] = tmp.y;
-    w3[2] = tmp.z;
-    w3[3] = tmp.w;
+    uint4 X[4];
+
+    X[0] = tmps[gid].P[l + 0];
+    X[1] = tmps[gid].P[l + 1];
+    X[2] = tmps[gid].P[l + 2];
+    X[3] = tmps[gid].P[l + 3];
+
+    uint4 T[4];
+
+    #ifdef IS_CUDA
+    T[0] = make_uint4 (X[0].x, X[3].y, X[2].z, X[1].w);
+    T[1] = make_uint4 (X[1].x, X[0].y, X[3].z, X[2].w);
+    T[2] = make_uint4 (X[2].x, X[1].y, X[0].z, X[3].w);
+    T[3] = make_uint4 (X[3].x, X[2].y, X[1].z, X[0].w);
+    #else
+    T[0] = (uint4) (X[0].x, X[3].y, X[2].z, X[1].w);
+    T[1] = (uint4) (X[1].x, X[0].y, X[3].z, X[2].w);
+    T[2] = (uint4) (X[2].x, X[1].y, X[0].z, X[3].w);
+    T[3] = (uint4) (X[3].x, X[2].y, X[1].z, X[0].w);
+    #endif
+
+    T[0] = hc_swap32_4 (T[0]);
+    T[1] = hc_swap32_4 (T[1]);
+    T[2] = hc_swap32_4 (T[2]);
+    T[3] = hc_swap32_4 (T[3]);
+
+    w0[0] = T[0].x;
+    w0[1] = T[0].y;
+    w0[2] = T[0].z;
+    w0[3] = T[0].w;
+    w1[0] = T[1].x;
+    w1[1] = T[1].y;
+    w1[2] = T[1].z;
+    w1[3] = T[1].w;
+    w2[0] = T[2].x;
+    w2[1] = T[2].y;
+    w2[2] = T[2].z;
+    w2[3] = T[2].w;
+    w3[0] = T[3].x;
+    w3[1] = T[3].y;
+    w3[2] = T[3].z;
+    w3[3] = T[3].w;
 
     sha256_hmac_update_64 (&ctx, w0, w1, w2, w3, 64);
   }
@@ -370,12 +370,13 @@ GeForce_GTX_TITAN 3 9900 2 A
 ##
 
 DEVICE_TYPE_CPU * 8900 1 N A
-DEVICE_TYPE_GPU * 8900 1 N A
 DEVICE_TYPE_CPU * 9300 1 N A
-DEVICE_TYPE_GPU * 9300 1 N A
 DEVICE_TYPE_CPU * 15700 1 N A
-DEVICE_TYPE_GPU * 15700 1 1 A
 DEVICE_TYPE_CPU * 22700 1 N A
+
+DEVICE_TYPE_GPU * 8900 1 N A
+DEVICE_TYPE_GPU * 9300 1 N A
+DEVICE_TYPE_GPU * 15700 1 1 A
 DEVICE_TYPE_GPU * 22700 1 N A
 
 ## Here's an example of how to manually tune SCRYPT algorithm kernels for your hardware.
@@ -468,10 +469,15 @@ DEVICE_TYPE_GPU * 22700 1 N
 
 GeForce_GTX_980 * 8900 1 28 A
 GeForce_GTX_980 * 9300 1 128 A
-GeForce_GTX_980 * 15700 1 1 A
+GeForce_GTX_980 * 15700 1 2 A
 GeForce_GTX_980 * 22700 1 28 A
 
-GeForce_RTX_2080_Ti * 8900 1 N A
+GeForce_RTX_2080_Ti * 8900 1 38 A
 GeForce_RTX_2080_Ti * 9300 1 544 A
-GeForce_RTX_2080_Ti * 15700 1 4 A
-GeForce_RTX_2080_Ti * 22700 1 N A
+GeForce_RTX_2080_Ti * 15700 1 8 A
+GeForce_RTX_2080_Ti * 22700 1 38 A
+
+gfx900 * 8900 1 28 A
+gfx900 * 9300 1 384 A
+gfx900 * 15700 1 6 A
+gfx900 * 22700 1 28 A
@@ -8381,6 +8381,8 @@ int backend_session_begin (hashcat_ctx_t *hashcat_ctx)
     device_param->size_st_salts  = size_st_salts;
     device_param->size_st_esalts = size_st_esalts;
 
+    // extra buffer
+
     u64 size_extra_buffer = 4;
 
    if (module_ctx->module_extra_buffer_size != MODULE_DEFAULT)
@@ -250,24 +250,9 @@ char *module_jit_build_options (MAYBE_UNUSED const hashconfig_t *hashconfig, MAY
 
   const u64 tmp_size = 128ULL * scrypt_r * scrypt_p;
 
-  char *unroll = "";
-
-  // NVIDIA GPU
-  if (device_param->opencl_device_vendor_id == VENDOR_ID_NV)
-  {
-    unroll = "-D _unroll";
-  }
-
-  // ROCM
-  if ((device_param->opencl_device_vendor_id == VENDOR_ID_AMD) && (device_param->has_vperm == true))
-  {
-    unroll = "-D _unroll";
-  }
-
   char *jit_build_options = NULL;
 
-  hc_asprintf (&jit_build_options, "%s -DSCRYPT_N=%u -DSCRYPT_R=%u -DSCRYPT_P=%u -DSCRYPT_TMTO=%" PRIu64 " -DSCRYPT_TMP_ELEM=%" PRIu64,
-    unroll,
+  hc_asprintf (&jit_build_options, "-DSCRYPT_N=%u -DSCRYPT_R=%u -DSCRYPT_P=%u -DSCRYPT_TMTO=%" PRIu64 " -DSCRYPT_TMP_ELEM=%" PRIu64,
     hashes->salts_buf[0].scrypt_N,
     hashes->salts_buf[0].scrypt_r,
     hashes->salts_buf[0].scrypt_p,
@@ -250,24 +250,9 @@ char *module_jit_build_options (MAYBE_UNUSED const hashconfig_t *hashconfig, MAY
 
   const u64 tmp_size = 128ULL * scrypt_r * scrypt_p;
 
-  char *unroll = "";
-
-  // NVIDIA GPU
-  if (device_param->opencl_device_vendor_id == VENDOR_ID_NV)
-  {
-    unroll = "-D _unroll";
-  }
-
-  // ROCM
-  if ((device_param->opencl_device_vendor_id == VENDOR_ID_AMD) && (device_param->has_vperm == true))
-  {
-    unroll = "-D _unroll";
-  }
-
   char *jit_build_options = NULL;
 
-  hc_asprintf (&jit_build_options, "%s -DSCRYPT_N=%u -DSCRYPT_R=%u -DSCRYPT_P=%u -DSCRYPT_TMTO=%" PRIu64 " -DSCRYPT_TMP_ELEM=%" PRIu64,
-    unroll,
+  hc_asprintf (&jit_build_options, "-DSCRYPT_N=%u -DSCRYPT_R=%u -DSCRYPT_P=%u -DSCRYPT_TMTO=%" PRIu64 " -DSCRYPT_TMP_ELEM=%" PRIu64,
     hashes->salts_buf[0].scrypt_N,
     hashes->salts_buf[0].scrypt_r,
     hashes->salts_buf[0].scrypt_p,
@@ -23,7 +23,6 @@ static const u64 KERN_TYPE = 15700;
 static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE;
 static const u64 OPTS_TYPE = OPTS_TYPE_PT_GENERATE_LE
                            | OPTS_TYPE_MP_MULTI_DISABLE
-                           | OPTS_TYPE_NATIVE_THREADS
                            | OPTS_TYPE_LOOP_PREPARE
                            | OPTS_TYPE_SELF_TEST_DISABLE
                            | OPTS_TYPE_ST_HEX;
@@ -73,6 +72,13 @@ u32 module_kernel_loops_max (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_
   return kernel_loops_max;
 }
 
+u32 module_kernel_threads_max (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra)
+{
+  const u32 kernel_threads_max = 4;
+
+  return kernel_threads_max;
+}
+
 u64 module_esalt_size (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra)
 {
   const u64 esalt_size = (const u64) sizeof (ethereum_scrypt_t);
@@ -265,24 +271,9 @@ char *module_jit_build_options (MAYBE_UNUSED const hashconfig_t *hashconfig, MAY
 
   const u64 tmp_size = 128ULL * scrypt_r * scrypt_p;
 
-  char *unroll = "";
-
-  // NVIDIA GPU
-  if (device_param->opencl_device_vendor_id == VENDOR_ID_NV)
-  {
-    unroll = "-D _unroll";
-  }
-
-  // ROCM
-  if ((device_param->opencl_device_vendor_id == VENDOR_ID_AMD) && (device_param->has_vperm == true))
-  {
-    unroll = "-D _unroll";
-  }
-
   char *jit_build_options = NULL;
 
-  hc_asprintf (&jit_build_options, "%s -DSCRYPT_N=%u -DSCRYPT_R=%u -DSCRYPT_P=%u -DSCRYPT_TMTO=%" PRIu64 " -DSCRYPT_TMP_ELEM=%" PRIu64,
-    unroll,
+  hc_asprintf (&jit_build_options, "-DSCRYPT_N=%u -DSCRYPT_R=%u -DSCRYPT_P=%u -DSCRYPT_TMTO=%" PRIu64 " -DSCRYPT_TMP_ELEM=%" PRIu64,
     hashes->salts_buf[0].scrypt_N,
     hashes->salts_buf[0].scrypt_r,
     hashes->salts_buf[0].scrypt_p,
@@ -507,7 +498,7 @@ void module_init (module_ctx_t *module_ctx)
   module_ctx->module_kernel_accel_min   = MODULE_DEFAULT;
   module_ctx->module_kernel_loops_max   = module_kernel_loops_max;
   module_ctx->module_kernel_loops_min   = module_kernel_loops_min;
-  module_ctx->module_kernel_threads_max = MODULE_DEFAULT;
+  module_ctx->module_kernel_threads_max = module_kernel_threads_max;
   module_ctx->module_kernel_threads_min = MODULE_DEFAULT;
   module_ctx->module_kern_type          = module_kern_type;
   module_ctx->module_kern_type_dynamic  = MODULE_DEFAULT;
@@ -251,24 +251,9 @@ char *module_jit_build_options (MAYBE_UNUSED const hashconfig_t *hashconfig, MAY
 
   const u64 tmp_size = 128ULL * scrypt_r * scrypt_p;
 
-  char *unroll = "";
-
-  // NVIDIA GPU
-  if (device_param->opencl_device_vendor_id == VENDOR_ID_NV)
-  {
-    unroll = "-D _unroll";
-  }
-
-  // ROCM
-  if ((device_param->opencl_device_vendor_id == VENDOR_ID_AMD) && (device_param->has_vperm == true))
-  {
-    unroll = "-D _unroll";
-  }
-
   char *jit_build_options = NULL;
 
-  hc_asprintf (&jit_build_options, "%s -DSCRYPT_N=%u -DSCRYPT_R=%u -DSCRYPT_P=%u -DSCRYPT_TMTO=%" PRIu64 " -DSCRYPT_TMP_ELEM=%" PRIu64,
-    unroll,
+  hc_asprintf (&jit_build_options, "-DSCRYPT_N=%u -DSCRYPT_R=%u -DSCRYPT_P=%u -DSCRYPT_TMTO=%" PRIu64 " -DSCRYPT_TMP_ELEM=%" PRIu64,
     hashes->salts_buf[0].scrypt_N,
     hashes->salts_buf[0].scrypt_r,
     hashes->salts_buf[0].scrypt_p,