diff --git a/OpenCL/m27700-pure.cl b/OpenCL/m27700-pure.cl new file mode 100644 index 000000000..4bcca4946 --- /dev/null +++ b/OpenCL/m27700-pure.cl @@ -0,0 +1,642 @@ +/** + * Author......: See docs/credits.txt + * License.....: MIT + */ + +#ifdef KERNEL_STATIC +#include "inc_vendor.h" +#include "inc_types.h" +#include "inc_platform.cl" +#include "inc_common.cl" +#include "inc_hash_sha256.cl" +#include "inc_cipher_aes.cl" +#endif + +typedef struct +{ + #ifndef SCRYPT_TMP_ELEM + #define SCRYPT_TMP_ELEM 1 + #endif + + uint4 P[SCRYPT_TMP_ELEM]; + +} scrypt_tmp_t; + +#if defined IS_CUDA || defined IS_HIP + +inline __device__ uint4 operator & (const uint4 a, const u32 b) { return make_uint4 ((a.x & b ), (a.y & b ), (a.z & b ), (a.w & b )); } +inline __device__ uint4 operator << (const uint4 a, const u32 b) { return make_uint4 ((a.x << b ), (a.y << b ), (a.z << b ), (a.w << b )); } +inline __device__ uint4 operator >> (const uint4 a, const u32 b) { return make_uint4 ((a.x >> b ), (a.y >> b ), (a.z >> b ), (a.w >> b )); } +inline __device__ uint4 operator + (const uint4 a, const uint4 b) { return make_uint4 ((a.x + b.x), (a.y + b.y), (a.z + b.z), (a.w + b.w)); } +inline __device__ uint4 operator ^ (const uint4 a, const uint4 b) { return make_uint4 ((a.x ^ b.x), (a.y ^ b.y), (a.z ^ b.z), (a.w ^ b.w)); } +inline __device__ uint4 operator | (const uint4 a, const uint4 b) { return make_uint4 ((a.x | b.x), (a.y | b.y), (a.z | b.z), (a.w | b.w)); } +inline __device__ void operator ^= ( uint4 &a, const uint4 b) { a.x ^= b.x; a.y ^= b.y; a.z ^= b.z; a.w ^= b.w; } + +inline __device__ uint4 rotate (const uint4 a, const int n) +{ + return ((a << n) | ((a >> (32 - n)))); +} + +#endif + +DECLSPEC uint4 hc_swap32_4 (uint4 v) +{ + return (rotate ((v & 0x00FF00FF), 24u) | rotate ((v & 0xFF00FF00), 8u)); +} + +#define GET_SCRYPT_CNT(r,p) (2 * (r) * 16 * (p)) +#define GET_SMIX_CNT(r,N) (2 * (r) * 16 * (N)) +#define GET_STATE_CNT(r) (2 * (r) * 16) + +#define SCRYPT_CNT GET_SCRYPT_CNT (SCRYPT_R, SCRYPT_P) +#define SCRYPT_CNT4 (SCRYPT_CNT / 4) +#define STATE_CNT GET_STATE_CNT (SCRYPT_R) +#define STATE_CNT4 (STATE_CNT / 4) + +#define ADD_ROTATE_XOR(r,i1,i2,s) (r) ^= rotate ((i1) + (i2), (s)); + +#if defined IS_CUDA || defined IS_HIP + +#define SALSA20_2R() \ +{ \ + ADD_ROTATE_XOR (X1, X0, X3, 7); \ + ADD_ROTATE_XOR (X2, X1, X0, 9); \ + ADD_ROTATE_XOR (X3, X2, X1, 13); \ + ADD_ROTATE_XOR (X0, X3, X2, 18); \ + \ + X1 = make_uint4 (X1.w, X1.x, X1.y, X1.z); \ + X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \ + X3 = make_uint4 (X3.y, X3.z, X3.w, X3.x); \ + \ + ADD_ROTATE_XOR (X3, X0, X1, 7); \ + ADD_ROTATE_XOR (X2, X3, X0, 9); \ + ADD_ROTATE_XOR (X1, X2, X3, 13); \ + ADD_ROTATE_XOR (X0, X1, X2, 18); \ + \ + X1 = make_uint4 (X1.y, X1.z, X1.w, X1.x); \ + X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \ + X3 = make_uint4 (X3.w, X3.x, X3.y, X3.z); \ +} +#else +#define SALSA20_2R() \ +{ \ + ADD_ROTATE_XOR (X1, X0, X3, 7); \ + ADD_ROTATE_XOR (X2, X1, X0, 9); \ + ADD_ROTATE_XOR (X3, X2, X1, 13); \ + ADD_ROTATE_XOR (X0, X3, X2, 18); \ + \ + X1 = X1.s3012; \ + X2 = X2.s2301; \ + X3 = X3.s1230; \ + \ + ADD_ROTATE_XOR (X3, X0, X1, 7); \ + ADD_ROTATE_XOR (X2, X3, X0, 9); \ + ADD_ROTATE_XOR (X1, X2, X3, 13); \ + ADD_ROTATE_XOR (X0, X1, X2, 18); \ + \ + X1 = X1.s1230; \ + X2 = X2.s2301; \ + X3 = X3.s3012; \ +} +#endif + +#define Coord(xd4,y,z) (((xd4) * ySIZE * zSIZE) + ((y) * zSIZE) + (z)) +#define CO Coord(xd4,y,z) + +DECLSPEC void salsa_r (uint4 *TI) +{ + uint4 R0 = TI[STATE_CNT4 - 4]; + uint4 R1 = TI[STATE_CNT4 - 3]; + uint4 R2 = 
TI[STATE_CNT4 - 2]; + uint4 R3 = TI[STATE_CNT4 - 1]; + + for (int i = 0; i < STATE_CNT4; i += 4) + { + uint4 Y0 = TI[i + 0]; + uint4 Y1 = TI[i + 1]; + uint4 Y2 = TI[i + 2]; + uint4 Y3 = TI[i + 3]; + + R0 = R0 ^ Y0; + R1 = R1 ^ Y1; + R2 = R2 ^ Y2; + R3 = R3 ^ Y3; + + uint4 X0 = R0; + uint4 X1 = R1; + uint4 X2 = R2; + uint4 X3 = R3; + + SALSA20_2R (); + SALSA20_2R (); + SALSA20_2R (); + SALSA20_2R (); + + R0 = R0 + X0; + R1 = R1 + X1; + R2 = R2 + X2; + R3 = R3 + X3; + + TI[i + 0] = R0; + TI[i + 1] = R1; + TI[i + 2] = R2; + TI[i + 3] = R3; + } + + #if SCRYPT_R > 1 + + uint4 TT[STATE_CNT4 / 2]; + + for (int dst_off = 0, src_off = 4; src_off < STATE_CNT4; dst_off += 4, src_off += 8) + { + TT[dst_off + 0] = TI[src_off + 0]; + TT[dst_off + 1] = TI[src_off + 1]; + TT[dst_off + 2] = TI[src_off + 2]; + TT[dst_off + 3] = TI[src_off + 3]; + } + + for (int dst_off = 4, src_off = 8; src_off < STATE_CNT4; dst_off += 4, src_off += 8) + { + TI[dst_off + 0] = TI[src_off + 0]; + TI[dst_off + 1] = TI[src_off + 1]; + TI[dst_off + 2] = TI[src_off + 2]; + TI[dst_off + 3] = TI[src_off + 3]; + } + + for (int dst_off = STATE_CNT4 / 2, src_off = 0; dst_off < STATE_CNT4; dst_off += 4, src_off += 4) + { + TI[dst_off + 0] = TT[src_off + 0]; + TI[dst_off + 1] = TT[src_off + 1]; + TI[dst_off + 2] = TT[src_off + 2]; + TI[dst_off + 3] = TT[src_off + 3]; + } + + #endif +} + +DECLSPEC void scrypt_smix_init (uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3) +{ + const u32 ySIZE = SCRYPT_N / SCRYPT_TMTO; + const u32 zSIZE = STATE_CNT4; + + const u32 x = get_global_id (0); + + const u32 xd4 = x / 4; + const u32 xm4 = x & 3; + + GLOBAL_AS uint4 *V; + + switch (xm4) + { + case 0: V = V0; break; + case 1: V = V1; break; + case 2: V = V2; break; + case 3: V = V3; break; + } + + for (u32 y = 0; y < ySIZE; y++) + { + for (u32 z = 0; z < zSIZE; z++) V[CO] = X[z]; + + for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r (X); + } +} + +DECLSPEC void scrypt_smix_loop (uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3) +{ + const u32 ySIZE = SCRYPT_N / SCRYPT_TMTO; + const u32 zSIZE = STATE_CNT4; + + const u32 x = get_global_id (0); + + const u32 xd4 = x / 4; + const u32 xm4 = x & 3; + + GLOBAL_AS uint4 *V; + + switch (xm4) + { + case 0: V = V0; break; + case 1: V = V1; break; + case 2: V = V2; break; + case 3: V = V3; break; + } + + // note: fixed 1024 iterations = forced -u 1024 + + for (u32 N_pos = 0; N_pos < 1024; N_pos++) + { + const u32 k = X[zSIZE - 4].x & (SCRYPT_N - 1); + + const u32 y = k / SCRYPT_TMTO; + + const u32 km = k - (y * SCRYPT_TMTO); + + uint4 T[STATE_CNT4]; + + for (u32 z = 0; z < zSIZE; z++) T[z] = V[CO]; + + for (u32 i = 0; i < km; i++) salsa_r (T); + + for (u32 z = 0; z < zSIZE; z++) X[z] ^= T[z]; + + salsa_r (X); + } +} + +KERNEL_FQ void m27700_init (KERN_ATTR_TMPS (scrypt_tmp_t)) +{ + /** + * base + */ + + const u64 gid = get_global_id (0); + + if (gid >= gid_max) return; + + u32 w[128] = { 0 }; + + hc_enc_t hc_enc; + + hc_enc_init (&hc_enc); + + const u32 w_len = hc_enc_next_global (&hc_enc, pws[gid].i, pws[gid].pw_len, 256, w, sizeof (w)); + + // utf16le to utf16be + + for (int i = 0, j = 0; i < w_len; i += 4, j += 1) + { + w[j] = ((w[j] >> 8) & 0x00ff00ff) + | ((w[j] << 8) & 0xff00ff00); + } + + sha256_hmac_ctx_t sha256_hmac_ctx; + + sha256_hmac_init_swap (&sha256_hmac_ctx, w, w_len); + + u32 s0[4] = { 0 }; + u32 s1[4] = { 0 }; + u32 s2[4] = { 0 }; + u32 s3[4] = { 0 }; + + s0[0] = salt_bufs[SALT_POS].salt_buf[0]; + s0[1] = 
salt_bufs[SALT_POS].salt_buf[1]; + + sha256_hmac_update_64 (&sha256_hmac_ctx, s0, s1, s2, s3, 8); + + for (u32 i = 0, j = 1, k = 0; i < SCRYPT_CNT; i += 8, j += 1, k += 2) + { + sha256_hmac_ctx_t sha256_hmac_ctx2 = sha256_hmac_ctx; + + u32 w0[4]; + u32 w1[4]; + u32 w2[4]; + u32 w3[4]; + + w0[0] = j; + w0[1] = 0; + w0[2] = 0; + w0[3] = 0; + w1[0] = 0; + w1[1] = 0; + w1[2] = 0; + w1[3] = 0; + w2[0] = 0; + w2[1] = 0; + w2[2] = 0; + w2[3] = 0; + w3[0] = 0; + w3[1] = 0; + w3[2] = 0; + w3[3] = 0; + + sha256_hmac_update_64 (&sha256_hmac_ctx2, w0, w1, w2, w3, 4); + + sha256_hmac_final (&sha256_hmac_ctx2); + + u32 digest[8]; + + digest[0] = sha256_hmac_ctx2.opad.h[0]; + digest[1] = sha256_hmac_ctx2.opad.h[1]; + digest[2] = sha256_hmac_ctx2.opad.h[2]; + digest[3] = sha256_hmac_ctx2.opad.h[3]; + digest[4] = sha256_hmac_ctx2.opad.h[4]; + digest[5] = sha256_hmac_ctx2.opad.h[5]; + digest[6] = sha256_hmac_ctx2.opad.h[6]; + digest[7] = sha256_hmac_ctx2.opad.h[7]; + + #if defined IS_CUDA + const uint4 tmp0 = make_uint4 (digest[0], digest[1], digest[2], digest[3]); + const uint4 tmp1 = make_uint4 (digest[4], digest[5], digest[6], digest[7]); + #else + const uint4 tmp0 = (uint4) (digest[0], digest[1], digest[2], digest[3]); + const uint4 tmp1 = (uint4) (digest[4], digest[5], digest[6], digest[7]); + #endif + + tmps[gid].P[k + 0] = tmp0; + tmps[gid].P[k + 1] = tmp1; + } + + for (u32 l = 0; l < SCRYPT_CNT4; l += 4) + { + uint4 T[4]; + + T[0] = tmps[gid].P[l + 0]; + T[1] = tmps[gid].P[l + 1]; + T[2] = tmps[gid].P[l + 2]; + T[3] = tmps[gid].P[l + 3]; + + T[0] = hc_swap32_4 (T[0]); + T[1] = hc_swap32_4 (T[1]); + T[2] = hc_swap32_4 (T[2]); + T[3] = hc_swap32_4 (T[3]); + + uint4 X[4]; + + #if defined IS_CUDA + X[0] = make_uint4 (T[0].x, T[1].y, T[2].z, T[3].w); + X[1] = make_uint4 (T[1].x, T[2].y, T[3].z, T[0].w); + X[2] = make_uint4 (T[2].x, T[3].y, T[0].z, T[1].w); + X[3] = make_uint4 (T[3].x, T[0].y, T[1].z, T[2].w); + #else + X[0] = (uint4) (T[0].x, T[1].y, T[2].z, T[3].w); + X[1] = (uint4) (T[1].x, T[2].y, T[3].z, T[0].w); + X[2] = (uint4) (T[2].x, T[3].y, T[0].z, T[1].w); + X[3] = (uint4) (T[3].x, T[0].y, T[1].z, T[2].w); + #endif + + tmps[gid].P[l + 0] = X[0]; + tmps[gid].P[l + 1] = X[1]; + tmps[gid].P[l + 2] = X[2]; + tmps[gid].P[l + 3] = X[3]; + } +} + +KERNEL_FQ void m27700_loop_prepare (KERN_ATTR_TMPS (scrypt_tmp_t)) +{ + /** + * base + */ + + const u64 gid = get_global_id (0); + const u64 lid = get_local_id (0); + + if (gid >= gid_max) return; + + // SCRYPT part, init V + + GLOBAL_AS uint4 *d_scrypt0_buf = (GLOBAL_AS uint4 *) d_extra0_buf; + GLOBAL_AS uint4 *d_scrypt1_buf = (GLOBAL_AS uint4 *) d_extra1_buf; + GLOBAL_AS uint4 *d_scrypt2_buf = (GLOBAL_AS uint4 *) d_extra2_buf; + GLOBAL_AS uint4 *d_scrypt3_buf = (GLOBAL_AS uint4 *) d_extra3_buf; + + uint4 X[STATE_CNT4]; + + const u32 P_offset = salt_repeat * STATE_CNT4; + + GLOBAL_AS uint4 *P = tmps[gid].P + P_offset; + + for (int z = 0; z < STATE_CNT4; z++) X[z] = P[z]; + + scrypt_smix_init (X, d_scrypt0_buf, d_scrypt1_buf, d_scrypt2_buf, d_scrypt3_buf); + + for (int z = 0; z < STATE_CNT4; z++) P[z] = X[z]; +} + +KERNEL_FQ void m27700_loop (KERN_ATTR_TMPS (scrypt_tmp_t)) +{ + const u64 gid = get_global_id (0); + const u64 lid = get_local_id (0); + + if (gid >= gid_max) return; + + GLOBAL_AS uint4 *d_scrypt0_buf = (GLOBAL_AS uint4 *) d_extra0_buf; + GLOBAL_AS uint4 *d_scrypt1_buf = (GLOBAL_AS uint4 *) d_extra1_buf; + GLOBAL_AS uint4 *d_scrypt2_buf = (GLOBAL_AS uint4 *) d_extra2_buf; + GLOBAL_AS uint4 *d_scrypt3_buf = (GLOBAL_AS uint4 *) d_extra3_buf; + + 
uint4 X[STATE_CNT4]; + + const u32 P_offset = salt_repeat * STATE_CNT4; + + GLOBAL_AS uint4 *P = tmps[gid].P + P_offset; + + for (int z = 0; z < STATE_CNT4; z++) X[z] = P[z]; + + scrypt_smix_loop (X, d_scrypt0_buf, d_scrypt1_buf, d_scrypt2_buf, d_scrypt3_buf); + + for (int z = 0; z < STATE_CNT4; z++) P[z] = X[z]; +} + +KERNEL_FQ void m27700_comp (KERN_ATTR_TMPS (scrypt_tmp_t)) +{ + const u64 gid = get_global_id (0); + const u64 lid = get_local_id (0); + const u64 lsz = get_local_size (0); + + /** + * aes shared + */ + + #ifdef REAL_SHM + + LOCAL_VK u32 s_td0[256]; + LOCAL_VK u32 s_td1[256]; + LOCAL_VK u32 s_td2[256]; + LOCAL_VK u32 s_td3[256]; + LOCAL_VK u32 s_td4[256]; + + LOCAL_VK u32 s_te0[256]; + LOCAL_VK u32 s_te1[256]; + LOCAL_VK u32 s_te2[256]; + LOCAL_VK u32 s_te3[256]; + LOCAL_VK u32 s_te4[256]; + + for (u32 i = lid; i < 256; i += lsz) + { + s_td0[i] = td0[i]; + s_td1[i] = td1[i]; + s_td2[i] = td2[i]; + s_td3[i] = td3[i]; + s_td4[i] = td4[i]; + + s_te0[i] = te0[i]; + s_te1[i] = te1[i]; + s_te2[i] = te2[i]; + s_te3[i] = te3[i]; + s_te4[i] = te4[i]; + } + + SYNC_THREADS (); + + #else + + CONSTANT_AS u32a *s_td0 = td0; + CONSTANT_AS u32a *s_td1 = td1; + CONSTANT_AS u32a *s_td2 = td2; + CONSTANT_AS u32a *s_td3 = td3; + CONSTANT_AS u32a *s_td4 = td4; + + CONSTANT_AS u32a *s_te0 = te0; + CONSTANT_AS u32a *s_te1 = te1; + CONSTANT_AS u32a *s_te2 = te2; + CONSTANT_AS u32a *s_te3 = te3; + CONSTANT_AS u32a *s_te4 = te4; + + #endif + + if (gid >= gid_max) return; + + /** + * 2nd pbkdf2, creates B + */ + + u32 w[128] = { 0 }; + + hc_enc_t hc_enc; + + hc_enc_init (&hc_enc); + + const u32 w_len = hc_enc_next_global (&hc_enc, pws[gid].i, pws[gid].pw_len, 256, w, sizeof (w)); + + // utf16le to utf16be + + for (int i = 0, j = 0; i < w_len; i += 4, j += 1) + { + w[j] = ((w[j] >> 8) & 0x00ff00ff) + | ((w[j] << 8) & 0xff00ff00); + } + + sha256_hmac_ctx_t ctx; + + sha256_hmac_init_swap (&ctx, w, w_len); + + u32 w0[4]; + u32 w1[4]; + u32 w2[4]; + u32 w3[4]; + + for (u32 l = 0; l < SCRYPT_CNT4; l += 4) + { + uint4 X[4]; + + X[0] = tmps[gid].P[l + 0]; + X[1] = tmps[gid].P[l + 1]; + X[2] = tmps[gid].P[l + 2]; + X[3] = tmps[gid].P[l + 3]; + + uint4 T[4]; + + #if defined IS_CUDA + T[0] = make_uint4 (X[0].x, X[3].y, X[2].z, X[1].w); + T[1] = make_uint4 (X[1].x, X[0].y, X[3].z, X[2].w); + T[2] = make_uint4 (X[2].x, X[1].y, X[0].z, X[3].w); + T[3] = make_uint4 (X[3].x, X[2].y, X[1].z, X[0].w); + #else + T[0] = (uint4) (X[0].x, X[3].y, X[2].z, X[1].w); + T[1] = (uint4) (X[1].x, X[0].y, X[3].z, X[2].w); + T[2] = (uint4) (X[2].x, X[1].y, X[0].z, X[3].w); + T[3] = (uint4) (X[3].x, X[2].y, X[1].z, X[0].w); + #endif + + T[0] = hc_swap32_4 (T[0]); + T[1] = hc_swap32_4 (T[1]); + T[2] = hc_swap32_4 (T[2]); + T[3] = hc_swap32_4 (T[3]); + + w0[0] = T[0].x; + w0[1] = T[0].y; + w0[2] = T[0].z; + w0[3] = T[0].w; + w1[0] = T[1].x; + w1[1] = T[1].y; + w1[2] = T[1].z; + w1[3] = T[1].w; + w2[0] = T[2].x; + w2[1] = T[2].y; + w2[2] = T[2].z; + w2[3] = T[2].w; + w3[0] = T[3].x; + w3[1] = T[3].y; + w3[2] = T[3].z; + w3[3] = T[3].w; + + sha256_hmac_update_64 (&ctx, w0, w1, w2, w3, 64); + } + + w0[0] = 1; + w0[1] = 0; + w0[2] = 0; + w0[3] = 0; + w1[0] = 0; + w1[1] = 0; + w1[2] = 0; + w1[3] = 0; + w2[0] = 0; + w2[1] = 0; + w2[2] = 0; + w2[3] = 0; + w3[0] = 0; + w3[1] = 0; + w3[2] = 0; + w3[3] = 0; + + sha256_hmac_update_64 (&ctx, w0, w1, w2, w3, 4); + + sha256_hmac_final (&ctx); + + // AES256-CBC decrypt + + u32 key[8]; + + key[0] = ctx.opad.h[0]; + key[1] = ctx.opad.h[1]; + key[2] = ctx.opad.h[2]; + key[3] = ctx.opad.h[3]; + key[4] = 
ctx.opad.h[4]; + key[5] = ctx.opad.h[5]; + key[6] = ctx.opad.h[6]; + key[7] = ctx.opad.h[7]; + + #define KEYLEN 60 + + u32 ks[KEYLEN]; + + AES256_set_decrypt_key (ks, key, s_te0, s_te1, s_te2, s_te3, s_td0, s_td1, s_td2, s_td3); + + u32 iv[4]; + + iv[0] = salt_bufs[SALT_POS].salt_buf[2]; + iv[1] = salt_bufs[SALT_POS].salt_buf[3]; + iv[2] = salt_bufs[SALT_POS].salt_buf[4]; + iv[3] = salt_bufs[SALT_POS].salt_buf[5]; + + u32 enc[4]; + + enc[0] = salt_bufs[SALT_POS].salt_buf[6]; + enc[1] = salt_bufs[SALT_POS].salt_buf[7]; + enc[2] = salt_bufs[SALT_POS].salt_buf[8]; + enc[3] = salt_bufs[SALT_POS].salt_buf[9]; + + u32 dec[4]; + + aes256_decrypt (ks, enc, dec, s_td0, s_td1, s_td2, s_td3, s_td4); + + dec[0] ^= iv[0]; + dec[1] ^= iv[1]; + dec[2] ^= iv[2]; + dec[3] ^= iv[3]; + + if ((dec[0] == 0x10101010) && + (dec[1] == 0x10101010) && + (dec[2] == 0x10101010) && + (dec[3] == 0x10101010)) + { + if (hc_atomic_inc (&hashes_shown[DIGESTS_OFFSET]) == 0) + { + mark_hash (plains_buf, d_return_buf, SALT_POS, digests_cnt, 0, DIGESTS_OFFSET + 0, gid, 0, 0, 0); + } + + return; + } +} diff --git a/docs/changes.txt b/docs/changes.txt index 2292050d2..ef0d2d34d 100644 --- a/docs/changes.txt +++ b/docs/changes.txt @@ -21,6 +21,7 @@ ## Algorithms ## +- Added hash-mode: MultiBit Classic .wallet (scrypt) - Added hash-mode: SNMPv3 HMAC-MD5-96/HMAC-SHA1-96 - Added hash-mode: SNMPv3 HMAC-MD5-96 - Added hash-mode: SNMPv3 HMAC-SHA1-96 diff --git a/docs/readme.txt b/docs/readme.txt index 00d85069d..814d474cb 100644 --- a/docs/readme.txt +++ b/docs/readme.txt @@ -399,6 +399,7 @@ NVIDIA GPUs require "NVIDIA Driver" (440.64 or later) and "CUDA Toolkit" (9.0 or - Ethereum Wallet, PBKDF2-HMAC-SHA256 - Ethereum Wallet, SCRYPT - MultiBit Classic .key (MD5) +- MultiBit Classic .wallet (scrypt) - MultiBit HD (scrypt) ## diff --git a/src/modules/module_27700.c b/src/modules/module_27700.c new file mode 100644 index 000000000..7e2c4bc53 --- /dev/null +++ b/src/modules/module_27700.c @@ -0,0 +1,540 @@ +/** + * Author......: See docs/credits.txt + * License.....: MIT + */ + +#include +#include "common.h" +#include "types.h" +#include "modules.h" +#include "bitops.h" +#include "convert.h" +#include "shared.h" + +static const u32 ATTACK_EXEC = ATTACK_EXEC_OUTSIDE_KERNEL; +static const u32 DGST_POS0 = 0; +static const u32 DGST_POS1 = 1; +static const u32 DGST_POS2 = 2; +static const u32 DGST_POS3 = 3; +static const u32 DGST_SIZE = DGST_SIZE_4_4; +static const u32 HASH_CATEGORY = HASH_CATEGORY_CRYPTOCURRENCY_WALLET; +static const char *HASH_NAME = "MultiBit Classic .wallet (scrypt)"; +static const u64 KERN_TYPE = 27700; +static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE; +static const u64 OPTS_TYPE = OPTS_TYPE_PT_GENERATE_LE + | OPTS_TYPE_MP_MULTI_DISABLE + | OPTS_TYPE_NATIVE_THREADS + | OPTS_TYPE_LOOP_PREPARE + | OPTS_TYPE_SELF_TEST_DISABLE; +static const u32 SALT_TYPE = SALT_TYPE_EMBEDDED; +static const char *ST_PASS = "hashcat"; +static const char *ST_HASH = "$multibit$3*16384*8*1*7523cb5482e81b81*91780fd49b81a782ab840157a69ba7996d81270eaf456c850f314fc1787d9b0b"; + +u32 module_attack_exec (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return ATTACK_EXEC; } +u32 module_dgst_pos0 (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS0; } +u32 module_dgst_pos1 (MAYBE_UNUSED const hashconfig_t *hashconfig, 
MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS1; } +u32 module_dgst_pos2 (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS2; } +u32 module_dgst_pos3 (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS3; } +u32 module_dgst_size (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_SIZE; } +u32 module_hash_category (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return HASH_CATEGORY; } +const char *module_hash_name (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return HASH_NAME; } +u64 module_kern_type (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return KERN_TYPE; } +u32 module_opti_type (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return OPTI_TYPE; } +u64 module_opts_type (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return OPTS_TYPE; } +u32 module_salt_type (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return SALT_TYPE; } +const char *module_st_hash (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return ST_HASH; } +const char *module_st_pass (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return ST_PASS; } + +static const char *SIGNATURE_MULTIBIT = "$multibit$"; + +static const u64 SCRYPT_N = 16384; +static const u64 SCRYPT_R = 8; +static const u64 SCRYPT_P = 1; + +bool module_unstable_warning (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra, MAYBE_UNUSED const hc_device_param_t *device_param) +{ + // AMD Radeon Pro W5700X Compute Engine; 1.2 (Apr 22 2021 21:54:44); 11.3.1; 20E241 + if ((device_param->opencl_platform_vendor_id == VENDOR_ID_APPLE) && (device_param->opencl_device_type & CL_DEVICE_TYPE_GPU)) + { + return true; + } + + // amdgpu-pro-20.50-1234664-ubuntu-20.04 (legacy) + if ((device_param->opencl_device_vendor_id == VENDOR_ID_AMD) && (device_param->has_vperm == false)) + { + return true; + } + + return false; +} + +u32 module_kernel_loops_min (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) +{ + const u32 kernel_loops_min = 1024; + + return kernel_loops_min; +} + +u32 module_kernel_loops_max (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const 
user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra)
+{
+  const u32 kernel_loops_max = 1024;
+
+  return kernel_loops_max;
+}
+
+u32 module_pw_max (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra)
+{
+  // this overrides the reductions of PW_MAX in case optimized kernel is selected
+  // IOW, even in optimized kernel mode it supports length 256
+
+  const u32 pw_max = PW_MAX;
+
+  return pw_max;
+}
+
+u64 module_extra_buffer_size (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra, MAYBE_UNUSED const hashes_t *hashes, MAYBE_UNUSED const hc_device_param_t *device_param)
+{
+  // we need to set the self-test hash settings to pass the self-test
+  // the decoder for the self-test is called after this function
+
+  const u64 scrypt_N = (hashes->salts_buf[0].scrypt_N) ? hashes->salts_buf[0].scrypt_N : SCRYPT_N;
+  const u64 scrypt_r = (hashes->salts_buf[0].scrypt_r) ? hashes->salts_buf[0].scrypt_r : SCRYPT_R;
+
+  const u64 kernel_power_max = ((OPTS_TYPE & OPTS_TYPE_MP_MULTI_DISABLE) ? 1 : device_param->device_processors) * device_param->kernel_threads_max * device_param->kernel_accel_max;
+
+  u64 tmto_start = 0;
+  u64 tmto_stop  = 4;
+
+  if (user_options->scrypt_tmto_chgd == true)
+  {
+    tmto_start = user_options->scrypt_tmto;
+    tmto_stop  = user_options->scrypt_tmto;
+  }
+
+  // size_pws
+
+  const u64 size_pws = kernel_power_max * sizeof (pw_t);
+
+  const u64 size_pws_amp = size_pws;
+
+  // size_pws_comp
+
+  const u64 size_pws_comp = kernel_power_max * (sizeof (u32) * 64);
+
+  // size_pws_idx
+
+  const u64 size_pws_idx = (kernel_power_max + 1) * sizeof (pw_idx_t);
+
+  // size_tmps
+
+  const u64 size_tmps = kernel_power_max * hashconfig->tmp_size;
+
+  // size_hooks
+
+  const u64 size_hooks = kernel_power_max * hashconfig->hook_size;
+
+  u64 size_pws_pre = 4;
+  u64 size_pws_base = 4;
+
+  if (user_options->slow_candidates == true)
+  {
+    // size_pws_pre
+
+    size_pws_pre = kernel_power_max * sizeof (pw_pre_t);
+
+    // size_pws_base
+
+    size_pws_base = kernel_power_max * sizeof (pw_pre_t);
+  }
+
+  // sometimes device_available_mem and device_maxmem_alloc reported back from the opencl runtime are a bit inaccurate.
+  // let's add some extra space just to be sure.
+ // now depends on the kernel-accel value (where scrypt and similar benefits), but also hard minimum 64mb and maximum 1024mb limit + + u64 EXTRA_SPACE = (1024ULL * 1024ULL) * device_param->kernel_accel_max; + + EXTRA_SPACE = MAX (EXTRA_SPACE, ( 64ULL * 1024ULL * 1024ULL)); + EXTRA_SPACE = MIN (EXTRA_SPACE, (1024ULL * 1024ULL * 1024ULL)); + + const u64 scrypt_extra_space + = device_param->size_bfs + + device_param->size_combs + + device_param->size_digests + + device_param->size_esalts + + device_param->size_markov_css + + device_param->size_plains + + device_param->size_results + + device_param->size_root_css + + device_param->size_rules + + device_param->size_rules_c + + device_param->size_salts + + device_param->size_shown + + device_param->size_tm + + device_param->size_st_digests + + device_param->size_st_salts + + device_param->size_st_esalts + + size_pws + + size_pws_amp + + size_pws_comp + + size_pws_idx + + size_tmps + + size_hooks + + size_pws_pre + + size_pws_base + + EXTRA_SPACE; + + bool not_enough_memory = true; + + u64 size_scrypt = 0; + + u64 tmto; + + for (tmto = tmto_start; tmto <= tmto_stop; tmto++) + { + size_scrypt = (128ULL * scrypt_r) * scrypt_N; + + size_scrypt /= 1ull << tmto; + + size_scrypt *= kernel_power_max; + + if ((size_scrypt / 4) > device_param->device_maxmem_alloc) continue; + + if ((size_scrypt + scrypt_extra_space) > device_param->device_available_mem) continue; + + not_enough_memory = false; + + break; + } + + if (not_enough_memory == true) return -1; + + return size_scrypt; +} + +u64 module_tmp_size (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) +{ + const u64 tmp_size = 0; // we'll add some later + + return tmp_size; +} + +u64 module_extra_tmp_size (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra, MAYBE_UNUSED const hashes_t *hashes) +{ + const u64 scrypt_N = (hashes->salts_buf[0].scrypt_N) ? hashes->salts_buf[0].scrypt_N : SCRYPT_N; + const u64 scrypt_r = (hashes->salts_buf[0].scrypt_r) ? hashes->salts_buf[0].scrypt_r : SCRYPT_R; + const u64 scrypt_p = (hashes->salts_buf[0].scrypt_p) ? hashes->salts_buf[0].scrypt_p : SCRYPT_P; + + // we need to check that all hashes have the same scrypt settings + + for (u32 i = 1; i < hashes->salts_cnt; i++) + { + if ((hashes->salts_buf[i].scrypt_N != scrypt_N) + || (hashes->salts_buf[i].scrypt_r != scrypt_r) + || (hashes->salts_buf[i].scrypt_p != scrypt_p)) + { + return -1; + } + } + + const u64 tmp_size = 128ULL * scrypt_r * scrypt_p; + + return tmp_size; +} + +bool module_warmup_disable (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) +{ + return true; +} + +char *module_jit_build_options (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra, MAYBE_UNUSED const hashes_t *hashes, MAYBE_UNUSED const hc_device_param_t *device_param) +{ + const u64 scrypt_N = (hashes->salts_buf[0].scrypt_N) ? hashes->salts_buf[0].scrypt_N : SCRYPT_N; + const u64 scrypt_r = (hashes->salts_buf[0].scrypt_r) ? hashes->salts_buf[0].scrypt_r : SCRYPT_R; + const u64 scrypt_p = (hashes->salts_buf[0].scrypt_p) ? 
hashes->salts_buf[0].scrypt_p : SCRYPT_P; + + const u64 extra_buffer_size = device_param->extra_buffer_size; + + const u64 kernel_power_max = ((OPTS_TYPE & OPTS_TYPE_MP_MULTI_DISABLE) ? 1 : device_param->device_processors) * device_param->kernel_threads_max * device_param->kernel_accel_max; + + const u64 size_scrypt = 128ULL * scrypt_r * scrypt_N; + + const u64 scrypt_tmto_final = (kernel_power_max * size_scrypt) / extra_buffer_size; + + const u64 tmp_size = 128ULL * scrypt_r * scrypt_p; + + char *jit_build_options = NULL; + + hc_asprintf (&jit_build_options, "-DSCRYPT_N=%u -DSCRYPT_R=%u -DSCRYPT_P=%u -DSCRYPT_TMTO=%" PRIu64 " -DSCRYPT_TMP_ELEM=%" PRIu64, + hashes->salts_buf[0].scrypt_N, + hashes->salts_buf[0].scrypt_r, + hashes->salts_buf[0].scrypt_p, + scrypt_tmto_final, + tmp_size / 16); + + return jit_build_options; +} + +int module_hash_decode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED void *digest_buf, MAYBE_UNUSED salt_t *salt, MAYBE_UNUSED void *esalt_buf, MAYBE_UNUSED void *hook_salt_buf, MAYBE_UNUSED hashinfo_t *hash_info, const char *line_buf, MAYBE_UNUSED const int line_len) +{ + u32 *digest = (u32 *) digest_buf; + + token_t token; + + token.token_cnt = 7; + + token.signatures_cnt = 1; + token.signatures_buf[0] = SIGNATURE_MULTIBIT; + + token.len[0] = 10; + token.attr[0] = TOKEN_ATTR_FIXED_LENGTH + | TOKEN_ATTR_VERIFY_SIGNATURE; + + token.len_min[1] = 1; + token.len_max[1] = 1; + token.sep[1] = '*'; + token.attr[1] = TOKEN_ATTR_VERIFY_LENGTH + | TOKEN_ATTR_VERIFY_DIGIT; + + token.len_min[2] = 5; + token.len_max[2] = 5; + token.sep[2] = '*'; + token.attr[2] = TOKEN_ATTR_VERIFY_LENGTH + | TOKEN_ATTR_VERIFY_DIGIT; + + token.len_min[3] = 1; + token.len_max[3] = 1; + token.sep[3] = '*'; + token.attr[3] = TOKEN_ATTR_VERIFY_LENGTH + | TOKEN_ATTR_VERIFY_DIGIT; + + token.len_min[4] = 1; + token.len_max[4] = 1; + token.sep[4] = '*'; + token.attr[4] = TOKEN_ATTR_VERIFY_LENGTH + | TOKEN_ATTR_VERIFY_DIGIT; + + token.len_min[5] = 16; + token.len_max[5] = 16; + token.sep[5] = '*'; + token.attr[5] = TOKEN_ATTR_VERIFY_LENGTH + | TOKEN_ATTR_VERIFY_HEX; + + token.len[6] = 64; + token.attr[6] = TOKEN_ATTR_FIXED_LENGTH + | TOKEN_ATTR_VERIFY_HEX; + + const int rc_tokenizer = input_tokenizer ((const u8 *) line_buf, line_len, &token); + + if (rc_tokenizer != PARSER_OK) return (rc_tokenizer); + + // version + + const u8 *version_pos = token.buf[1]; + + if (version_pos[0] != (u8) '3') return (PARSER_SIGNATURE_UNMATCHED); + + // scrypt settings + + const u8 *scrypt_n_pos = token.buf[2]; + + const u32 scrypt_n = hc_strtoul ((const char *) scrypt_n_pos, NULL, 10); + + if (scrypt_n != SCRYPT_N) return (PARSER_SALT_VALUE); + + const u8 *scrypt_r_pos = token.buf[3]; + + const u32 scrypt_r = hc_strtoul ((const char *) scrypt_r_pos, NULL, 10); + + if (scrypt_r != SCRYPT_R) return (PARSER_SALT_VALUE); + + const u8 *scrypt_p_pos = token.buf[4]; + + const u32 scrypt_p = hc_strtoul ((const char *) scrypt_p_pos, NULL, 10); + + if (scrypt_p != SCRYPT_P) return (PARSER_SALT_VALUE); + + salt->scrypt_N = SCRYPT_N; + salt->scrypt_r = SCRYPT_R; + salt->scrypt_p = SCRYPT_P; + + salt->salt_iter = salt->scrypt_N; + salt->salt_repeats = salt->scrypt_p - 1; + + // salt + + const u8 *salt_pos = token.buf[5]; + + salt->salt_buf[0] = hex_to_u32 (salt_pos + 0); + salt->salt_buf[1] = hex_to_u32 (salt_pos + 8); + + salt->salt_buf[0] = byte_swap_32 (salt->salt_buf[0]); // swap the salt (only the salt) + salt->salt_buf[1] = byte_swap_32 (salt->salt_buf[1]); + + // IV + + const u8 *blob_pos = token.buf[6]; + + 
salt->salt_buf[2] = hex_to_u32 (blob_pos + 0);
+  salt->salt_buf[3] = hex_to_u32 (blob_pos + 8);
+  salt->salt_buf[4] = hex_to_u32 (blob_pos + 16);
+  salt->salt_buf[5] = hex_to_u32 (blob_pos + 24);
+
+  // data
+
+  salt->salt_buf[6] = hex_to_u32 (blob_pos + 32);
+  salt->salt_buf[7] = hex_to_u32 (blob_pos + 40);
+  salt->salt_buf[8] = hex_to_u32 (blob_pos + 48);
+  salt->salt_buf[9] = hex_to_u32 (blob_pos + 56);
+
+  salt->salt_len = 40;
+
+  // fake digest:
+
+  digest[0] = salt->salt_buf[4];
+  digest[1] = salt->salt_buf[5];
+  digest[2] = salt->salt_buf[6];
+  digest[3] = salt->salt_buf[7];
+
+  return (PARSER_OK);
+}
+
+int module_hash_encode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const void *digest_buf, MAYBE_UNUSED const salt_t *salt, MAYBE_UNUSED const void *esalt_buf, MAYBE_UNUSED const void *hook_salt_buf, MAYBE_UNUSED const hashinfo_t *hash_info, char *line_buf, MAYBE_UNUSED const int line_size)
+{
+  const int line_len = snprintf (line_buf, line_size, "%s%u*%lu*%lu*%lu*%08x%08x*%08x%08x%08x%08x%08x%08x%08x%08x",
+    SIGNATURE_MULTIBIT,
+    3,
+    SCRYPT_N,
+    SCRYPT_R,
+    SCRYPT_P,
+    salt->salt_buf[0],
+    salt->salt_buf[1],
+    byte_swap_32 (salt->salt_buf[2]),
+    byte_swap_32 (salt->salt_buf[3]),
+    byte_swap_32 (salt->salt_buf[4]),
+    byte_swap_32 (salt->salt_buf[5]),
+    byte_swap_32 (salt->salt_buf[6]),
+    byte_swap_32 (salt->salt_buf[7]),
+    byte_swap_32 (salt->salt_buf[8]),
+    byte_swap_32 (salt->salt_buf[9]));
+
+  return line_len;
+}
+
+/*
+
+Find the right -n value for your GPU:
+=====================================
+
+1. For example, to find the value for 27700, first create a valid hash for 27700 as follows:
+
+$ ./hashcat --example-hashes -m 27700 | grep Example.Hash | grep -v Format | cut -b 25- > tmp.hash.27700
+
+2. Now let it iterate through all -n values to a certain point. In this case, I'm using 200, but in general it's a value that is at least twice the number of multiprocessors on the device. If you don't mind the extra runtime, you can just leave it as it is; it only runs a little longer.
+
+$ export i=1; while [ $i -ne 201 ]; do echo $i; ./hashcat --quiet tmp.hash.27700 --keep-guessing --self-test-disable --markov-disable --restore-disable --outfile-autohex-disable --wordlist-autohex-disable --potfile-disable --logfile-disable --hwmon-disable --status --status-timer 1 --runtime 28 --machine-readable --optimized-kernel-enable --workload-profile 3 --hash-type 27700 --attack-mode 3 ?b?b?b?b?b?b?b --backend-devices 1 --force -n $i; i=$(($i+1)); done | tee x
+
+3. Determine the highest measured H/s speed. But don't just use the highest value. Instead, use the number that seems most stable, usually at the beginning.
+
+$ grep "$(printf 'STATUS\t3')" x | cut -f4 -d$'\t' | sort -n | tail
+
+4. To match the speed you have chosen to the correct value in the 'x' file, simply search for it in that file. Then go up a little from the block where you found it. The -n value is the single value that appears just before the start of that block.
If you have multiple blocks at the same speed, choose the lowest value for -n + +*/ + +const char *module_extra_tuningdb_block (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) +{ + const char *extra_tuningdb_block = + "DEVICE_TYPE_CPU * 27700 1 N A\n" + "DEVICE_TYPE_GPU * 27700 1 N A\n" + "GeForce_GTX_980 * 27700 1 29 A\n" + "GeForce_GTX_1080 * 27700 1 15 A\n" + "GeForce_RTX_2080_Ti * 27700 1 68 A\n" + "GeForce_RTX_3060_Ti * 27700 1 51 A\n" + "GeForce_RTX_3070 * 27700 1 46 A\n" + "GeForce_RTX_3090 * 27700 1 82 A\n" + "ALIAS_AMD_RX480 * 27700 1 15 A\n" + "ALIAS_AMD_Vega64 * 27700 1 31 A\n" + "ALIAS_AMD_MI100 * 27700 1 79 A\n" + "ALIAS_AMD_RX6900XT * 27700 1 59 A\n" + ; + + return extra_tuningdb_block; +} + +void module_init (module_ctx_t *module_ctx) +{ + module_ctx->module_context_size = MODULE_CONTEXT_SIZE_CURRENT; + module_ctx->module_interface_version = MODULE_INTERFACE_VERSION_CURRENT; + + module_ctx->module_attack_exec = module_attack_exec; + module_ctx->module_benchmark_esalt = MODULE_DEFAULT; + module_ctx->module_benchmark_hook_salt = MODULE_DEFAULT; + module_ctx->module_benchmark_mask = MODULE_DEFAULT; + module_ctx->module_benchmark_salt = MODULE_DEFAULT; + module_ctx->module_build_plain_postprocess = MODULE_DEFAULT; + module_ctx->module_deep_comp_kernel = MODULE_DEFAULT; + module_ctx->module_deprecated_notice = MODULE_DEFAULT; + module_ctx->module_dgst_pos0 = module_dgst_pos0; + module_ctx->module_dgst_pos1 = module_dgst_pos1; + module_ctx->module_dgst_pos2 = module_dgst_pos2; + module_ctx->module_dgst_pos3 = module_dgst_pos3; + module_ctx->module_dgst_size = module_dgst_size; + module_ctx->module_dictstat_disable = MODULE_DEFAULT; + module_ctx->module_esalt_size = MODULE_DEFAULT; + module_ctx->module_extra_buffer_size = module_extra_buffer_size; + module_ctx->module_extra_tmp_size = module_extra_tmp_size; + module_ctx->module_extra_tuningdb_block = module_extra_tuningdb_block; + module_ctx->module_forced_outfile_format = MODULE_DEFAULT; + module_ctx->module_hash_binary_count = MODULE_DEFAULT; + module_ctx->module_hash_binary_parse = MODULE_DEFAULT; + module_ctx->module_hash_binary_save = MODULE_DEFAULT; + module_ctx->module_hash_decode_potfile = MODULE_DEFAULT; + module_ctx->module_hash_decode_zero_hash = MODULE_DEFAULT; + module_ctx->module_hash_decode = module_hash_decode; + module_ctx->module_hash_encode_status = MODULE_DEFAULT; + module_ctx->module_hash_encode_potfile = MODULE_DEFAULT; + module_ctx->module_hash_encode = module_hash_encode; + module_ctx->module_hash_init_selftest = MODULE_DEFAULT; + module_ctx->module_hash_mode = MODULE_DEFAULT; + module_ctx->module_hash_category = module_hash_category; + module_ctx->module_hash_name = module_hash_name; + module_ctx->module_hashes_count_min = MODULE_DEFAULT; + module_ctx->module_hashes_count_max = MODULE_DEFAULT; + module_ctx->module_hlfmt_disable = MODULE_DEFAULT; + module_ctx->module_hook_extra_param_size = MODULE_DEFAULT; + module_ctx->module_hook_extra_param_init = MODULE_DEFAULT; + module_ctx->module_hook_extra_param_term = MODULE_DEFAULT; + module_ctx->module_hook12 = MODULE_DEFAULT; + module_ctx->module_hook23 = MODULE_DEFAULT; + module_ctx->module_hook_salt_size = MODULE_DEFAULT; + module_ctx->module_hook_size = MODULE_DEFAULT; + module_ctx->module_jit_build_options = module_jit_build_options; + module_ctx->module_jit_cache_disable = MODULE_DEFAULT; + module_ctx->module_kernel_accel_max = MODULE_DEFAULT; + 
module_ctx->module_kernel_accel_min = MODULE_DEFAULT; + module_ctx->module_kernel_loops_max = module_kernel_loops_max; + module_ctx->module_kernel_loops_min = module_kernel_loops_min; + module_ctx->module_kernel_threads_max = MODULE_DEFAULT; + module_ctx->module_kernel_threads_min = MODULE_DEFAULT; + module_ctx->module_kern_type = module_kern_type; + module_ctx->module_kern_type_dynamic = MODULE_DEFAULT; + module_ctx->module_opti_type = module_opti_type; + module_ctx->module_opts_type = module_opts_type; + module_ctx->module_outfile_check_disable = MODULE_DEFAULT; + module_ctx->module_outfile_check_nocomp = MODULE_DEFAULT; + module_ctx->module_potfile_custom_check = MODULE_DEFAULT; + module_ctx->module_potfile_disable = MODULE_DEFAULT; + module_ctx->module_potfile_keep_all_hashes = MODULE_DEFAULT; + module_ctx->module_pwdump_column = MODULE_DEFAULT; + module_ctx->module_pw_max = module_pw_max; + module_ctx->module_pw_min = MODULE_DEFAULT; + module_ctx->module_salt_max = MODULE_DEFAULT; + module_ctx->module_salt_min = MODULE_DEFAULT; + module_ctx->module_salt_type = module_salt_type; + module_ctx->module_separator = MODULE_DEFAULT; + module_ctx->module_st_hash = module_st_hash; + module_ctx->module_st_pass = module_st_pass; + module_ctx->module_tmp_size = module_tmp_size; + module_ctx->module_unstable_warning = module_unstable_warning; + module_ctx->module_warmup_disable = module_warmup_disable; +} diff --git a/tools/test_modules/m27700.pm b/tools/test_modules/m27700.pm new file mode 100644 index 000000000..78fa12095 --- /dev/null +++ b/tools/test_modules/m27700.pm @@ -0,0 +1,135 @@ +#!/usr/bin/env perl + +## +## Author......: See docs/credits.txt +## License.....: MIT +## + +use strict; +use warnings; + +use Crypt::ScryptKDF qw (scrypt_raw); +use Encode; +use Crypt::CBC; + +sub module_constraints { [[0, 256], [8, 8], [-1, -1], [-1, -1], [-1, -1]] } + +my $SCRYPT_N = 16384; +my $SCRYPT_R = 8; +my $SCRYPT_P = 1; + +my $DATA_FIXED = "\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10"; + +sub module_generate_hash +{ + my $word = shift; + my $salt = shift; + my $iv = shift // random_bytes (16); + my $data = shift; + + my $word_utf16be = encode ('UTF-16BE', $word); + + my $key = scrypt_raw ($word_utf16be, $salt, $SCRYPT_N, $SCRYPT_R, $SCRYPT_P, 32); + + my $aes_cbc = Crypt::CBC->new ({ + cipher => "Crypt::Rijndael", + iv => $iv, + key => $key, + keysize => 32, + literal_key => 1, + header => "none", + padding => "none" + }); + + my $data_block = ""; + + if (defined ($data)) # verify + { + my $data_dec = $aes_cbc->decrypt ($data); + + if ($data_dec eq $DATA_FIXED) + { + $data_block = $data; + } + } + else + { + $data = $DATA_FIXED; + + $data_block = $aes_cbc->encrypt ($data); + } + + my $hash = sprintf ("\$multibit\$3*%d*%d*%d*%s*%s", $SCRYPT_N, $SCRYPT_R, $SCRYPT_P, unpack ("H*", $salt), unpack ("H*", $iv . 
$data_block)); + + return $hash; +} + +sub module_verify_hash +{ + my $line = shift; + + return unless (substr ($line, 0, 12) eq '$multibit$3*'); + + # split hash and word: + + my $idx1 = index ($line, ":", 12); + + return if ($idx1 < 1); + + my $hash = substr ($line, 0, $idx1); + my $word = substr ($line, $idx1 + 1); + + # scrypt parameters: + + my $idx2 = index ($hash, "*", 12); + + return if ($idx2 < 0); + + my $scrypt_n = substr ($hash, 12, $idx2 - 12); + + $idx1 = index ($hash, "*", $idx2 + 1); + + return if ($idx1 < 0); + + my $scrypt_r = substr ($hash, $idx2 + 1, $idx1 - $idx2 - 1); + + $idx2 = index ($hash, "*", $idx1 + 1); + + return if ($idx2 < 0); + + my $scrypt_p = substr ($hash, $idx1 + 1, $idx2 - $idx1 - 1); + + # salt: + + $idx1 = index ($hash, "*", $idx2 + 1); + + return if ($idx1 < 0); + + my $salt = substr ($hash, $idx2 + 1, $idx1 - $idx2 - 1); + + # IV: + + my $iv = substr ($hash, $idx1 + 1, 32); + + # data: + + my $data = substr ($hash, $idx1 + 1 + 32, 32); + + return unless $salt =~ m/^[0-9a-fA-F]{16}$/; + return unless $iv =~ m/^[0-9a-fA-F]{32}$/; + return unless $data =~ m/^[0-9a-fA-F]{32}$/; + + # hex to binary/raw: + + $salt = pack ("H*", $salt); + $iv = pack ("H*", $iv); + $data = pack ("H*", $data); + + $word = pack_if_HEX_notation ($word); + + my $new_hash = module_generate_hash ($word, $salt, $iv, $data); + + return ($new_hash, $word); +} + +1;
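
As a usage note, the verification flow implemented above (scrypt over the UTF-16BE encoded password, AES-256-CBC decryption of the first data block, then a check for sixteen 0x10 bytes) can also be exercised against a single hash line outside of hashcat. The following is a minimal, illustrative sketch only, not part of the patch: it reuses the same CPAN modules as tools/test_modules/m27700.pm, assumes the field layout parsed by module_hash_decode(), and the script name and its two positional arguments are hypothetical.

#!/usr/bin/env perl

# check27700.pl (hypothetical helper): verify one -m 27700 hash line against one
# password candidate, mirroring the kernel's final check in m27700_comp ().

use strict;
use warnings;

use Encode;
use Crypt::ScryptKDF qw (scrypt_raw);
use Crypt::CBC;

my ($hash, $password) = @ARGV;

# expected layout: $multibit$3*N*r*p*<16 hex salt>*<32 hex IV><32 hex first ciphertext block>

my @f = split /\*/, $hash;

die "unexpected hash format\n" unless (scalar @f == 6 && $f[0] eq '$multibit$3');

my ($N, $r, $p) = @f[1, 2, 3];

my $salt = pack ("H*", $f[4]);
my $iv   = pack ("H*", substr ($f[5],  0, 32));
my $data = pack ("H*", substr ($f[5], 32, 32));

# KDF: scrypt over the UTF-16BE password, exactly as in module_generate_hash ()

my $key = scrypt_raw (encode ('UTF-16BE', $password), $salt, $N, $r, $p, 32);

my $aes_cbc = Crypt::CBC->new ({
  cipher      => "Crypt::Rijndael",
  iv          => $iv,
  key         => $key,
  keysize     => 32,
  literal_key => 1,
  header      => "none",
  padding     => "none"
});

# the kernel accepts the candidate when the first decrypted block is sixteen 0x10 bytes

if ($aes_cbc->decrypt ($data) eq ("\x10" x 16))
{
  print "candidate matches\n";
}
else
{
  print "no match\n";
}

Run against the ST_HASH / ST_PASS pair defined in module_27700.c, this should report a match.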