From bf865d8bff2db0d7cc617c3fd5d8e73bd1b4a8b3 Mon Sep 17 00:00:00 2001 From: Jens Steube Date: Tue, 14 Dec 2021 13:46:20 +0100 Subject: [PATCH] Added hash-mode: Exodus Desktop Wallet (scrypt) --- OpenCL/m28200-pure.cl | 610 +++++++++++++++++++++++++++++++++++ docs/changes.txt | 6 + docs/readme.txt | 1 + src/modules/module_28200.c | 594 ++++++++++++++++++++++++++++++++++ tools/exodus2hashcat.py | 69 ++++ tools/test_modules/m28200.pm | 111 +++++++ 6 files changed, 1391 insertions(+) create mode 100644 OpenCL/m28200-pure.cl create mode 100644 src/modules/module_28200.c create mode 100755 tools/exodus2hashcat.py create mode 100644 tools/test_modules/m28200.pm diff --git a/OpenCL/m28200-pure.cl b/OpenCL/m28200-pure.cl new file mode 100644 index 000000000..45fee0267 --- /dev/null +++ b/OpenCL/m28200-pure.cl @@ -0,0 +1,610 @@ +/** + * Author......: See docs/credits.txt + * License.....: MIT + */ + +#ifdef KERNEL_STATIC +#include "inc_vendor.h" +#include "inc_types.h" +#include "inc_platform.cl" +#include "inc_common.cl" +#include "inc_hash_sha256.cl" +#include "inc_cipher_aes.cl" +#include "inc_cipher_aes-gcm.cl" +#endif + +#define COMPARE_S "inc_comp_single.cl" +#define COMPARE_M "inc_comp_multi.cl" + +typedef struct exodus_tmp +{ + #ifndef SCRYPT_TMP_ELEM + #define SCRYPT_TMP_ELEM 1 + #endif + + uint4 P[SCRYPT_TMP_ELEM]; + +} exodus_tmp_t; + +typedef struct exodus +{ + u32 iv[4]; + u32 data[8]; + u32 tag[4]; + +} exodus_t; + +#ifdef IS_CUDA + +inline __device__ uint4 operator & (const uint4 a, const u32 b) { return make_uint4 ((a.x & b ), (a.y & b ), (a.z & b ), (a.w & b )); } +inline __device__ uint4 operator << (const uint4 a, const u32 b) { return make_uint4 ((a.x << b ), (a.y << b ), (a.z << b ), (a.w << b )); } +inline __device__ uint4 operator >> (const uint4 a, const u32 b) { return make_uint4 ((a.x >> b ), (a.y >> b ), (a.z >> b ), (a.w >> b )); } +inline __device__ uint4 operator + (const uint4 a, const uint4 b) { return make_uint4 ((a.x + b.x), (a.y + b.y), (a.z + b.z), (a.w + b.w)); } +inline __device__ uint4 operator ^ (const uint4 a, const uint4 b) { return make_uint4 ((a.x ^ b.x), (a.y ^ b.y), (a.z ^ b.z), (a.w ^ b.w)); } +inline __device__ uint4 operator | (const uint4 a, const uint4 b) { return make_uint4 ((a.x | b.x), (a.y | b.y), (a.z | b.z), (a.w | b.w)); } +inline __device__ void operator ^= ( uint4 &a, const uint4 b) { a.x ^= b.x; a.y ^= b.y; a.z ^= b.z; a.w ^= b.w; } + +inline __device__ uint4 rotate (const uint4 a, const int n) +{ + return ((a << n) | ((a >> (32 - n)))); +} + +#endif + +DECLSPEC uint4 hc_swap32_4 (uint4 v) +{ + return (rotate ((v & 0x00FF00FF), 24u) | rotate ((v & 0xFF00FF00), 8u)); +} + +#define GET_SCRYPT_CNT(r,p) (2 * (r) * 16 * (p)) +#define GET_SMIX_CNT(r,N) (2 * (r) * 16 * (N)) +#define GET_STATE_CNT(r) (2 * (r) * 16) + +#define SCRYPT_CNT GET_SCRYPT_CNT (SCRYPT_R, SCRYPT_P) +#define SCRYPT_CNT4 (SCRYPT_CNT / 4) +#define STATE_CNT GET_STATE_CNT (SCRYPT_R) +#define STATE_CNT4 (STATE_CNT / 4) + +#define ADD_ROTATE_XOR(r,i1,i2,s) (r) ^= rotate ((i1) + (i2), (s)); + +#ifdef IS_CUDA + +#define SALSA20_2R() \ +{ \ + ADD_ROTATE_XOR (X1, X0, X3, 7); \ + ADD_ROTATE_XOR (X2, X1, X0, 9); \ + ADD_ROTATE_XOR (X3, X2, X1, 13); \ + ADD_ROTATE_XOR (X0, X3, X2, 18); \ + \ + X1 = make_uint4 (X1.w, X1.x, X1.y, X1.z); \ + X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \ + X3 = make_uint4 (X3.y, X3.z, X3.w, X3.x); \ + \ + ADD_ROTATE_XOR (X3, X0, X1, 7); \ + ADD_ROTATE_XOR (X2, X3, X0, 9); \ + ADD_ROTATE_XOR (X1, X2, X3, 13); \ + ADD_ROTATE_XOR (X0, X1, X2, 18); \ + \ + X1 = 
make_uint4 (X1.y, X1.z, X1.w, X1.x); \ + X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \ + X3 = make_uint4 (X3.w, X3.x, X3.y, X3.z); \ +} +#else +#define SALSA20_2R() \ +{ \ + ADD_ROTATE_XOR (X1, X0, X3, 7); \ + ADD_ROTATE_XOR (X2, X1, X0, 9); \ + ADD_ROTATE_XOR (X3, X2, X1, 13); \ + ADD_ROTATE_XOR (X0, X3, X2, 18); \ + \ + X1 = X1.s3012; \ + X2 = X2.s2301; \ + X3 = X3.s1230; \ + \ + ADD_ROTATE_XOR (X3, X0, X1, 7); \ + ADD_ROTATE_XOR (X2, X3, X0, 9); \ + ADD_ROTATE_XOR (X1, X2, X3, 13); \ + ADD_ROTATE_XOR (X0, X1, X2, 18); \ + \ + X1 = X1.s1230; \ + X2 = X2.s2301; \ + X3 = X3.s3012; \ +} +#endif + +#define Coord(xd4,y,z) (((xd4) * ySIZE * zSIZE) + ((y) * zSIZE) + (z)) +#define CO Coord(xd4,y,z) + +DECLSPEC void salsa_r (uint4 *TI) +{ + uint4 R0 = TI[STATE_CNT4 - 4]; + uint4 R1 = TI[STATE_CNT4 - 3]; + uint4 R2 = TI[STATE_CNT4 - 2]; + uint4 R3 = TI[STATE_CNT4 - 1]; + + for (int i = 0; i < STATE_CNT4; i += 4) + { + uint4 Y0 = TI[i + 0]; + uint4 Y1 = TI[i + 1]; + uint4 Y2 = TI[i + 2]; + uint4 Y3 = TI[i + 3]; + + R0 = R0 ^ Y0; + R1 = R1 ^ Y1; + R2 = R2 ^ Y2; + R3 = R3 ^ Y3; + + uint4 X0 = R0; + uint4 X1 = R1; + uint4 X2 = R2; + uint4 X3 = R3; + + SALSA20_2R (); + SALSA20_2R (); + SALSA20_2R (); + SALSA20_2R (); + + R0 = R0 + X0; + R1 = R1 + X1; + R2 = R2 + X2; + R3 = R3 + X3; + + TI[i + 0] = R0; + TI[i + 1] = R1; + TI[i + 2] = R2; + TI[i + 3] = R3; + } + + #if SCRYPT_R > 1 + + uint4 TT[STATE_CNT4 / 2]; + + for (int dst_off = 0, src_off = 4; src_off < STATE_CNT4; dst_off += 4, src_off += 8) + { + TT[dst_off + 0] = TI[src_off + 0]; + TT[dst_off + 1] = TI[src_off + 1]; + TT[dst_off + 2] = TI[src_off + 2]; + TT[dst_off + 3] = TI[src_off + 3]; + } + + for (int dst_off = 4, src_off = 8; src_off < STATE_CNT4; dst_off += 4, src_off += 8) + { + TI[dst_off + 0] = TI[src_off + 0]; + TI[dst_off + 1] = TI[src_off + 1]; + TI[dst_off + 2] = TI[src_off + 2]; + TI[dst_off + 3] = TI[src_off + 3]; + } + + for (int dst_off = STATE_CNT4 / 2, src_off = 0; dst_off < STATE_CNT4; dst_off += 4, src_off += 4) + { + TI[dst_off + 0] = TT[src_off + 0]; + TI[dst_off + 1] = TT[src_off + 1]; + TI[dst_off + 2] = TT[src_off + 2]; + TI[dst_off + 3] = TT[src_off + 3]; + } + + #endif +} + +DECLSPEC void scrypt_smix_init (uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3) +{ + const u32 ySIZE = SCRYPT_N / SCRYPT_TMTO; + const u32 zSIZE = STATE_CNT4; + + const u32 x = get_global_id (0); + + const u32 xd4 = x / 4; + const u32 xm4 = x & 3; + + GLOBAL_AS uint4 *V; + + switch (xm4) + { + case 0: V = V0; break; + case 1: V = V1; break; + case 2: V = V2; break; + case 3: V = V3; break; + } + + for (u32 y = 0; y < ySIZE; y++) + { + for (u32 z = 0; z < zSIZE; z++) V[CO] = X[z]; + + for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r (X); + } +} + +DECLSPEC void scrypt_smix_loop (uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3) +{ + const u32 ySIZE = SCRYPT_N / SCRYPT_TMTO; + const u32 zSIZE = STATE_CNT4; + + const u32 x = get_global_id (0); + + const u32 xd4 = x / 4; + const u32 xm4 = x & 3; + + GLOBAL_AS uint4 *V; + + switch (xm4) + { + case 0: V = V0; break; + case 1: V = V1; break; + case 2: V = V2; break; + case 3: V = V3; break; + } + + // note: fixed 1024 iterations = forced -u 1024 + + for (u32 N_pos = 0; N_pos < 1024; N_pos++) + { + const u32 k = X[zSIZE - 4].x & (SCRYPT_N - 1); + + const u32 y = k / SCRYPT_TMTO; + + const u32 km = k - (y * SCRYPT_TMTO); + + uint4 T[STATE_CNT4]; + + for (u32 z = 0; z < zSIZE; z++) T[z] = V[CO]; + + for (u32 i = 0; i < km; 
i++) salsa_r (T); + + for (u32 z = 0; z < zSIZE; z++) X[z] ^= T[z]; + + salsa_r (X); + } +} + +KERNEL_FQ void m28200_init (KERN_ATTR_TMPS_ESALT (exodus_tmp_t, exodus_t)) +{ + /** + * base + */ + + const u64 gid = get_global_id (0); + + if (gid >= gid_max) return; + + sha256_hmac_ctx_t sha256_hmac_ctx; + + sha256_hmac_init_global_swap (&sha256_hmac_ctx, pws[gid].i, pws[gid].pw_len); + + sha256_hmac_update_global_swap (&sha256_hmac_ctx, salt_bufs[SALT_POS].salt_buf, salt_bufs[SALT_POS].salt_len); + + for (u32 i = 0, j = 1, k = 0; i < SCRYPT_CNT; i += 8, j += 1, k += 2) + { + sha256_hmac_ctx_t sha256_hmac_ctx2 = sha256_hmac_ctx; + + u32 w0[4]; + u32 w1[4]; + u32 w2[4]; + u32 w3[4]; + + w0[0] = j; + w0[1] = 0; + w0[2] = 0; + w0[3] = 0; + w1[0] = 0; + w1[1] = 0; + w1[2] = 0; + w1[3] = 0; + w2[0] = 0; + w2[1] = 0; + w2[2] = 0; + w2[3] = 0; + w3[0] = 0; + w3[1] = 0; + w3[2] = 0; + w3[3] = 0; + + sha256_hmac_update_64 (&sha256_hmac_ctx2, w0, w1, w2, w3, 4); + + sha256_hmac_final (&sha256_hmac_ctx2); + + u32 digest[8]; + + digest[0] = sha256_hmac_ctx2.opad.h[0]; + digest[1] = sha256_hmac_ctx2.opad.h[1]; + digest[2] = sha256_hmac_ctx2.opad.h[2]; + digest[3] = sha256_hmac_ctx2.opad.h[3]; + digest[4] = sha256_hmac_ctx2.opad.h[4]; + digest[5] = sha256_hmac_ctx2.opad.h[5]; + digest[6] = sha256_hmac_ctx2.opad.h[6]; + digest[7] = sha256_hmac_ctx2.opad.h[7]; + + #ifdef IS_CUDA + const uint4 tmp0 = make_uint4 (digest[0], digest[1], digest[2], digest[3]); + const uint4 tmp1 = make_uint4 (digest[4], digest[5], digest[6], digest[7]); + #else + const uint4 tmp0 = (uint4) (digest[0], digest[1], digest[2], digest[3]); + const uint4 tmp1 = (uint4) (digest[4], digest[5], digest[6], digest[7]); + #endif + + tmps[gid].P[k + 0] = tmp0; + tmps[gid].P[k + 1] = tmp1; + } + + for (u32 l = 0; l < SCRYPT_CNT4; l += 4) + { + uint4 T[4]; + + T[0] = tmps[gid].P[l + 0]; + T[1] = tmps[gid].P[l + 1]; + T[2] = tmps[gid].P[l + 2]; + T[3] = tmps[gid].P[l + 3]; + + T[0] = hc_swap32_4 (T[0]); + T[1] = hc_swap32_4 (T[1]); + T[2] = hc_swap32_4 (T[2]); + T[3] = hc_swap32_4 (T[3]); + + uint4 X[4]; + + #ifdef IS_CUDA + X[0] = make_uint4 (T[0].x, T[1].y, T[2].z, T[3].w); + X[1] = make_uint4 (T[1].x, T[2].y, T[3].z, T[0].w); + X[2] = make_uint4 (T[2].x, T[3].y, T[0].z, T[1].w); + X[3] = make_uint4 (T[3].x, T[0].y, T[1].z, T[2].w); + #else + X[0] = (uint4) (T[0].x, T[1].y, T[2].z, T[3].w); + X[1] = (uint4) (T[1].x, T[2].y, T[3].z, T[0].w); + X[2] = (uint4) (T[2].x, T[3].y, T[0].z, T[1].w); + X[3] = (uint4) (T[3].x, T[0].y, T[1].z, T[2].w); + #endif + + tmps[gid].P[l + 0] = X[0]; + tmps[gid].P[l + 1] = X[1]; + tmps[gid].P[l + 2] = X[2]; + tmps[gid].P[l + 3] = X[3]; + } +} + +KERNEL_FQ void m28200_loop_prepare (KERN_ATTR_TMPS_ESALT (exodus_tmp_t, exodus_t)) +{ + /** + * base + */ + + const u64 gid = get_global_id (0); + const u64 lid = get_local_id (0); + + if (gid >= gid_max) return; + + // SCRYPT part, init V + + GLOBAL_AS uint4 *d_scrypt0_buf = (GLOBAL_AS uint4 *) d_extra0_buf; + GLOBAL_AS uint4 *d_scrypt1_buf = (GLOBAL_AS uint4 *) d_extra1_buf; + GLOBAL_AS uint4 *d_scrypt2_buf = (GLOBAL_AS uint4 *) d_extra2_buf; + GLOBAL_AS uint4 *d_scrypt3_buf = (GLOBAL_AS uint4 *) d_extra3_buf; + + uint4 X[STATE_CNT4]; + + const u32 P_offset = salt_repeat * STATE_CNT4; + + GLOBAL_AS uint4 *P = tmps[gid].P + P_offset; + + for (int z = 0; z < STATE_CNT4; z++) X[z] = P[z]; + + scrypt_smix_init (X, d_scrypt0_buf, d_scrypt1_buf, d_scrypt2_buf, d_scrypt3_buf); + + for (int z = 0; z < STATE_CNT4; z++) P[z] = X[z]; +} + +KERNEL_FQ void m28200_loop 
(KERN_ATTR_TMPS_ESALT (exodus_tmp_t, exodus_t)) +{ + const u64 gid = get_global_id (0); + const u64 lid = get_local_id (0); + + if (gid >= gid_max) return; + + GLOBAL_AS uint4 *d_scrypt0_buf = (GLOBAL_AS uint4 *) d_extra0_buf; + GLOBAL_AS uint4 *d_scrypt1_buf = (GLOBAL_AS uint4 *) d_extra1_buf; + GLOBAL_AS uint4 *d_scrypt2_buf = (GLOBAL_AS uint4 *) d_extra2_buf; + GLOBAL_AS uint4 *d_scrypt3_buf = (GLOBAL_AS uint4 *) d_extra3_buf; + + uint4 X[STATE_CNT4]; + + const u32 P_offset = salt_repeat * STATE_CNT4; + + GLOBAL_AS uint4 *P = tmps[gid].P + P_offset; + + for (int z = 0; z < STATE_CNT4; z++) X[z] = P[z]; + + scrypt_smix_loop (X, d_scrypt0_buf, d_scrypt1_buf, d_scrypt2_buf, d_scrypt3_buf); + + for (int z = 0; z < STATE_CNT4; z++) P[z] = X[z]; +} + +KERNEL_FQ void m28200_comp (KERN_ATTR_TMPS_ESALT (exodus_tmp_t, exodus_t)) +{ + const u64 gid = get_global_id (0); + const u64 lid = get_local_id (0); + const u64 lsz = get_local_size (0); + + /** + * aes shared + */ + + #ifdef REAL_SHM + + LOCAL_VK u32 s_td0[256]; + LOCAL_VK u32 s_td1[256]; + LOCAL_VK u32 s_td2[256]; + LOCAL_VK u32 s_td3[256]; + LOCAL_VK u32 s_td4[256]; + + LOCAL_VK u32 s_te0[256]; + LOCAL_VK u32 s_te1[256]; + LOCAL_VK u32 s_te2[256]; + LOCAL_VK u32 s_te3[256]; + LOCAL_VK u32 s_te4[256]; + + for (u32 i = lid; i < 256; i += lsz) + { + s_td0[i] = td0[i]; + s_td1[i] = td1[i]; + s_td2[i] = td2[i]; + s_td3[i] = td3[i]; + s_td4[i] = td4[i]; + + s_te0[i] = te0[i]; + s_te1[i] = te1[i]; + s_te2[i] = te2[i]; + s_te3[i] = te3[i]; + s_te4[i] = te4[i]; + } + + SYNC_THREADS (); + + #else + + CONSTANT_AS u32a *s_td0 = td0; + CONSTANT_AS u32a *s_td1 = td1; + CONSTANT_AS u32a *s_td2 = td2; + CONSTANT_AS u32a *s_td3 = td3; + CONSTANT_AS u32a *s_td4 = td4; + + CONSTANT_AS u32a *s_te0 = te0; + CONSTANT_AS u32a *s_te1 = te1; + CONSTANT_AS u32a *s_te2 = te2; + CONSTANT_AS u32a *s_te3 = te3; + CONSTANT_AS u32a *s_te4 = te4; + + #endif + + if (gid >= gid_max) return; + + /** + * 2nd pbkdf2, creates B + */ + + u32 w0[4]; + u32 w1[4]; + u32 w2[4]; + u32 w3[4]; + + sha256_hmac_ctx_t ctx; + + sha256_hmac_init_global_swap (&ctx, pws[gid].i, pws[gid].pw_len); + + for (u32 l = 0; l < SCRYPT_CNT4; l += 4) + { + uint4 X[4]; + + X[0] = tmps[gid].P[l + 0]; + X[1] = tmps[gid].P[l + 1]; + X[2] = tmps[gid].P[l + 2]; + X[3] = tmps[gid].P[l + 3]; + + uint4 T[4]; + + #ifdef IS_CUDA + T[0] = make_uint4 (X[0].x, X[3].y, X[2].z, X[1].w); + T[1] = make_uint4 (X[1].x, X[0].y, X[3].z, X[2].w); + T[2] = make_uint4 (X[2].x, X[1].y, X[0].z, X[3].w); + T[3] = make_uint4 (X[3].x, X[2].y, X[1].z, X[0].w); + #else + T[0] = (uint4) (X[0].x, X[3].y, X[2].z, X[1].w); + T[1] = (uint4) (X[1].x, X[0].y, X[3].z, X[2].w); + T[2] = (uint4) (X[2].x, X[1].y, X[0].z, X[3].w); + T[3] = (uint4) (X[3].x, X[2].y, X[1].z, X[0].w); + #endif + + T[0] = hc_swap32_4 (T[0]); + T[1] = hc_swap32_4 (T[1]); + T[2] = hc_swap32_4 (T[2]); + T[3] = hc_swap32_4 (T[3]); + + w0[0] = T[0].x; + w0[1] = T[0].y; + w0[2] = T[0].z; + w0[3] = T[0].w; + w1[0] = T[1].x; + w1[1] = T[1].y; + w1[2] = T[1].z; + w1[3] = T[1].w; + w2[0] = T[2].x; + w2[1] = T[2].y; + w2[2] = T[2].z; + w2[3] = T[2].w; + w3[0] = T[3].x; + w3[1] = T[3].y; + w3[2] = T[3].z; + w3[3] = T[3].w; + + sha256_hmac_update_64 (&ctx, w0, w1, w2, w3, 64); + } + + w0[0] = 1; + w0[1] = 0; + w0[2] = 0; + w0[3] = 0; + w1[0] = 0; + w1[1] = 0; + w1[2] = 0; + w1[3] = 0; + w2[0] = 0; + w2[1] = 0; + w2[2] = 0; + w2[3] = 0; + w3[0] = 0; + w3[1] = 0; + w3[2] = 0; + w3[3] = 0; + + sha256_hmac_update_64 (&ctx, w0, w1, w2, w3, 4); + + sha256_hmac_final (&ctx); + + // GCM 
stuff + + u32 ukey[8]; + + ukey[0] = ctx.opad.h[0]; + ukey[1] = ctx.opad.h[1]; + ukey[2] = ctx.opad.h[2]; + ukey[3] = ctx.opad.h[3]; + ukey[4] = ctx.opad.h[4]; + ukey[5] = ctx.opad.h[5]; + ukey[6] = ctx.opad.h[6]; + ukey[7] = ctx.opad.h[7]; + + u32 key[60] = { 0 }; + u32 subKey[4] = { 0 }; + + AES_GCM_Init (ukey, 256, key, subKey, s_te0, s_te1, s_te2, s_te3, s_te4); + + u32 iv[4]; + + iv[0] = esalt_bufs[DIGESTS_OFFSET].iv[0]; + iv[1] = esalt_bufs[DIGESTS_OFFSET].iv[1]; + iv[2] = esalt_bufs[DIGESTS_OFFSET].iv[2]; + iv[3] = 0; + + u32 J0[4] = { 0 }; + + AES_GCM_Prepare_J0 (iv, 12, subKey, J0); + + u32 T[4] = { 0 }; + u32 S[4] = { 0 }; + + u32 S_len = 16; + u32 aad_buf[4] = { 0 }; + u32 aad_len = 0; + + AES_GCM_GHASH_GLOBAL (subKey, aad_buf, aad_len, esalt_bufs[DIGESTS_OFFSET].data, 32, S); + + AES_GCM_GCTR (key, J0, S, S_len, T, s_te0, s_te1, s_te2, s_te3, s_te4); + + const u32 r0 = T[0]; + const u32 r1 = T[1]; + const u32 r2 = T[2]; + const u32 r3 = T[3]; + + #define il_pos 0 + + #ifdef KERNEL_STATIC + #include COMPARE_M + #endif +} diff --git a/docs/changes.txt b/docs/changes.txt index a18355a87..80626475b 100644 --- a/docs/changes.txt +++ b/docs/changes.txt @@ -1,5 +1,11 @@ * changes v6.2.5 -> v6.2.x +## +## Algorithms +## + +- Added hash-mode: Exodus Desktop Wallet (scrypt) + ## ## Features ## diff --git a/docs/readme.txt b/docs/readme.txt index 5bd14826b..e560d9c77 100644 --- a/docs/readme.txt +++ b/docs/readme.txt @@ -406,6 +406,7 @@ NVIDIA GPUs require "NVIDIA Driver" (440.64 or later) and "CUDA Toolkit" (9.0 or - MultiBit Classic .key (MD5) - MultiBit Classic .wallet (scrypt) - MultiBit HD (scrypt) +- Exodus Desktop Wallet (scrypt) ## ## Attack-Modes diff --git a/src/modules/module_28200.c b/src/modules/module_28200.c new file mode 100644 index 000000000..191ffbf20 --- /dev/null +++ b/src/modules/module_28200.c @@ -0,0 +1,594 @@ +/** + * Author......: See docs/credits.txt + * License.....: MIT + */ + +#include +#include "common.h" +#include "types.h" +#include "modules.h" +#include "bitops.h" +#include "convert.h" +#include "shared.h" + +static const u32 ATTACK_EXEC = ATTACK_EXEC_OUTSIDE_KERNEL; +static const u32 DGST_POS0 = 0; +static const u32 DGST_POS1 = 1; +static const u32 DGST_POS2 = 2; +static const u32 DGST_POS3 = 3; +static const u32 DGST_SIZE = DGST_SIZE_4_4; +static const u32 HASH_CATEGORY = HASH_CATEGORY_CRYPTOCURRENCY_WALLET; +static const char *HASH_NAME = "Exodus Desktop Wallet (scrypt)"; +static const u64 KERN_TYPE = 28200; +static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE; +static const u64 OPTS_TYPE = OPTS_TYPE_PT_GENERATE_LE + | OPTS_TYPE_MP_MULTI_DISABLE + | OPTS_TYPE_NATIVE_THREADS + | OPTS_TYPE_LOOP_PREPARE; +static const u32 SALT_TYPE = SALT_TYPE_EMBEDDED; +static const char *ST_PASS = "hashcat"; +static const char *ST_HASH = "EXODUS:16384:8:1:IYkXZgFETRmFp4wQXyP8XMe3LtuOw8wMdLcBVQ+9YWE=:lq0W9ekN5sC0O7Xw:UD4a6mUUhkTbQtGWitXHZUg0pQ4RHI6W/KUyYE95m3k=:ZuNQckXOtr4r21x+DT1zpQ=="; + +u32 module_attack_exec (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return ATTACK_EXEC; } +u32 module_dgst_pos0 (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS0; } +u32 module_dgst_pos1 (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return 
DGST_POS1; } +u32 module_dgst_pos2 (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS2; } +u32 module_dgst_pos3 (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS3; } +u32 module_dgst_size (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_SIZE; } +u32 module_hash_category (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return HASH_CATEGORY; } +const char *module_hash_name (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return HASH_NAME; } +u64 module_kern_type (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return KERN_TYPE; } +u32 module_opti_type (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return OPTI_TYPE; } +u64 module_opts_type (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return OPTS_TYPE; } +u32 module_salt_type (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return SALT_TYPE; } +const char *module_st_hash (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return ST_HASH; } +const char *module_st_pass (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return ST_PASS; } + +typedef struct exodus +{ + u32 iv[4]; + u32 data[8]; + u32 tag[4]; + +} exodus_t; + +static const char *SIGNATURE_EXODUS = "EXODUS"; + +static const u64 SCRYPT_N = 16384; +static const u64 SCRYPT_R = 8; +static const u64 SCRYPT_P = 1; + +u32 module_kernel_loops_min (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) +{ + const u32 kernel_loops_min = 1024; + + return kernel_loops_min; +} + +u32 module_kernel_loops_max (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) +{ + const u32 kernel_loops_max = 1024; + + return kernel_loops_max; +} + +u32 module_pw_min (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) +{ + const u32 pw_min = 4; + + return pw_min; +} + +u64 module_esalt_size (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) +{ + const u64 esalt_size = (const u64) sizeof (exodus_t); + + return esalt_size; +} + +u64 module_extra_buffer_size (MAYBE_UNUSED const hashconfig_t 
*hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra, MAYBE_UNUSED const hashes_t *hashes, MAYBE_UNUSED const hc_device_param_t *device_param) +{ + // we need to set the self-test hash settings to pass the self-test + // the decoder for the self-test is called after this function + + const u64 scrypt_N = (hashes->salts_buf[0].scrypt_N) ? hashes->salts_buf[0].scrypt_N : SCRYPT_N; + const u64 scrypt_r = (hashes->salts_buf[0].scrypt_r) ? hashes->salts_buf[0].scrypt_r : SCRYPT_R; + + const u64 kernel_power_max = ((OPTS_TYPE & OPTS_TYPE_MP_MULTI_DISABLE) ? 1 : device_param->device_processors) * device_param->kernel_threads_max * device_param->kernel_accel_max; + + u64 tmto_start = 0; + u64 tmto_stop = 4; + + if (user_options->scrypt_tmto_chgd == true) + { + tmto_start = user_options->scrypt_tmto; + tmto_stop = user_options->scrypt_tmto; + } + + // size_pws + + const u64 size_pws = kernel_power_max * sizeof (pw_t); + + const u64 size_pws_amp = size_pws; + + // size_pws_comp + + const u64 size_pws_comp = kernel_power_max * (sizeof (u32) * 64); + + // size_pws_idx + + const u64 size_pws_idx = (kernel_power_max + 1) * sizeof (pw_idx_t); + + // size_tmps + + const u64 size_tmps = kernel_power_max * hashconfig->tmp_size; + + // size_hooks + + const u64 size_hooks = kernel_power_max * hashconfig->hook_size; + + u64 size_pws_pre = 4; + u64 size_pws_base = 4; + + if (user_options->slow_candidates == true) + { + // size_pws_pre + + size_pws_pre = kernel_power_max * sizeof (pw_pre_t); + + // size_pws_base + + size_pws_base = kernel_power_max * sizeof (pw_pre_t); + } + + // sometimes device_available_mem and device_maxmem_alloc reported back from the opencl runtime are a bit inaccurate. + // let's add some extra space just to be sure. 
+ // now depends on the kernel-accel value (where scrypt and similar benefits), but also hard minimum 64mb and maximum 1024mb limit + + u64 EXTRA_SPACE = (1024ULL * 1024ULL) * device_param->kernel_accel_max; + + EXTRA_SPACE = MAX (EXTRA_SPACE, ( 64ULL * 1024ULL * 1024ULL)); + EXTRA_SPACE = MIN (EXTRA_SPACE, (1024ULL * 1024ULL * 1024ULL)); + + const u64 scrypt_extra_space + = device_param->size_bfs + + device_param->size_combs + + device_param->size_digests + + device_param->size_esalts + + device_param->size_markov_css + + device_param->size_plains + + device_param->size_results + + device_param->size_root_css + + device_param->size_rules + + device_param->size_rules_c + + device_param->size_salts + + device_param->size_shown + + device_param->size_tm + + device_param->size_st_digests + + device_param->size_st_salts + + device_param->size_st_esalts + + size_pws + + size_pws_amp + + size_pws_comp + + size_pws_idx + + size_tmps + + size_hooks + + size_pws_pre + + size_pws_base + + EXTRA_SPACE; + + bool not_enough_memory = true; + + u64 size_scrypt = 0; + + u64 tmto; + + for (tmto = tmto_start; tmto <= tmto_stop; tmto++) + { + size_scrypt = (128ULL * scrypt_r) * scrypt_N; + + size_scrypt /= 1ull << tmto; + + size_scrypt *= kernel_power_max; + + if ((size_scrypt / 4) > device_param->device_maxmem_alloc) continue; + + if ((size_scrypt + scrypt_extra_space) > device_param->device_available_mem) continue; + + not_enough_memory = false; + + break; + } + + if (not_enough_memory == true) return -1; + + return size_scrypt; +} + +u64 module_tmp_size (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) +{ + const u64 tmp_size = 0; // we'll add some later + + return tmp_size; +} + +u64 module_extra_tmp_size (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra, MAYBE_UNUSED const hashes_t *hashes) +{ + const u64 scrypt_N = (hashes->salts_buf[0].scrypt_N) ? hashes->salts_buf[0].scrypt_N : SCRYPT_N; + const u64 scrypt_r = (hashes->salts_buf[0].scrypt_r) ? hashes->salts_buf[0].scrypt_r : SCRYPT_R; + const u64 scrypt_p = (hashes->salts_buf[0].scrypt_p) ? hashes->salts_buf[0].scrypt_p : SCRYPT_P; + + // we need to check that all hashes have the same scrypt settings + + for (u32 i = 1; i < hashes->salts_cnt; i++) + { + if ((hashes->salts_buf[i].scrypt_N != scrypt_N) + || (hashes->salts_buf[i].scrypt_r != scrypt_r) + || (hashes->salts_buf[i].scrypt_p != scrypt_p)) + { + return -1; + } + } + + const u64 tmp_size = 128ULL * scrypt_r * scrypt_p; + + return tmp_size; +} + +bool module_warmup_disable (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) +{ + return true; +} + +char *module_jit_build_options (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra, MAYBE_UNUSED const hashes_t *hashes, MAYBE_UNUSED const hc_device_param_t *device_param) +{ + const u64 scrypt_N = (hashes->salts_buf[0].scrypt_N) ? hashes->salts_buf[0].scrypt_N : SCRYPT_N; + const u64 scrypt_r = (hashes->salts_buf[0].scrypt_r) ? hashes->salts_buf[0].scrypt_r : SCRYPT_R; + const u64 scrypt_p = (hashes->salts_buf[0].scrypt_p) ? 
hashes->salts_buf[0].scrypt_p : SCRYPT_P; + + const u64 extra_buffer_size = device_param->extra_buffer_size; + + const u64 kernel_power_max = ((OPTS_TYPE & OPTS_TYPE_MP_MULTI_DISABLE) ? 1 : device_param->device_processors) * device_param->kernel_threads_max * device_param->kernel_accel_max; + + const u64 size_scrypt = 128ULL * scrypt_r * scrypt_N; + + const u64 scrypt_tmto_final = (kernel_power_max * size_scrypt) / extra_buffer_size; + + const u64 tmp_size = 128ULL * scrypt_r * scrypt_p; + + char *jit_build_options = NULL; + + hc_asprintf (&jit_build_options, "-DSCRYPT_N=%u -DSCRYPT_R=%u -DSCRYPT_P=%u -DSCRYPT_TMTO=%" PRIu64 " -DSCRYPT_TMP_ELEM=%" PRIu64, + hashes->salts_buf[0].scrypt_N, + hashes->salts_buf[0].scrypt_r, + hashes->salts_buf[0].scrypt_p, + scrypt_tmto_final, + tmp_size / 16); + + return jit_build_options; +} + +int module_hash_decode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED void *digest_buf, MAYBE_UNUSED salt_t *salt, MAYBE_UNUSED void *esalt_buf, MAYBE_UNUSED void *hook_salt_buf, MAYBE_UNUSED hashinfo_t *hash_info, const char *line_buf, MAYBE_UNUSED const int line_len) +{ + u32 *digest = (u32 *) digest_buf; + + exodus_t *exodus = (exodus_t *) esalt_buf; + + token_t token; + + token.token_cnt = 8; + + token.signatures_cnt = 1; + token.signatures_buf[0] = SIGNATURE_EXODUS; + + token.len_min[0] = 6; + token.len_max[0] = 6; + token.sep[0] = ':'; + token.attr[0] = TOKEN_ATTR_VERIFY_LENGTH + | TOKEN_ATTR_VERIFY_SIGNATURE; + + token.len_min[1] = 1; + token.len_max[1] = 6; + token.sep[1] = ':'; + token.attr[1] = TOKEN_ATTR_VERIFY_LENGTH + | TOKEN_ATTR_VERIFY_DIGIT; + + token.len_min[2] = 1; + token.len_max[2] = 6; + token.sep[2] = ':'; + token.attr[2] = TOKEN_ATTR_VERIFY_LENGTH + | TOKEN_ATTR_VERIFY_DIGIT; + + token.len_min[3] = 1; + token.len_max[3] = 6; + token.sep[3] = ':'; + token.attr[3] = TOKEN_ATTR_VERIFY_LENGTH + | TOKEN_ATTR_VERIFY_DIGIT; + + token.len_min[4] = 44; + token.len_max[4] = 44; + token.sep[4] = ':'; + token.attr[4] = TOKEN_ATTR_VERIFY_LENGTH + | TOKEN_ATTR_VERIFY_BASE64A; + + token.len_min[5] = 16; + token.len_max[5] = 16; + token.sep[5] = ':'; + token.attr[5] = TOKEN_ATTR_VERIFY_LENGTH + | TOKEN_ATTR_VERIFY_BASE64A; + + token.len_min[6] = 44; + token.len_max[6] = 44; + token.sep[6] = ':'; + token.attr[6] = TOKEN_ATTR_VERIFY_LENGTH + | TOKEN_ATTR_VERIFY_BASE64A; + + token.len_min[7] = 24; + token.len_max[7] = 24; + token.sep[7] = ':'; + token.attr[7] = TOKEN_ATTR_VERIFY_LENGTH + | TOKEN_ATTR_VERIFY_BASE64A; + + const int rc_tokenizer = input_tokenizer ((const u8 *) line_buf, line_len, &token); + + if (rc_tokenizer != PARSER_OK) return (rc_tokenizer); + + u8 tmp_buf[512]; + int tmp_len; + + // scrypt settings + + const u8 *N_pos = token.buf[1]; + const u8 *r_pos = token.buf[2]; + const u8 *p_pos = token.buf[3]; + + salt->scrypt_N = hc_strtoul ((const char *) N_pos, NULL, 10); + salt->scrypt_r = hc_strtoul ((const char *) r_pos, NULL, 10); + salt->scrypt_p = hc_strtoul ((const char *) p_pos, NULL, 10); + + salt->salt_iter = salt->scrypt_N; + salt->salt_repeats = salt->scrypt_p - 1; + + // salt + + const u8 *salt_pos = token.buf[4]; + const int salt_len = token.len[4]; + + memset (tmp_buf, 0, sizeof (tmp_buf)); + + tmp_len = base64_decode (base64_to_int, (const u8 *) salt_pos, salt_len, tmp_buf); + + memcpy (salt->salt_buf, tmp_buf, tmp_len); + + salt->salt_len = tmp_len; + + // iv + + const u8 *iv_pos = token.buf[5]; + const int iv_len = token.len[5]; + + memset (tmp_buf, 0, sizeof (tmp_buf)); + + tmp_len = base64_decode (base64_to_int, (const 
u8 *) iv_pos, iv_len, tmp_buf); + + memcpy (exodus->iv, tmp_buf, tmp_len); + + for (int i = 0; i < 4; i++) exodus->iv[i] = byte_swap_32 (exodus->iv[i]); + + // data + + const u8 *data_pos = token.buf[6]; + const int data_len = token.len[6]; + + memset (tmp_buf, 0, sizeof (tmp_buf)); + + tmp_len = base64_decode (base64_to_int, (const u8 *) data_pos, data_len, tmp_buf); + + memcpy (exodus->data, tmp_buf, tmp_len); + + for (int i = 0; i < 8; i++) exodus->data[i] = byte_swap_32 (exodus->data[i]); + + // tag + + const u8 *tag_pos = token.buf[7]; + const int tag_len = token.len[7]; + + memset (tmp_buf, 0, sizeof (tmp_buf)); + + tmp_len = base64_decode (base64_to_int, (const u8 *) tag_pos, tag_len, tmp_buf); + + memcpy (exodus->tag, tmp_buf, tmp_len); + + for (int i = 0; i < 4; i++) exodus->tag[i] = byte_swap_32 (exodus->tag[i]); + + // digest + + digest[0] = exodus->tag[0]; + digest[1] = exodus->tag[1]; + digest[2] = exodus->tag[2]; + digest[3] = exodus->tag[3]; + + return (PARSER_OK); +} + +int module_hash_encode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const void *digest_buf, MAYBE_UNUSED const salt_t *salt, MAYBE_UNUSED const void *esalt_buf, MAYBE_UNUSED const void *hook_salt_buf, MAYBE_UNUSED const hashinfo_t *hash_info, char *line_buf, MAYBE_UNUSED const int line_size) +{ + //const u32 *digest = (const u32 *) digest_buf; + + const exodus_t *exodus = (const exodus_t *) esalt_buf; + + // salt + + char base64_salt[64]; + + int base64_salt_len = base64_encode (int_to_base64, (const u8 *) salt->salt_buf, salt->salt_len, (u8 *) base64_salt); + + base64_salt[base64_salt_len] = 0; + + // iv + + u32 tmp_iv[4] = { 0 }; + + for (int i = 0; i < 4; i++) tmp_iv[i] = byte_swap_32 (exodus->iv[i]); + + char base64_iv[64]; + + int base64_iv_len = base64_encode (int_to_base64, (const u8 *) tmp_iv, 12, (u8 *) base64_iv); + + base64_iv[base64_iv_len] = 0; + + // data + + u32 tmp_data[8] = { 0 }; + + for (int i = 0; i < 8; i++) tmp_data[i] = byte_swap_32 (exodus->data[i]); + + char base64_data[64]; + + int base64_data_len = base64_encode (int_to_base64, (const u8 *) tmp_data, 32, (u8 *) base64_data); + + base64_data[base64_data_len] = 0; + + // tag + + u32 tmp_tag[4] = { 0 }; + + for (int i = 0; i < 4; i++) tmp_tag[i] = byte_swap_32 (exodus->tag[i]); + + char base64_tag[64]; + + int base64_tag_len = base64_encode (int_to_base64, (const u8 *) tmp_tag, 16, (u8 *) base64_tag); + + base64_tag[base64_tag_len] = 0; + + // output + + const int line_len = snprintf (line_buf, line_size, "%s:%u:%u:%u:%s:%s:%s:%s", + SIGNATURE_EXODUS, + salt->scrypt_N, + salt->scrypt_r, + salt->scrypt_p, + base64_salt, + base64_iv, + base64_data, + base64_tag); + + return line_len; +} + +/* + +Find the right -n value for your GPU: +===================================== + +1. For example, to find the value for 28200, first create a valid hash for 28200 as follows: + +$ ./hashcat --example-hashes -m 28200 | grep Example.Hash | grep -v Format | cut -b 25- > tmp.hash.28200 + +2. Now let it iterate through all -n values to a certain point. In this case, I'm using 200, but in general it's a value that is at least twice that of the multiprocessor. If you don't mind you can just leave it as it is, it just runs a little longer. 
+
+$ export i=1; while [ $i -ne 201 ]; do echo $i; ./hashcat --quiet tmp.hash.28200 --keep-guessing --self-test-disable --markov-disable --restore-disable --outfile-autohex-disable --wordlist-autohex-disable --potfile-disable --logfile-disable --hwmon-disable --status --status-timer 1 --runtime 28 --machine-readable --optimized-kernel-enable --workload-profile 3 --hash-type 28200 --attack-mode 3 ?b?b?b?b?b?b?b --backend-devices 1 --force -n $i; i=$(($i+1)); done | tee x
+
+3. Determine the highest measured H/s speed. Do not simply take the single highest value; instead, use the speed that appears most stable, which is usually found near the beginning of the output.
+
+$ grep "$(printf 'STATUS\t3')" x | cut -f4 -d$'\t' | sort -n | tail
+
+4. To map the speed you have chosen back to the correct -n value, search for that speed in the 'x' file. Then scroll up a little from the block in which you found it: the -n value is the single number printed just before that block starts. If multiple blocks show the same speed, choose the lowest -n value.
+
+*/
+
+const char *module_extra_tuningdb_block (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra)
+{
+  const char *extra_tuningdb_block =
+    "DEVICE_TYPE_CPU * 28200 1 N A\n"
+    "DEVICE_TYPE_GPU * 28200 1 N A\n"
+    "GeForce_GTX_980 * 28200 1 16 A\n"
+    "GeForce_GTX_1080 * 28200 1 45 A\n"
+    "GeForce_RTX_2080_Ti * 28200 1 68 A\n"
+    "GeForce_RTX_3080 * 28200 1 68 A\n"
+    "GeForce_RTX_3090 * 28200 1 82 A\n"
+    "ALIAS_AMD_Vega64 * 28200 1 60 A\n"
+    "ALIAS_AMD_RX6900XT * 28200 1 60 A\n"
+    ;
+
+  return extra_tuningdb_block;
+}
+
+void module_init (module_ctx_t *module_ctx)
+{
+  module_ctx->module_context_size = MODULE_CONTEXT_SIZE_CURRENT;
+  module_ctx->module_interface_version = MODULE_INTERFACE_VERSION_CURRENT;
+
+  module_ctx->module_attack_exec = module_attack_exec;
+  module_ctx->module_benchmark_esalt = MODULE_DEFAULT;
+  module_ctx->module_benchmark_hook_salt = MODULE_DEFAULT;
+  module_ctx->module_benchmark_mask = MODULE_DEFAULT;
+  module_ctx->module_benchmark_salt = MODULE_DEFAULT;
+  module_ctx->module_build_plain_postprocess = MODULE_DEFAULT;
+  module_ctx->module_deep_comp_kernel = MODULE_DEFAULT;
+  module_ctx->module_deprecated_notice = MODULE_DEFAULT;
+  module_ctx->module_dgst_pos0 = module_dgst_pos0;
+  module_ctx->module_dgst_pos1 = module_dgst_pos1;
+  module_ctx->module_dgst_pos2 = module_dgst_pos2;
+  module_ctx->module_dgst_pos3 = module_dgst_pos3;
+  module_ctx->module_dgst_size = module_dgst_size;
+  module_ctx->module_dictstat_disable = MODULE_DEFAULT;
+  module_ctx->module_esalt_size = module_esalt_size;
+  module_ctx->module_extra_buffer_size = module_extra_buffer_size;
+  module_ctx->module_extra_tmp_size = module_extra_tmp_size;
+  module_ctx->module_extra_tuningdb_block = module_extra_tuningdb_block;
+  module_ctx->module_forced_outfile_format = MODULE_DEFAULT;
+  module_ctx->module_hash_binary_count = MODULE_DEFAULT;
+  module_ctx->module_hash_binary_parse = MODULE_DEFAULT;
+  module_ctx->module_hash_binary_save = MODULE_DEFAULT;
+  module_ctx->module_hash_decode_postprocess = MODULE_DEFAULT;
+  module_ctx->module_hash_decode_potfile = MODULE_DEFAULT;
+  module_ctx->module_hash_decode_zero_hash = MODULE_DEFAULT;
+  module_ctx->module_hash_decode = module_hash_decode;
+  module_ctx->module_hash_encode_status = MODULE_DEFAULT;
+  module_ctx->module_hash_encode_potfile = MODULE_DEFAULT;
+  module_ctx->module_hash_encode = module_hash_encode;
+  module_ctx->module_hash_init_selftest = MODULE_DEFAULT;
+  module_ctx->module_hash_mode = MODULE_DEFAULT;
+  module_ctx->module_hash_category = module_hash_category;
+  module_ctx->module_hash_name = module_hash_name;
+  module_ctx->module_hashes_count_min = MODULE_DEFAULT;
+  module_ctx->module_hashes_count_max = MODULE_DEFAULT;
+  module_ctx->module_hlfmt_disable = MODULE_DEFAULT;
+  module_ctx->module_hook_extra_param_size = MODULE_DEFAULT;
+  module_ctx->module_hook_extra_param_init = MODULE_DEFAULT;
+  module_ctx->module_hook_extra_param_term = MODULE_DEFAULT;
+  module_ctx->module_hook12 = MODULE_DEFAULT;
+  module_ctx->module_hook23 = MODULE_DEFAULT;
+  module_ctx->module_hook_salt_size = MODULE_DEFAULT;
+  module_ctx->module_hook_size = MODULE_DEFAULT;
+  module_ctx->module_jit_build_options = module_jit_build_options;
+  module_ctx->module_jit_cache_disable = MODULE_DEFAULT;
+  module_ctx->module_kernel_accel_max = MODULE_DEFAULT;
+  module_ctx->module_kernel_accel_min = MODULE_DEFAULT;
+  module_ctx->module_kernel_loops_max = module_kernel_loops_max;
+  module_ctx->module_kernel_loops_min = module_kernel_loops_min;
+  module_ctx->module_kernel_threads_max = MODULE_DEFAULT;
+  module_ctx->module_kernel_threads_min = MODULE_DEFAULT;
+  module_ctx->module_kern_type = module_kern_type;
+  module_ctx->module_kern_type_dynamic = MODULE_DEFAULT;
+  module_ctx->module_opti_type = module_opti_type;
+  module_ctx->module_opts_type = module_opts_type;
+  module_ctx->module_outfile_check_disable = MODULE_DEFAULT;
+  module_ctx->module_outfile_check_nocomp = MODULE_DEFAULT;
+  module_ctx->module_potfile_custom_check = MODULE_DEFAULT;
+  module_ctx->module_potfile_disable = MODULE_DEFAULT;
+  module_ctx->module_potfile_keep_all_hashes = MODULE_DEFAULT;
+  module_ctx->module_pwdump_column = MODULE_DEFAULT;
+  module_ctx->module_pw_max = MODULE_DEFAULT;
+  module_ctx->module_pw_min = module_pw_min;
+  module_ctx->module_salt_max = MODULE_DEFAULT;
+  module_ctx->module_salt_min = MODULE_DEFAULT;
+  module_ctx->module_salt_type = module_salt_type;
+  module_ctx->module_separator = MODULE_DEFAULT;
+  module_ctx->module_st_hash = module_st_hash;
+  module_ctx->module_st_pass = module_st_pass;
+  module_ctx->module_tmp_size = module_tmp_size;
+  module_ctx->module_unstable_warning = MODULE_DEFAULT;
+  module_ctx->module_warmup_disable = module_warmup_disable;
+}
diff --git a/tools/exodus2hashcat.py b/tools/exodus2hashcat.py
new file mode 100755
index 000000000..9fdc695b7
--- /dev/null
+++ b/tools/exodus2hashcat.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python3
+
+# Author......: See docs/credits.txt
+# License.....: MIT
+# Target......: Exodus wallet extractor
+# Example.....: exodus2hashcat.py <path to seed.seco>
+
+import binascii
+import sys
+import hashlib
+from Crypto.Cipher import AES
+import base64
+import os.path
+
+METADATA_LEN = 256
+HEADER_LEN = 224
+CRC_LEN = 32
+LEN_BLOB_STORED = 4
+
+if len(sys.argv) != 2:
+    print("Error, usage exodus2hashcat.py <path to seed.seco>")
+    sys.exit(1)
+
+if os.path.basename(sys.argv[1]) != 'seed.seco':
+    print("Error, usage exodus2hashcat.py <path to seed.seco>")
+    sys.exit(1)
+
+with open(sys.argv[1], 'rb') as fd:
+    seedBuffer = fd.read()
+
+# Basic check
+if not seedBuffer[0:4].decode("utf8").startswith("SECO"):
+    print("Not a SECO (Exodus) header magic")
+    sys.exit(1)
+
+salt = seedBuffer[0x100:0x120]
+
+n = int.from_bytes(seedBuffer[0x120:0x124], "big")
+r = int.from_bytes(seedBuffer[0x124:0x128], "big")
+p = int.from_bytes(seedBuffer[0x128:0x12c], "big")
+
+# Basic check
+if n != 16384 or r != 8 or p != 1:
+    print("Warning, unexpected scrypt N, r, p values")
+
+if os.path.getsize(sys.argv[1]) != METADATA_LEN + HEADER_LEN + CRC_LEN + LEN_BLOB_STORED + int.from_bytes(seedBuffer[0x200:0x204], "big"):
+    print(os.path.getsize(sys.argv[1]))
+    print(METADATA_LEN + HEADER_LEN + CRC_LEN + LEN_BLOB_STORED + int.from_bytes(seedBuffer[0x200:0x204], "big"))
+    print("Error, file size does not match the stored blob length")
+    sys.exit(1)
+
+# Check integrity
+m = hashlib.sha256()
+m.update(seedBuffer[HEADER_LEN+CRC_LEN:])
+if m.digest() != seedBuffer[HEADER_LEN:HEADER_LEN+CRC_LEN]:
+    print("SECO file seems corrupted")
+    sys.exit(1)
+
+# Check aes-gcm string
+cipher = seedBuffer[0x12c:0x138]
+if binascii.hexlify(cipher) != b"6165732d3235362d67636d00":
+    print("Error, cipher is not aes-256-gcm")
+    sys.exit(1)
+
+iv = seedBuffer[0x14c:0x158]
+authTag = seedBuffer[0x158:0x168]
+key = seedBuffer[0x168:0x188]
+
+print("EXODUS:"+str(n)+":"+str(r)+":"+str(p)+":"+base64.b64encode(salt).decode("utf8")+":"+base64.b64encode(iv).decode("utf8")+":"+base64.b64encode(key).decode("utf8")+":"+base64.b64encode(authTag).decode("utf8"))
diff --git a/tools/test_modules/m28200.pm b/tools/test_modules/m28200.pm
new file mode 100644
index 000000000..5732cdbd9
--- /dev/null
+++ b/tools/test_modules/m28200.pm
@@ -0,0 +1,111 @@
+#!/usr/bin/env perl
+
+##
+## Author......: See docs/credits.txt
+## License.....: MIT
+##
+
+use strict;
+use warnings;
+
+use Crypt::ScryptKDF qw (scrypt_hash scrypt_raw);
+use Crypt::CBC;
+use MIME::Base64 qw (decode_base64 encode_base64);
+use Digest::SHA qw (sha512);
+use Crypt::AuthEnc::GCM;
+
+sub module_constraints { [[0, 256], [64, 64], [-1, -1], [-1, -1], [-1, -1]] }
+
+sub module_generate_hash
+{
+  my $word = shift;
+  my $salt = shift;
+  my $scrypt_n = shift // 16384;
+  my $scrypt_r = shift // 8;
+  my $scrypt_p = shift // 1;
+  my $iv = shift // random_hex_string (24);
+  my $data = shift;
+  my $tag = shift;
+
+  my $salt_bin = pack ("H*", $salt);
+
+  my $key_bin = scrypt_raw ($word, $salt_bin, $scrypt_n, $scrypt_r, $scrypt_p, 32);
+
+  my $iv_bin = pack ("H*", $iv);
+
+  my $pt;
+
+  if (defined $data)
+  {
+    my $data_bin = pack ("H*", $data);
+
+    my $aes = Crypt::AuthEnc::GCM->new ("AES", $key_bin, $iv_bin);
+
+    $pt = $aes->decrypt_add ($data_bin);
+
+    my $tag_bin = pack ("H*", $tag);
+
+    my $result_tag = $aes->decrypt_done ($tag_bin);
+
+    if ($result_tag == 1)
+    {
+      ## correct password
+    }
+    else
+    {
+      $pt = random_bytes (32);
+    }
+  }
+  else
+  {
+    $pt = random_bytes (32);
+  }
+
+  my $aes = Crypt::AuthEnc::GCM->new ("AES", $key_bin, $iv_bin);
+
+  my $ct_bin = $aes->encrypt_add ($pt);
+
+  my $tag_bin = $aes->encrypt_done ();
+
+  my $hash = sprintf ('EXODUS:%u:%u:%u:%s:%s:%s:%s', $scrypt_n, $scrypt_r, $scrypt_p, encode_base64 ($salt_bin, ""), encode_base64 ($iv_bin, ""), encode_base64 ($ct_bin, ""), encode_base64 ($tag_bin, ""));
+
+  return $hash;
+}
+
+sub module_verify_hash
+{
+  my $line = shift;
+
+  my $idx = rindex ($line, ':');
+
+  return unless $idx >= 0;
+
+  my $hash = substr ($line, 0, $idx);
+  my $word = substr ($line, $idx + 1);
+
+  return unless substr ($hash, 0, 6) eq 'EXODUS';
+
+  my ($signature, $scrypt_n, $scrypt_r, $scrypt_p, $salt, $iv, $data, $tag) = split ':', $hash;
+
+  return unless defined $signature;
+  return unless defined $scrypt_n;
+  return unless defined $scrypt_r;
+  return unless defined $scrypt_p;
+  return unless defined $salt;
+  return unless defined $iv;
+  return unless defined $data;
+  return unless defined $tag;
+
+  $salt = decode_base64 ($salt);
+  $iv = decode_base64 ($iv);
+  $data = decode_base64 ($data);
+  $tag = decode_base64 ($tag);
+
+  my $word_packed = pack_if_HEX_notation ($word);
+
+  my $new_hash = module_generate_hash ($word_packed, unpack ("H*", $salt), $scrypt_n, $scrypt_r, $scrypt_p, unpack ("H*", $iv), unpack ("H*", $data), unpack ("H*", $tag));
+
+  return ($new_hash, $word);
+}
+
+1;
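For reference, a minimal Python sketch of the check the new mode performs: scrypt derives a 32-byte key from the password and salt, and that key must verify the AES-256-GCM tag over the 32-byte data block. It mirrors tools/test_modules/m28200.pm, assumes Python 3's hashlib.scrypt plus the pycryptodome AES API already imported by tools/exodus2hashcat.py, and is an illustration only, not part of the patch itself.

#!/usr/bin/env python3

# Verify one EXODUS:... hash line (as emitted by exodus2hashcat.py) against a candidate password.
# Assumes pycryptodome for AES-GCM; the scrypt parameters come straight from the hash line.

import base64
import hashlib

from Crypto.Cipher import AES

def verify_exodus_hash(hash_line, password):
    sig, n, r, p, salt, iv, data, tag = hash_line.split(":")

    if sig != "EXODUS":
        raise ValueError("not an EXODUS hash line")

    salt = base64.b64decode(salt)
    iv   = base64.b64decode(iv)    # 12-byte GCM nonce
    data = base64.b64decode(data)  # 32-byte encrypted blob
    tag  = base64.b64decode(tag)   # 16-byte GCM tag

    # the scrypt output is used directly as the AES-256-GCM key
    key = hashlib.scrypt(password.encode(), salt=salt, n=int(n), r=int(r), p=int(p),
                         maxmem=2 ** 27, dklen=32)

    cipher = AES.new(key, AES.MODE_GCM, nonce=iv)

    try:
        cipher.decrypt_and_verify(data, tag)  # raises ValueError on tag mismatch
        return True
    except ValueError:
        return False

if __name__ == "__main__":
    # self-test vector from module_28200.c (ST_HASH / ST_PASS)
    st_hash = "EXODUS:16384:8:1:IYkXZgFETRmFp4wQXyP8XMe3LtuOw8wMdLcBVQ+9YWE=:lq0W9ekN5sC0O7Xw:UD4a6mUUhkTbQtGWitXHZUg0pQ4RHI6W/KUyYE95m3k=:ZuNQckXOtr4r21x+DT1zpQ=="
    print(verify_exodus_hash(st_hash, "hashcat"))  # expected: True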