diff --git a/OpenCL/m12200.cl b/OpenCL/m12200.cl
index 919f0220a..87290c84f 100644
--- a/OpenCL/m12200.cl
+++ b/OpenCL/m12200.cl
@@ -3,127 +3,19 @@
  * License.....: MIT
  */
 
+#define NEW_SIMD_CODE
+
 #include "inc_vendor.cl"
 #include "inc_hash_constants.h"
 #include "inc_hash_functions.cl"
 #include "inc_types.cl"
 #include "inc_common.cl"
+#include "inc_simd.cl"
+#include "inc_hash_sha512.cl"
 
 #define COMPARE_S "inc_comp_single.cl"
 #define COMPARE_M "inc_comp_multi.cl"
 
-__constant u64a k_sha512[80] =
-{
-  SHA512C00, SHA512C01, SHA512C02, SHA512C03,
-  SHA512C04, SHA512C05, SHA512C06, SHA512C07,
-  SHA512C08, SHA512C09, SHA512C0a, SHA512C0b,
-  SHA512C0c, SHA512C0d, SHA512C0e, SHA512C0f,
-  SHA512C10, SHA512C11, SHA512C12, SHA512C13,
-  SHA512C14, SHA512C15, SHA512C16, SHA512C17,
-  SHA512C18, SHA512C19, SHA512C1a, SHA512C1b,
-  SHA512C1c, SHA512C1d, SHA512C1e, SHA512C1f,
-  SHA512C20, SHA512C21, SHA512C22, SHA512C23,
-  SHA512C24, SHA512C25, SHA512C26, SHA512C27,
-  SHA512C28, SHA512C29, SHA512C2a, SHA512C2b,
-  SHA512C2c, SHA512C2d, SHA512C2e, SHA512C2f,
-  SHA512C30, SHA512C31, SHA512C32, SHA512C33,
-  SHA512C34, SHA512C35, SHA512C36, SHA512C37,
-  SHA512C38, SHA512C39, SHA512C3a, SHA512C3b,
-  SHA512C3c, SHA512C3d, SHA512C3e, SHA512C3f,
-  SHA512C40, SHA512C41, SHA512C42, SHA512C43,
-  SHA512C44, SHA512C45, SHA512C46, SHA512C47,
-  SHA512C48, SHA512C49, SHA512C4a, SHA512C4b,
-  SHA512C4c, SHA512C4d, SHA512C4e, SHA512C4f,
-};
-
-void sha512_transform (const u64 w[16], u64 dgst[8])
-{
-  u64 a = dgst[0];
-  u64 b = dgst[1];
-  u64 c = dgst[2];
-  u64 d = dgst[3];
-  u64 e = dgst[4];
-  u64 f = dgst[5];
-  u64 g = dgst[6];
-  u64 h = dgst[7];
-
-  u64 w0_t = w[ 0];
-  u64 w1_t = w[ 1];
-  u64 w2_t = w[ 2];
-  u64 w3_t = w[ 3];
-  u64 w4_t = w[ 4];
-  u64 w5_t = w[ 5];
-  u64 w6_t = w[ 6];
-  u64 w7_t = w[ 7];
-  u64 w8_t = w[ 8];
-  u64 w9_t = w[ 9];
-  u64 wa_t = w[10];
-  u64 wb_t = w[11];
-  u64 wc_t = w[12];
-  u64 wd_t = w[13];
-  u64 we_t = w[14];
-  u64 wf_t = w[15];
-
-  #define ROUND_EXPAND() \
-  { \
-    w0_t = SHA512_EXPAND (we_t, w9_t, w1_t, w0_t); \
-    w1_t = SHA512_EXPAND (wf_t, wa_t, w2_t, w1_t); \
-    w2_t = SHA512_EXPAND (w0_t, wb_t, w3_t, w2_t); \
-    w3_t = SHA512_EXPAND (w1_t, wc_t, w4_t, w3_t); \
-    w4_t = SHA512_EXPAND (w2_t, wd_t, w5_t, w4_t); \
-    w5_t = SHA512_EXPAND (w3_t, we_t, w6_t, w5_t); \
-    w6_t = SHA512_EXPAND (w4_t, wf_t, w7_t, w6_t); \
-    w7_t = SHA512_EXPAND (w5_t, w0_t, w8_t, w7_t); \
-    w8_t = SHA512_EXPAND (w6_t, w1_t, w9_t, w8_t); \
-    w9_t = SHA512_EXPAND (w7_t, w2_t, wa_t, w9_t); \
-    wa_t = SHA512_EXPAND (w8_t, w3_t, wb_t, wa_t); \
-    wb_t = SHA512_EXPAND (w9_t, w4_t, wc_t, wb_t); \
-    wc_t = SHA512_EXPAND (wa_t, w5_t, wd_t, wc_t); \
-    wd_t = SHA512_EXPAND (wb_t, w6_t, we_t, wd_t); \
-    we_t = SHA512_EXPAND (wc_t, w7_t, wf_t, we_t); \
-    wf_t = SHA512_EXPAND (wd_t, w8_t, w0_t, wf_t); \
-  }
-
-  #define ROUND_STEP(i) \
-  { \
-    SHA512_STEP (SHA512_F0o, SHA512_F1o, a, b, c, d, e, f, g, h, w0_t, k_sha512[i + 0]); \
-    SHA512_STEP (SHA512_F0o, SHA512_F1o, h, a, b, c, d, e, f, g, w1_t, k_sha512[i + 1]); \
-    SHA512_STEP (SHA512_F0o, SHA512_F1o, g, h, a, b, c, d, e, f, w2_t, k_sha512[i + 2]); \
-    SHA512_STEP (SHA512_F0o, SHA512_F1o, f, g, h, a, b, c, d, e, w3_t, k_sha512[i + 3]); \
-    SHA512_STEP (SHA512_F0o, SHA512_F1o, e, f, g, h, a, b, c, d, w4_t, k_sha512[i + 4]); \
-    SHA512_STEP (SHA512_F0o, SHA512_F1o, d, e, f, g, h, a, b, c, w5_t, k_sha512[i + 5]); \
-    SHA512_STEP (SHA512_F0o, SHA512_F1o, c, d, e, f, g, h, a, b, w6_t, k_sha512[i + 6]); \
-    SHA512_STEP (SHA512_F0o, SHA512_F1o, b, c, d, e, f, g, h, a, w7_t, k_sha512[i + 7]); \
-    SHA512_STEP (SHA512_F0o, SHA512_F1o, a, b, c, d, e, f, g, h, w8_t, k_sha512[i + 8]); \
-    SHA512_STEP (SHA512_F0o, SHA512_F1o, h, a, b, c, d, e, f, g, w9_t, k_sha512[i + 9]); \
-    SHA512_STEP (SHA512_F0o, SHA512_F1o, g, h, a, b, c, d, e, f, wa_t, k_sha512[i + 10]); \
-    SHA512_STEP (SHA512_F0o, SHA512_F1o, f, g, h, a, b, c, d, e, wb_t, k_sha512[i + 11]); \
-    SHA512_STEP (SHA512_F0o, SHA512_F1o, e, f, g, h, a, b, c, d, wc_t, k_sha512[i + 12]); \
-    SHA512_STEP (SHA512_F0o, SHA512_F1o, d, e, f, g, h, a, b, c, wd_t, k_sha512[i + 13]); \
-    SHA512_STEP (SHA512_F0o, SHA512_F1o, c, d, e, f, g, h, a, b, we_t, k_sha512[i + 14]); \
-    SHA512_STEP (SHA512_F0o, SHA512_F1o, b, c, d, e, f, g, h, a, wf_t, k_sha512[i + 15]); \
-  }
-
-  ROUND_STEP (0);
-
-  #ifdef _unroll
-  #pragma unroll
-  #endif
-  for (int i = 16; i < 80; i += 16)
-  {
-    ROUND_EXPAND (); ROUND_STEP (i);
-  }
-
-  dgst[0] += a;
-  dgst[1] += b;
-  dgst[2] += c;
-  dgst[3] += d;
-  dgst[4] += e;
-  dgst[5] += f;
-  dgst[6] += g;
-  dgst[7] += h;
-}
-
 __kernel void m12200_init (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global ecryptfs_tmp_t *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global const void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
 {
   /**
@@ -134,166 +26,133 @@ __kernel void m12200_init (__global pw_t *pws, __global const kernel_rule_t *rul
 
   if (gid >= gid_max) return;
 
-  u32 w0[4];
+  sha512_ctx_t ctx;
 
-  w0[0] = pws[gid].i[ 0];
-  w0[1] = pws[gid].i[ 1];
-  w0[2] = pws[gid].i[ 2];
-  w0[3] = pws[gid].i[ 3];
+  sha512_init (&ctx);
 
-  u32 w1[4];
+  sha512_update_global (&ctx, salt_bufs[salt_pos].salt_buf, salt_bufs[salt_pos].salt_len);
 
-  w1[0] = pws[gid].i[ 4];
-  w1[1] = pws[gid].i[ 5];
-  w1[2] = pws[gid].i[ 6];
-  w1[3] = pws[gid].i[ 7];
+  sha512_update_global_swap (&ctx, pws[gid].i, pws[gid].pw_len);
 
-  u32 w2[4];
+  sha512_final (&ctx);
 
-  w2[0] = pws[gid].i[ 8];
-  w2[1] = pws[gid].i[ 9];
-  w2[2] = pws[gid].i[10];
-  w2[3] = pws[gid].i[11];
-
-  u32 w3[4];
-
-  w3[0] = pws[gid].i[12];
-  w3[1] = pws[gid].i[13];
-  w3[2] = pws[gid].i[14];
-  w3[3] = pws[gid].i[15];
-
-  u32 pw_len = pws[gid].pw_len;
-
-  append_0x80_4x4 (w0, w1, w2, w3, pw_len);
-
-  w0[0] = swap32 (w0[0]);
-  w0[1] = swap32 (w0[1]);
-  w0[2] = swap32 (w0[2]);
-  w0[3] = swap32 (w0[3]);
-  w1[0] = swap32 (w1[0]);
-  w1[1] = swap32 (w1[1]);
-  w1[2] = swap32 (w1[2]);
-  w1[3] = swap32 (w1[3]);
-  w2[0] = swap32 (w2[0]);
-  w2[1] = swap32 (w2[1]);
-  w2[2] = swap32 (w2[2]);
-  w2[3] = swap32 (w2[3]);
-  w3[0] = swap32 (w3[0]);
-  w3[1] = swap32 (w3[1]);
-  w3[2] = swap32 (w3[2]);
-  w3[3] = swap32 (w3[3]);
-
-  /**
-   * salt
-   */
-
-  u32 s0[2];
-
-  s0[0] = salt_bufs[salt_pos].salt_buf[0];
-  s0[1] = salt_bufs[salt_pos].salt_buf[1];
-
-  u32 salt_len = salt_bufs[salt_pos].salt_len;
-
-  u64 w[16];
-
-  w[ 0] = hl32_to_64 (s0[0], s0[1]);
-  w[ 1] = hl32_to_64 (w0[0], w0[1]);
-  w[ 2] = hl32_to_64 (w0[2], w0[3]);
-  w[ 3] = hl32_to_64 (w1[0], w1[1]);
-  w[ 4] = hl32_to_64 (w1[2], w1[3]);
-  w[ 5] = hl32_to_64 (w2[0], w2[1]);
-  w[ 6] = hl32_to_64 (w2[2], w2[3]);
-  w[ 7] = hl32_to_64 (w3[0], w3[1]);
-  w[ 8] = hl32_to_64 (w3[2], w3[3]);
-  w[ 9] = 0;
-  w[10] = 0;
-  w[11] = 0;
-  w[12] = 0;
-  w[13] = 0;
-  w[14] = 0;
-  w[15] = (salt_len + pw_len) * 8;
-
-  u64 dgst[8];
-
-  dgst[0] = SHA512M_A;
-  dgst[1] = SHA512M_B;
-  dgst[2] = SHA512M_C;
-  dgst[3] = SHA512M_D;
-  dgst[4] = SHA512M_E;
-  dgst[5] = SHA512M_F;
-  dgst[6] = SHA512M_G;
-  dgst[7] = SHA512M_H;
-
-  sha512_transform (w, dgst);
-
-  tmps[gid].out[0] = dgst[0];
-  tmps[gid].out[1] = dgst[1];
-  tmps[gid].out[2] = dgst[2];
-  tmps[gid].out[3] = dgst[3];
-  tmps[gid].out[4] = dgst[4];
-  tmps[gid].out[5] = dgst[5];
-  tmps[gid].out[6] = dgst[6];
-  tmps[gid].out[7] = dgst[7];
+  tmps[gid].out[0] = ctx.h[0];
+  tmps[gid].out[1] = ctx.h[1];
+  tmps[gid].out[2] = ctx.h[2];
+  tmps[gid].out[3] = ctx.h[3];
+  tmps[gid].out[4] = ctx.h[4];
+  tmps[gid].out[5] = ctx.h[5];
+  tmps[gid].out[6] = ctx.h[6];
+  tmps[gid].out[7] = ctx.h[7];
 }
 
 __kernel void m12200_loop (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global ecryptfs_tmp_t *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global const void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
 {
   const u32 gid = get_global_id (0);
 
-  if (gid >= gid_max) return;
+  if ((gid * VECT_SIZE) >= gid_max) return;
 
-  u64 dgst[8];
+  u64x t0 = pack64v (tmps, out, gid, 0);
+  u64x t1 = pack64v (tmps, out, gid, 1);
+  u64x t2 = pack64v (tmps, out, gid, 2);
+  u64x t3 = pack64v (tmps, out, gid, 3);
+  u64x t4 = pack64v (tmps, out, gid, 4);
+  u64x t5 = pack64v (tmps, out, gid, 5);
+  u64x t6 = pack64v (tmps, out, gid, 6);
+  u64x t7 = pack64v (tmps, out, gid, 7);
 
-  dgst[0] = tmps[gid].out[0];
-  dgst[1] = tmps[gid].out[1];
-  dgst[2] = tmps[gid].out[2];
-  dgst[3] = tmps[gid].out[3];
-  dgst[4] = tmps[gid].out[4];
-  dgst[5] = tmps[gid].out[5];
-  dgst[6] = tmps[gid].out[6];
-  dgst[7] = tmps[gid].out[7];
+  u32x w0[4];
+  u32x w1[4];
+  u32x w2[4];
+  u32x w3[4];
+  u32x w4[4];
+  u32x w5[4];
+  u32x w6[4];
+  u32x w7[4];
 
-  for (u32 i = 0; i < loop_cnt; i++)
+  w0[0] = 0;
+  w0[1] = 0;
+  w0[2] = 0;
+  w0[3] = 0;
+  w1[0] = 0;
+  w1[1] = 0;
+  w1[2] = 0;
+  w1[3] = 0;
+  w2[0] = 0;
+  w2[1] = 0;
+  w2[2] = 0;
+  w2[3] = 0;
+  w3[0] = 0;
+  w3[1] = 0;
+  w3[2] = 0;
+  w3[3] = 0;
+  w4[0] = 0x80000000;
+  w4[1] = 0;
+  w4[2] = 0;
+  w4[3] = 0;
+  w5[0] = 0;
+  w5[1] = 0;
+  w5[2] = 0;
+  w5[3] = 0;
+  w6[0] = 0;
+  w6[1] = 0;
+  w6[2] = 0;
+  w6[3] = 0;
+  w7[0] = 0;
+  w7[1] = 0;
+  w7[2] = 0;
+  w7[3] = 64 * 8;
+
+  for (u32 i = 0, j = loop_pos; i < loop_cnt; i++, j++)
   {
-    u64 w[16];
+    w0[0] = h32_from_64 (t0);
+    w0[1] = l32_from_64 (t0);
+    w0[2] = h32_from_64 (t1);
+    w0[3] = l32_from_64 (t1);
+    w1[0] = h32_from_64 (t2);
+    w1[1] = l32_from_64 (t2);
+    w1[2] = h32_from_64 (t3);
+    w1[3] = l32_from_64 (t3);
+    w2[0] = h32_from_64 (t4);
+    w2[1] = l32_from_64 (t4);
+    w2[2] = h32_from_64 (t5);
+    w2[3] = l32_from_64 (t5);
+    w3[0] = h32_from_64 (t6);
+    w3[1] = l32_from_64 (t6);
+    w3[2] = h32_from_64 (t7);
+    w3[3] = l32_from_64 (t7);
 
-    w[ 0] = dgst[0];
-    w[ 1] = dgst[1];
-    w[ 2] = dgst[2];
-    w[ 3] = dgst[3];
-    w[ 4] = dgst[4];
-    w[ 5] = dgst[5];
-    w[ 6] = dgst[6];
-    w[ 7] = dgst[7];
-    w[ 8] = 0x8000000000000000;
-    w[ 9] = 0;
-    w[10] = 0;
-    w[11] = 0;
-    w[12] = 0;
-    w[13] = 0;
-    w[14] = 0;
-    w[15] = 64 * 8;
+    u64x digest[8];
 
-    dgst[0] = SHA512M_A;
-    dgst[1] = SHA512M_B;
-    dgst[2] = SHA512M_C;
-    dgst[3] = SHA512M_D;
-    dgst[4] = SHA512M_E;
-    dgst[5] = SHA512M_F;
-    dgst[6] = SHA512M_G;
-    dgst[7] = SHA512M_H;
+    digest[0] = SHA512M_A;
+    digest[1] = SHA512M_B;
+    digest[2] = SHA512M_C;
+    digest[3] = SHA512M_D;
+    digest[4] = SHA512M_E;
+    digest[5] = SHA512M_F;
+    digest[6] = SHA512M_G;
+    digest[7] = SHA512M_H;
 
-    sha512_transform (w, dgst);
+    sha512_transform_vector (w0, w1, w2, w3, w4, w5, w6, w7, digest);
+
+    t0 = digest[0];
+    t1 = digest[1];
+    t2 = digest[2];
+    t3 = digest[3];
+    t4 = digest[4];
+    t5 = digest[5];
+    t6 = digest[6];
+    t7 = digest[7];
   }
 
-  tmps[gid].out[0] = dgst[0];
-  tmps[gid].out[1] = dgst[1];
-  tmps[gid].out[2] = dgst[2];
-  tmps[gid].out[3] = dgst[3];
-  tmps[gid].out[4] = dgst[4];
-  tmps[gid].out[5] = dgst[5];
-  tmps[gid].out[6] = dgst[6];
-  tmps[gid].out[7] = dgst[7];
+  unpack64v (tmps, out, gid, 0, t0);
+  unpack64v (tmps, out, gid, 1, t1);
+  unpack64v (tmps, out, gid, 2, t2);
+  unpack64v (tmps, out, gid, 3, t3);
+  unpack64v (tmps, out, gid, 4, t4);
+  unpack64v (tmps, out, gid, 5, t5);
+  unpack64v (tmps, out, gid, 6, t6);
+  unpack64v (tmps, out, gid, 7, t7);
 }
 
 __kernel void m12200_comp (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global ecryptfs_tmp_t *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global const void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
@@ -310,8 +169,8 @@ __kernel void m12200_comp (__global pw_t *pws, __global const kernel_rule_t *rul
 
   const u64 a = tmps[gid].out[0];
 
-  const u32 r0 = h32_from_64 (a);
-  const u32 r1 = l32_from_64 (a);
+  const u32 r0 = h32_from_64_S (a);
+  const u32 r1 = l32_from_64_S (a);
   const u32 r2 = 0;
   const u32 r3 = 0;
 
diff --git a/src/interface.c b/src/interface.c
index 0c2f1e55b..6197385d8 100644
--- a/src/interface.c
+++ b/src/interface.c
@@ -23389,7 +23389,8 @@ int hashconfig_init (hashcat_ctx_t *hashcat_ctx)
                  hashconfig->dgst_size   = DGST_SIZE_8_8;
                  hashconfig->parse_func  = ecryptfs_parse_hash;
                  hashconfig->opti_type   = OPTI_TYPE_ZERO_BYTE
-                                         | OPTI_TYPE_USES_BITS_64;
+                                         | OPTI_TYPE_USES_BITS_64
+                                         | OPTI_TYPE_SLOW_HASH_SIMD_LOOP;
                  hashconfig->dgst_pos0   = 0;
                  hashconfig->dgst_pos1   = 1;
                  hashconfig->dgst_pos2   = 2;
@@ -24657,6 +24658,8 @@ int hashconfig_init (hashcat_ctx_t *hashcat_ctx)
                  break;
     case 11600:  hashconfig->pw_max = PW_MAX;
                  break;
+    case 12200:  hashconfig->pw_max = PW_MAX;
+                 break;
   }
 
   // pw_max : algo specific hard max length