diff --git a/OpenCL/inc_types.cl b/OpenCL/inc_types.cl
index 69208e795..ae3ab423a 100644
--- a/OpenCL/inc_types.cl
+++ b/OpenCL/inc_types.cl
@@ -1122,10 +1122,11 @@ typedef struct md5crypt_tmp
 
 typedef struct sha256crypt_tmp
 {
-  u32 alt_result[8];
+  // pure version
 
-  u32 p_bytes[4];
-  u32 s_bytes[4];
+  u32 alt_result[8];
+  u32 p_bytes[64];
+  u32 s_bytes[64];
 
 } sha256crypt_tmp_t;
 
diff --git a/OpenCL/m07400-pure.cl b/OpenCL/m07400-pure.cl
new file mode 100644
index 000000000..936add9f0
--- /dev/null
+++ b/OpenCL/m07400-pure.cl
@@ -0,0 +1,365 @@
+/**
+ * Author......: See docs/credits.txt
+ * License.....: MIT
+ */
+
+#include "inc_vendor.cl"
+#include "inc_hash_constants.h"
+#include "inc_hash_functions.cl"
+#include "inc_types.cl"
+#include "inc_common.cl"
+#include "inc_hash_sha256.cl"
+
+#define COMPARE_S "inc_comp_single.cl"
+#define COMPARE_M "inc_comp_multi.cl"
+
+__kernel void m07400_init (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global sha256crypt_tmp_t *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global const void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
+{
+  /**
+   * base
+   */
+
+  const u32 gid = get_global_id (0);
+
+  if (gid >= gid_max) return;
+
+  /**
+   * init
+   */
+
+  const u32 pw_len = pws[gid].pw_len;
+
+  const u32 pw_lenv = ceil ((float) pw_len / 4);
+
+  u32 w[64] = { 0 };
+
+  // copy the password into private memory; w[] is per-work-item so no barrier is needed
+  for (int idx = 0; idx < pw_lenv; idx++)
+  {
+    w[idx] = pws[gid].i[idx];
+  }
+
+  for (int idx = 0; idx < pw_lenv; idx++)
+  {
+    w[idx] = swap32_S (w[idx]);
+  }
+
+  const u32 salt_len = salt_bufs[salt_pos].salt_len;
+
+  const u32 salt_lenv = ceil ((float) salt_len / 4);
+
+  u32 s[64] = { 0 };
+
+  // copy the salt into private memory
+  for (int idx = 0; idx < salt_lenv; idx++)
+  {
+    s[idx] = salt_bufs[salt_pos].salt_buf[idx];
+  }
+
+  for (int idx = 0; idx < salt_lenv; idx++)
+  {
+    s[idx] = swap32_S (s[idx]);
+  }
+
+  /**
+   * prepare
+   */
+
+  sha256_ctx_t ctx;
+
+  sha256_init (&ctx);
+
+  sha256_update (&ctx, w, pw_len);
+
+  sha256_update (&ctx, s, salt_len);
+
+  sha256_update (&ctx, w, pw_len);
+
+  sha256_final (&ctx);
+
+  // digest B = SHA256 (pw . salt . pw)
+  u32 final[16] = { 0 };
+
+  final[0] = ctx.h[0];
+  final[1] = ctx.h[1];
+  final[2] = ctx.h[2];
+  final[3] = ctx.h[3];
+  final[4] = ctx.h[4];
+  final[5] = ctx.h[5];
+  final[6] = ctx.h[6];
+  final[7] = ctx.h[7];
+
+  // alt_result
+
+  sha256_init (&ctx);
+
+  sha256_update (&ctx, w, pw_len);
+
+  sha256_update (&ctx, s, salt_len);
+
+  int pl;
+
+  for (pl = pw_len; pl > 32; pl -= 32)
+  {
+    sha256_update (&ctx, final, 32);
+  }
+
+  u32 t_final[16] = { 0 };
+
+  #ifdef _unroll
+  #pragma unroll
+  #endif
+  for (int i = 0; i < 8; i++) t_final[i] = final[i];
+
+  truncate_block_16x4_be (t_final + 0, t_final + 4, t_final + 8, t_final + 12, pl);
+
+  sha256_update (&ctx, t_final, pl);
+
+  // take digest B for each 1-bit of pw_len, the password for each 0-bit
+  for (int cnt = pw_len; cnt > 0; cnt >>= 1)
+  {
+    if ((cnt & 1) != 0)
+    {
+      sha256_update (&ctx, final, 32);
+    }
+    else
+    {
+      sha256_update (&ctx, w, pw_len);
+    }
+  }
+
+  sha256_final (&ctx);
+
+  tmps[gid].alt_result[0] = ctx.h[0];
+  tmps[gid].alt_result[1] = ctx.h[1];
+  tmps[gid].alt_result[2] = ctx.h[2];
+  tmps[gid].alt_result[3] = ctx.h[3];
+  tmps[gid].alt_result[4] = ctx.h[4];
+  tmps[gid].alt_result[5] = ctx.h[5];
+  tmps[gid].alt_result[6] = ctx.h[6];
+  tmps[gid].alt_result[7] = ctx.h[7];
+
+  // p_bytes
+
+  sha256_init (&ctx);
+
+  for (u32 j = 0; j < pw_len; j++)
+  {
+    sha256_update (&ctx, w, pw_len);
+  }
+
+  sha256_final (&ctx);
+
+  final[ 0] = ctx.h[0];
+  final[ 1] = ctx.h[1];
+  final[ 2] = ctx.h[2];
+  final[ 3] = ctx.h[3];
+  final[ 4] = ctx.h[4];
+  final[ 5] = ctx.h[5];
+  final[ 6] = ctx.h[6];
+  final[ 7] = ctx.h[7];
+  final[ 8] = 0;
+  final[ 9] = 0;
+  final[10] = 0;
+  final[11] = 0;
+  final[12] = 0;
+  final[13] = 0;
+  final[14] = 0;
+  final[15] = 0;
+
+  u32 p_final[64] = { 0 };
+
+  int idx;
+
+  for (pl = pw_len, idx = 0; pl > 32; pl -= 32, idx += 8)
+  {
+    p_final[idx + 0] = final[0];
+    p_final[idx + 1] = final[1];
+    p_final[idx + 2] = final[2];
+    p_final[idx + 3] = final[3];
+    p_final[idx + 4] = final[4];
+    p_final[idx + 5] = final[5];
+    p_final[idx + 6] = final[6];
+    p_final[idx + 7] = final[7];
+  }
+
+  truncate_block_16x4_be (final + 0, final + 4, final + 8, final + 12, pl);
+
+  p_final[idx + 0] = final[0];
+  p_final[idx + 1] = final[1];
+  p_final[idx + 2] = final[2];
+  p_final[idx + 3] = final[3];
+  p_final[idx + 4] = final[4];
+  p_final[idx + 5] = final[5];
+  p_final[idx + 6] = final[6];
+  p_final[idx + 7] = final[7];
+
+  #ifdef _unroll
+  #pragma unroll
+  #endif
+  for (int i = 0; i < 64; i++) tmps[gid].p_bytes[i] = p_final[i];
+
+  // s_bytes
+
+  sha256_init (&ctx);
+
+  for (u32 j = 0; j < 16 + (tmps[gid].alt_result[0] >> 24); j++)
+  {
+    sha256_update (&ctx, s, salt_len);
+  }
+
+  sha256_final (&ctx);
+
+  final[ 0] = ctx.h[0];
+  final[ 1] = ctx.h[1];
+  final[ 2] = ctx.h[2];
+  final[ 3] = ctx.h[3];
+  final[ 4] = ctx.h[4];
+  final[ 5] = ctx.h[5];
+  final[ 6] = ctx.h[6];
+  final[ 7] = ctx.h[7];
+  final[ 8] = 0;
+  final[ 9] = 0;
+  final[10] = 0;
+  final[11] = 0;
+  final[12] = 0;
+  final[13] = 0;
+  final[14] = 0;
+  final[15] = 0;
+
+  u32 s_final[64] = { 0 };
+
+  for (pl = salt_len, idx = 0; pl > 32; pl -= 32, idx += 8)
+  {
+    s_final[idx + 0] = final[0];
+    s_final[idx + 1] = final[1];
+    s_final[idx + 2] = final[2];
+    s_final[idx + 3] = final[3];
+    s_final[idx + 4] = final[4];
+    s_final[idx + 5] = final[5];
+    s_final[idx + 6] = final[6];
+    s_final[idx + 7] = final[7];
+  }
+
+  truncate_block_16x4_be (final + 0, final + 4, final + 8, final + 12, pl);
+
+  s_final[idx + 0] = final[0];
+  s_final[idx + 1] = final[1];
+  s_final[idx + 2] = final[2];
+  s_final[idx + 3] = final[3];
+  s_final[idx + 4] = final[4];
+  s_final[idx + 5] = final[5];
+  s_final[idx + 6] = final[6];
+  s_final[idx + 7] = final[7];
+
+  #ifdef _unroll
+  #pragma unroll
+  #endif
+  for (int i = 0; i < 64; i++) tmps[gid].s_bytes[i] = s_final[i];
+}
+
+__kernel void m07400_loop (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global sha256crypt_tmp_t *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global const void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
+{
+  /**
+   * base
+   */
+
+  const u32 gid = get_global_id (0);
+
+  if (gid >= gid_max) return;
+
+  const u32 pw_len = pws[gid].pw_len;
+
+  const u32 salt_len = salt_bufs[salt_pos].salt_len;
+
+  u32 alt_result[16] = { 0 };
+
+  alt_result[0] = tmps[gid].alt_result[0];
+  alt_result[1] = tmps[gid].alt_result[1];
+  alt_result[2] = tmps[gid].alt_result[2];
+  alt_result[3] = tmps[gid].alt_result[3];
+  alt_result[4] = tmps[gid].alt_result[4];
+  alt_result[5] = tmps[gid].alt_result[5];
+  alt_result[6] = tmps[gid].alt_result[6];
+  alt_result[7] = tmps[gid].alt_result[7];
+
+  /* Repeatedly run the collected hash value through sha256 to burn
+     CPU cycles. */
+
+  for (u32 i = 0, j = loop_pos; i < loop_cnt; i++, j++)
+  {
+    sha256_ctx_t ctx;
+
+    sha256_init (&ctx);
+
+    if (j & 1)
+    {
+      sha256_update_global (&ctx, tmps[gid].p_bytes, pw_len);
+    }
+    else
+    {
+      sha256_update (&ctx, alt_result, 32);
+    }
+
+    if (j % 3)
+    {
+      sha256_update_global (&ctx, tmps[gid].s_bytes, salt_len);
+    }
+
+    if (j % 7)
+    {
+      sha256_update_global (&ctx, tmps[gid].p_bytes, pw_len);
+    }
+
+    if (j & 1)
+    {
+      sha256_update (&ctx, alt_result, 32);
+    }
+    else
+    {
+      sha256_update_global (&ctx, tmps[gid].p_bytes, pw_len);
+    }
+
+    sha256_final (&ctx);
+
+    alt_result[0] = ctx.h[0];
+    alt_result[1] = ctx.h[1];
+    alt_result[2] = ctx.h[2];
+    alt_result[3] = ctx.h[3];
+    alt_result[4] = ctx.h[4];
+    alt_result[5] = ctx.h[5];
+    alt_result[6] = ctx.h[6];
+    alt_result[7] = ctx.h[7];
+  }
+
+  tmps[gid].alt_result[0] = alt_result[0];
+  tmps[gid].alt_result[1] = alt_result[1];
+  tmps[gid].alt_result[2] = alt_result[2];
+  tmps[gid].alt_result[3] = alt_result[3];
+  tmps[gid].alt_result[4] = alt_result[4];
+  tmps[gid].alt_result[5] = alt_result[5];
+  tmps[gid].alt_result[6] = alt_result[6];
+  tmps[gid].alt_result[7] = alt_result[7];
+}
+
+__kernel void m07400_comp (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global sha256crypt_tmp_t *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global const void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
+{
+  /**
+   * base
+   */
+
+  const u32 gid = get_global_id (0);
+
+  if (gid >= gid_max) return;
+
+  const u32 lid = get_local_id (0);
+
+  const u32 r0 = swap32_S (tmps[gid].alt_result[0]);
+  const u32 r1 = swap32_S (tmps[gid].alt_result[1]);
+  const u32 r2 = swap32_S (tmps[gid].alt_result[2]);
+  const u32 r3 = swap32_S (tmps[gid].alt_result[3]);
+
+  #define il_pos 0
+
+  #include COMPARE_M
+}
diff --git a/include/interface.h b/include/interface.h
index 94cce825d..331542408 100644
--- a/include/interface.h
+++ b/include/interface.h
@@ -520,10 +520,11 @@ typedef struct md5crypt_tmp
 
 typedef struct sha256crypt_tmp
 {
-  u32 alt_result[8];
+  // pure version
 
-  u32 p_bytes[4];
-  u32 s_bytes[4];
+  u32 alt_result[8];
+  u32 p_bytes[64];
+  u32 s_bytes[64];
 
 } sha256crypt_tmp_t;
 
diff --git a/src/interface.c b/src/interface.c
index 68d130bc6..e0b1afc10 100644
--- a/src/interface.c
+++ b/src/interface.c
@@ -24612,7 +24612,7 @@ int hashconfig_init (hashcat_ctx_t *hashcat_ctx)
                 break;
     case  7000: hashconfig->pw_max = 19;
                 break;
-    case  7400: hashconfig->pw_max = 16;
+    case  7400: hashconfig->pw_max = 15;
                 break;
     case  7700: hashconfig->pw_max = 8;
                 break;