/**
 * Author......: See docs/credits.txt
 * License.....: MIT
 */

/*
 * hashcat mode 00500: md5crypt ($1$, FreeBSD MD5 crypt).
 * Split into the usual init / loop / comp kernels; the per-iteration state
 * (4 x u32 MD5 digest) is carried between kernels in md5crypt_tmp_t.
 */

#include "inc_vendor.cl"
#include "inc_hash_constants.h"
#include "inc_hash_functions.cl"
#include "inc_types.cl"
#include "inc_common.cl"
#include "inc_hash_md5.cl"

#define COMPARE_S "inc_comp_single.cl"
#define COMPARE_M "inc_comp_multi.cl"

// Little-endian bytes 24 31 24 00 == "$1$\0": the md5crypt signature.
#define md5crypt_magic 0x00243124u

/*
 * Append 16 bytes (append[0..3]) to a 64-byte block held in four u32[4]
 * quarters, starting at byte offset block_len.  The append data is shifted
 * across word boundaries with vendor byte-align intrinsics, then OR-ed /
 * stored into the right words.
 *
 * NOTE(review): not referenced by the kernels visible in this file;
 * presumably kept for other/legacy code paths — confirm before removing.
 */
void memcat16 (u32 block0[4], u32 block1[4], u32 block2[4], u32 block3[4], const u32 block_len, const u32 append[4])
{
  u32 tmp0;
  u32 tmp1;
  u32 tmp2;
  u32 tmp3;
  u32 tmp4;

  #if defined IS_AMD || defined IS_GENERIC
  // amd_bytealign shifts a byte window across each adjacent pair of words.
  const int offset_minus_4 = 4 - (block_len & 3);

  tmp0 = amd_bytealign (append[0],         0, offset_minus_4);
  tmp1 = amd_bytealign (append[1], append[0], offset_minus_4);
  tmp2 = amd_bytealign (append[2], append[1], offset_minus_4);
  tmp3 = amd_bytealign (append[3], append[2], offset_minus_4);
  tmp4 = amd_bytealign (        0, append[3], offset_minus_4);

  const u32 mod = block_len & 3;

  // Word-aligned offset: the bytealign results are off by one word; shift down.
  if (mod == 0)
  {
    tmp0 = tmp1;
    tmp1 = tmp2;
    tmp2 = tmp3;
    tmp3 = tmp4;
    tmp4 = 0;
  }
  #endif

  #ifdef IS_NV
  const int offset_minus_4 = 4 - (block_len & 3);

  // Build a __byte_perm selector that picks the same sliding byte window.
  const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;

  tmp0 = __byte_perm (        0, append[0], selector);
  tmp1 = __byte_perm (append[0], append[1], selector);
  tmp2 = __byte_perm (append[1], append[2], selector);
  tmp3 = __byte_perm (append[2], append[3], selector);
  tmp4 = __byte_perm (append[3],         0, selector);
  #endif

  // Word index of the first byte written; first word is OR-ed (partial),
  // the following words are plain stores.
  const u32 div = block_len / 4;

  switch (div)
  {
    case 0: block0[0] |= tmp0; block0[1] = tmp1; block0[2] = tmp2; block0[3] = tmp3; block1[0] = tmp4; break;
    case 1: block0[1] |= tmp0; block0[2] = tmp1; block0[3] = tmp2; block1[0] = tmp3; block1[1] = tmp4; break;
    case 2: block0[2] |= tmp0; block0[3] = tmp1; block1[0] = tmp2; block1[1] = tmp3; block1[2] = tmp4; break;
    case 3: block0[3] |= tmp0; block1[0] = tmp1; block1[1] = tmp2; block1[2] = tmp3; block1[3] = tmp4; break;
    case 4: block1[0] |= tmp0; block1[1] = tmp1; block1[2] = tmp2; block1[3] = tmp3; block2[0] = tmp4; break;
    case 5: block1[1] |= tmp0; block1[2] = tmp1; block1[3] = tmp2; block2[0] = tmp3; block2[1] = tmp4; break;
    case 6: block1[2] |= tmp0; block1[3] = tmp1; block2[0] = tmp2; block2[1] = tmp3; block2[2] = tmp4; break;
    case 7: block1[3] |= tmp0; block2[0] = tmp1; block2[1] = tmp2; block2[2] = tmp3; block2[3] = tmp4; break;
    case 8: block2[0] |= tmp0; block2[1] = tmp1; block2[2] = tmp2; block2[3] = tmp3; block3[0] = tmp4; break;
    case 9: block2[1] |= tmp0; block2[2] = tmp1; block2[3] = tmp2; block3[0] = tmp3; block3[1] = tmp4; break;
  }
}

/*
 * Same as memcat16, but additionally appends the MD5 padding byte 0x80
 * immediately after the 16 appended bytes (the 0x80 rides in the tail word).
 *
 * NOTE(review): not referenced by the kernels visible in this file — confirm.
 */
void memcat16_x80 (u32 block0[4], u32 block1[4], u32 block2[4], u32 block3[4], const u32 block_len, const u32 append[4])
{
  u32 tmp0;
  u32 tmp1;
  u32 tmp2;
  u32 tmp3;
  u32 tmp4;

  #if defined IS_AMD || defined IS_GENERIC
  const int offset_minus_4 = 4 - (block_len & 3);

  tmp0 = amd_bytealign (append[0],         0, offset_minus_4);
  tmp1 = amd_bytealign (append[1], append[0], offset_minus_4);
  tmp2 = amd_bytealign (append[2], append[1], offset_minus_4);
  tmp3 = amd_bytealign (append[3], append[2], offset_minus_4);
  // 0x80 = MD5 padding marker, shifted in after the last append byte.
  tmp4 = amd_bytealign (     0x80, append[3], offset_minus_4);

  const u32 mod = block_len & 3;

  if (mod == 0)
  {
    tmp0 = tmp1;
    tmp1 = tmp2;
    tmp2 = tmp3;
    tmp3 = tmp4;
    tmp4 = 0x80;
  }
  #endif

  #ifdef IS_NV
  const int offset_minus_4 = 4 - (block_len & 3);

  const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;

  tmp0 = __byte_perm (        0, append[0], selector);
  tmp1 = __byte_perm (append[0], append[1], selector);
  tmp2 = __byte_perm (append[1], append[2], selector);
  tmp3 = __byte_perm (append[2], append[3], selector);
  tmp4 = __byte_perm (append[3],      0x80, selector);
  #endif

  const u32 div = block_len / 4;

  switch (div)
  {
    case 0: block0[0] |= tmp0; block0[1] = tmp1; block0[2] = tmp2; block0[3] = tmp3; block1[0] = tmp4; break;
    case 1: block0[1] |= tmp0; block0[2] = tmp1; block0[3] = tmp2; block1[0] = tmp3; block1[1] = tmp4; break;
    case 2: block0[2] |= tmp0; block0[3] = tmp1; block1[0] = tmp2; block1[1] = tmp3; block1[2] = tmp4; break;
    case 3: block0[3] |= tmp0; block1[0] = tmp1; block1[1] = tmp2; block1[2] = tmp3; block1[3] = tmp4; break;
    case 4: block1[0] |= tmp0; block1[1] = tmp1; block1[2] = tmp2; block1[3] = tmp3; block2[0] = tmp4; break;
    case 5: block1[1] |= tmp0; block1[2] = tmp1; block1[3] = tmp2; block2[0] = tmp3; block2[1] = tmp4; break;
    case 6: block1[2] |= tmp0; block1[3] = tmp1; block2[0] = tmp2; block2[1] = tmp3; block2[2] = tmp4; break;
    case 7: block1[3] |= tmp0; block2[0] = tmp1; block2[1] = tmp2; block2[2] = tmp3; block2[3] = tmp4; break;
    case 8: block2[0] |= tmp0; block2[1] = tmp1; block2[2] = tmp2; block2[3] = tmp3; block3[0] = tmp4; break;
    case 9: block2[1] |= tmp0; block2[2] = tmp1; block2[3] = tmp2; block3[0] = tmp3; block3[1] = tmp4; break;
  }
}

/*
 * Append 8 bytes (append[0..1]) to the 64-byte block at byte offset
 * block_len; same sliding-window technique as memcat16 with a 3-word tail.
 *
 * NOTE(review): not referenced by the kernels visible in this file — confirm.
 */
void memcat8 (u32 block0[4], u32 block1[4], u32 block2[4], u32 block3[4], const u32 block_len, const u32 append[2])
{
  u32 tmp0;
  u32 tmp1;
  u32 tmp2;

  #if defined IS_AMD || defined IS_GENERIC
  const int offset_minus_4 = 4 - (block_len & 3);

  tmp0 = amd_bytealign (append[0],         0, offset_minus_4);
  tmp1 = amd_bytealign (append[1], append[0], offset_minus_4);
  tmp2 = amd_bytealign (        0, append[1], offset_minus_4);

  const u32 mod = block_len & 3;

  if (mod == 0)
  {
    tmp0 = tmp1;
    tmp1 = tmp2;
    tmp2 = 0;
  }
  #endif

  #ifdef IS_NV
  const int offset_minus_4 = 4 - (block_len & 3);

  const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;

  tmp0 = __byte_perm (        0, append[0], selector);
  tmp1 = __byte_perm (append[0], append[1], selector);
  tmp2 = __byte_perm (append[1],         0, selector);
  #endif

  const u32 div = block_len / 4;

  switch (div)
  {
    case  0: block0[0] |= tmp0; block0[1] = tmp1; block0[2] = tmp2; break;
    case  1: block0[1] |= tmp0; block0[2] = tmp1; block0[3] = tmp2; break;
    case  2: block0[2] |= tmp0; block0[3] = tmp1; block1[0] = tmp2; break;
    case  3: block0[3] |= tmp0; block1[0] = tmp1; block1[1] = tmp2; break;
    case  4: block1[0] |= tmp0; block1[1] = tmp1; block1[2] = tmp2; break;
    case  5: block1[1] |= tmp0; block1[2] = tmp1; block1[3] = tmp2; break;
    case  6: block1[2] |= tmp0; block1[3] = tmp1; block2[0] = tmp2; break;
    case  7: block1[3] |= tmp0; block2[0] = tmp1; block2[1] = tmp2; break;
    case  8: block2[0] |= tmp0; block2[1] = tmp1; block2[2] = tmp2; break;
    case  9: block2[1] |= tmp0; block2[2] = tmp1; block2[3] = tmp2; break;
    case 10: block2[2] |= tmp0; block2[3] = tmp1; block3[0] = tmp2; break;
    case 11: block2[3] |= tmp0; block3[0] = tmp1; block3[1] = tmp2; break;
  }
}

/*
 * Append the 3-byte md5crypt signature "$1$" (md5crypt_magic) into the first
 * 32 bytes of a block at byte offset block_len (0..15 supported).
 *
 * NOTE(review): not referenced by the kernels visible in this file — confirm.
 */
void append_sign (u32 block0[4], u32 block1[4], const u32 block_len)
{
  switch (block_len)
  {
    case 0:
      block0[0] = md5crypt_magic;
      break;

    case 1:
      block0[0] = block0[0] | md5crypt_magic <<  8u;
      block0[1] = md5crypt_magic >> 24u;
      break;

    case 2:
      block0[0] = block0[0] | md5crypt_magic << 16u;
      block0[1] = md5crypt_magic >> 16u;
      break;

    case 3:
      block0[0] = block0[0] | md5crypt_magic << 24u;
      block0[1] = md5crypt_magic >>  8u;
      break;

    case 4:
      block0[1] = md5crypt_magic;
      break;

    case 5:
      block0[1] = block0[1] | md5crypt_magic <<  8u;
      block0[2] = md5crypt_magic >> 24u;
      break;

    case 6:
      block0[1] = block0[1] | md5crypt_magic << 16u;
      block0[2] = md5crypt_magic >> 16u;
      break;

    case 7:
      block0[1] = block0[1] | md5crypt_magic << 24u;
      block0[2] = md5crypt_magic >>  8u;
      break;

    case 8:
      block0[2] = md5crypt_magic;
      break;

    case 9:
      block0[2] = block0[2] | md5crypt_magic <<  8u;
      block0[3] = md5crypt_magic >> 24u;
      break;

    case 10:
      block0[2] = block0[2] | md5crypt_magic << 16u;
      block0[3] = md5crypt_magic >> 16u;
      break;

    case 11:
      block0[2] = block0[2] | md5crypt_magic << 24u;
      block0[3] = md5crypt_magic >>  8u;
      break;

    case 12:
      block0[3] = md5crypt_magic;
      break;

    case 13:
      block0[3] = block0[3] | md5crypt_magic <<  8u;
      block1[0] = md5crypt_magic >> 24u;
      break;

    case 14:
      block0[3] = block0[3] | md5crypt_magic << 16u;
      block1[0] = md5crypt_magic >> 16u;
      break;

    case 15:
      block0[3] = block0[3] | md5crypt_magic << 24u;
      block1[0] = md5crypt_magic >>  8u;
      break;
  }
}

/*
 * Write one u32 (whose useful payload sits in its low byte(s)) into the
 * 64-byte block at byte offset block_len (0..56 supported).  Offsets that
 * are not word-aligned OR the value in shifted by 8/16/24 bits.
 *
 * NOTE(review): not referenced by the kernels visible in this file — confirm.
 */
void append_1st (u32 block0[4], u32 block1[4], u32 block2[4], u32 block3[4], const u32 block_len, const u32 append)
{
  switch (block_len)
  {
    case  0: block0[0] = append;                          break;
    case  1: block0[0] = block0[0] | append <<  8;        break;
    case  2: block0[0] = block0[0] | append << 16;        break;
    case  3: block0[0] = block0[0] | append << 24;        break;
    case  4: block0[1] = append;                          break;
    case  5: block0[1] = block0[1] | append <<  8;        break;
    case  6: block0[1] = block0[1] | append << 16;        break;
    case  7: block0[1] = block0[1] | append << 24;        break;
    case  8: block0[2] = append;                          break;
    case  9: block0[2] = block0[2] | append <<  8;        break;
    case 10: block0[2] = block0[2] | append << 16;        break;
    case 11: block0[2] = block0[2] | append << 24;        break;
    case 12: block0[3] = append;                          break;
    case 13: block0[3] = block0[3] | append <<  8;        break;
    case 14: block0[3] = block0[3] | append << 16;        break;
    case 15: block0[3] = block0[3] | append << 24;        break;
    case 16: block1[0] = append;                          break;
    case 17: block1[0] = block1[0] | append <<  8;        break;
    case 18: block1[0] = block1[0] | append << 16;        break;
    case 19: block1[0] = block1[0] | append << 24;        break;
    case 20: block1[1] = append;                          break;
    case 21: block1[1] = block1[1] | append <<  8;        break;
    case 22: block1[1] = block1[1] | append << 16;        break;
    case 23: block1[1] = block1[1] | append << 24;        break;
    case 24: block1[2] = append;                          break;
    case 25: block1[2] = block1[2] | append <<  8;        break;
    case 26: block1[2] = block1[2] | append << 16;        break;
    case 27: block1[2] = block1[2] | append << 24;        break;
    case 28: block1[3] = append;                          break;
    case 29: block1[3] = block1[3] | append <<  8;        break;
    case 30: block1[3] = block1[3] | append << 16;        break;
    case 31: block1[3] = block1[3] | append << 24;        break;
    case 32: block2[0] = append;                          break;
    case 33: block2[0] = block2[0] | append <<  8;        break;
    case 34: block2[0] = block2[0] | append << 16;        break;
    case 35: block2[0] = block2[0] | append << 24;        break;
    case 36: block2[1] = append;                          break;
    case 37: block2[1] = block2[1] | append <<  8;        break;
    case 38: block2[1] = block2[1] | append << 16;        break;
    case 39: block2[1] = block2[1] | append << 24;        break;
    case 40: block2[2] = append;                          break;
    case 41: block2[2] = block2[2] | append <<  8;        break;
    case 42: block2[2] = block2[2] | append << 16;        break;
    case 43: block2[2] = block2[2] | append << 24;        break;
    case 44: block2[3] = append;                          break;
    case 45: block2[3] = block2[3] | append <<  8;        break;
    case 46: block2[3] = block2[3] | append << 16;        break;
    case 47: block2[3] = block2[3] | append << 24;        break;
    case 48: block3[0] = append;                          break;
    case 49: block3[0] = block3[0] | append <<  8;        break;
    case 50: block3[0] = block3[0] | append << 16;        break;
    case 51: block3[0] = block3[0] | append << 24;        break;
    case 52: block3[1] = append;                          break;
    case 53: block3[1] = block3[1] | append <<  8;        break;
    case 54: block3[1] = block3[1] | append << 16;        break;
    case 55: block3[1] = block3[1] | append << 24;        break;
    case 56: block3[2] = append;                          break;
  }
}

/*
 * Init kernel: computes the md5crypt seed digest.
 *
 * Reference algorithm: alternate = MD5(pw . salt . pw); then
 * ctx = MD5(pw . "$1$" . salt . alternate[:len(pw)] . <bit pattern of
 * len(pw), appending \0 for 1-bits and pw[0] for 0-bits>).  The resulting
 * 16-byte digest is stored in tmps[gid].digest_buf for the loop kernel.
 */
__kernel void m00500_init (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const comb_t *combs_buf, __global const bf_t *bfs_buf, __global md5crypt_tmp_t *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global const void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * base
   */

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  /**
   * init
   */

  const u32 pw_len = pws[gid].pw_len;

  // Number of u32 words needed to hold pw_len bytes (ceil divide).
  const u32 pw_lenv = ceil ((float) pw_len / 4);

  u32 w[64] = { 0 };

  for (int idx = 0; idx < pw_lenv; idx++)
  {
    w[idx] = pws[gid].i[idx];

    // NOTE(review): barrier() inside a loop whose trip count depends on the
    // per-work-item pw_len is normally invalid OpenCL (all work-items must
    // reach every barrier) — presumably placed to defeat compiler
    // unrolling/vectorization; confirm intent before touching.
    barrier (CLK_GLOBAL_MEM_FENCE);
  }

  const u32 salt_len = salt_bufs[salt_pos].salt_len;

  const u32 salt_lenv = ceil ((float) salt_len / 4);

  u32 s[64] = { 0 };

  for (int idx = 0; idx < salt_lenv; idx++)
  {
    s[idx] = salt_bufs[salt_pos].salt_buf[idx];

    // NOTE(review): same divergent-barrier concern as above.
    barrier (CLK_GLOBAL_MEM_FENCE);
  }

  /**
   * prepare
   */

  // alternate digest = MD5(password . salt . password)
  md5_ctx_t md5_ctx1;

  md5_init (&md5_ctx1);

  md5_update (&md5_ctx1, w, pw_len);

  md5_update (&md5_ctx1, s, salt_len);

  md5_update (&md5_ctx1, w, pw_len);

  md5_final (&md5_ctx1);

  u32 final[16] = { 0 };

  final[0] = md5_ctx1.h[0];
  final[1] = md5_ctx1.h[1];
  final[2] = md5_ctx1.h[2];
  final[3] = md5_ctx1.h[3];

  // Only the first pw_len bytes of the alternate digest are fed back in.
  if (pw_len < 16)
  {
    truncate_block (final, pw_len);
  }

  md5_ctx_t md5_ctx;

  md5_init (&md5_ctx);

  md5_update (&md5_ctx, w, pw_len);

  u32 m[16] = { 0 };

  m[0] = md5crypt_magic;

  // Append the 3 signature bytes "$1$".
  md5_update (&md5_ctx, m, 3);

  md5_update (&md5_ctx, s, salt_len);

  // Append min(pl, 16) bytes of the (truncated) alternate digest for every
  // 16 bytes of password length.
  for (int pl = pw_len; pl > 0; pl -= 16)
  {
    md5_update (&md5_ctx, final, pl > 16 ? 16 : pl);
  }

  /* Then something really weird...
   * (the original md5crypt quirk: walk the bits of pw_len, appending a \0
   * byte for each set bit and the first password byte for each clear bit)
   */

  u32 z[16] = { 0 };

  for (int i = pw_len; i != 0; i >>= 1)
  {
    if (i & 1)
    {
      md5_update (&md5_ctx, z, 1);
    }
    else
    {
      md5_update (&md5_ctx, w, 1);
    }
  }

  md5_final (&md5_ctx);

  // Hand the seed digest to the loop kernel.
  tmps[gid].digest_buf[0] = md5_ctx.h[0];
  tmps[gid].digest_buf[1] = md5_ctx.h[1];
  tmps[gid].digest_buf[2] = md5_ctx.h[2];
  tmps[gid].digest_buf[3] = md5_ctx.h[3];
}

/*
 * Loop kernel: runs loop_cnt rounds of the md5crypt strengthening loop
 * (1000 rounds total across invocations), resuming at round loop_pos.
 * Each round j re-hashes password/digest/salt in the pattern mandated by
 * the reference implementation: pw-or-digest by j&1, salt if j%3, pw if
 * j%7, then digest-or-pw by j&1 again.
 */
__kernel void m00500_loop (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const comb_t *combs_buf, __global const bf_t *bfs_buf, __global md5crypt_tmp_t *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global const void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * base
   */

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  /**
   * init
   */

  const u32 pw_len = pws[gid].pw_len;

  const u32 pw_lenv = ceil ((float) pw_len / 4);

  u32 w[64] = { 0 };

  for (int idx = 0; idx < pw_lenv; idx++)
  {
    w[idx] = pws[gid].i[idx];

    // NOTE(review): divergent barrier — see matching note in m00500_init.
    barrier (CLK_GLOBAL_MEM_FENCE);
  }

  const u32 salt_len = salt_bufs[salt_pos].salt_len;

  const u32 salt_lenv = ceil ((float) salt_len / 4);

  u32 s[64] = { 0 };

  for (int idx = 0; idx < salt_lenv; idx++)
  {
    s[idx] = salt_bufs[salt_pos].salt_buf[idx];

    // NOTE(review): divergent barrier — see matching note in m00500_init.
    barrier (CLK_GLOBAL_MEM_FENCE);
  }

  /**
   * digest
   */

  // Resume from the intermediate digest produced by init / previous loop.
  u32 digest[16] = { 0 };

  digest[0] = tmps[gid].digest_buf[0];
  digest[1] = tmps[gid].digest_buf[1];
  digest[2] = tmps[gid].digest_buf[2];
  digest[3] = tmps[gid].digest_buf[3];

  /**
   * loop
   */

  for (u32 i = 0, j = loop_pos; i < loop_cnt; i++, j++)
  {
    md5_ctx_t md5_ctx;

    md5_init (&md5_ctx);

    // Odd rounds start with the password, even rounds with the digest.
    if (j & 1)
    {
      md5_update (&md5_ctx, w, pw_len);
    }
    else
    {
      md5_update (&md5_ctx, digest, 16);
    }

    // Salt on every round not divisible by 3.
    if (j % 3)
    {
      md5_update (&md5_ctx, s, salt_len);
    }

    // Password again on every round not divisible by 7.
    if (j % 7)
    {
      md5_update (&md5_ctx, w, pw_len);
    }

    // ...and finish with the element NOT used at the start of the round.
    if (j & 1)
    {
      md5_update (&md5_ctx, digest, 16);
    }
    else
    {
      md5_update (&md5_ctx, w, pw_len);
    }

    md5_final (&md5_ctx);

    digest[0] = md5_ctx.h[0];
    digest[1] = md5_ctx.h[1];
    digest[2] = md5_ctx.h[2];
    digest[3] = md5_ctx.h[3];
  }

  tmps[gid].digest_buf[0] = digest[0];
  tmps[gid].digest_buf[1] = digest[1];
  tmps[gid].digest_buf[2] = digest[2];
  tmps[gid].digest_buf[3] = digest[3];
}

/*
 * Comp kernel: loads the final digest words in the mode's result order
 * (DGST_R0..R3) and defers to the shared multi-hash comparison include,
 * which marks matches in hashes_shown / d_return_buf.
 */
__kernel void m00500_comp (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const comb_t *combs_buf, __global const bf_t *bfs_buf, __global md5crypt_tmp_t *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global const void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  const u32 lid = get_local_id (0);

  /**
   * digest
   */

  const u32 r0 = tmps[gid].digest_buf[DGST_R0];
  const u32 r1 = tmps[gid].digest_buf[DGST_R1];
  const u32 r2 = tmps[gid].digest_buf[DGST_R2];
  const u32 r3 = tmps[gid].digest_buf[DGST_R3];

  #define il_pos 0

  #include COMPARE_M
}