/**
 * Authors.....: Jens Steube
 *               Gabriele Gristina
 *               magnum
 *               Michael Sprecher
 *
 * License.....: MIT
 */

#define _SHA1_

#define NEW_SIMD_CODE

#include "inc_vendor.cl"
#include "inc_hash_constants.h"
#include "inc_hash_functions.cl"
#include "inc_types.cl"
#include "inc_common.cl"
#include "inc_simd.cl"

#if   VECT_SIZE == 1
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i)])
#elif VECT_SIZE == 2
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i).s0], l_bin2asc[(i).s1])
#elif VECT_SIZE == 4
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3])
#elif VECT_SIZE == 8
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3], l_bin2asc[(i).s4], l_bin2asc[(i).s5], l_bin2asc[(i).s6], l_bin2asc[(i).s7])
#elif VECT_SIZE == 16
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3], l_bin2asc[(i).s4], l_bin2asc[(i).s5], l_bin2asc[(i).s6], l_bin2asc[(i).s7], l_bin2asc[(i).s8], l_bin2asc[(i).s9], l_bin2asc[(i).sa], l_bin2asc[(i).sb], l_bin2asc[(i).sc], l_bin2asc[(i).sd], l_bin2asc[(i).se], l_bin2asc[(i).sf])
#endif
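/*
 * Each intermediate SHA1 digest is re-hashed in its 40-character
 * lowercase-hex ASCII form.  l_bin2asc[] is a 256-entry table, filled per
 * work-group in the kernels below, mapping one byte to its two hex digits
 * packed into the low 16 bits of a u32; uint_to_hex_lower8_le() applies the
 * lookup to every lane of a u32x vector.
 */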
void sha1_transform (const u32x w0[4], const u32x w1[4], const u32x w2[4], const u32x w3[4], u32x digest[5])
{
  u32x A = digest[0];
  u32x B = digest[1];
  u32x C = digest[2];
  u32x D = digest[3];
  u32x E = digest[4];

  u32x w0_t = w0[0];
  u32x w1_t = w0[1];
  u32x w2_t = w0[2];
  u32x w3_t = w0[3];
  u32x w4_t = w1[0];
  u32x w5_t = w1[1];
  u32x w6_t = w1[2];
  u32x w7_t = w1[3];
  u32x w8_t = w2[0];
  u32x w9_t = w2[1];
  u32x wa_t = w2[2];
  u32x wb_t = w2[3];
  u32x wc_t = w3[0];
  u32x wd_t = w3[1];
  u32x we_t = w3[2];
  u32x wf_t = w3[3];

  #undef K
  #define K SHA1C00

  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w0_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w1_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w2_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w3_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w4_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, w5_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, w6_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, w7_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, w8_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, w9_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wa_t);
  SHA1_STEP (SHA1_F0o, E, A, B, C, D, wb_t);
  SHA1_STEP (SHA1_F0o, D, E, A, B, C, wc_t);
  SHA1_STEP (SHA1_F0o, C, D, E, A, B, wd_t);
  SHA1_STEP (SHA1_F0o, B, C, D, E, A, we_t);
  SHA1_STEP (SHA1_F0o, A, B, C, D, E, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F0o, E, A, B, C, D, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F0o, D, E, A, B, C, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F0o, C, D, E, A, B, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F0o, B, C, D, E, A, w3_t);

  #undef K
  #define K SHA1C01

  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w7_t);

  #undef K
  #define K SHA1C02

  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F2o, A, B, C, D, E, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F2o, E, A, B, C, D, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F2o, D, E, A, B, C, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F2o, C, D, E, A, B, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F2o, B, C, D, E, A, wb_t);

  #undef K
  #define K SHA1C03

  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, wf_t);
  w0_t = rotl32 ((wd_t ^ w8_t ^ w2_t ^ w0_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w0_t);
  w1_t = rotl32 ((we_t ^ w9_t ^ w3_t ^ w1_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w1_t);
  w2_t = rotl32 ((wf_t ^ wa_t ^ w4_t ^ w2_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w2_t);
  w3_t = rotl32 ((w0_t ^ wb_t ^ w5_t ^ w3_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w3_t);
  w4_t = rotl32 ((w1_t ^ wc_t ^ w6_t ^ w4_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w4_t);
  w5_t = rotl32 ((w2_t ^ wd_t ^ w7_t ^ w5_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, w5_t);
  w6_t = rotl32 ((w3_t ^ we_t ^ w8_t ^ w6_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, w6_t);
  w7_t = rotl32 ((w4_t ^ wf_t ^ w9_t ^ w7_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, w7_t);
  w8_t = rotl32 ((w5_t ^ w0_t ^ wa_t ^ w8_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, w8_t);
  w9_t = rotl32 ((w6_t ^ w1_t ^ wb_t ^ w9_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, w9_t);
  wa_t = rotl32 ((w7_t ^ w2_t ^ wc_t ^ wa_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wa_t);
  wb_t = rotl32 ((w8_t ^ w3_t ^ wd_t ^ wb_t), 1u); SHA1_STEP (SHA1_F1, A, B, C, D, E, wb_t);
  wc_t = rotl32 ((w9_t ^ w4_t ^ we_t ^ wc_t), 1u); SHA1_STEP (SHA1_F1, E, A, B, C, D, wc_t);
  wd_t = rotl32 ((wa_t ^ w5_t ^ wf_t ^ wd_t), 1u); SHA1_STEP (SHA1_F1, D, E, A, B, C, wd_t);
  we_t = rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, C, D, E, A, B, we_t);
  wf_t = rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, B, C, D, E, A, wf_t);

  digest[0] += A;
  digest[1] += B;
  digest[2] += C;
  digest[3] += D;
  digest[4] += E;
}
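/*
 * m13900m/m13900s compute SHA1($salt.SHA1($salt.SHA1($pass))) -- hash mode
 * 13900 (OpenCart) -- for the brute-force attack path (candidate words come
 * from bfs_buf via ix_create_bft).  m13900m checks results against the
 * loaded multi-hash bitmaps, m13900s against a single target digest.
 */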
void m13900m (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, __local u32 *l_bin2asc)
{
  /**
   * modifier
   */

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);

  /**
   * salt
   */

  u32 salt_buf0[3];

  salt_buf0[0] = swap32_S (salt_bufs[salt_pos].salt_buf[0]);
  salt_buf0[1] = swap32_S (salt_bufs[salt_pos].salt_buf[1]);
  salt_buf0[2] = swap32_S (salt_bufs[salt_pos].salt_buf[2]);

  const u32 salt_len = salt_bufs[salt_pos].salt_len;

  /**
   * loop
   */

  u32 w0l = w0[0];

  for (u32 il_pos = 0; il_pos < il_cnt; il_pos += VECT_SIZE)
  {
    const u32x w0r = ix_create_bft (bfs_buf, il_pos);

    const u32x w0lr = w0l | w0r;

    u32x w0_t[4];
    u32x w1_t[4];
    u32x w2_t[4];
    u32x w3_t[4];

    w0_t[0] = w0lr;
    w0_t[1] = w0[1];
    w0_t[2] = w0[2];
    w0_t[3] = w0[3];
    w1_t[0] = w1[0];
    w1_t[1] = w1[1];
    w1_t[2] = w1[2];
    w1_t[3] = w1[3];
    w2_t[0] = w2[0];
    w2_t[1] = w2[1];
    w2_t[2] = w2[2];
    w2_t[3] = w2[3];
    w3_t[0] = w3[0];
    w3_t[1] = w3[1];
    w3_t[2] = 0;
    w3_t[3] = pw_len * 8;

    /**
     * SHA1($pass)
     */

    u32x digest[5];

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    /**
     * SHA1($salt.SHA1($pass))
     */

    u32x a;
    u32x b;
    u32x c;
    u32x d;
    u32x e;

    a = digest[0];
    b = digest[1];
    c = digest[2];
    d = digest[3];
    e = digest[4];

    w0_t[0] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w1_t[0] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w1_t[1] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;
    w1_t[2] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w1_t[3] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w2_t[0] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w2_t[1] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w2_t[2] = 0x80000000;
    w2_t[3] = 0;
    w3_t[0] = 0;
    w3_t[1] = 0;
    w3_t[2] = 0;
    w3_t[3] = 0;
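    // Shift the 40 hex bytes right by 9 so the salt can be OR'ed into the
    // front, then fix up the message length: 9 (salt) + 40 (hex) = 49 bytes.
    // The same prepend-and-rehash step repeats for the third SHA1 below.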
    switch_buffer_by_offset_be (w0_t, w1_t, w2_t, w3_t, 9);

    w0_t[0] |= salt_buf0[0];
    w0_t[1] |= salt_buf0[1];
    w0_t[2] |= salt_buf0[2];

    w3_t[3] = 49 * 8;

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    /**
     * SHA1($salt.SHA1($salt.SHA1($pass)))
     */

    a = digest[0];
    b = digest[1];
    c = digest[2];
    d = digest[3];
    e = digest[4];

    w0_t[0] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w1_t[0] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w1_t[1] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;
    w1_t[2] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w1_t[3] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w2_t[0] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w2_t[1] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w2_t[2] = 0x80000000;
    w2_t[3] = 0;
    w3_t[0] = 0;
    w3_t[1] = 0;
    w3_t[2] = 0;
    w3_t[3] = 0;

    switch_buffer_by_offset_be (w0_t, w1_t, w2_t, w3_t, 9);

    w0_t[0] |= salt_buf0[0];
    w0_t[1] |= salt_buf0[1];
    w0_t[2] |= salt_buf0[2];

    w3_t[3] = 49 * 8;

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    COMPARE_M_SIMD (digest[3], digest[4], digest[2], digest[1]);
  }
}
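/*
 * Single-hash variant: same pipeline as m13900m, but the target digest is
 * fetched once into search[] so COMPARE_S_SIMD can test candidates against
 * that one hash directly.
 */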
void m13900s (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, __local u32 *l_bin2asc)
{
  /**
   * modifier
   */

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);

  /**
   * salt
   */

  u32 salt_buf0[4];
  u32 salt_buf1[4];
  u32 salt_buf2[4];

  salt_buf0[0] = swap32_S (salt_bufs[salt_pos].salt_buf[ 0]);
  salt_buf0[1] = swap32_S (salt_bufs[salt_pos].salt_buf[ 1]);
  salt_buf0[2] = swap32_S (salt_bufs[salt_pos].salt_buf[ 2]);
  salt_buf0[3] = swap32_S (salt_bufs[salt_pos].salt_buf[ 3]);
  salt_buf1[0] = swap32_S (salt_bufs[salt_pos].salt_buf[ 4]);
  salt_buf1[1] = swap32_S (salt_bufs[salt_pos].salt_buf[ 5]);
  salt_buf1[2] = swap32_S (salt_bufs[salt_pos].salt_buf[ 6]);
  salt_buf1[3] = swap32_S (salt_bufs[salt_pos].salt_buf[ 7]);
  salt_buf2[0] = swap32_S (salt_bufs[salt_pos].salt_buf[ 8]);
  salt_buf2[1] = swap32_S (salt_bufs[salt_pos].salt_buf[ 9]);
  salt_buf2[2] = 0;
  salt_buf2[3] = 0;

  const u32 salt_len = salt_bufs[salt_pos].salt_len;

  /**
   * digest
   */

  const u32 search[4] =
  {
    digests_buf[digests_offset].digest_buf[DGST_R0],
    digests_buf[digests_offset].digest_buf[DGST_R1],
    digests_buf[digests_offset].digest_buf[DGST_R2],
    digests_buf[digests_offset].digest_buf[DGST_R3]
  };

  /**
   * loop
   */

  u32 w0l = w0[0];

  for (u32 il_pos = 0; il_pos < il_cnt; il_pos += VECT_SIZE)
  {
    const u32x w0r = ix_create_bft (bfs_buf, il_pos);

    const u32x w0lr = w0l | w0r;

    u32x w0_t[4];
    u32x w1_t[4];
    u32x w2_t[4];
    u32x w3_t[4];

    w0_t[0] = w0lr;
    w0_t[1] = w0[1];
    w0_t[2] = w0[2];
    w0_t[3] = w0[3];
    w1_t[0] = w1[0];
    w1_t[1] = w1[1];
    w1_t[2] = w1[2];
    w1_t[3] = w1[3];
    w2_t[0] = w2[0];
    w2_t[1] = w2[1];
    w2_t[2] = w2[2];
    w2_t[3] = w2[3];
    w3_t[0] = w3[0];
    w3_t[1] = w3[1];
    w3_t[2] = 0;
    w3_t[3] = pw_len * 8;

    /**
     * SHA1($pass)
     */

    u32x digest[5];

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    /**
     * SHA1($salt.SHA1($pass))
     */

    u32x a;
    u32x b;
    u32x c;
    u32x d;
    u32x e;

    a = digest[0];
    b = digest[1];
    c = digest[2];
    d = digest[3];
    e = digest[4];

    w0_t[0] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w1_t[0] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w1_t[1] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;
    w1_t[2] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w1_t[3] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w2_t[0] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w2_t[1] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w2_t[2] = 0x80000000;
    w2_t[3] = 0;
    w3_t[0] = 0;
    w3_t[1] = 0;
    w3_t[2] = 0;
    w3_t[3] = 0;

    switch_buffer_by_offset_be (w0_t, w1_t, w2_t, w3_t, 9);

    w0_t[0] |= salt_buf0[0];
    w0_t[1] |= salt_buf0[1];
    w0_t[2] |= salt_buf0[2];

    w3_t[3] = 49 * 8;

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    /**
     * SHA1($salt.SHA1($salt.SHA1($pass)))
     */

    a = digest[0];
    b = digest[1];
    c = digest[2];
    d = digest[3];
    e = digest[4];

    w0_t[0] = uint_to_hex_lower8_le ((a >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w0_t[1] = uint_to_hex_lower8_le ((a >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((a >>  8) & 255) << 16;
    w0_t[2] = uint_to_hex_lower8_le ((b >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w0_t[3] = uint_to_hex_lower8_le ((b >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((b >>  8) & 255) << 16;
    w1_t[0] = uint_to_hex_lower8_le ((c >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w1_t[1] = uint_to_hex_lower8_le ((c >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((c >>  8) & 255) << 16;
    w1_t[2] = uint_to_hex_lower8_le ((d >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w1_t[3] = uint_to_hex_lower8_le ((d >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((d >>  8) & 255) << 16;
    w2_t[0] = uint_to_hex_lower8_le ((e >> 16) & 255) <<  0
            | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w2_t[1] = uint_to_hex_lower8_le ((e >>  0) & 255) <<  0
            | uint_to_hex_lower8_le ((e >>  8) & 255) << 16;
    w2_t[2] = 0x80000000;
    w2_t[3] = 0;
    w3_t[0] = 0;
    w3_t[1] = 0;
    w3_t[2] = 0;
    w3_t[3] = 0;

    switch_buffer_by_offset_be (w0_t, w1_t, w2_t, w3_t, 9);

    w0_t[0] |= salt_buf0[0];
    w0_t[1] |= salt_buf0[1];
    w0_t[2] |= salt_buf0[2];

    w3_t[3] = 49 * 8;

    digest[0] = SHA1M_A;
    digest[1] = SHA1M_B;
    digest[2] = SHA1M_C;
    digest[3] = SHA1M_D;
    digest[4] = SHA1M_E;

    sha1_transform (w0_t, w1_t, w2_t, w3_t, digest);

    COMPARE_S_SIMD (digest[3], digest[4], digest[2], digest[1]);
  }
}
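/*
 * Kernel entry points.  _m04/_m08/_m16 (multi-hash) and _s04/_s08/_s16
 * (single-hash) differ only in how many 32-bit password words they load
 * from pws[] before calling m13900m/m13900s.  Each work-group first fills
 * the shared l_bin2asc[] table consumed by uint_to_hex_lower8_le().
 */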
__kernel void m13900_m04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *ss, __global void *ess, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);
  const u32 lsz = get_local_size (0);

  /**
   * shared
   */

  __local u32 l_bin2asc[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    const u32 i0 = (i >> 0) & 15;
    const u32 i1 = (i >> 4) & 15;

    l_bin2asc[i] = ((i0 < 10) ? '0' + i0 : 'a' - 10 + i0) << 0
                 | ((i1 < 10) ? '0' + i1 : 'a' - 10 + i1) << 8;
  }

  barrier (CLK_LOCAL_MEM_FENCE);

  if (gid >= gid_max) return;

  /**
   * base
   */

  u32 w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32 w1[4];

  w1[0] = 0;
  w1[1] = 0;
  w1[2] = 0;
  w1[3] = 0;

  u32 w2[4];

  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;

  u32 w3[4];

  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = 0;

  const u32 pw_len = pws[gid].pw_len;

  /**
   * main
   */

  m13900m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, ss, ess, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, l_bin2asc);
}
__kernel void m13900_m08 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *ss, __global void *ess, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);
  const u32 lsz = get_local_size (0);

  /**
   * shared
   */

  __local u32 l_bin2asc[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    const u32 i0 = (i >> 0) & 15;
    const u32 i1 = (i >> 4) & 15;

    l_bin2asc[i] = ((i0 < 10) ? '0' + i0 : 'a' - 10 + i0) << 0
                 | ((i1 < 10) ? '0' + i1 : 'a' - 10 + i1) << 8;
  }

  barrier (CLK_LOCAL_MEM_FENCE);

  if (gid >= gid_max) return;

  /**
   * base
   */

  u32 w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32 w1[4];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  u32 w2[4];

  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;

  u32 w3[4];

  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = 0;

  const u32 pw_len = pws[gid].pw_len;

  /**
   * main
   */

  m13900m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, ss, ess, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, l_bin2asc);
}
__kernel void m13900_m16 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *ss, __global void *ess, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);
  const u32 lsz = get_local_size (0);

  /**
   * shared
   */

  __local u32 l_bin2asc[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    const u32 i0 = (i >> 0) & 15;
    const u32 i1 = (i >> 4) & 15;

    l_bin2asc[i] = ((i0 < 10) ? '0' + i0 : 'a' - 10 + i0) << 0
                 | ((i1 < 10) ? '0' + i1 : 'a' - 10 + i1) << 8;
  }

  barrier (CLK_LOCAL_MEM_FENCE);

  if (gid >= gid_max) return;

  /**
   * base
   */

  u32 w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32 w1[4];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  u32 w2[4];

  w2[0] = pws[gid].i[ 8];
  w2[1] = pws[gid].i[ 9];
  w2[2] = pws[gid].i[10];
  w2[3] = pws[gid].i[11];

  u32 w3[4];

  w3[0] = pws[gid].i[12];
  w3[1] = pws[gid].i[13];
  w3[2] = 0;
  w3[3] = 0;

  const u32 pw_len = pws[gid].pw_len;

  /**
   * main
   */

  m13900m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, ss, ess, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, l_bin2asc);
}
__kernel void m13900_s04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *ss, __global void *ess, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);
  const u32 lsz = get_local_size (0);

  /**
   * shared
   */

  __local u32 l_bin2asc[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    const u32 i0 = (i >> 0) & 15;
    const u32 i1 = (i >> 4) & 15;

    l_bin2asc[i] = ((i0 < 10) ? '0' + i0 : 'a' - 10 + i0) << 0
                 | ((i1 < 10) ? '0' + i1 : 'a' - 10 + i1) << 8;
  }

  barrier (CLK_LOCAL_MEM_FENCE);

  if (gid >= gid_max) return;

  /**
   * base
   */

  u32 w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32 w1[4];

  w1[0] = 0;
  w1[1] = 0;
  w1[2] = 0;
  w1[3] = 0;

  u32 w2[4];

  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;

  u32 w3[4];

  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = 0;

  const u32 pw_len = pws[gid].pw_len;

  /**
   * main
   */

  m13900s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, ss, ess, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, l_bin2asc);
}
__kernel void m13900_s08 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *ss, __global void *ess, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);
  const u32 lsz = get_local_size (0);

  /**
   * shared
   */

  __local u32 l_bin2asc[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    const u32 i0 = (i >> 0) & 15;
    const u32 i1 = (i >> 4) & 15;

    l_bin2asc[i] = ((i0 < 10) ? '0' + i0 : 'a' - 10 + i0) << 0
                 | ((i1 < 10) ? '0' + i1 : 'a' - 10 + i1) << 8;
  }

  barrier (CLK_LOCAL_MEM_FENCE);

  if (gid >= gid_max) return;

  /**
   * base
   */

  u32 w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32 w1[4];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  u32 w2[4];

  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;

  u32 w3[4];

  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = 0;

  const u32 pw_len = pws[gid].pw_len;

  /**
   * main
   */

  m13900s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, ss, ess, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, l_bin2asc);
}
__kernel void m13900_s16 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *ss, __global void *ess, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);
  const u32 lsz = get_local_size (0);

  /**
   * shared
   */

  __local u32 l_bin2asc[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    const u32 i0 = (i >> 0) & 15;
    const u32 i1 = (i >> 4) & 15;

    l_bin2asc[i] = ((i0 < 10) ? '0' + i0 : 'a' - 10 + i0) << 0
                 | ((i1 < 10) ? '0' + i1 : 'a' - 10 + i1) << 8;
  }

  barrier (CLK_LOCAL_MEM_FENCE);

  if (gid >= gid_max) return;

  /**
   * base
   */

  u32 w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32 w1[4];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  u32 w2[4];

  w2[0] = pws[gid].i[ 8];
  w2[1] = pws[gid].i[ 9];
  w2[2] = pws[gid].i[10];
  w2[3] = pws[gid].i[11];

  u32 w3[4];

  w3[0] = pws[gid].i[12];
  w3[1] = pws[gid].i[13];
  w3[2] = 0;
  w3[3] = 0;

  const u32 pw_len = pws[gid].pw_len;

  /**
   * main
   */

  m13900s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, ss, ess, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, l_bin2asc);
}