/**
 * Author......: Jens Steube <jens.steube@gmail.com>
 * License.....: MIT
 */
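
/**
 * Kernel for hashcat hash mode 1460: HMAC-SHA256 with the salt used as the HMAC key
 * and the password candidate as the message. This is the brute-force / mask attack
 * code path: candidates are read from bfs_buf, the salt from salt_bufs.
 */
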
#define _SHA256_

#include "include/constants.h"
#include "include/kernel_vendor.h"
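
/*
 * Vector width per AMD VLIW architecture: VLIW1 selects VECT_SIZE1 (scalar u32x),
 * VLIW4 and VLIW5 select VECT_SIZE2 (two-wide u32x). The matching VECT_COMPARE_S /
 * VECT_COMPARE_M includes below provide the digest comparison code for that width.
 */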
#ifdef VLIW1
#define VECT_SIZE1
#endif

#ifdef VLIW4
#define VECT_SIZE2
#endif

#ifdef VLIW5
#define VECT_SIZE2
#endif
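
/*
 * DGST_R0..DGST_R3 name the digest words used by the fast-compare path; they match
 * the r0..r3 extraction in the search loops below (digest[3], digest[7], digest[2],
 * digest[6]).
 */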
#define DGST_R0 3
#define DGST_R1 7
#define DGST_R2 2
#define DGST_R3 6

#include "include/kernel_functions.c"
#include "types_amd.c"
#include "common_amd.c"

#ifdef VECT_SIZE1
#define VECT_COMPARE_S "check_single_vect1_comp4.c"
#define VECT_COMPARE_M "check_multi_vect1_comp4.c"
#endif

#ifdef VECT_SIZE2
#define VECT_COMPARE_S "check_single_vect2_comp4.c"
#define VECT_COMPARE_M "check_multi_vect2_comp4.c"
#endif

#ifdef VECT_SIZE4
#define VECT_COMPARE_S "check_single_vect4_comp4.c"
#define VECT_COMPARE_M "check_multi_vect4_comp4.c"
#endif
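
/*
 * SHA-256 round constants K[0..63]; the SHA256Cxx values are defined in
 * include/constants.h.
 */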
__constant u32 k_sha256[64] =
{
  SHA256C00, SHA256C01, SHA256C02, SHA256C03,
  SHA256C04, SHA256C05, SHA256C06, SHA256C07,
  SHA256C08, SHA256C09, SHA256C0a, SHA256C0b,
  SHA256C0c, SHA256C0d, SHA256C0e, SHA256C0f,
  SHA256C10, SHA256C11, SHA256C12, SHA256C13,
  SHA256C14, SHA256C15, SHA256C16, SHA256C17,
  SHA256C18, SHA256C19, SHA256C1a, SHA256C1b,
  SHA256C1c, SHA256C1d, SHA256C1e, SHA256C1f,
  SHA256C20, SHA256C21, SHA256C22, SHA256C23,
  SHA256C24, SHA256C25, SHA256C26, SHA256C27,
  SHA256C28, SHA256C29, SHA256C2a, SHA256C2b,
  SHA256C2c, SHA256C2d, SHA256C2e, SHA256C2f,
  SHA256C30, SHA256C31, SHA256C32, SHA256C33,
  SHA256C34, SHA256C35, SHA256C36, SHA256C37,
  SHA256C38, SHA256C39, SHA256C3a, SHA256C3b,
  SHA256C3c, SHA256C3d, SHA256C3e, SHA256C3f,
};
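
/*
 * One SHA-256 compression over a single 64-byte block held in w0..w3 (big-endian
 * words), updating digest[0..7] in place. The message schedule is expanded 16 words
 * at a time by ROUND_EXPAND and consumed by ROUND_STEP.
 */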
static void sha256_transform (const u32x w0[4], const u32x w1[4], const u32x w2[4], const u32x w3[4], u32x digest[8])
{
  u32x a = digest[0];
  u32x b = digest[1];
  u32x c = digest[2];
  u32x d = digest[3];
  u32x e = digest[4];
  u32x f = digest[5];
  u32x g = digest[6];
  u32x h = digest[7];

  u32x w0_t = w0[0];
  u32x w1_t = w0[1];
  u32x w2_t = w0[2];
  u32x w3_t = w0[3];
  u32x w4_t = w1[0];
  u32x w5_t = w1[1];
  u32x w6_t = w1[2];
  u32x w7_t = w1[3];
  u32x w8_t = w2[0];
  u32x w9_t = w2[1];
  u32x wa_t = w2[2];
  u32x wb_t = w2[3];
  u32x wc_t = w3[0];
  u32x wd_t = w3[1];
  u32x we_t = w3[2];
  u32x wf_t = w3[3];

  #define ROUND_EXPAND()                            \
  {                                                 \
    w0_t = SHA256_EXPAND (we_t, w9_t, w1_t, w0_t);  \
    w1_t = SHA256_EXPAND (wf_t, wa_t, w2_t, w1_t);  \
    w2_t = SHA256_EXPAND (w0_t, wb_t, w3_t, w2_t);  \
    w3_t = SHA256_EXPAND (w1_t, wc_t, w4_t, w3_t);  \
    w4_t = SHA256_EXPAND (w2_t, wd_t, w5_t, w4_t);  \
    w5_t = SHA256_EXPAND (w3_t, we_t, w6_t, w5_t);  \
    w6_t = SHA256_EXPAND (w4_t, wf_t, w7_t, w6_t);  \
    w7_t = SHA256_EXPAND (w5_t, w0_t, w8_t, w7_t);  \
    w8_t = SHA256_EXPAND (w6_t, w1_t, w9_t, w8_t);  \
    w9_t = SHA256_EXPAND (w7_t, w2_t, wa_t, w9_t);  \
    wa_t = SHA256_EXPAND (w8_t, w3_t, wb_t, wa_t);  \
    wb_t = SHA256_EXPAND (w9_t, w4_t, wc_t, wb_t);  \
    wc_t = SHA256_EXPAND (wa_t, w5_t, wd_t, wc_t);  \
    wd_t = SHA256_EXPAND (wb_t, w6_t, we_t, wd_t);  \
    we_t = SHA256_EXPAND (wc_t, w7_t, wf_t, we_t);  \
    wf_t = SHA256_EXPAND (wd_t, w8_t, w0_t, wf_t);  \
  }

  #define ROUND_STEP(i)                                                                  \
  {                                                                                      \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, a, b, c, d, e, f, g, h, w0_t, k_sha256[i + 0]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, h, a, b, c, d, e, f, g, w1_t, k_sha256[i + 1]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, g, h, a, b, c, d, e, f, w2_t, k_sha256[i + 2]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, f, g, h, a, b, c, d, e, w3_t, k_sha256[i + 3]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, e, f, g, h, a, b, c, d, w4_t, k_sha256[i + 4]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, d, e, f, g, h, a, b, c, w5_t, k_sha256[i + 5]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, w6_t, k_sha256[i + 6]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, w7_t, k_sha256[i + 7]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, a, b, c, d, e, f, g, h, w8_t, k_sha256[i + 8]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, h, a, b, c, d, e, f, g, w9_t, k_sha256[i + 9]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, g, h, a, b, c, d, e, f, wa_t, k_sha256[i + 10]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, f, g, h, a, b, c, d, e, wb_t, k_sha256[i + 11]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, e, f, g, h, a, b, c, d, wc_t, k_sha256[i + 12]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, d, e, f, g, h, a, b, c, wd_t, k_sha256[i + 13]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, we_t, k_sha256[i + 14]); \
    SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, wf_t, k_sha256[i + 15]); \
  }

  ROUND_STEP (0);

  #pragma unroll
  for (int i = 16; i < 64; i += 16)
  {
    ROUND_EXPAND (); ROUND_STEP (i);
  }

  digest[0] += a;
  digest[1] += b;
  digest[2] += c;
  digest[3] += d;
  digest[4] += e;
  digest[5] += f;
  digest[6] += g;
  digest[7] += h;
}
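
/*
 * Precompute the two HMAC pad states from the key block in w0..w3. The key words are
 * XORed with 0x36 (ipad) and compressed; the second XOR uses 0x6a because the words
 * still carry the 0x36 mask, and 0x36 ^ 0x6a = 0x5c, the HMAC opad byte. On return,
 * ipad[] and opad[] hold the two precomputed SHA-256 states and w0..w3 are left XORed
 * with 0x5c.
 */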
static void hmac_sha256_pad (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], u32x ipad[8], u32x opad[8])
{
  w0[0] = w0[0] ^ 0x36363636;
  w0[1] = w0[1] ^ 0x36363636;
  w0[2] = w0[2] ^ 0x36363636;
  w0[3] = w0[3] ^ 0x36363636;
  w1[0] = w1[0] ^ 0x36363636;
  w1[1] = w1[1] ^ 0x36363636;
  w1[2] = w1[2] ^ 0x36363636;
  w1[3] = w1[3] ^ 0x36363636;
  w2[0] = w2[0] ^ 0x36363636;
  w2[1] = w2[1] ^ 0x36363636;
  w2[2] = w2[2] ^ 0x36363636;
  w2[3] = w2[3] ^ 0x36363636;
  w3[0] = w3[0] ^ 0x36363636;
  w3[1] = w3[1] ^ 0x36363636;
  w3[2] = w3[2] ^ 0x36363636;
  w3[3] = w3[3] ^ 0x36363636;

  ipad[0] = SHA256M_A;
  ipad[1] = SHA256M_B;
  ipad[2] = SHA256M_C;
  ipad[3] = SHA256M_D;
  ipad[4] = SHA256M_E;
  ipad[5] = SHA256M_F;
  ipad[6] = SHA256M_G;
  ipad[7] = SHA256M_H;

  sha256_transform (w0, w1, w2, w3, ipad);

  w0[0] = w0[0] ^ 0x6a6a6a6a;
  w0[1] = w0[1] ^ 0x6a6a6a6a;
  w0[2] = w0[2] ^ 0x6a6a6a6a;
  w0[3] = w0[3] ^ 0x6a6a6a6a;
  w1[0] = w1[0] ^ 0x6a6a6a6a;
  w1[1] = w1[1] ^ 0x6a6a6a6a;
  w1[2] = w1[2] ^ 0x6a6a6a6a;
  w1[3] = w1[3] ^ 0x6a6a6a6a;
  w2[0] = w2[0] ^ 0x6a6a6a6a;
  w2[1] = w2[1] ^ 0x6a6a6a6a;
  w2[2] = w2[2] ^ 0x6a6a6a6a;
  w2[3] = w2[3] ^ 0x6a6a6a6a;
  w3[0] = w3[0] ^ 0x6a6a6a6a;
  w3[1] = w3[1] ^ 0x6a6a6a6a;
  w3[2] = w3[2] ^ 0x6a6a6a6a;
  w3[3] = w3[3] ^ 0x6a6a6a6a;

  opad[0] = SHA256M_A;
  opad[1] = SHA256M_B;
  opad[2] = SHA256M_C;
  opad[3] = SHA256M_D;
  opad[4] = SHA256M_E;
  opad[5] = SHA256M_F;
  opad[6] = SHA256M_G;
  opad[7] = SHA256M_H;

  sha256_transform (w0, w1, w2, w3, opad);
}
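
/*
 * HMAC finalization for a message that fits in one block: compress w0..w3 on top of
 * the ipad state, then feed the 32-byte inner digest (plus padding and the fixed
 * length (64 + 32) * 8 bits) through a second compression on top of the opad state.
 */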
static void hmac_sha256_run (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], u32x ipad[8], u32x opad[8], u32x digest[8])
{
  digest[0] = ipad[0];
  digest[1] = ipad[1];
  digest[2] = ipad[2];
  digest[3] = ipad[3];
  digest[4] = ipad[4];
  digest[5] = ipad[5];
  digest[6] = ipad[6];
  digest[7] = ipad[7];

  sha256_transform (w0, w1, w2, w3, digest);

  w0[0] = digest[0];
  w0[1] = digest[1];
  w0[2] = digest[2];
  w0[3] = digest[3];
  w1[0] = digest[4];
  w1[1] = digest[5];
  w1[2] = digest[6];
  w1[3] = digest[7];
  w2[0] = 0x80000000;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;
  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = (64 + 32) * 8;

  digest[0] = opad[0];
  digest[1] = opad[1];
  digest[2] = opad[2];
  digest[3] = opad[3];
  digest[4] = opad[4];
  digest[5] = opad[5];
  digest[6] = opad[6];
  digest[7] = opad[7];

  sha256_transform (w0, w1, w2, w3, digest);
}
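
/*
 * Multi-hash search body: the salt words are byte-swapped (swap_workaround) into
 * big-endian form and padded into ipad/opad once per work-item, then each candidate
 * from bfs_buf is hashed and the selected digest words r0..r3 are checked against the
 * loaded hashes via the VECT_COMPARE_M include.
 */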
static void m01460m (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const u32 pw_len, __global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esal_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset)
{
  /**
   * modifier
   */

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);

  /**
   * salt
   */

  u32 salt_buf0[4];

  salt_buf0[0] = salt_bufs[salt_pos].salt_buf[ 0];
  salt_buf0[1] = salt_bufs[salt_pos].salt_buf[ 1];
  salt_buf0[2] = salt_bufs[salt_pos].salt_buf[ 2];
  salt_buf0[3] = salt_bufs[salt_pos].salt_buf[ 3];

  u32 salt_buf1[4];

  salt_buf1[0] = salt_bufs[salt_pos].salt_buf[ 4];
  salt_buf1[1] = salt_bufs[salt_pos].salt_buf[ 5];
  salt_buf1[2] = salt_bufs[salt_pos].salt_buf[ 6];
  salt_buf1[3] = salt_bufs[salt_pos].salt_buf[ 7];

  /**
   * pads
   */

  u32x w0_t[4];

  w0_t[0] = swap_workaround (salt_buf0[0]);
  w0_t[1] = swap_workaround (salt_buf0[1]);
  w0_t[2] = swap_workaround (salt_buf0[2]);
  w0_t[3] = swap_workaround (salt_buf0[3]);

  u32x w1_t[4];

  w1_t[0] = swap_workaround (salt_buf1[0]);
  w1_t[1] = swap_workaround (salt_buf1[1]);
  w1_t[2] = swap_workaround (salt_buf1[2]);
  w1_t[3] = swap_workaround (salt_buf1[3]);

  u32x w2_t[4];

  w2_t[0] = 0;
  w2_t[1] = 0;
  w2_t[2] = 0;
  w2_t[3] = 0;

  u32x w3_t[4];

  w3_t[0] = 0;
  w3_t[1] = 0;
  w3_t[2] = 0;
  w3_t[3] = 0;

  u32x ipad[8];
  u32x opad[8];

  hmac_sha256_pad (w0_t, w1_t, w2_t, w3_t, ipad, opad);

  /**
   * loop
   */

  u32x w0l = w0[0];

  for (u32 il_pos = 0; il_pos < bfs_cnt; il_pos++)
  {
    const u32 w0r = bfs_buf[il_pos].i;

    w0[0] = w0l | w0r;

    w0_t[0] = w0[0];
    w0_t[1] = w0[1];
    w0_t[2] = w0[2];
    w0_t[3] = w0[3];
    w1_t[0] = w1[0];
    w1_t[1] = w1[1];
    w1_t[2] = w1[2];
    w1_t[3] = w1[3];
    w2_t[0] = w2[0];
    w2_t[1] = w2[1];
    w2_t[2] = w2[2];
    w2_t[3] = w2[3];
    w3_t[0] = w3[0];
    w3_t[1] = w3[1];
    w3_t[2] = 0;
    w3_t[3] = (64 + pw_len) * 8;

    u32x digest[8];

    hmac_sha256_run (w0_t, w1_t, w2_t, w3_t, ipad, opad, digest);

    const u32x r0 = digest[3];
    const u32x r1 = digest[7];
    const u32x r2 = digest[2];
    const u32x r3 = digest[6];

    #include VECT_COMPARE_M
  }
}
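
/*
 * Single-hash variant of the search body: identical setup, but the four target
 * digest words are loaded into search[] up front so the VECT_COMPARE_S include can
 * reject non-matching candidates early.
 */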
static void m01460s (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const u32 pw_len, __global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset)
{
  /**
   * modifier
   */

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);

  /**
   * salt
   */

  u32 salt_buf0[4];

  salt_buf0[0] = salt_bufs[salt_pos].salt_buf[ 0];
  salt_buf0[1] = salt_bufs[salt_pos].salt_buf[ 1];
  salt_buf0[2] = salt_bufs[salt_pos].salt_buf[ 2];
  salt_buf0[3] = salt_bufs[salt_pos].salt_buf[ 3];

  u32 salt_buf1[4];

  salt_buf1[0] = salt_bufs[salt_pos].salt_buf[ 4];
  salt_buf1[1] = salt_bufs[salt_pos].salt_buf[ 5];
  salt_buf1[2] = salt_bufs[salt_pos].salt_buf[ 6];
  salt_buf1[3] = salt_bufs[salt_pos].salt_buf[ 7];

  /**
   * pads
   */

  u32x w0_t[4];

  w0_t[0] = swap_workaround (salt_buf0[0]);
  w0_t[1] = swap_workaround (salt_buf0[1]);
  w0_t[2] = swap_workaround (salt_buf0[2]);
  w0_t[3] = swap_workaround (salt_buf0[3]);

  u32x w1_t[4];

  w1_t[0] = swap_workaround (salt_buf1[0]);
  w1_t[1] = swap_workaround (salt_buf1[1]);
  w1_t[2] = swap_workaround (salt_buf1[2]);
  w1_t[3] = swap_workaround (salt_buf1[3]);

  u32x w2_t[4];

  w2_t[0] = 0;
  w2_t[1] = 0;
  w2_t[2] = 0;
  w2_t[3] = 0;

  u32x w3_t[4];

  w3_t[0] = 0;
  w3_t[1] = 0;
  w3_t[2] = 0;
  w3_t[3] = 0;

  u32x ipad[8];
  u32x opad[8];

  hmac_sha256_pad (w0_t, w1_t, w2_t, w3_t, ipad, opad);

  /**
   * digest
   */

  const u32 search[4] =
  {
    digests_buf[digests_offset].digest_buf[DGST_R0],
    digests_buf[digests_offset].digest_buf[DGST_R1],
    digests_buf[digests_offset].digest_buf[DGST_R2],
    digests_buf[digests_offset].digest_buf[DGST_R3]
  };

  /**
   * loop
   */

  u32x w0l = w0[0];

  for (u32 il_pos = 0; il_pos < bfs_cnt; il_pos++)
  {
    const u32 w0r = bfs_buf[il_pos].i;

    w0[0] = w0l | w0r;

    w0_t[0] = w0[0];
    w0_t[1] = w0[1];
    w0_t[2] = w0[2];
    w0_t[3] = w0[3];
    w1_t[0] = w1[0];
    w1_t[1] = w1[1];
    w1_t[2] = w1[2];
    w1_t[3] = w1[3];
    w2_t[0] = w2[0];
    w2_t[1] = w2[1];
    w2_t[2] = w2[2];
    w2_t[3] = w2[3];
    w3_t[0] = w3[0];
    w3_t[1] = w3[1];
    w3_t[2] = 0;
    w3_t[3] = (64 + pw_len) * 8;

    u32x digest[8];

    hmac_sha256_run (w0_t, w1_t, w2_t, w3_t, ipad, opad, digest);

    const u32x r0 = digest[3];
    const u32x r1 = digest[7];
    const u32x r2 = digest[2];
    const u32x r3 = digest[6];

    #include VECT_COMPARE_S
  }
}
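
/*
 * Kernel entry points. The _m* kernels drive the multi-hash loop (m01460m) and the
 * _s* kernels the single-hash loop (m01460s); the 04/08/16 suffix reflects how many
 * 32-bit words of the password candidate are loaded from pws[] (4, 8, or 14 words,
 * i.e. progressively longer passwords), with the remaining words zeroed.
 */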
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01460_m04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * base
   */

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  u32x w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32x w1[4];

  w1[0] = 0;
  w1[1] = 0;
  w1[2] = 0;
  w1[3] = 0;

  u32x w2[4];

  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;

  u32x w3[4];

  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = 0;

  const u32 pw_len = pws[gid].pw_len;

  /**
   * main
   */

m01460m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
}

__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01460_m08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * base
   */

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  u32x w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32x w1[4];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  u32x w2[4];

  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;

  u32x w3[4];

  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = 0;

  const u32 pw_len = pws[gid].pw_len;

  /**
   * main
   */

m01460m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
}

__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01460_m16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * base
   */

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  u32x w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32x w1[4];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  u32x w2[4];

  w2[0] = pws[gid].i[ 8];
  w2[1] = pws[gid].i[ 9];
  w2[2] = pws[gid].i[10];
  w2[3] = pws[gid].i[11];

  u32x w3[4];

  w3[0] = pws[gid].i[12];
  w3[1] = pws[gid].i[13];
  w3[2] = 0;
  w3[3] = 0;

  const u32 pw_len = pws[gid].pw_len;

  /**
   * main
   */

m01460m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
}

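/*
 * Single-hash (_s*) entry points follow; they mirror the _m* kernels above but call
 * m01460s instead of m01460m.
 */
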
__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01460_s04 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * base
   */

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  u32x w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32x w1[4];

  w1[0] = 0;
  w1[1] = 0;
  w1[2] = 0;
  w1[3] = 0;

  u32x w2[4];

  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;

  u32x w3[4];

  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = 0;

  const u32 pw_len = pws[gid].pw_len;

  /**
   * main
   */

m01460s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
}

__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01460_s08 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * base
   */

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  u32x w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32x w1[4];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  u32x w2[4];

  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;

  u32x w3[4];

  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = 0;

  const u32 pw_len = pws[gid].pw_len;

  /**
   * main
   */

m01460s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
}

__kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m01460_s16 (__global pw_t *pws, __global gpu_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * base
   */

  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  u32x w0[4];

  w0[0] = pws[gid].i[ 0];
  w0[1] = pws[gid].i[ 1];
  w0[2] = pws[gid].i[ 2];
  w0[3] = pws[gid].i[ 3];

  u32x w1[4];

  w1[0] = pws[gid].i[ 4];
  w1[1] = pws[gid].i[ 5];
  w1[2] = pws[gid].i[ 6];
  w1[3] = pws[gid].i[ 7];

  u32x w2[4];

  w2[0] = pws[gid].i[ 8];
  w2[1] = pws[gid].i[ 9];
  w2[2] = pws[gid].i[10];
  w2[3] = pws[gid].i[11];

  u32x w3[4];

  w3[0] = pws[gid].i[12];
  w3[1] = pws[gid].i[13];
  w3[2] = 0;
  w3[3] = 0;

  const u32 pw_len = pws[gid].pw_len;

  /**
   * main
   */

m01460s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_scryptV_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, bfs_cnt, digests_cnt, digests_offset);
}