Mirror of https://github.com/hashcat/hashcat.git (synced 2024-11-29 19:38:18 +00:00).

Fix md5crypt speed on GTX1080

Commit 64eb9ca9ef by jsteube, 2018-02-17 11:45:05 +01:00 (parent commit: b8b816eada).

View File

@ -15,9 +15,6 @@
#define COMPARE_S "inc_comp_single.cl" #define COMPARE_S "inc_comp_single.cl"
#define COMPARE_M "inc_comp_multi.cl" #define COMPARE_M "inc_comp_multi.cl"
#define PUTCHAR_LE(a,p,c) ((u8 *)(a))[(p)] = (u8) (c)
#define GETCHAR_LE(a,p) ((u8 *)(a))[(p)]
#define md5crypt_magic 0x00243124u #define md5crypt_magic 0x00243124u
__kernel void m00500_init (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global md5crypt_tmp_t *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global const void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max) __kernel void m00500_init (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global md5crypt_tmp_t *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global const void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 
loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max)
@ -162,152 +159,50 @@ __kernel void m00500_loop (__global pw_t *pws, __global const kernel_rule_t *rul
* digest * digest
*/ */
u32 digest[4]; u32 digest[16] = { 0 };
digest[0] = tmps[gid].digest_buf[0]; digest[0] = tmps[gid].digest_buf[0];
digest[1] = tmps[gid].digest_buf[1]; digest[1] = tmps[gid].digest_buf[1];
digest[2] = tmps[gid].digest_buf[2]; digest[2] = tmps[gid].digest_buf[2];
digest[3] = tmps[gid].digest_buf[3]; digest[3] = tmps[gid].digest_buf[3];
u32 wpc_len[8];
wpc_len[0] = 16 + 0 + 0 + pw_len;
wpc_len[1] = pw_len + 0 + 0 + 16;
wpc_len[2] = 16 + salt_len + 0 + pw_len;
wpc_len[3] = pw_len + salt_len + 0 + 16;
wpc_len[4] = 16 + 0 + pw_len + pw_len;
wpc_len[5] = pw_len + 0 + pw_len + 16;
wpc_len[6] = 16 + salt_len + pw_len + pw_len;
wpc_len[7] = pw_len + salt_len + pw_len + 16;
// largest possible wpc_len[7] is not enough because of zero buffer loop
u32 wpc[8][64 + 64 + 64 + 64];
#ifdef _unroll
#pragma unroll
#endif
for (u32 i = 0; i < 8; i++)
{
u32 block_len = 0;
if (i & 1)
{
for (u32 j = 0; j < pw_len; j++)
{
PUTCHAR_LE (wpc[i], block_len++, GETCHAR_LE (w, j));
}
}
else
{
block_len += 16;
}
if (i & 2)
{
for (u32 j = 0; j < salt_len; j++)
{
PUTCHAR_LE (wpc[i], block_len++, GETCHAR_LE (s, j));
}
}
if (i & 4)
{
for (u32 j = 0; j < pw_len; j++)
{
PUTCHAR_LE (wpc[i], block_len++, GETCHAR_LE (w, j));
}
}
if (i & 1)
{
block_len += 16;
}
else
{
for (u32 j = 0; j < pw_len; j++)
{
PUTCHAR_LE (wpc[i], block_len++, GETCHAR_LE (w, j));
}
}
}
#ifdef _unroll
#pragma unroll
#endif
for (u32 i = 0; i < 8; i++)
{
u32 *z = wpc[i] + ((wpc_len[i] / 64) * 16);
truncate_block_16x4_le_S (z + 0, z + 4, z + 8, z + 12, wpc_len[i] & 63);
}
/** /**
* loop * loop
*/ */
for (u32 i = 0, j = loop_pos; i < loop_cnt; i++, j++) for (u32 i = 0, j = loop_pos; i < loop_cnt; i++, j++)
{ {
const u32 j1 = (j & 1) ? 1 : 0;
const u32 j3 = (j % 3) ? 2 : 0;
const u32 j7 = (j % 7) ? 4 : 0;
const u32 pc = j1 + j3 + j7;
if (j1)
{
const u32 off = wpc_len[pc] / 4;
const u32 mod = wpc_len[pc] % 4;
u32 *ptr = wpc[pc] + off - 4;
switch (mod)
{
case 0:
ptr[0] = digest[0];
ptr[1] = digest[1];
ptr[2] = digest[2];
ptr[3] = digest[3];
break;
case 1:
ptr[0] = (ptr[0] & 0xff) | (digest[0] << 8);
ptr[1] = (digest[0] >> 24) | (digest[1] << 8);
ptr[2] = (digest[1] >> 24) | (digest[2] << 8);
ptr[3] = (digest[2] >> 24) | (digest[3] << 8);
ptr[4] = (digest[3] >> 24);
break;
case 2:
ptr[0] = (ptr[0] & 0xffff) | (digest[0] << 16);
ptr[1] = (digest[0] >> 16) | (digest[1] << 16);
ptr[2] = (digest[1] >> 16) | (digest[2] << 16);
ptr[3] = (digest[2] >> 16) | (digest[3] << 16);
ptr[4] = (digest[3] >> 16);
break;
case 3:
ptr[0] = (ptr[0] & 0xffffff) | (digest[0] << 24);
ptr[1] = (digest[0] >> 8) | (digest[1] << 24);
ptr[2] = (digest[1] >> 8) | (digest[2] << 24);
ptr[3] = (digest[2] >> 8) | (digest[3] << 24);
ptr[4] = (digest[3] >> 8);
break;
}
}
else
{
wpc[pc][0] = digest[0];
wpc[pc][1] = digest[1];
wpc[pc][2] = digest[2];
wpc[pc][3] = digest[3];
}
md5_ctx_t md5_ctx; md5_ctx_t md5_ctx;
md5_init (&md5_ctx); md5_init (&md5_ctx);
md5_update (&md5_ctx, wpc[pc], wpc_len[pc]); if (j & 1)
{
md5_update (&md5_ctx, w, pw_len);
}
else
{
md5_update (&md5_ctx, digest, 16);
}
if (j % 3)
{
md5_update (&md5_ctx, s, salt_len);
}
if (j % 7)
{
md5_update (&md5_ctx, w, pw_len);
}
if (j & 1)
{
md5_update (&md5_ctx, digest, 16);
}
else
{
md5_update (&md5_ctx, w, pw_len);
}
md5_final (&md5_ctx); md5_final (&md5_ctx);