Mirror of https://github.com/hashcat/hashcat.git (synced 2025-07-30 18:39:15 +00:00)

- Updated kernel declarations from "KERNEL_FQ void HC_ATTR_SEQ" to "KERNEL_FQ KERNEL_FA void". Please update your custom plugin kernels accordingly (see the sketch below).
- Added spilling size as a factor when calculating usable memory per device. This is based on undocumented variables and may not be 100% accurate, but it works well in practice.
- Added a compiler hint to scrypt-based kernels indicating the guaranteed maximum thread count per kernel invocation.
- Removed redundant kernel code 29800, as it is identical to 27700, and updated the plugin.
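For a custom plugin kernel, the declaration change is a straight swap of the attribute macros; a minimal before/after sketch (m99900_mxx and myesalt_t are hypothetical placeholders for your own kernel and esalt struct):

// before
KERNEL_FQ void HC_ATTR_SEQ m99900_mxx (KERN_ATTR_VECTOR_ESALT (myesalt_t))

// after
KERNEL_FQ KERNEL_FA void m99900_mxx (KERN_ATTR_VECTOR_ESALT (myesalt_t))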
408 lines
8.5 KiB
OpenCL C
/**
 * Author......: See docs/credits.txt
 * License.....: MIT
 */

//shared mem too small
//#define NEW_SIMD_CODE

#ifdef KERNEL_STATIC
#include M2S(INCLUDE_PATH/inc_vendor.h)
#include M2S(INCLUDE_PATH/inc_types.h)
#include M2S(INCLUDE_PATH/inc_platform.cl)
#include M2S(INCLUDE_PATH/inc_common.cl)
#include M2S(INCLUDE_PATH/inc_hash_md4.cl)
#include M2S(INCLUDE_PATH/inc_hash_md5.cl)
#include M2S(INCLUDE_PATH/inc_cipher_rc4.cl)
#endif

typedef struct krb5tgs
{
  u32 account_info[512];
  u32 checksum[4];
  u32 edata2[5120];
  u32 edata2_len;
  u32 format;

} krb5tgs_t;
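
/*
 * informal field notes for the esalt above:
 *   account_info - user/realm/SPN data parsed from the hash line
 *   checksum     - 16-byte HMAC-MD5 (edata1) the candidate key must reproduce
 *   edata2       - RC4-encrypted ticket enc-part
 *   edata2_len   - length of edata2 in bytes
 *   format       - input format variant of the $krb5tgs$ hash line (assumed)
 */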

DECLSPEC int decrypt_and_check (LOCAL_AS u32 *S, PRIVATE_AS u32 *data, GLOBAL_AS const u32 *edata2, const u32 edata2_len, PRIVATE_AS const u32 *K2, PRIVATE_AS const u32 *checksum, const u64 lid)
{
  rc4_init_128 (S, data, lid);

  u32 out0[4];
  u32 out1[4];

  u8 i = 0;
  u8 j = 0;

  /*
    The first 8 bytes are the nonce, followed by ASN.1 structures (DER encoding: type-length-data).

    If the length is >= 128 bytes:
      the length field is 2 bytes, the type is \x63\x82 (encode_krb5_enc_tkt_part) and the data is an ASN.1 sequence \x30\x82
    else:
      the length field is 1 byte, the type is \x63\x81 and the data is an ASN.1 sequence \x30\x81

    Subsequent headers follow the same ASN.1 "type-length-data" scheme.
  */
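
  // out0[] receives plaintext bytes 0..15 and out1[] bytes 16..31, packed
  // little-endian into u32 words, so out0[2] carries bytes 8..11 and out1[0]
  // carries bytes 16..19; the masked comparisons below match the expected
  // ASN.1 tag/length bytes at those offsets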

  j = rc4_next_16_global (S, i, j, edata2 + 0, out0, lid); i += 16;

  if (((out0[2] & 0xff00ffff) != 0x30008163) && ((out0[2] & 0x0000ffff) != 0x00008263)) return 0;

  j = rc4_next_16_global (S, i, j, edata2 + 4, out1, lid); i += 16;

  if (((out1[0] & 0x00ffffff) != 0x00000503) && (out1[0] != 0x050307A0)) return 0;

  rc4_init_128 (S, data, lid);

  i = 0;
  j = 0;

  // init hmac

  u32 w0[4];
  u32 w1[4];
  u32 w2[4];
  u32 w3[4];

  w0[0] = K2[0];
  w0[1] = K2[1];
  w0[2] = K2[2];
  w0[3] = K2[3];
  w1[0] = 0;
  w1[1] = 0;
  w1[2] = 0;
  w1[3] = 0;
  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;
  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = 0;

  md5_hmac_ctx_t ctx;

  md5_hmac_init_64 (&ctx, w0, w1, w2, w3);

  int edata2_left;

  for (edata2_left = edata2_len; edata2_left >= 64; edata2_left -= 64)
  {
    j = rc4_next_16_global (S, i, j, edata2, w0, lid); i += 16; edata2 += 4;
    j = rc4_next_16_global (S, i, j, edata2, w1, lid); i += 16; edata2 += 4;
    j = rc4_next_16_global (S, i, j, edata2, w2, lid); i += 16; edata2 += 4;
    j = rc4_next_16_global (S, i, j, edata2, w3, lid); i += 16; edata2 += 4;

    md5_hmac_update_64 (&ctx, w0, w1, w2, w3, 64);
  }

  w0[0] = 0;
  w0[1] = 0;
  w0[2] = 0;
  w0[3] = 0;
  w1[0] = 0;
  w1[1] = 0;
  w1[2] = 0;
  w1[3] = 0;
  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;
  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = 0;

  if (edata2_left < 16)
  {
    j = rc4_next_16_global (S, i, j, edata2, w0, lid); i += 16; edata2 += 4;

    truncate_block_4x4_le_S (w0, edata2_left & 0xf);
  }
  else if (edata2_left < 32)
  {
    j = rc4_next_16_global (S, i, j, edata2, w0, lid); i += 16; edata2 += 4;
    j = rc4_next_16_global (S, i, j, edata2, w1, lid); i += 16; edata2 += 4;

    truncate_block_4x4_le_S (w1, edata2_left & 0xf);
  }
  else if (edata2_left < 48)
  {
    j = rc4_next_16_global (S, i, j, edata2, w0, lid); i += 16; edata2 += 4;
    j = rc4_next_16_global (S, i, j, edata2, w1, lid); i += 16; edata2 += 4;
    j = rc4_next_16_global (S, i, j, edata2, w2, lid); i += 16; edata2 += 4;

    truncate_block_4x4_le_S (w2, edata2_left & 0xf);
  }
  else
  {
    j = rc4_next_16_global (S, i, j, edata2, w0, lid); i += 16; edata2 += 4;
    j = rc4_next_16_global (S, i, j, edata2, w1, lid); i += 16; edata2 += 4;
    j = rc4_next_16_global (S, i, j, edata2, w2, lid); i += 16; edata2 += 4;
    j = rc4_next_16_global (S, i, j, edata2, w3, lid); i += 16; edata2 += 4;

    truncate_block_4x4_le_S (w3, edata2_left & 0xf);
  }

  md5_hmac_update_64 (&ctx, w0, w1, w2, w3, edata2_left);

  md5_hmac_final (&ctx);

  if (checksum[0] != ctx.opad.h[0]) return 0;
  if (checksum[1] != ctx.opad.h[1]) return 0;
  if (checksum[2] != ctx.opad.h[2]) return 0;
  if (checksum[3] != ctx.opad.h[3]) return 0;

  return 1;
}
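
/*
 * kerb_prepare below derives the RC4-HMAC working keys from the NTLM hash K
 * (a summary of what the code does, for orientation):
 *   K1 = HMAC-MD5 (K, T)          with T = 2 (TGS-REP ticket key usage),
 *                                  encoded as a 4-byte little-endian value
 *   K3 = HMAC-MD5 (K1, checksum)
 * K3 (returned in "digest") keys the RC4 decryption of edata2, while K1
 * (returned in the "K2" output parameter) is used by decrypt_and_check to
 * recompute the HMAC-MD5 over the decrypted data and compare it against
 * "checksum".
 */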

DECLSPEC void kerb_prepare (PRIVATE_AS const u32 *K, PRIVATE_AS const u32 *checksum, PRIVATE_AS u32 *digest, PRIVATE_AS u32 *K2)
{
  // K1 = MD5_HMAC (K, 2); with 2 encoded as little endian on 4 bytes (02 00 00 00 in hex)

  u32 w0[4];
  u32 w1[4];
  u32 w2[4];
  u32 w3[4];

  w0[0] = K[0];
  w0[1] = K[1];
  w0[2] = K[2];
  w0[3] = K[3];
  w1[0] = 0;
  w1[1] = 0;
  w1[2] = 0;
  w1[3] = 0;
  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;
  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = 0;

  md5_hmac_ctx_t ctx1;

  md5_hmac_init_64 (&ctx1, w0, w1, w2, w3);

  w0[0] = 2;
  w0[1] = 0;
  w0[2] = 0;
  w0[3] = 0;
  w1[0] = 0;
  w1[1] = 0;
  w1[2] = 0;
  w1[3] = 0;
  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;
  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = 0;

  md5_hmac_update_64 (&ctx1, w0, w1, w2, w3, 4);

  md5_hmac_final (&ctx1);

  w0[0] = ctx1.opad.h[0];
  w0[1] = ctx1.opad.h[1];
  w0[2] = ctx1.opad.h[2];
  w0[3] = ctx1.opad.h[3];
  w1[0] = 0;
  w1[1] = 0;
  w1[2] = 0;
  w1[3] = 0;
  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;
  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = 0;

  md5_hmac_ctx_t ctx;

  md5_hmac_init_64 (&ctx, w0, w1, w2, w3);

  w0[0] = checksum[0];
  w0[1] = checksum[1];
  w0[2] = checksum[2];
  w0[3] = checksum[3];
  w1[0] = 0;
  w1[1] = 0;
  w1[2] = 0;
  w1[3] = 0;
  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;
  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = 0;

  md5_hmac_update_64 (&ctx, w0, w1, w2, w3, 16);

  md5_hmac_final (&ctx);

  digest[0] = ctx.opad.h[0];
  digest[1] = ctx.opad.h[1];
  digest[2] = ctx.opad.h[2];
  digest[3] = ctx.opad.h[3];

  K2[0] = ctx1.opad.h[0];
  K2[1] = ctx1.opad.h[1];
  K2[2] = ctx1.opad.h[2];
  K2[3] = ctx1.opad.h[3];
}

KERNEL_FQ KERNEL_FA void m13100_mxx (KERN_ATTR_VECTOR_ESALT (krb5tgs_t))
{
  /**
   * modifier
   */

  const u64 lid = get_local_id (0);
  const u64 gid = get_global_id (0);

  if (gid >= GID_CNT) return;

  /**
   * base
   */

  const u32 pw_len = pws[gid].pw_len;

  u32x w[64] = { 0 };

  for (u32 i = 0, idx = 0; i < pw_len; i += 4, idx += 1)
  {
    w[idx] = pws[gid].i[idx];
  }
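
  // S holds one 256-byte RC4 state (64 u32 words) per work-item in local
  // memory; FIXED_LOCAL_SIZE is the fixed work-group size used to dimension it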

  LOCAL_VK u32 S[64 * FIXED_LOCAL_SIZE];

  u32 checksum[4];

  checksum[0] = esalt_bufs[DIGESTS_OFFSET_HOST].checksum[0];
  checksum[1] = esalt_bufs[DIGESTS_OFFSET_HOST].checksum[1];
  checksum[2] = esalt_bufs[DIGESTS_OFFSET_HOST].checksum[2];
  checksum[3] = esalt_bufs[DIGESTS_OFFSET_HOST].checksum[3];

  /**
   * loop
   */

  u32x w0l = w[0];

  for (u32 il_pos = 0; il_pos < IL_CNT; il_pos += VECT_SIZE)
  {
    const u32x w0r = words_buf_r[il_pos / VECT_SIZE];

    const u32x w0 = w0l | w0r;

    w[0] = w0;

    md4_ctx_t ctx;

    md4_init (&ctx);

    md4_update_utf16le (&ctx, w, pw_len);

    md4_final (&ctx);

    u32 digest[4];

    u32 K2[4];

    kerb_prepare (ctx.h, checksum, digest, K2);

    if (decrypt_and_check (S, digest, esalt_bufs[DIGESTS_OFFSET_HOST].edata2, esalt_bufs[DIGESTS_OFFSET_HOST].edata2_len, K2, checksum, lid) == 1)
    {
      if (hc_atomic_inc (&hashes_shown[DIGESTS_OFFSET_HOST]) == 0)
      {
        mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, DIGESTS_OFFSET_HOST + 0, gid, il_pos, 0, 0);
      }
    }
  }
}
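
// note: the -sxx (single-hash) kernel below has the same body as -mxx, since
// for this mode a candidate is verified inside decrypt_and_check rather than
// by comparing a computed digest against the digest list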

KERNEL_FQ KERNEL_FA void m13100_sxx (KERN_ATTR_VECTOR_ESALT (krb5tgs_t))
{
  /**
   * modifier
   */

  const u64 lid = get_local_id (0);
  const u64 gid = get_global_id (0);

  if (gid >= GID_CNT) return;

  /**
   * base
   */

  const u32 pw_len = pws[gid].pw_len;

  u32x w[64] = { 0 };

  for (u32 i = 0, idx = 0; i < pw_len; i += 4, idx += 1)
  {
    w[idx] = pws[gid].i[idx];
  }

  LOCAL_VK u32 S[64 * FIXED_LOCAL_SIZE];

  u32 checksum[4];

  checksum[0] = esalt_bufs[DIGESTS_OFFSET_HOST].checksum[0];
  checksum[1] = esalt_bufs[DIGESTS_OFFSET_HOST].checksum[1];
  checksum[2] = esalt_bufs[DIGESTS_OFFSET_HOST].checksum[2];
  checksum[3] = esalt_bufs[DIGESTS_OFFSET_HOST].checksum[3];

  /**
   * loop
   */

  u32x w0l = w[0];

  for (u32 il_pos = 0; il_pos < IL_CNT; il_pos += VECT_SIZE)
  {
    const u32x w0r = words_buf_r[il_pos / VECT_SIZE];

    const u32x w0 = w0l | w0r;

    w[0] = w0;

    md4_ctx_t ctx;

    md4_init (&ctx);

    md4_update_utf16le (&ctx, w, pw_len);

    md4_final (&ctx);

    u32 digest[4];

    u32 K2[4];

    kerb_prepare (ctx.h, checksum, digest, K2);

    if (decrypt_and_check (S, digest, esalt_bufs[DIGESTS_OFFSET_HOST].edata2, esalt_bufs[DIGESTS_OFFSET_HOST].edata2_len, K2, checksum, lid) == 1)
    {
      if (hc_atomic_inc (&hashes_shown[DIGESTS_OFFSET_HOST]) == 0)
      {
        mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, DIGESTS_OFFSET_HOST + 0, gid, il_pos, 0, 0);
      }
    }
  }
}