From 62bed3663884ebee3b0784b063320235b20d49f8 Mon Sep 17 00:00:00 2001 From: Fist0urs Date: Fri, 19 Feb 2016 23:12:46 +0100 Subject: [PATCH] Add verification of checksum for -m 13100 This avoid collisions by arc4'ing all data then hmac-md5 when valid ASN1 structures headers are found. Performance should not be impacted. --- OpenCL/m13100_a0.cl | 314 ++++++++++++++++++++++++++++++++++++++------ OpenCL/m13100_a1.cl | 259 ++++++++++++++++++++++++++++++------ OpenCL/m13100_a3.cl | 251 +++++++++++++++++++++++++++++------ src/shared.c | 11 +- tools/test.pl | 2 +- 5 files changed, 710 insertions(+), 127 deletions(-) diff --git a/OpenCL/m13100_a0.cl b/OpenCL/m13100_a0.cl index 050b316ad..8ea9db59f 100644 --- a/OpenCL/m13100_a0.cl +++ b/OpenCL/m13100_a0.cl @@ -1,7 +1,7 @@ /** * Authors......: Jens Steube - * Authors......: Fist0urs - + * Authors......: Fist0urs + * * License.....: MIT */ @@ -140,40 +140,6 @@ static u8 rc4_next_16 (__local RC4_KEY *rc4_key, u8 i, u8 j, __global u32 in[4], return j; } -static int decrypt_and_check (__local RC4_KEY *rc4_key, u32 data[4], __global u32* edata2) -{ - rc4_init_16 (rc4_key, data); - - u32 out[8]; - - u8 j = 0; - - /* - 8 first bytes are nonce, then ASN1 structs (DER encoding: type-length-data) - - if length >= 128 bytes: - length is on 2 bytes and type is \x63\x82 (encode_krb5_enc_tkt_part) and data is an ASN1 sequence \x30\x82 - else: - length is on 1 byte and type is \x63\x81 and data is an ASN1 sequence \x30\x81 - - next headers follow the same ASN1 "type-length-data" scheme - */ - - j = rc4_next_16 (rc4_key, 0, 0, edata2, out); - - if (((out[2] & 0xff00ffff) != 0x30008163) && ((out[2] & 0x0000ffff) != 0x00008263)) return 0; - - j = rc4_next_16 (rc4_key, 16, j, edata2 + 4, out + 4); - - if (((out[4] & 0x00ffffff) != 0x00000503) && (out[4] != 0x050307A0)) return 0; - - // TODO (or not): add RC4'ing of all edata2 then hmac-md5 and compare with - // checksum to be definitely sure that this is the correct pass (even if - // collisions must be very rare) - - return 1; -} - static void md4_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], const u32 w3[4], u32 digest[4]) { u32 a = digest[0]; @@ -421,8 +387,207 @@ static void hmac_md5_run (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 ipad[4 md5_transform (w0, w1, w2, w3, digest); } +static int decrypt_and_check (__local RC4_KEY *rc4_key, u32 data[4], __global u32 *edata2, const u32 edata2_len, const u32 K2[4], const u32 checksum[4]) +{ + rc4_init_16 (rc4_key, data); -static void kerb_prepare (const u32 w0[4], const u32 w1[4], const u32 pw_len, const u32 checksum[4], u32 digest[4]) + u32 out0[4]; + u32 out1[4]; + + u8 i = 0; + u8 j = 0; + + /* + 8 first bytes are nonce, then ASN1 structs (DER encoding: type-length-data) + + if length >= 128 bytes: + length is on 2 bytes and type is \x63\x82 (encode_krb5_enc_tkt_part) and data is an ASN1 sequence \x30\x82 + else: + length is on 1 byte and type is \x63\x81 and data is an ASN1 sequence \x30\x81 + + next headers follow the same ASN1 "type-length-data" scheme + */ + + j = rc4_next_16 (rc4_key, i, j, edata2 + 0, out0); i += 16; + + if (((out0[2] & 0xff00ffff) != 0x30008163) && ((out0[2] & 0x0000ffff) != 0x00008263)) return 0; + + j = rc4_next_16 (rc4_key, i, j, edata2 + 4, out1); i += 16; + + if (((out1[0] & 0x00ffffff) != 0x00000503) && (out1[0] != 0x050307A0)) return 0; + + rc4_init_16 (rc4_key, data); + + i = 0; + j = 0; + + // init hmac + + u32 w0[4]; + u32 w1[4]; + u32 w2[4]; + u32 w3[4]; + + w0[0] = K2[0]; + w0[1] = K2[1]; + w0[2] = K2[2]; + w0[3] 
= K2[3]; + w1[0] = 0; + w1[1] = 0; + w1[2] = 0; + w1[3] = 0; + w2[0] = 0; + w2[1] = 0; + w2[2] = 0; + w2[3] = 0; + w3[0] = 0; + w3[1] = 0; + w3[2] = 0; + w3[3] = 0; + + u32 ipad[4]; + u32 opad[4]; + + hmac_md5_pad (w0, w1, w2, w3, ipad, opad); + + int edata2_left; + + for (edata2_left = edata2_len; edata2_left >= 64; edata2_left -= 64) + { + j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w1); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w2); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w3); i += 16; edata2 += 4; + + md5_transform (w0, w1, w2, w3, ipad); + } + + w0[0] = 0; + w0[1] = 0; + w0[2] = 0; + w0[3] = 0; + w1[0] = 0; + w1[1] = 0; + w1[2] = 0; + w1[3] = 0; + w2[0] = 0; + w2[1] = 0; + w2[2] = 0; + w2[3] = 0; + w3[0] = 0; + w3[1] = 0; + w3[2] = 0; + w3[3] = 0; + + if (edata2_left < 16) + { + j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4; + + truncate_block (w0, edata2_left & 0xf); + append_0x80_1x4 (w0, edata2_left & 0xf); + + w3[2] = (64 + edata2_len) * 8; + w3[3] = 0; + + md5_transform (w0, w1, w2, w3, ipad); + } + else if (edata2_left < 32) + { + j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w1); i += 16; edata2 += 4; + + truncate_block (w1, edata2_left & 0xf); + append_0x80_1x4 (w1, edata2_left & 0xf); + + w3[2] = (64 + edata2_len) * 8; + w3[3] = 0; + + md5_transform (w0, w1, w2, w3, ipad); + } + else if (edata2_left < 48) + { + j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w1); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w2); i += 16; edata2 += 4; + + truncate_block (w2, edata2_left & 0xf); + append_0x80_1x4 (w2, edata2_left & 0xf); + + w3[2] = (64 + edata2_len) * 8; + w3[3] = 0; + + md5_transform (w0, w1, w2, w3, ipad); + } + else + { + j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w1); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w2); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w3); i += 16; edata2 += 4; + + truncate_block (w3, edata2_left & 0xf); + append_0x80_1x4 (w3, edata2_left & 0xf); + + if (edata2_left < 56) + { + w3[2] = (64 + edata2_len) * 8; + w3[3] = 0; + + md5_transform (w0, w1, w2, w3, ipad); + } + else + { + md5_transform (w0, w1, w2, w3, ipad); + + w0[0] = 0; + w0[1] = 0; + w0[2] = 0; + w0[3] = 0; + w1[0] = 0; + w1[1] = 0; + w1[2] = 0; + w1[3] = 0; + w2[0] = 0; + w2[1] = 0; + w2[2] = 0; + w2[3] = 0; + w3[0] = 0; + w3[1] = 0; + w3[2] = (64 + edata2_len) * 8; + w3[3] = 0; + + md5_transform (w0, w1, w2, w3, ipad); + } + } + + w0[0] = ipad[0]; + w0[1] = ipad[1]; + w0[2] = ipad[2]; + w0[3] = ipad[3]; + w1[0] = 0x80; + w1[1] = 0; + w1[2] = 0; + w1[3] = 0; + w2[0] = 0; + w2[1] = 0; + w2[2] = 0; + w2[3] = 0; + w3[0] = 0; + w3[1] = 0; + w3[2] = (64 + 16) * 8; + w3[3] = 0; + + md5_transform (w0, w1, w2, w3, opad); + + if (checksum[0] != opad[0]) return 0; + if (checksum[1] != opad[1]) return 0; + if (checksum[2] != opad[2]) return 0; + if (checksum[3] != opad[3]) return 0; + + return 1; +} + +static void kerb_prepare (const u32 w0[4], const u32 w1[4], const u32 pw_len, const u32 checksum[4], u32 digest[4], u32 K2[4]) { /** * pads @@ -510,7 +675,14 @@ static void kerb_prepare (const u32 w0[4], const u32 w1[4], const u32 pw_len, co hmac_md5_run (w0_t, w1_t, w2_t, w3_t, ipad, opad, digest); - // 
K3=MD5_HMAC(K1,checksum); + // K2 = K1; + + K2[0] = digest[0]; + K2[1] = digest[1]; + K2[2] = digest[2]; + K2[3] = digest[3]; + + // K3=MD5_HMAC(K1,checksum); w0_t[0] = digest[0]; w0_t[1] = digest[1]; @@ -551,6 +723,60 @@ static void kerb_prepare (const u32 w0[4], const u32 w1[4], const u32 pw_len, co hmac_md5_run (w0_t, w1_t, w2_t, w3_t, ipad, opad, digest); } +static void m13100 (__local RC4_KEY *rc4_keys, u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global krb5tgs_t *krb5tgs_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 bfs_cnt, const u32 digests_cnt, const u32 digests_offset) +{ + /** + * modifier + */ + + const u32 gid = get_global_id (0); + const u32 lid = get_local_id (0); + + /** + * salt + */ + + u32 checksum[4]; + + checksum[0] = krb5tgs_bufs[salt_pos].checksum[0]; + checksum[1] = krb5tgs_bufs[salt_pos].checksum[1]; + checksum[2] = krb5tgs_bufs[salt_pos].checksum[2]; + checksum[3] = krb5tgs_bufs[salt_pos].checksum[3]; + + /** + * loop + */ + + u32 w0l = w0[0]; + + for (u32 il_pos = 0; il_pos < bfs_cnt; il_pos++) + { + const u32 w0r = bfs_buf[il_pos].i; + + w0[0] = w0l | w0r; + + u32 digest[4]; + + u32 K2[4]; + + kerb_prepare (w0, w1, pw_len, checksum, digest, K2); + + u32 tmp[4]; + + tmp[0] = digest[0]; + tmp[1] = digest[1]; + tmp[2] = digest[2]; + tmp[3] = digest[3]; + + if (decrypt_and_check (&rc4_keys[lid], tmp, krb5tgs_bufs[salt_pos].edata2, krb5tgs_bufs[salt_pos].edata2_len, K2, checksum) == 1) + { + mark_hash (plains_buf, hashes_shown, digests_offset, gid, il_pos); + + d_return_buf[lid] = 1; + } + } +} + __kernel void m13100_m04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global krb5tgs_t *krb5tgs_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 rules_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max) { /** @@ -637,7 +863,9 @@ __kernel void m13100_m04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, u32 digest[4]; - kerb_prepare (w0, w1, out_len, checksum, digest); + u32 K2[4]; + + kerb_prepare (w0, w1, pw_len, checksum, digest, K2); u32 tmp[4]; @@ -646,7 +874,7 @@ __kernel void m13100_m04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, tmp[2] = digest[2]; tmp[3] = digest[3]; - if (decrypt_and_check 
(&rc4_keys[lid], tmp, krb5tgs_bufs[salt_pos].edata2) == 1) + if (decrypt_and_check (&rc4_keys[lid], tmp, krb5tgs_bufs[salt_pos].edata2, krb5tgs_bufs[salt_pos].edata2_len, K2, checksum) == 1) { mark_hash (plains_buf, hashes_shown, digests_offset, gid, il_pos); @@ -749,7 +977,9 @@ __kernel void m13100_s04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, u32 digest[4]; - kerb_prepare (w0, w1, out_len, checksum, digest); + u32 K2[4]; + + kerb_prepare (w0, w1, pw_len, checksum, digest, K2); u32 tmp[4]; @@ -758,7 +988,7 @@ __kernel void m13100_s04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, tmp[2] = digest[2]; tmp[3] = digest[3]; - if (decrypt_and_check (&rc4_keys[lid], tmp, krb5tgs_bufs[salt_pos].edata2) == 1) + if (decrypt_and_check (&rc4_keys[lid], tmp, krb5tgs_bufs[salt_pos].edata2, krb5tgs_bufs[salt_pos].edata2_len, K2, checksum) == 1) { mark_hash (plains_buf, hashes_shown, digests_offset, gid, il_pos); diff --git a/OpenCL/m13100_a1.cl b/OpenCL/m13100_a1.cl index 86293b310..858ea01e4 100644 --- a/OpenCL/m13100_a1.cl +++ b/OpenCL/m13100_a1.cl @@ -1,7 +1,7 @@ /** * Authors......: Jens Steube - * Authors......: Fist0urs - + * Authors......: Fist0urs + * * License.....: MIT */ @@ -138,40 +138,6 @@ static u8 rc4_next_16 (__local RC4_KEY *rc4_key, u8 i, u8 j, __global u32 in[4], return j; } -static int decrypt_and_check (__local RC4_KEY *rc4_key, u32 data[4], __global u32* edata2) -{ - rc4_init_16 (rc4_key, data); - - u32 out[8]; - - u8 j = 0; - - /* - 8 first bytes are nonce, then ASN1 structs (DER encoding: type-length-data) - - if length >= 128 bytes: - length is on 2 bytes and type is \x63\x82 (encode_krb5_enc_tkt_part) and data is an ASN1 sequence \x30\x82 - else: - length is on 1 byte and type is \x63\x81 and data is an ASN1 sequence \x30\x81 - - next headers follow the same ASN1 "type-length-data" scheme - */ - - j = rc4_next_16 (rc4_key, 0, 0, edata2, out); - - if (((out[2] & 0xff00ffff) != 0x30008163) && ((out[2] & 0x0000ffff) != 0x00008263)) return 0; - - j = rc4_next_16 (rc4_key, 16, j, edata2 + 4, out + 4); - - if (((out[4] & 0x00ffffff) != 0x00000503) && (out[4] != 0x050307A0)) return 0; - - // TODO (or not): add RC4'ing of all edata2 then hmac-md5 and compare with - // checksum to be definitely sure that this is the correct pass (even if - // collisions must be very rare) - - return 1; -} - static void md4_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], const u32 w3[4], u32 digest[4]) { u32 a = digest[0]; @@ -419,7 +385,207 @@ static void hmac_md5_run (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 ipad[4 md5_transform (w0, w1, w2, w3, digest); } -static void kerb_prepare (const u32 w0[4], const u32 w1[4], const u32 pw_len, const u32 checksum[4], u32 digest[4]) +static int decrypt_and_check (__local RC4_KEY *rc4_key, u32 data[4], __global u32 *edata2, const u32 edata2_len, const u32 K2[4], const u32 checksum[4]) +{ + rc4_init_16 (rc4_key, data); + + u32 out0[4]; + u32 out1[4]; + + u8 i = 0; + u8 j = 0; + + /* + 8 first bytes are nonce, then ASN1 structs (DER encoding: type-length-data) + + if length >= 128 bytes: + length is on 2 bytes and type is \x63\x82 (encode_krb5_enc_tkt_part) and data is an ASN1 sequence \x30\x82 + else: + length is on 1 byte and type is \x63\x81 and data is an ASN1 sequence \x30\x81 + + next headers follow the same ASN1 "type-length-data" scheme + */ + + j = rc4_next_16 (rc4_key, i, j, edata2 + 0, out0); i += 16; + + if (((out0[2] & 0xff00ffff) != 0x30008163) && ((out0[2] & 0x0000ffff) != 0x00008263)) return 0; + + j = 
rc4_next_16 (rc4_key, i, j, edata2 + 4, out1); i += 16; + + if (((out1[0] & 0x00ffffff) != 0x00000503) && (out1[0] != 0x050307A0)) return 0; + + rc4_init_16 (rc4_key, data); + + i = 0; + j = 0; + + // init hmac + + u32 w0[4]; + u32 w1[4]; + u32 w2[4]; + u32 w3[4]; + + w0[0] = K2[0]; + w0[1] = K2[1]; + w0[2] = K2[2]; + w0[3] = K2[3]; + w1[0] = 0; + w1[1] = 0; + w1[2] = 0; + w1[3] = 0; + w2[0] = 0; + w2[1] = 0; + w2[2] = 0; + w2[3] = 0; + w3[0] = 0; + w3[1] = 0; + w3[2] = 0; + w3[3] = 0; + + u32 ipad[4]; + u32 opad[4]; + + hmac_md5_pad (w0, w1, w2, w3, ipad, opad); + + int edata2_left; + + for (edata2_left = edata2_len; edata2_left >= 64; edata2_left -= 64) + { + j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w1); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w2); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w3); i += 16; edata2 += 4; + + md5_transform (w0, w1, w2, w3, ipad); + } + + w0[0] = 0; + w0[1] = 0; + w0[2] = 0; + w0[3] = 0; + w1[0] = 0; + w1[1] = 0; + w1[2] = 0; + w1[3] = 0; + w2[0] = 0; + w2[1] = 0; + w2[2] = 0; + w2[3] = 0; + w3[0] = 0; + w3[1] = 0; + w3[2] = 0; + w3[3] = 0; + + if (edata2_left < 16) + { + j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4; + + truncate_block (w0, edata2_left & 0xf); + append_0x80_1x4 (w0, edata2_left & 0xf); + + w3[2] = (64 + edata2_len) * 8; + w3[3] = 0; + + md5_transform (w0, w1, w2, w3, ipad); + } + else if (edata2_left < 32) + { + j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w1); i += 16; edata2 += 4; + + truncate_block (w1, edata2_left & 0xf); + append_0x80_1x4 (w1, edata2_left & 0xf); + + w3[2] = (64 + edata2_len) * 8; + w3[3] = 0; + + md5_transform (w0, w1, w2, w3, ipad); + } + else if (edata2_left < 48) + { + j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w1); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w2); i += 16; edata2 += 4; + + truncate_block (w2, edata2_left & 0xf); + append_0x80_1x4 (w2, edata2_left & 0xf); + + w3[2] = (64 + edata2_len) * 8; + w3[3] = 0; + + md5_transform (w0, w1, w2, w3, ipad); + } + else + { + j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w1); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w2); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w3); i += 16; edata2 += 4; + + truncate_block (w3, edata2_left & 0xf); + append_0x80_1x4 (w3, edata2_left & 0xf); + + if (edata2_left < 56) + { + w3[2] = (64 + edata2_len) * 8; + w3[3] = 0; + + md5_transform (w0, w1, w2, w3, ipad); + } + else + { + md5_transform (w0, w1, w2, w3, ipad); + + w0[0] = 0; + w0[1] = 0; + w0[2] = 0; + w0[3] = 0; + w1[0] = 0; + w1[1] = 0; + w1[2] = 0; + w1[3] = 0; + w2[0] = 0; + w2[1] = 0; + w2[2] = 0; + w2[3] = 0; + w3[0] = 0; + w3[1] = 0; + w3[2] = (64 + edata2_len) * 8; + w3[3] = 0; + + md5_transform (w0, w1, w2, w3, ipad); + } + } + + w0[0] = ipad[0]; + w0[1] = ipad[1]; + w0[2] = ipad[2]; + w0[3] = ipad[3]; + w1[0] = 0x80; + w1[1] = 0; + w1[2] = 0; + w1[3] = 0; + w2[0] = 0; + w2[1] = 0; + w2[2] = 0; + w2[3] = 0; + w3[0] = 0; + w3[1] = 0; + w3[2] = (64 + 16) * 8; + w3[3] = 0; + + md5_transform (w0, w1, w2, w3, opad); + + if (checksum[0] != opad[0]) return 0; + if (checksum[1] != opad[1]) return 0; + if (checksum[2] != opad[2]) return 0; + if (checksum[3] != opad[3]) return 0; + + return 1; +} + 
+static void kerb_prepare (const u32 w0[4], const u32 w1[4], const u32 pw_len, const u32 checksum[4], u32 digest[4], u32 K2[4]) { /** * pads @@ -507,6 +673,13 @@ static void kerb_prepare (const u32 w0[4], const u32 w1[4], const u32 pw_len, co hmac_md5_run (w0_t, w1_t, w2_t, w3_t, ipad, opad, digest); + // K2 = K1; + + K2[0] = digest[0]; + K2[1] = digest[1]; + K2[2] = digest[2]; + K2[3] = digest[3]; + // K3=MD5_HMAC(K1,checksum); w0_t[0] = digest[0]; @@ -688,7 +861,9 @@ __kernel void m13100_m04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, u32 digest[4]; - kerb_prepare (w0, w1, pw_len, checksum, digest); + u32 K2[4]; + + kerb_prepare (w0, w1, pw_len, checksum, digest, K2); u32 tmp[4]; @@ -697,7 +872,7 @@ __kernel void m13100_m04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, tmp[2] = digest[2]; tmp[3] = digest[3]; - if (decrypt_and_check (&rc4_keys[lid], tmp, krb5tgs_bufs[salt_pos].edata2) == 1) + if (decrypt_and_check (&rc4_keys[lid], tmp, krb5tgs_bufs[salt_pos].edata2, krb5tgs_bufs[salt_pos].edata2_len, K2, checksum) == 1) { mark_hash (plains_buf, hashes_shown, digests_offset, gid, il_pos); @@ -854,7 +1029,9 @@ __kernel void m13100_s04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, u32 digest[4]; - kerb_prepare (w0, w1, pw_len, checksum, digest); + u32 K2[4]; + + kerb_prepare (w0, w1, pw_len, checksum, digest, K2); u32 tmp[4]; @@ -863,7 +1040,7 @@ __kernel void m13100_s04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, tmp[2] = digest[2]; tmp[3] = digest[3]; - if (decrypt_and_check (&rc4_keys[lid], tmp, krb5tgs_bufs[salt_pos].edata2) == 1) + if (decrypt_and_check (&rc4_keys[lid], tmp, krb5tgs_bufs[salt_pos].edata2, krb5tgs_bufs[salt_pos].edata2_len, K2, checksum) == 1) { mark_hash (plains_buf, hashes_shown, digests_offset, gid, il_pos); diff --git a/OpenCL/m13100_a3.cl b/OpenCL/m13100_a3.cl index 159631dc7..495cb9e7c 100644 --- a/OpenCL/m13100_a3.cl +++ b/OpenCL/m13100_a3.cl @@ -1,6 +1,6 @@ /** * Authors......: Jens Steube - * Authors......: Fist0urs + * Authors......: Fist0urs * * License.....: MIT */ @@ -138,40 +138,6 @@ static u8 rc4_next_16 (__local RC4_KEY *rc4_key, u8 i, u8 j, __global u32 in[4], return j; } -static int decrypt_and_check (__local RC4_KEY *rc4_key, u32 data[4], __global u32* edata2) -{ - rc4_init_16 (rc4_key, data); - - u32 out[8]; - - u8 j = 0; - - /* - 8 first bytes are nonce, then ASN1 structs (DER encoding: type-length-data) - - if length >= 128 bytes: - length is on 2 bytes and type is \x63\x82 (encode_krb5_enc_tkt_part) and data is an ASN1 sequence \x30\x82 - else: - length is on 1 byte and type is \x63\x81 and data is an ASN1 sequence \x30\x81 - - next headers follow the same ASN1 "type-length-data" scheme - */ - - j = rc4_next_16 (rc4_key, 0, 0, edata2, out); - - if (((out[2] & 0xff00ffff) != 0x30008163) && ((out[2] & 0x0000ffff) != 0x00008263)) return 0; - - j = rc4_next_16 (rc4_key, 16, j, edata2 + 4, out + 4); - - if (((out[4] & 0x00ffffff) != 0x00000503) && (out[4] != 0x050307A0)) return 0; - - // TODO (or not): add RC4'ing of all edata2 then hmac-md5 and compare with - // checksum to be definitely sure that this is the correct pass (even if - // collisions must be very rare) - - return 1; -} - static void md4_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], const u32 w3[4], u32 digest[4]) { u32 a = digest[0]; @@ -419,7 +385,207 @@ static void hmac_md5_run (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 ipad[4 md5_transform (w0, w1, w2, w3, digest); } -static void kerb_prepare (const u32 w0[4], const u32 
w1[4], const u32 pw_len, const u32 checksum[4], u32 digest[4]) +static int decrypt_and_check (__local RC4_KEY *rc4_key, u32 data[4], __global u32 *edata2, const u32 edata2_len, const u32 K2[4], const u32 checksum[4]) +{ + rc4_init_16 (rc4_key, data); + + u32 out0[4]; + u32 out1[4]; + + u8 i = 0; + u8 j = 0; + + /* + 8 first bytes are nonce, then ASN1 structs (DER encoding: type-length-data) + + if length >= 128 bytes: + length is on 2 bytes and type is \x63\x82 (encode_krb5_enc_tkt_part) and data is an ASN1 sequence \x30\x82 + else: + length is on 1 byte and type is \x63\x81 and data is an ASN1 sequence \x30\x81 + + next headers follow the same ASN1 "type-length-data" scheme + */ + + j = rc4_next_16 (rc4_key, i, j, edata2 + 0, out0); i += 16; + + if (((out0[2] & 0xff00ffff) != 0x30008163) && ((out0[2] & 0x0000ffff) != 0x00008263)) return 0; + + j = rc4_next_16 (rc4_key, i, j, edata2 + 4, out1); i += 16; + + if (((out1[0] & 0x00ffffff) != 0x00000503) && (out1[0] != 0x050307A0)) return 0; + + rc4_init_16 (rc4_key, data); + + i = 0; + j = 0; + + // init hmac + + u32 w0[4]; + u32 w1[4]; + u32 w2[4]; + u32 w3[4]; + + w0[0] = K2[0]; + w0[1] = K2[1]; + w0[2] = K2[2]; + w0[3] = K2[3]; + w1[0] = 0; + w1[1] = 0; + w1[2] = 0; + w1[3] = 0; + w2[0] = 0; + w2[1] = 0; + w2[2] = 0; + w2[3] = 0; + w3[0] = 0; + w3[1] = 0; + w3[2] = 0; + w3[3] = 0; + + u32 ipad[4]; + u32 opad[4]; + + hmac_md5_pad (w0, w1, w2, w3, ipad, opad); + + int edata2_left; + + for (edata2_left = edata2_len; edata2_left >= 64; edata2_left -= 64) + { + j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w1); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w2); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w3); i += 16; edata2 += 4; + + md5_transform (w0, w1, w2, w3, ipad); + } + + w0[0] = 0; + w0[1] = 0; + w0[2] = 0; + w0[3] = 0; + w1[0] = 0; + w1[1] = 0; + w1[2] = 0; + w1[3] = 0; + w2[0] = 0; + w2[1] = 0; + w2[2] = 0; + w2[3] = 0; + w3[0] = 0; + w3[1] = 0; + w3[2] = 0; + w3[3] = 0; + + if (edata2_left < 16) + { + j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4; + + truncate_block (w0, edata2_left & 0xf); + append_0x80_1x4 (w0, edata2_left & 0xf); + + w3[2] = (64 + edata2_len) * 8; + w3[3] = 0; + + md5_transform (w0, w1, w2, w3, ipad); + } + else if (edata2_left < 32) + { + j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w1); i += 16; edata2 += 4; + + truncate_block (w1, edata2_left & 0xf); + append_0x80_1x4 (w1, edata2_left & 0xf); + + w3[2] = (64 + edata2_len) * 8; + w3[3] = 0; + + md5_transform (w0, w1, w2, w3, ipad); + } + else if (edata2_left < 48) + { + j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w1); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w2); i += 16; edata2 += 4; + + truncate_block (w2, edata2_left & 0xf); + append_0x80_1x4 (w2, edata2_left & 0xf); + + w3[2] = (64 + edata2_len) * 8; + w3[3] = 0; + + md5_transform (w0, w1, w2, w3, ipad); + } + else + { + j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w1); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w2); i += 16; edata2 += 4; + j = rc4_next_16 (rc4_key, i, j, edata2, w3); i += 16; edata2 += 4; + + truncate_block (w3, edata2_left & 0xf); + append_0x80_1x4 (w3, edata2_left & 0xf); + + if (edata2_left < 56) + { + w3[2] = (64 + edata2_len) * 8; + 
w3[3] = 0; + + md5_transform (w0, w1, w2, w3, ipad); + } + else + { + md5_transform (w0, w1, w2, w3, ipad); + + w0[0] = 0; + w0[1] = 0; + w0[2] = 0; + w0[3] = 0; + w1[0] = 0; + w1[1] = 0; + w1[2] = 0; + w1[3] = 0; + w2[0] = 0; + w2[1] = 0; + w2[2] = 0; + w2[3] = 0; + w3[0] = 0; + w3[1] = 0; + w3[2] = (64 + edata2_len) * 8; + w3[3] = 0; + + md5_transform (w0, w1, w2, w3, ipad); + } + } + + w0[0] = ipad[0]; + w0[1] = ipad[1]; + w0[2] = ipad[2]; + w0[3] = ipad[3]; + w1[0] = 0x80; + w1[1] = 0; + w1[2] = 0; + w1[3] = 0; + w2[0] = 0; + w2[1] = 0; + w2[2] = 0; + w2[3] = 0; + w3[0] = 0; + w3[1] = 0; + w3[2] = (64 + 16) * 8; + w3[3] = 0; + + md5_transform (w0, w1, w2, w3, opad); + + if (checksum[0] != opad[0]) return 0; + if (checksum[1] != opad[1]) return 0; + if (checksum[2] != opad[2]) return 0; + if (checksum[3] != opad[3]) return 0; + + return 1; +} + +static void kerb_prepare (const u32 w0[4], const u32 w1[4], const u32 pw_len, const u32 checksum[4], u32 digest[4], u32 K2[4]) { /** * pads @@ -507,6 +673,13 @@ static void kerb_prepare (const u32 w0[4], const u32 w1[4], const u32 pw_len, co hmac_md5_run (w0_t, w1_t, w2_t, w3_t, ipad, opad, digest); + // K2 = K1; + + K2[0] = digest[0]; + K2[1] = digest[1]; + K2[2] = digest[2]; + K2[3] = digest[3]; + // K3=MD5_HMAC(K1,checksum); w0_t[0] = digest[0]; @@ -582,7 +755,9 @@ static void m13100 (__local RC4_KEY *rc4_keys, u32 w0[4], u32 w1[4], u32 w2[4], u32 digest[4]; - kerb_prepare (w0, w1, pw_len, checksum, digest); + u32 K2[4]; + + kerb_prepare (w0, w1, pw_len, checksum, digest, K2); u32 tmp[4]; @@ -591,7 +766,7 @@ static void m13100 (__local RC4_KEY *rc4_keys, u32 w0[4], u32 w1[4], u32 w2[4], tmp[2] = digest[2]; tmp[3] = digest[3]; - if (decrypt_and_check (&rc4_keys[lid], tmp, krb5tgs_bufs[salt_pos].edata2) == 1) + if (decrypt_and_check (&rc4_keys[lid], tmp, krb5tgs_bufs[salt_pos].edata2, krb5tgs_bufs[salt_pos].edata2_len, K2, checksum) == 1) { mark_hash (plains_buf, hashes_shown, digests_offset, gid, il_pos); diff --git a/src/shared.c b/src/shared.c index 159275769..08bcd90ba 100644 --- a/src/shared.c +++ b/src/shared.c @@ -8302,7 +8302,7 @@ void ascii_digest (char out_buf[4096], uint salt_pos, uint digest_pos) u8 *ptr_checksum = (u8 *) krb5tgs->checksum; u8 *ptr_edata2 = (u8 *) krb5tgs->edata2; - char data[256] = { 0 }; + char data[2560 * 4 * 2] = { 0 }; char *ptr_data = data; @@ -8312,11 +8312,9 @@ void ascii_digest (char out_buf[4096], uint salt_pos, uint digest_pos) /* skip '$' */ ptr_data++; - for (uint i = 0; i < 32; i++, ptr_data += 2) + for (uint i = 0; i < krb5tgs->edata2_len; i++, ptr_data += 2) sprintf (ptr_data, "%02x", ptr_edata2[i]); - *ptr_data = 0; - snprintf (out_buf, len-1, "%s$%s$%s$%s", SIGNATURE_KRB5TGS, (char *) krb5tgs->account_info, @@ -18795,7 +18793,10 @@ int krb5tgs_parse_hash (char *input_buf, uint input_len, hash_t *hash_buf) | hex_convert (p0) << 4; } - krb5tgs->edata2_len = strlen (edata_ptr - input_len) / (2 * 4); + /* this is needed for hmac_md5 */ + *edata_ptr++ = 0x80; + + krb5tgs->edata2_len = (data_len - 32) / 2 ; salt->salt_buf[0] = krb5tgs->checksum[0]; salt->salt_buf[1] = krb5tgs->checksum[1]; diff --git a/tools/test.pl b/tools/test.pl index 30df3ffcc..5d945fe0c 100755 --- a/tools/test.pl +++ b/tools/test.pl @@ -6704,7 +6704,7 @@ END_CODE my $edata2 = $cipher->RC4 (pack ("H*", $cleartext_ticket)); - $tmp_hash = sprintf ('$krb5tgs$23$*%s$%s$%s*$%s$%s', $user, $realm, $spn, unpack ("H*", $checksum), substr (unpack ("H*", $edata2), 0, 64)); + $tmp_hash = sprintf ('$krb5tgs$23$*%s$%s$%s*$%s$%s', $user, $realm, 
$spn, unpack ("H*", $checksum), unpack ("H*", $edata2)); } return ($tmp_hash);
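
Reviewer note (not part of the patch): the new decrypt_and_check () above implements the RFC 4757 DECRYPT path for etype 23 on the GPU: a cheap ASN.1 header test on the first two RC4 blocks, then a full RC4 pass over edata2 fed block-wise into HMAC-MD5 keyed with K2 and compared against the ticket checksum. The host-side sketch below restates that verification in plain C for reference only; it is not code from this commit. The function name krb5tgs_rc4hmac_check is made up, it takes the NT hash directly instead of deriving it in kerb_prepare, it buffers the whole plaintext instead of streaming 64-byte blocks through md5_transform, and it assumes OpenSSL's legacy RC4/HMAC interfaces (deprecated in OpenSSL 3.x).

/*
 * Sketch of the -m 13100 verification, following RFC 4757 DECRYPT:
 *
 *   K  = NT hash = MD4(UTF-16LE(password))
 *   K1 = HMAC-MD5(K, T)   with T = 2 (ticket key usage, little-endian)
 *   K2 = K1
 *   K3 = HMAC-MD5(K1, checksum)
 *   plaintext = RC4(K3, edata2)   (first 8 bytes are the confounder)
 *   accept iff HMAC-MD5(K2, plaintext) == checksum
 */

#include <stdlib.h>
#include <string.h>

#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/rc4.h>

static int krb5tgs_rc4hmac_check (const unsigned char nthash[16],
                                  const unsigned char checksum[16],
                                  const unsigned char *edata2,
                                  size_t edata2_len)
{
  unsigned char k1[16];   /* K1 (equal to K2 in the non-export case) */
  unsigned char k3[16];
  unsigned char vrfy[16];

  unsigned int md_len;

  const unsigned char t[4] = { 2, 0, 0, 0 }; /* message type 2, little-endian */

  HMAC (EVP_md5 (), nthash, 16, t, 4, k1, &md_len);     /* K1 (= K2) */
  HMAC (EVP_md5 (), k1, 16, checksum, 16, k3, &md_len); /* K3        */

  unsigned char *plain = malloc (edata2_len);

  if (plain == NULL) return 0;

  RC4_KEY rc4;

  RC4_set_key (&rc4, 16, k3);

  RC4 (&rc4, edata2_len, edata2, plain); /* confounder || DER-encoded ticket part */

  /* simplified form of the kernels' early-reject test: after the 8-byte
     confounder we expect an ASN.1 APPLICATION 3 header, 0x63 0x81 or 0x63 0x82 */

  const int asn1_ok = (plain[8] == 0x63) && (plain[9] == 0x81 || plain[9] == 0x82);

  /* full check added by this patch: HMAC-MD5 over the whole decrypted buffer
     (confounder included) with K2, compared to the ticket checksum */

  HMAC (EVP_md5 (), k1, 16, plain, edata2_len, vrfy, &md_len);

  const int ok = asn1_ok && (memcmp (vrfy, checksum, 16) == 0);

  free (plain);

  return ok;
}

The ASN.1 pre-check is what keeps the average cost close to the old kernel: for a wrong key the two header tests reject nearly every candidate after 32 decrypted bytes, so the full-length RC4 plus HMAC-MD5 only runs for the rare survivors and the correct key, which is why the commit message expects no measurable performance impact.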