Move 198xx DiskCryptor to 200xx to not collide with Kerberos 5, etype 17, Pre-Auth

pull/2003/head
jsteube 5 years ago
parent 17ab30b29f
commit b9aaaf7809

@ -368,7 +368,7 @@ KERNEL_FQ void m19800_comp (KERN_ATTR_TMPS_ESALT (krb5pa_17_tmp_t, krb5pa_17_t))
aes_key[1] = hc_swap32_S (tmps[gid].out[1]);
aes_key[2] = hc_swap32_S (tmps[gid].out[2]);
aes_key[3] = hc_swap32_S (tmps[gid].out[3]);
u32 aes_iv[4];
aes_iv[0] = 0;
@ -380,11 +380,12 @@ KERNEL_FQ void m19800_comp (KERN_ATTR_TMPS_ESALT (krb5pa_17_tmp_t, krb5pa_17_t))
aes128_set_encrypt_key (aes_ks, aes_key, s_te0, s_te1, s_te2, s_te3);
u32 key_bytes[4];
u32 out[4];
aes128_encrypt_cbc (aes_ks, aes_iv, nfolded, out, s_te0, s_te1, s_te2, s_te3, s_te4);
key_bytes[0] = hc_swap32_S (out[0]);
key_bytes[1] = hc_swap32_S (out[1]);
key_bytes[2] = hc_swap32_S (out[2]);
@ -392,11 +393,11 @@ KERNEL_FQ void m19800_comp (KERN_ATTR_TMPS_ESALT (krb5pa_17_tmp_t, krb5pa_17_t))
// then aes_cbc encrypt this nfolded value with 'key_bytes' as key along with a null IV
aes128_set_encrypt_key (aes_ks, key_bytes, s_te0, s_te1, s_te2, s_te3);
/* we will now compute 'ke' */
u32 ke[4];
// we can precompute _nfold(pack('>IB', 1, 0xAA), 16)
nfolded[0] = 0xae2c160b;
nfolded[1] = 0x04ad5006;
@ -430,9 +431,9 @@ KERNEL_FQ void m19800_comp (KERN_ATTR_TMPS_ESALT (krb5pa_17_tmp_t, krb5pa_17_t))
// c_1 aka c_n-1 since there are guaranteed to be exactly 3 blocks
enc_blocks[4] = esalt_bufs[digests_offset].enc_timestamp[4];
enc_blocks[5] = esalt_bufs[digests_offset].enc_timestamp[5];
enc_blocks[6] = esalt_bufs[digests_offset].enc_timestamp[6];
enc_blocks[7] = esalt_bufs[digests_offset].enc_timestamp[7];
u32 w0[4];
u32 w1[4];
u32 w2[4];
@ -441,10 +442,10 @@ KERNEL_FQ void m19800_comp (KERN_ATTR_TMPS_ESALT (krb5pa_17_tmp_t, krb5pa_17_t))
u32 aes_cts_decrypt_ks[44];
AES128_set_decrypt_key (aes_cts_decrypt_ks, ke, s_te0, s_te1, s_te2, s_te3, s_td0, s_td1, s_td2, s_td3);
// Our first decryption is the last block (currently in c_n-1) using the first portion of (c_n) as our IV, which allows us to get plaintext in one crypto operation
-aes_iv[0] = esalt_bufs[digests_offset].enc_timestamp[8];
-aes_iv[1] = esalt_bufs[digests_offset].enc_timestamp[9];
+aes_iv[0] = esalt_bufs[digests_offset].enc_timestamp[ 8];
+aes_iv[1] = esalt_bufs[digests_offset].enc_timestamp[ 9];
aes_iv[2] = esalt_bufs[digests_offset].enc_timestamp[10];
aes_iv[3] = esalt_bufs[digests_offset].enc_timestamp[11];
@ -454,7 +455,7 @@ KERNEL_FQ void m19800_comp (KERN_ATTR_TMPS_ESALT (krb5pa_17_tmp_t, krb5pa_17_t))
w0[1] = hc_swap32_S (decrypted_block[1]);
w0[2] = hc_swap32_S (decrypted_block[2]);
w0[3] = hc_swap32_S (decrypted_block[3]);
// Move as much code as possible after this branch to avoid unnecessary computation on misses
if (((w0[0] & 0xf0f0f0f0) == 0x30303030) && ((w0[1] & 0xffff0000) == 0x5aa10000))
{
@ -465,12 +466,13 @@ KERNEL_FQ void m19800_comp (KERN_ATTR_TMPS_ESALT (krb5pa_17_tmp_t, krb5pa_17_t))
w0[1] = decrypted_block[1];
w0[2] = decrypted_block[2];
w0[3] = decrypted_block[3];
int enc_timestamp_len = esalt_bufs[digests_offset].enc_timestamp_len;
int last_word_position = enc_timestamp_len / 4;
// New c_1, join c_n with result of the decrypted c_n-1
int last_block_iter;
for (last_block_iter = 4; last_block_iter < 8; last_block_iter++)
{
if (last_word_position > last_block_iter + 4)
@ -482,62 +484,62 @@ KERNEL_FQ void m19800_comp (KERN_ATTR_TMPS_ESALT (krb5pa_17_tmp_t, krb5pa_17_t))
// Handle case when the split lands in the middle of a WORD
switch (enc_timestamp_len % 4)
{
case 1:
enc_blocks[last_block_iter] = (esalt_bufs[digests_offset].enc_timestamp[last_block_iter + 4] & 0x000000ff) | (w0[last_block_iter - 4] & 0xffffff00);
break;
case 2:
enc_blocks[last_block_iter] = (esalt_bufs[digests_offset].enc_timestamp[last_block_iter + 4] & 0x0000ffff) | (w0[last_block_iter - 4] & 0xffff0000);
break;
case 3:
enc_blocks[last_block_iter] = (esalt_bufs[digests_offset].enc_timestamp[last_block_iter + 4] & 0x00ffffff) | (w0[last_block_iter - 4] & 0xff000000);
break;
default:
enc_blocks[last_block_iter] = w0[last_block_iter - 4];
}
}
else
{
enc_blocks[last_block_iter] = w0[last_block_iter - 4];
}
}
// c_2 aka c_n which is now equal to the old c_n-1
enc_blocks[8] = esalt_bufs[digests_offset].enc_timestamp[4];
enc_blocks[9] = esalt_bufs[digests_offset].enc_timestamp[5];
enc_blocks[10] = esalt_bufs[digests_offset].enc_timestamp[6];
enc_blocks[11] = esalt_bufs[digests_offset].enc_timestamp[7];
// Go ahead and decrypt all blocks now as a normal AES CBC operation
aes_iv[0] = 0;
aes_iv[1] = 0;
aes_iv[2] = 0;
aes_iv[3] = 0;
aes128_decrypt_cbc (aes_cts_decrypt_ks, enc_blocks, decrypted_block, aes_iv, s_td0, s_td1, s_td2, s_td3, s_td4);
w0[0] = hc_swap32_S (decrypted_block[0]);
w0[1] = hc_swap32_S (decrypted_block[1]);
w0[2] = hc_swap32_S (decrypted_block[2]);
w0[3] = hc_swap32_S (decrypted_block[3]);
aes128_decrypt_cbc (aes_cts_decrypt_ks, enc_blocks + 4, decrypted_block, aes_iv, s_td0, s_td1, s_td2, s_td3, s_td4);
w1[0] = hc_swap32_S (decrypted_block[0]);
w1[1] = hc_swap32_S (decrypted_block[1]);
w1[2] = hc_swap32_S (decrypted_block[2]);
w1[3] = hc_swap32_S (decrypted_block[3]);
aes128_decrypt_cbc (aes_cts_decrypt_ks, enc_blocks + 8, decrypted_block, aes_iv, s_td0, s_td1, s_td2, s_td3, s_td4);
w2[0] = hc_swap32_S (decrypted_block[0]);
w2[1] = hc_swap32_S (decrypted_block[1]);
w2[2] = hc_swap32_S (decrypted_block[2]);
w2[3] = hc_swap32_S (decrypted_block[3]);
w3[0] = 0;
w3[1] = 0;
w3[2] = 0;
w3[3] = 0;
/* we will now compute 'ki', having 'key_bytes' */
u32 ki[8];
@ -562,7 +564,7 @@ KERNEL_FQ void m19800_comp (KERN_ATTR_TMPS_ESALT (krb5pa_17_tmp_t, krb5pa_17_t))
ki[1] = out[1];
ki[2] = out[2];
ki[3] = out[3];
sha1_hmac_ctx_t sha1_hmac_ctx;
/*
@ -599,18 +601,19 @@ KERNEL_FQ void m19800_comp (KERN_ATTR_TMPS_ESALT (krb5pa_17_tmp_t, krb5pa_17_t))
sha1_hmac_update_64 (&sha1_hmac_ctx, w0, w1, w2, w3, enc_timestamp_len);
-sha1_hmac_final(&sha1_hmac_ctx);
+sha1_hmac_final (&sha1_hmac_ctx);
// Compare checksum
-if(sha1_hmac_ctx.opad.h[0] == esalt_bufs[digests_offset].checksum[0]
-&& sha1_hmac_ctx.opad.h[1] == esalt_bufs[digests_offset].checksum[1]
-&& sha1_hmac_ctx.opad.h[2] == esalt_bufs[digests_offset].checksum[2])
+if ((sha1_hmac_ctx.opad.h[0] == esalt_bufs[digests_offset].checksum[0])
+&& (sha1_hmac_ctx.opad.h[1] == esalt_bufs[digests_offset].checksum[1])
+&& (sha1_hmac_ctx.opad.h[2] == esalt_bufs[digests_offset].checksum[2]))
{
if (atomic_inc (&hashes_shown[digests_offset]) == 0)
{
#define il_pos 0
mark_hash (plains_buf, d_return_buf, salt_pos, digests_cnt, 0, digests_offset + 0, gid, il_pos, 0, 0);
}
}
}
}
}
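
The three encryptions above are all the same RFC 3961 simplified-profile derivation: a subkey (key_bytes, 'ke', 'ki') is obtained by AES-CBC-encrypting a 16-byte n-folded constant under the base key with a null IV (one block for the 128-bit etype 17 keys; etype 18 chains a second block the same way). Below is a minimal host-side sketch of that single step, assuming OpenSSL's legacy AES API; derive_subkey_128 and the zeroed placeholder inputs are illustrative, not hashcat code.

/*
 * Minimal host-side sketch of the derivation step used above: each subkey
 * (key_bytes, 'ke', 'ki') is the AES-CBC encryption of a 16-byte n-folded
 * constant under the base key with a null IV (RFC 3961 simplified profile).
 * Assumes OpenSSL's legacy AES API; derive_subkey_128 and the zeroed
 * placeholder inputs are illustrative, not hashcat code.
 */

#include <stdio.h>
#include <openssl/aes.h>

static void derive_subkey_128 (const unsigned char base_key[16], const unsigned char folded_const[16], unsigned char subkey[16])
{
  AES_KEY ks;

  unsigned char iv[16] = { 0 }; // null IV, as in the kernel

  AES_set_encrypt_key (base_key, 128, &ks);

  // a single CBC block: subkey = E(base_key, folded_const XOR 0)
  AES_cbc_encrypt (folded_const, subkey, 16, &ks, iv, AES_ENCRYPT);
}

int main (void)
{
  unsigned char base_key[16]     = { 0 }; // placeholder; the kernel uses the PBKDF2-HMAC-SHA1 output
  unsigned char folded_const[16] = { 0 }; // placeholder; e.g. _nfold(pack('>IB', 1, 0xAA), 16) for 'ke'

  unsigned char subkey[16];

  derive_subkey_128 (base_key, folded_const, subkey);

  for (int i = 0; i < 16; i++) printf ("%02x", subkey[i]);

  printf ("\n");

  return 0;
}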

@ -148,7 +148,7 @@ KERNEL_FQ void m19900_init (KERN_ATTR_TMPS_ESALT (krb5pa_18_tmp_t, krb5pa_18_t))
sha1_hmac_update_global_swap (&sha1_hmac_ctx, esalt_bufs[digests_offset].account_info, esalt_bufs[digests_offset].account_info_len);
for (u32 i = 0, j = 1; i < 8; i += 5, j += 1)
{
sha1_hmac_ctx_t sha1_hmac_ctx2 = sha1_hmac_ctx;
@ -194,9 +194,10 @@ KERNEL_FQ void m19900_init (KERN_ATTR_TMPS_ESALT (krb5pa_18_tmp_t, krb5pa_18_t))
KERNEL_FQ void m19900_loop (KERN_ATTR_TMPS_ESALT (krb5pa_18_tmp_t, krb5pa_18_t))
{
/**
* base
*/
const u64 gid = get_global_id (0);
if ((gid * VECT_SIZE) >= gid_max) return;
@ -372,7 +373,7 @@ KERNEL_FQ void m19900_comp (KERN_ATTR_TMPS_ESALT (krb5pa_18_tmp_t, krb5pa_18_t))
aes_key[5] = hc_swap32_S (tmps[gid].out[5]);
aes_key[6] = hc_swap32_S (tmps[gid].out[6]);
aes_key[7] = hc_swap32_S (tmps[gid].out[7]);
u32 aes_iv[4];
aes_iv[0] = 0;
@ -384,11 +385,12 @@ KERNEL_FQ void m19900_comp (KERN_ATTR_TMPS_ESALT (krb5pa_18_tmp_t, krb5pa_18_t))
aes256_set_encrypt_key (aes_ks, aes_key, s_te0, s_te1, s_te2, s_te3);
u32 key_bytes[8];
u32 out[4];
aes256_encrypt_cbc (aes_ks, aes_iv, nfolded, out, s_te0, s_te1, s_te2, s_te3, s_te4);
key_bytes[0] = hc_swap32_S (out[0]);
key_bytes[1] = hc_swap32_S (out[1]);
key_bytes[2] = hc_swap32_S (out[2]);
@ -408,11 +410,11 @@ KERNEL_FQ void m19900_comp (KERN_ATTR_TMPS_ESALT (krb5pa_18_tmp_t, krb5pa_18_t))
// then aes_cbc encrypt this nfolded value with 'key_bytes' as key along with a null IV
aes256_set_encrypt_key (aes_ks, key_bytes, s_te0, s_te1, s_te2, s_te3);
/* we will now compute 'ke' */
u32 ke[8];
// we can precompute _nfold(pack('>IB', 1, 0xAA), 16)
nfolded[0] = 0xae2c160b;
nfolded[1] = 0x04ad5006;
@ -443,7 +445,7 @@ KERNEL_FQ void m19900_comp (KERN_ATTR_TMPS_ESALT (krb5pa_18_tmp_t, krb5pa_18_t))
ke[5] = out[1];
ke[6] = out[2];
ke[7] = out[3];
// Decode the CTS mode encryption by decrypting c_n-1 and swapping it with c_n
u32 enc_blocks[12];
@ -458,9 +460,9 @@ KERNEL_FQ void m19900_comp (KERN_ATTR_TMPS_ESALT (krb5pa_18_tmp_t, krb5pa_18_t))
// c_1 aka c_n-1 since there are guaranteed to be exactly 3 blocks
enc_blocks[4] = esalt_bufs[digests_offset].enc_timestamp[4];
enc_blocks[5] = esalt_bufs[digests_offset].enc_timestamp[5];
enc_blocks[6] = esalt_bufs[digests_offset].enc_timestamp[6];
enc_blocks[7] = esalt_bufs[digests_offset].enc_timestamp[7];
u32 w0[4];
u32 w1[4];
u32 w2[4];
@ -469,7 +471,7 @@ KERNEL_FQ void m19900_comp (KERN_ATTR_TMPS_ESALT (krb5pa_18_tmp_t, krb5pa_18_t))
u32 aes_cts_decrypt_ks[60];
AES256_set_decrypt_key (aes_cts_decrypt_ks, ke, s_te0, s_te1, s_te2, s_te3, s_td0, s_td1, s_td2, s_td3);
// Our first decryption is the last block (currently in c_n-1) using the first portion of (c_n) as our IV, which allows us to get plaintext in one crypto operation
aes_iv[0] = esalt_bufs[digests_offset].enc_timestamp[8];
aes_iv[1] = esalt_bufs[digests_offset].enc_timestamp[9];
@ -482,23 +484,24 @@ KERNEL_FQ void m19900_comp (KERN_ATTR_TMPS_ESALT (krb5pa_18_tmp_t, krb5pa_18_t))
w0[1] = hc_swap32_S (decrypted_block[1]);
w0[2] = hc_swap32_S (decrypted_block[2]);
w0[3] = hc_swap32_S (decrypted_block[3]);
// Move as much code as possible after this branch to avoid unnecessary computation on misses
if (((w0[0] & 0xf0f0f0f0) == 0x30303030) && ((w0[1] & 0xffff0000) == 0x5aa10000))
{
// Decrypt c_n-1 without an IV for the padding blocks on c_n
aes256_decrypt (aes_cts_decrypt_ks, enc_blocks + 4, decrypted_block, s_td0, s_td1, s_td2, s_td3, s_td4);
w0[0] = decrypted_block[0];
w0[1] = decrypted_block[1];
w0[2] = decrypted_block[2];
w0[3] = decrypted_block[3];
int enc_timestamp_len = esalt_bufs[digests_offset].enc_timestamp_len;
int last_word_position = enc_timestamp_len / 4;
// New c_1, join c_n with result of the decrypted c_n-1
int last_block_iter;
for (last_block_iter = 4; last_block_iter < 8; last_block_iter++)
{
if (last_word_position > last_block_iter + 4)
@ -510,63 +513,63 @@ KERNEL_FQ void m19900_comp (KERN_ATTR_TMPS_ESALT (krb5pa_18_tmp_t, krb5pa_18_t))
// Handle case when the split lands in the middle of a WORD
switch (enc_timestamp_len % 4)
{
case 1:
enc_blocks[last_block_iter] = (esalt_bufs[digests_offset].enc_timestamp[last_block_iter + 4] & 0x000000ff) | (w0[last_block_iter - 4] & 0xffffff00);
break;
case 2:
enc_blocks[last_block_iter] = (esalt_bufs[digests_offset].enc_timestamp[last_block_iter + 4] & 0x0000ffff) | (w0[last_block_iter - 4] & 0xffff0000);
break;
case 3:
enc_blocks[last_block_iter] = (esalt_bufs[digests_offset].enc_timestamp[last_block_iter + 4] & 0x00ffffff) | (w0[last_block_iter - 4] & 0xff000000);
break;
default:
enc_blocks[last_block_iter] = w0[last_block_iter - 4];
}
}
else
{
enc_blocks[last_block_iter] = w0[last_block_iter - 4];
}
}
// c_2 aka c_n which is now equal to the old c_n-1
-enc_blocks[8] = esalt_bufs[digests_offset].enc_timestamp[4];
-enc_blocks[9] = esalt_bufs[digests_offset].enc_timestamp[5];
-enc_blocks[10] = esalt_bufs[digests_offset].enc_timestamp[6];
+enc_blocks[ 8] = esalt_bufs[digests_offset].enc_timestamp[4];
+enc_blocks[ 9] = esalt_bufs[digests_offset].enc_timestamp[5];
+enc_blocks[10] = esalt_bufs[digests_offset].enc_timestamp[6];
enc_blocks[11] = esalt_bufs[digests_offset].enc_timestamp[7];
// Go ahead and decrypt all blocks now as a normal AES CBC operation
aes_iv[0] = 0;
aes_iv[1] = 0;
aes_iv[2] = 0;
aes_iv[3] = 0;
-aes256_decrypt_cbc (aes_cts_decrypt_ks, enc_blocks, decrypted_block, aes_iv, s_td0, s_td1, s_td2, s_td3, s_td4);
+aes256_decrypt_cbc (aes_cts_decrypt_ks, enc_blocks + 0, decrypted_block, aes_iv, s_td0, s_td1, s_td2, s_td3, s_td4);
w0[0] = hc_swap32_S (decrypted_block[0]);
w0[1] = hc_swap32_S (decrypted_block[1]);
w0[2] = hc_swap32_S (decrypted_block[2]);
w0[3] = hc_swap32_S (decrypted_block[3]);
aes256_decrypt_cbc (aes_cts_decrypt_ks, enc_blocks + 4, decrypted_block, aes_iv, s_td0, s_td1, s_td2, s_td3, s_td4);
w1[0] = hc_swap32_S (decrypted_block[0]);
w1[1] = hc_swap32_S (decrypted_block[1]);
w1[2] = hc_swap32_S (decrypted_block[2]);
w1[3] = hc_swap32_S (decrypted_block[3]);
aes256_decrypt_cbc (aes_cts_decrypt_ks, enc_blocks + 8, decrypted_block, aes_iv, s_td0, s_td1, s_td2, s_td3, s_td4);
w2[0] = hc_swap32_S (decrypted_block[0]);
w2[1] = hc_swap32_S (decrypted_block[1]);
w2[2] = hc_swap32_S (decrypted_block[2]);
w2[3] = hc_swap32_S (decrypted_block[3]);
w3[0] = 0;
w3[1] = 0;
w3[2] = 0;
w3[3] = 0;
/* we will now compute 'ki', having 'key_bytes' */
u32 ki[8];
@ -603,7 +606,7 @@ KERNEL_FQ void m19900_comp (KERN_ATTR_TMPS_ESALT (krb5pa_18_tmp_t, krb5pa_18_t))
ki[5] = out[1];
ki[6] = out[2];
ki[7] = out[3];
sha1_hmac_ctx_t sha1_hmac_ctx;
/*
@ -640,18 +643,19 @@ KERNEL_FQ void m19900_comp (KERN_ATTR_TMPS_ESALT (krb5pa_18_tmp_t, krb5pa_18_t))
sha1_hmac_update_64 (&sha1_hmac_ctx, w0, w1, w2, w3, enc_timestamp_len);
-sha1_hmac_final(&sha1_hmac_ctx);
+sha1_hmac_final (&sha1_hmac_ctx);
// Compare checksum
-if(sha1_hmac_ctx.opad.h[0] == esalt_bufs[digests_offset].checksum[0]
-&& sha1_hmac_ctx.opad.h[1] == esalt_bufs[digests_offset].checksum[1]
-&& sha1_hmac_ctx.opad.h[2] == esalt_bufs[digests_offset].checksum[2])
+if ((sha1_hmac_ctx.opad.h[0] == esalt_bufs[digests_offset].checksum[0])
+&& (sha1_hmac_ctx.opad.h[1] == esalt_bufs[digests_offset].checksum[1])
+&& (sha1_hmac_ctx.opad.h[2] == esalt_bufs[digests_offset].checksum[2]))
{
if (atomic_inc (&hashes_shown[digests_offset]) == 0)
{
#define il_pos 0
mark_hash (plains_buf, d_return_buf, salt_pos, digests_cnt, 0, digests_offset + 0, gid, il_pos, 0, 0);
}
}
}
}
}
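
The block shuffling above is CBC-CS3 decryption (ciphertext stealing with the last two ciphertext blocks swapped), specialized for the three-block encrypted timestamp and a null IV. Below is a compact standalone sketch of the same decode for the AES-128 case (etype 18 is identical with a 256-bit key), assuming OpenSSL's legacy AES API; cts3_decrypt_3blocks and xor16 are illustrative names, not hashcat code.

/*
 * Wire layout, as consumed above (33..48 bytes total):
 *   bytes  0..15 : C1                  (first CBC ciphertext block)
 *   bytes 16..31 : C3                  (last CBC ciphertext block, swapped forward)
 *   bytes 32..   : first d bytes of C2 (the "stolen", truncated block)
 */

#include <string.h>
#include <openssl/aes.h>

static void xor16 (unsigned char *r, const unsigned char *a, const unsigned char *b)
{
  for (int i = 0; i < 16; i++) r[i] = a[i] ^ b[i];
}

// ct_len = 32 + d with 1 <= d <= 16; pt receives ct_len plaintext bytes
static void cts3_decrypt_3blocks (const unsigned char key[16], const unsigned char *ct, const size_t ct_len, unsigned char *pt)
{
  const size_t d = ct_len - 32;

  AES_KEY ks;

  AES_set_decrypt_key (key, 128, &ks);

  unsigned char dn[16];
  unsigned char c2[16];
  unsigned char tmp[16];

  // 1. raw-decrypt the swapped block: D(C3) = P3 (zero padded) XOR C2
  AES_decrypt (ct + 16, dn, &ks);

  // 2. rebuild C2: stolen head from the wire, tail taken from D(C3)
  memcpy (c2, ct + 32, d);
  memcpy (c2 + d, dn + d, 16 - d);

  // 3. last plaintext fragment: P3 = first d bytes of (D(C3) XOR C2)
  xor16 (tmp, dn, c2);
  memcpy (pt + 32, tmp, d);

  // 4. the rest is plain CBC with a null IV: P2 = D(C2) XOR C1, P1 = D(C1)
  AES_decrypt (c2, tmp, &ks);
  xor16 (pt + 16, tmp, ct);

  AES_decrypt (ct, pt, &ks);
}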

@ -91,7 +91,7 @@ DECLSPEC void hmac_sha512_run_V (u32x *w0, u32x *w1, u32x *w2, u32x *w3, u32x *w
sha512_transform_vector (w0, w1, w2, w3, w4, w5, w6, w7, digest);
}
-KERNEL_FQ void m19811_init (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcryptor_esalt_t))
+KERNEL_FQ void m20011_init (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcryptor_esalt_t))
{
/**
* base
@ -195,7 +195,7 @@ KERNEL_FQ void m19811_init (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcrypt
}
}
-KERNEL_FQ void m19811_loop (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcryptor_esalt_t))
+KERNEL_FQ void m20011_loop (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcryptor_esalt_t))
{
const u64 gid = get_global_id (0);
@ -321,7 +321,7 @@ KERNEL_FQ void m19811_loop (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcrypt
}
}
-KERNEL_FQ void m19811_comp (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcryptor_esalt_t))
+KERNEL_FQ void m20011_comp (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcryptor_esalt_t))
{
const u64 gid = get_global_id (0);
const u64 lid = get_local_id (0);

@ -91,7 +91,7 @@ DECLSPEC void hmac_sha512_run_V (u32x *w0, u32x *w1, u32x *w2, u32x *w3, u32x *w
sha512_transform_vector (w0, w1, w2, w3, w4, w5, w6, w7, digest);
}
-KERNEL_FQ void m19812_init (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcryptor_esalt_t))
+KERNEL_FQ void m20012_init (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcryptor_esalt_t))
{
/**
* base
@ -195,7 +195,7 @@ KERNEL_FQ void m19812_init (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcrypt
}
}
-KERNEL_FQ void m19812_loop (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcryptor_esalt_t))
+KERNEL_FQ void m20012_loop (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcryptor_esalt_t))
{
const u64 gid = get_global_id (0);
@ -321,7 +321,7 @@ KERNEL_FQ void m19812_loop (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcrypt
}
}
-KERNEL_FQ void m19812_comp (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcryptor_esalt_t))
+KERNEL_FQ void m20012_comp (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcryptor_esalt_t))
{
const u64 gid = get_global_id (0);
const u64 lid = get_local_id (0);

@ -91,7 +91,7 @@ DECLSPEC void hmac_sha512_run_V (u32x *w0, u32x *w1, u32x *w2, u32x *w3, u32x *w
sha512_transform_vector (w0, w1, w2, w3, w4, w5, w6, w7, digest);
}
-KERNEL_FQ void m19813_init (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcryptor_esalt_t))
+KERNEL_FQ void m20013_init (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcryptor_esalt_t))
{
/**
* base
@ -195,7 +195,7 @@ KERNEL_FQ void m19813_init (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcrypt
}
}
-KERNEL_FQ void m19813_loop (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcryptor_esalt_t))
+KERNEL_FQ void m20013_loop (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcryptor_esalt_t))
{
const u64 gid = get_global_id (0);
@ -321,7 +321,7 @@ KERNEL_FQ void m19813_loop (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcrypt
}
}
-KERNEL_FQ void m19813_comp (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcryptor_esalt_t))
+KERNEL_FQ void m20013_comp (KERN_ATTR_TMPS_ESALT (pbkdf2_sha512_tmp_t, diskcryptor_esalt_t))
{
const u64 gid = get_global_id (0);
const u64 lid = get_local_id (0);

@ -21,6 +21,8 @@
- Added hash-mode: QNX /etc/shadow (SHA512)
- Added hash-mode: Kerberos 5 TGS-REP etype 17 (AES128-CTS-HMAC-SHA1-96)
- Added hash-mode: Kerberos 5 TGS-REP etype 18 (AES256-CTS-HMAC-SHA1-96)
- Added hash-mode: Kerberos 5 Pre-Auth etype 17 (AES128-CTS-HMAC-SHA1-96)
- Added hash-mode: Kerberos 5 Pre-Auth etype 18 (AES256-CTS-HMAC-SHA1-96)
- Added hash-mode: sha1(md5(md5($pass)))
- Added hash-mode: sha1($salt1.$pass.$salt2)
- Added hash-mode: Ruby on Rails Restful-Authentication

@ -122,13 +122,16 @@ NVIDIA GPUs require "NVIDIA Driver" (418.56 or later)
- NetNTLMv1+ESS
- NetNTLMv2
- IPMI2 RAKP HMAC-SHA1
- Kerberos 5 AS-REQ Pre-Auth etype 23
- Kerberos 5 AS-REP etype 23
- Kerberos 5 TGS-REP etype 23 (RC4-HMAC-MD5)
- Kerberos 5 Pre-Auth etype 17/18 (AES128-CTS-HMAC-SHA1-96)
- Kerberos 5 TGS-REP etype 17/18 (AES128-CTS-HMAC-SHA1-96)
- DNSSEC (NSEC3)
- CRAM-MD5
- PostgreSQL CRAM (MD5)
- MySQL CRAM (SHA1)
- SIP digest authentication (MD5)
- Kerberos 5 TGS-REP etype 23
- TACACS+
- JWT (JSON Web Token)
- SMF (Simple Machines Forum) > v1.1
@ -266,7 +269,6 @@ NVIDIA GPUs require "NVIDIA Driver" (418.56 or later)
- Ethereum Wallet, SCRYPT
- Ethereum Pre-Sale Wallet, PBKDF2-HMAC-SHA256
- Ansible Vault
- Kerberos 5 AS-REP etype 23
- Plaintext
##

@ -17,7 +17,7 @@ static const u32 DGST_POS2 = 2;
static const u32 DGST_POS3 = 3;
static const u32 DGST_SIZE = DGST_SIZE_4_4;
static const u32 HASH_CATEGORY = HASH_CATEGORY_NETWORK_PROTOCOL;
-static const char *HASH_NAME = "Kerberos 5 AS-REQ Pre-Auth etype 23";
+static const char *HASH_NAME = "Kerberos 5, etype 23, AS-REQ Pre-Auth";
static const u64 KERN_TYPE = 7500;
static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_NOT_ITERATED;

@ -17,7 +17,7 @@ static const u32 DGST_POS2 = 2;
static const u32 DGST_POS3 = 3;
static const u32 DGST_SIZE = DGST_SIZE_4_4;
static const u32 HASH_CATEGORY = HASH_CATEGORY_NETWORK_PROTOCOL;
-static const char *HASH_NAME = "Kerberos 5 TGS-REP etype 23 (RC4-HMAC-MD5)";
+static const char *HASH_NAME = "Kerberos 5, etype 23, TGS-REP";
static const u64 KERN_TYPE = 13100;
static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_NOT_ITERATED;

@ -17,7 +17,7 @@ static const u32 DGST_POS2 = 2;
static const u32 DGST_POS3 = 3;
static const u32 DGST_SIZE = DGST_SIZE_4_4;
static const u32 HASH_CATEGORY = HASH_CATEGORY_NETWORK_PROTOCOL;
-static const char *HASH_NAME = "Kerberos 5 AS-REP etype 23";
+static const char *HASH_NAME = "Kerberos 5, etype 23, AS-REP";
static const u64 KERN_TYPE = 18200;
static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_NOT_ITERATED;

@ -17,7 +17,7 @@ static const u32 DGST_POS2 = 2;
static const u32 DGST_POS3 = 3;
static const u32 DGST_SIZE = DGST_SIZE_4_4;
static const u32 HASH_CATEGORY = HASH_CATEGORY_NETWORK_PROTOCOL;
-static const char *HASH_NAME = "Kerberos 5 TGS-REP etype 17 (AES128-CTS-HMAC-SHA1-96)";
+static const char *HASH_NAME = "Kerberos 5, etype 17, TGS-REP";
static const u64 KERN_TYPE = 19600;
static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_NOT_ITERATED

@ -17,7 +17,7 @@ static const u32 DGST_POS2 = 2;
static const u32 DGST_POS3 = 3;
static const u32 DGST_SIZE = DGST_SIZE_4_4;
static const u32 HASH_CATEGORY = HASH_CATEGORY_NETWORK_PROTOCOL;
-static const char *HASH_NAME = "Kerberos 5 TGS-REP etype 18 (AES256-CTS-HMAC-SHA1-96)";
+static const char *HASH_NAME = "Kerberos 5, etype 18, TGS-REP";
static const u64 KERN_TYPE = 19700;
static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_NOT_ITERATED

@ -17,7 +17,7 @@ static const u32 DGST_POS2 = 2;
static const u32 DGST_POS3 = 3;
static const u32 DGST_SIZE = DGST_SIZE_4_4;
static const u32 HASH_CATEGORY = HASH_CATEGORY_NETWORK_PROTOCOL;
-static const char *HASH_NAME = "Kerberos 5 Pre-Auth etype 17 (AES128-CTS-HMAC-SHA1-96)";
+static const char *HASH_NAME = "Kerberos 5, etype 17, Pre-Auth";
static const u64 KERN_TYPE = 19800;
static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_NOT_ITERATED
@ -161,7 +161,7 @@ int module_hash_decode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSE
memcpy(account_info_ptr + domain_len, user_pos, user_len);
krb5pa->account_info_len = account_info_len;
// Split checksum
checksum_pos = data_pos + data_len - 24;
data_len = data_len - 24;

@ -17,7 +17,7 @@ static const u32 DGST_POS2 = 2;
static const u32 DGST_POS3 = 3;
static const u32 DGST_SIZE = DGST_SIZE_4_4;
static const u32 HASH_CATEGORY = HASH_CATEGORY_NETWORK_PROTOCOL;
-static const char *HASH_NAME = "Kerberos 5 Pre-Auth etype 18 (AES256-CTS-HMAC-SHA1-96)";
+static const char *HASH_NAME = "Kerberos 5, etype 18, Pre-Auth";
static const u64 KERN_TYPE = 19900;
static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_NOT_ITERATED
@ -161,7 +161,7 @@ int module_hash_decode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSE
memcpy(account_info_ptr + domain_len, user_pos, user_len);
krb5pa->account_info_len = account_info_len;
// Split checksum
checksum_pos = data_pos + data_len - 24;
data_len = data_len - 24;
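
Both modules treat the trailing 24 hex digits (96 bits) of the data field as the HMAC-SHA1-96 checksum and the rest as the encrypted timestamp, which is what the two lines above split off. A minimal sketch of that split; split_checksum is an illustrative helper, not hashcat code.

/*
 * Minimal sketch of the split above: the data field is hex, its last 24
 * digits (96 bits) are the HMAC-SHA1-96 checksum, and the rest is the
 * encrypted timestamp.  split_checksum is illustrative, not hashcat code.
 */

#include <stddef.h>

static int split_checksum (const char *data_pos, const size_t data_len, const char **checksum_pos, size_t *enc_timestamp_len)
{
  if (data_len <= 24) return -1; // need ciphertext in front of the checksum

  *checksum_pos      = data_pos + data_len - 24; // trailing 24 hex digits
  *enc_timestamp_len = data_len - 24;            // what remains is the encrypted timestamp

  return 0;
}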

@ -18,7 +18,7 @@ static const u32 DGST_POS3 = 3;
static const u32 DGST_SIZE = DGST_SIZE_4_4;
static const u32 HASH_CATEGORY = HASH_CATEGORY_FDE;
static const char *HASH_NAME = "DiskCryptor SHA512 + XTS 512 bit";
-static const u64 KERN_TYPE = 19811;
+static const u64 KERN_TYPE = 20011;
static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_SLOW_HASH_SIMD_LOOP;

@ -18,7 +18,7 @@ static const u32 DGST_POS3 = 3;
static const u32 DGST_SIZE = DGST_SIZE_4_4;
static const u32 HASH_CATEGORY = HASH_CATEGORY_FDE;
static const char *HASH_NAME = "DiskCryptor SHA512 + XTS 1024 bit";
-static const u64 KERN_TYPE = 19812;
+static const u64 KERN_TYPE = 20012;
static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_SLOW_HASH_SIMD_LOOP;

@ -18,7 +18,7 @@ static const u32 DGST_POS3 = 3;
static const u32 DGST_SIZE = DGST_SIZE_4_4;
static const u32 HASH_CATEGORY = HASH_CATEGORY_FDE;
static const char *HASH_NAME = "DiskCryptor SHA512 + XTS 1536 bit";
-static const u64 KERN_TYPE = 19813;
+static const u64 KERN_TYPE = 20013;
static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_USES_BITS_64
| OPTI_TYPE_SLOW_HASH_SIMD_LOOP;