/**
 * Author......: See docs/credits.txt
 * License.....: MIT
 */
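
/**
 * KNX IP Secure, device authentication code (hash mode 25900).
 *
 * The password is stretched with PBKDF2-HMAC-SHA256 (init/loop kernels);
 * the comp kernel then uses the derived AES-128 key to compute a CCM-style
 * MAC (a CBC-MAC over the esalt blocks, masked with one AES keystream
 * block) and compares it against the stored digest. The salt is constant
 * across hashes, so comparison runs once per digest
 * (OPTS_TYPE_DEEP_COMP_KERNEL).
 */
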
#define NEW_SIMD_CODE

#ifdef KERNEL_STATIC
#include "inc_vendor.h"
#include "inc_types.h"
#include "inc_platform.cl"
#include "inc_common.cl"
#include "inc_simd.cl"
#include "inc_hash_sha256.cl"
#include "inc_cipher_aes.cl"
#endif

#define COMPARE_S "inc_comp_single.cl"
#define COMPARE_M "inc_comp_multi.cl"
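
// esalt: three 16-byte message blocks fed to the CBC-MAC in the comp kernel
// (the leading block B0 is all zeros and is built locally there)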
typedef struct blocks
{
  u32 b1[4];
  u32 b2[4];
  u32 b3[4];

} blocks_t;

typedef struct pbkdf2_sha256_tmp
{
  u32 ipad[8];
  u32 opad[8];

  u32 dgst[32];
  u32 out[32];

} pbkdf2_sha256_tmp_t;
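
// One vectorized HMAC-SHA256 iteration over a fixed 32-byte message (the
// previous PBKDF2 digest). ipad/opad hold the precomputed inner/outer key
// states, so only two SHA256 transforms are needed; the padding and the
// message length (64 + 32 bytes) are constants.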
DECLSPEC void hmac_sha256_run_V (u32x *w0, u32x *w1, u32x *w2, u32x *w3, u32x *ipad, u32x *opad, u32x *digest)
{
  digest[0] = ipad[0];
  digest[1] = ipad[1];
  digest[2] = ipad[2];
  digest[3] = ipad[3];
  digest[4] = ipad[4];
  digest[5] = ipad[5];
  digest[6] = ipad[6];
  digest[7] = ipad[7];

  sha256_transform_vector (w0, w1, w2, w3, digest);

  w0[0] = digest[0];
  w0[1] = digest[1];
  w0[2] = digest[2];
  w0[3] = digest[3];
  w1[0] = digest[4];
  w1[1] = digest[5];
  w1[2] = digest[6];
  w1[3] = digest[7];
  w2[0] = 0x80000000;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;
  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = (64 + 32) * 8;

  digest[0] = opad[0];
  digest[1] = opad[1];
  digest[2] = opad[2];
  digest[3] = opad[3];
  digest[4] = opad[4];
  digest[5] = opad[5];
  digest[6] = opad[6];
  digest[7] = opad[7];

  sha256_transform_vector (w0, w1, w2, w3, digest);
}
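
// Encrypt one 16-byte block in CBC mode: XOR the input with the running IV,
// encrypt, and feed the ciphertext back as the next IV. Chained over
// consecutive blocks with a zero IV this yields the CBC-MAC used in the
// comp kernel.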
DECLSPEC void aes128_encrypt_cbc (const u32 *aes_ks, u32 *aes_iv, const u32 *in, u32 *out, SHM_TYPE u32 *s_te0, SHM_TYPE u32 *s_te1, SHM_TYPE u32 *s_te2, SHM_TYPE u32 *s_te3, SHM_TYPE u32 *s_te4)
{
  u32 in_s[4];

  in_s[0] = in[0];
  in_s[1] = in[1];
  in_s[2] = in[2];
  in_s[3] = in[3];

  in_s[0] ^= aes_iv[0];
  in_s[1] ^= aes_iv[1];
  in_s[2] ^= aes_iv[2];
  in_s[3] ^= aes_iv[3];

  aes128_encrypt (aes_ks, in_s, out, s_te0, s_te1, s_te2, s_te3, s_te4);

  aes_iv[0] = out[0];
  aes_iv[1] = out[1];
  aes_iv[2] = out[2];
  aes_iv[3] = out[3];
}
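
// Init kernel: set up the PBKDF2-HMAC-SHA256 state from the password and
// hash in the (byte-swapped) salt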
KERNEL_FQ void m25900_init (KERN_ATTR_TMPS (pbkdf2_sha256_tmp_t))
{
  /**
   * base
   */

  const u64 gid = get_global_id (0);

  if (gid >= gid_max) return;

  sha256_hmac_ctx_t sha256_hmac_ctx;

  sha256_hmac_init_global_swap (&sha256_hmac_ctx, pws[gid].i, pws[gid].pw_len);

  tmps[gid].ipad[0] = sha256_hmac_ctx.ipad.h[0];
  tmps[gid].ipad[1] = sha256_hmac_ctx.ipad.h[1];
  tmps[gid].ipad[2] = sha256_hmac_ctx.ipad.h[2];
  tmps[gid].ipad[3] = sha256_hmac_ctx.ipad.h[3];
  tmps[gid].ipad[4] = sha256_hmac_ctx.ipad.h[4];
  tmps[gid].ipad[5] = sha256_hmac_ctx.ipad.h[5];
  tmps[gid].ipad[6] = sha256_hmac_ctx.ipad.h[6];
  tmps[gid].ipad[7] = sha256_hmac_ctx.ipad.h[7];

  tmps[gid].opad[0] = sha256_hmac_ctx.opad.h[0];
  tmps[gid].opad[1] = sha256_hmac_ctx.opad.h[1];
  tmps[gid].opad[2] = sha256_hmac_ctx.opad.h[2];
  tmps[gid].opad[3] = sha256_hmac_ctx.opad.h[3];
  tmps[gid].opad[4] = sha256_hmac_ctx.opad.h[4];
  tmps[gid].opad[5] = sha256_hmac_ctx.opad.h[5];
  tmps[gid].opad[6] = sha256_hmac_ctx.opad.h[6];
  tmps[gid].opad[7] = sha256_hmac_ctx.opad.h[7];

  sha256_hmac_update_global_swap (&sha256_hmac_ctx, salt_bufs[SALT_POS].salt_buf, salt_bufs[SALT_POS].salt_len);
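
  // The derived key is a single 32-byte block, so this loop runs once:
  // append the big-endian block index 1, finalize to get U_1, and use it to
  // seed both the running digest and the accumulated output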
  for (u32 i = 0, j = 1; i < 8; i += 8, j += 1)
  {
    sha256_hmac_ctx_t sha256_hmac_ctx2 = sha256_hmac_ctx;

    u32 w0[4];
    u32 w1[4];
    u32 w2[4];
    u32 w3[4];

    w0[0] = j;
    w0[1] = 0;
    w0[2] = 0;
    w0[3] = 0;
    w1[0] = 0;
    w1[1] = 0;
    w1[2] = 0;
    w1[3] = 0;
    w2[0] = 0;
    w2[1] = 0;
    w2[2] = 0;
    w2[3] = 0;
    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
    w3[3] = 0;

    sha256_hmac_update_64 (&sha256_hmac_ctx2, w0, w1, w2, w3, 4);

    sha256_hmac_final (&sha256_hmac_ctx2);

    tmps[gid].dgst[i + 0] = sha256_hmac_ctx2.opad.h[0];
    tmps[gid].dgst[i + 1] = sha256_hmac_ctx2.opad.h[1];
    tmps[gid].dgst[i + 2] = sha256_hmac_ctx2.opad.h[2];
    tmps[gid].dgst[i + 3] = sha256_hmac_ctx2.opad.h[3];
    tmps[gid].dgst[i + 4] = sha256_hmac_ctx2.opad.h[4];
    tmps[gid].dgst[i + 5] = sha256_hmac_ctx2.opad.h[5];
    tmps[gid].dgst[i + 6] = sha256_hmac_ctx2.opad.h[6];
    tmps[gid].dgst[i + 7] = sha256_hmac_ctx2.opad.h[7];

    tmps[gid].out[i + 0] = tmps[gid].dgst[i + 0];
    tmps[gid].out[i + 1] = tmps[gid].dgst[i + 1];
    tmps[gid].out[i + 2] = tmps[gid].dgst[i + 2];
    tmps[gid].out[i + 3] = tmps[gid].dgst[i + 3];
    tmps[gid].out[i + 4] = tmps[gid].dgst[i + 4];
    tmps[gid].out[i + 5] = tmps[gid].dgst[i + 5];
    tmps[gid].out[i + 6] = tmps[gid].dgst[i + 6];
    tmps[gid].out[i + 7] = tmps[gid].dgst[i + 7];
  }
}
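
// Loop kernel: runs a slice of the PBKDF2 iterations. State is packed into
// vector registers (NEW_SIMD_CODE), so each work-item advances VECT_SIZE
// password candidates at once.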
KERNEL_FQ void m25900_loop (KERN_ATTR_TMPS (pbkdf2_sha256_tmp_t))
{
  const u64 gid = get_global_id (0);

  if ((gid * VECT_SIZE) >= gid_max) return;

  u32x ipad[8];
  u32x opad[8];

  ipad[0] = packv (tmps, ipad, gid, 0);
  ipad[1] = packv (tmps, ipad, gid, 1);
  ipad[2] = packv (tmps, ipad, gid, 2);
  ipad[3] = packv (tmps, ipad, gid, 3);
  ipad[4] = packv (tmps, ipad, gid, 4);
  ipad[5] = packv (tmps, ipad, gid, 5);
  ipad[6] = packv (tmps, ipad, gid, 6);
  ipad[7] = packv (tmps, ipad, gid, 7);

  opad[0] = packv (tmps, opad, gid, 0);
  opad[1] = packv (tmps, opad, gid, 1);
  opad[2] = packv (tmps, opad, gid, 2);
  opad[3] = packv (tmps, opad, gid, 3);
  opad[4] = packv (tmps, opad, gid, 4);
  opad[5] = packv (tmps, opad, gid, 5);
  opad[6] = packv (tmps, opad, gid, 6);
  opad[7] = packv (tmps, opad, gid, 7);

  for (u32 i = 0; i < 8; i += 8)
  {
    u32x dgst[8];
    u32x out[8];

    dgst[0] = packv (tmps, dgst, gid, i + 0);
    dgst[1] = packv (tmps, dgst, gid, i + 1);
    dgst[2] = packv (tmps, dgst, gid, i + 2);
    dgst[3] = packv (tmps, dgst, gid, i + 3);
    dgst[4] = packv (tmps, dgst, gid, i + 4);
    dgst[5] = packv (tmps, dgst, gid, i + 5);
    dgst[6] = packv (tmps, dgst, gid, i + 6);
    dgst[7] = packv (tmps, dgst, gid, i + 7);

    out[0] = packv (tmps, out, gid, i + 0);
    out[1] = packv (tmps, out, gid, i + 1);
    out[2] = packv (tmps, out, gid, i + 2);
    out[3] = packv (tmps, out, gid, i + 3);
    out[4] = packv (tmps, out, gid, i + 4);
    out[5] = packv (tmps, out, gid, i + 5);
    out[6] = packv (tmps, out, gid, i + 6);
    out[7] = packv (tmps, out, gid, i + 7);
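
    // Standard PBKDF2 accumulation: U_{j+1} = HMAC(password, U_j) and
    // out ^= U_{j+1}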
    for (u32 j = 0; j < loop_cnt; j++)
    {
      u32x w0[4];
      u32x w1[4];
      u32x w2[4];
      u32x w3[4];

      w0[0] = dgst[0];
      w0[1] = dgst[1];
      w0[2] = dgst[2];
      w0[3] = dgst[3];
      w1[0] = dgst[4];
      w1[1] = dgst[5];
      w1[2] = dgst[6];
      w1[3] = dgst[7];
      w2[0] = 0x80000000;
      w2[1] = 0;
      w2[2] = 0;
      w2[3] = 0;
      w3[0] = 0;
      w3[1] = 0;
      w3[2] = 0;
      w3[3] = (64 + 32) * 8;

      hmac_sha256_run_V (w0, w1, w2, w3, ipad, opad, dgst);

      out[0] ^= dgst[0];
      out[1] ^= dgst[1];
      out[2] ^= dgst[2];
      out[3] ^= dgst[3];
      out[4] ^= dgst[4];
      out[5] ^= dgst[5];
      out[6] ^= dgst[6];
      out[7] ^= dgst[7];
    }

    unpackv (tmps, dgst, gid, i + 0, dgst[0]);
    unpackv (tmps, dgst, gid, i + 1, dgst[1]);
    unpackv (tmps, dgst, gid, i + 2, dgst[2]);
    unpackv (tmps, dgst, gid, i + 3, dgst[3]);
    unpackv (tmps, dgst, gid, i + 4, dgst[4]);
    unpackv (tmps, dgst, gid, i + 5, dgst[5]);
    unpackv (tmps, dgst, gid, i + 6, dgst[6]);
    unpackv (tmps, dgst, gid, i + 7, dgst[7]);

    unpackv (tmps, out, gid, i + 0, out[0]);
    unpackv (tmps, out, gid, i + 1, out[1]);
    unpackv (tmps, out, gid, i + 2, out[2]);
    unpackv (tmps, out, gid, i + 3, out[3]);
    unpackv (tmps, out, gid, i + 4, out[4]);
    unpackv (tmps, out, gid, i + 5, out[5]);
    unpackv (tmps, out, gid, i + 6, out[6]);
    unpackv (tmps, out, gid, i + 7, out[7]);
  }
}
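
// Comp kernel: expand the derived AES-128 key and verify the CCM-style MAC.
// With a constant salt (OPTS_TYPE_DEEP_COMP_KERNEL) this kernel is entered
// once per digest; loop_pos selects the digest/esalt pair to test.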
KERNEL_FQ void m25900_comp (KERN_ATTR_TMPS_ESALT (pbkdf2_sha256_tmp_t, blocks_t))
{
  /**
   * base
   */

  const u64 gid = get_global_id (0);
  const u64 lid = get_local_id (0);
  const u64 lsz = get_local_size (0);

  /**
   * aes shared
   */

  #ifdef REAL_SHM

  LOCAL_VK u32 s_td0[256];
  LOCAL_VK u32 s_td1[256];
  LOCAL_VK u32 s_td2[256];
  LOCAL_VK u32 s_td3[256];
  LOCAL_VK u32 s_td4[256];

  LOCAL_VK u32 s_te0[256];
  LOCAL_VK u32 s_te1[256];
  LOCAL_VK u32 s_te2[256];
  LOCAL_VK u32 s_te3[256];
  LOCAL_VK u32 s_te4[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    s_td0[i] = td0[i];
    s_td1[i] = td1[i];
    s_td2[i] = td2[i];
    s_td3[i] = td3[i];
    s_td4[i] = td4[i];

    s_te0[i] = te0[i];
    s_te1[i] = te1[i];
    s_te2[i] = te2[i];
    s_te3[i] = te3[i];
    s_te4[i] = te4[i];
  }

  SYNC_THREADS ();

  #else

  CONSTANT_AS u32a *s_td0 = td0;
  CONSTANT_AS u32a *s_td1 = td1;
  CONSTANT_AS u32a *s_td2 = td2;
  CONSTANT_AS u32a *s_td3 = td3;
  CONSTANT_AS u32a *s_td4 = td4;

  CONSTANT_AS u32a *s_te0 = te0;
  CONSTANT_AS u32a *s_te1 = te1;
  CONSTANT_AS u32a *s_te2 = te2;
  CONSTANT_AS u32a *s_te3 = te3;
  CONSTANT_AS u32a *s_te4 = te4;

  #endif

  if (gid >= gid_max) return;
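
  // AES-128 key: four words of the PBKDF2 output, selected via DGST_R0..R3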
  u32 key[4];

  key[0] = tmps[gid].out[DGST_R0];
  key[1] = tmps[gid].out[DGST_R1];
  key[2] = tmps[gid].out[DGST_R2];
  key[3] = tmps[gid].out[DGST_R3];

  u32 aes_ks[44];

  AES128_set_encrypt_key (aes_ks, key, s_te0, s_te1, s_te2, s_te3);

  u32 b0[4] = { 0 };

  u32 aes_cbc_iv[4] = { 0 };

  u32 yn[4];
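
  // Deep comparison: loop_pos indexes the digest whose esalt blocks we MAC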
  const u32 digest_pos = loop_pos;

  const u32 digest_cur = DIGESTS_OFFSET + digest_pos;

  u32 b1[4];

  b1[0] = esalt_bufs[digest_cur].b1[0];
  b1[1] = esalt_bufs[digest_cur].b1[1];
  b1[2] = esalt_bufs[digest_cur].b1[2];
  b1[3] = esalt_bufs[digest_cur].b1[3];

  u32 b2[4];

  b2[0] = esalt_bufs[digest_cur].b2[0];
  b2[1] = esalt_bufs[digest_cur].b2[1];
  b2[2] = esalt_bufs[digest_cur].b2[2];
  b2[3] = esalt_bufs[digest_cur].b2[3];

  u32 b3[4];

  b3[0] = esalt_bufs[digest_cur].b3[0];
  b3[1] = esalt_bufs[digest_cur].b3[1];
  b3[2] = esalt_bufs[digest_cur].b3[2];
  b3[3] = esalt_bufs[digest_cur].b3[3];
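
  // CBC-MAC with a zero IV over B0..B3; yn ends up holding the last
  // ciphertext block, i.e. the unmasked authentication tag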
  aes128_encrypt_cbc (aes_ks, aes_cbc_iv, b0, yn, s_te0, s_te1, s_te2, s_te3, s_te4);
  aes128_encrypt_cbc (aes_ks, aes_cbc_iv, b1, yn, s_te0, s_te1, s_te2, s_te3, s_te4);
  aes128_encrypt_cbc (aes_ks, aes_cbc_iv, b2, yn, s_te0, s_te1, s_te2, s_te3, s_te4);
  aes128_encrypt_cbc (aes_ks, aes_cbc_iv, b3, yn, s_te0, s_te1, s_te2, s_te3, s_te4);
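
  // CCM-style masking: encrypt the (constant, pre-swapped) counter block to
  // get the keystream block S0; the final MAC is yn ^ S0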
  u32 nonce[4];

  nonce[0] = 0;
  nonce[1] = 0;
  nonce[2] = 0;
  nonce[3] = 0x00ff0000; // already swapped

  u32 s0[4];

  aes128_encrypt (aes_ks, nonce, s0, s_te0, s_te1, s_te2, s_te3, s_te4);

  const u32 r0 = yn[0] ^ s0[0];
  const u32 r1 = yn[1] ^ s0[1];
  const u32 r2 = yn[2] ^ s0[2];
  const u32 r3 = yn[3] ^ s0[3];
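
  // The comp kernel has no inner-loop amplification, so the comparison macro
  // runs with a fixed candidate index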
  #define il_pos 0

  #ifdef KERNEL_STATIC
  #include COMPARE_M
  #endif
}