Mirror of https://github.com/hashcat/hashcat.git, synced 2024-12-17 04:08:34 +00:00
Commit dad03e394d
1) SIMD code for all attack-modes

The macro vector_accessible() was not refactored and was missing completely. Had to rename the variables rules_cnt, combs_cnt and bfs_cnt into il_cnt, which was a good thing anyway, since with the new SIMD code they all act in the same way.

2) SIMD code for attack-mode 0

With the new SIMD code, apply_rules_vect() has to return u32, not u32x. This has a massive impact on all *_a0 kernels; I've rewritten most of them. Deep testing using test.sh is still required. Some kernels need more fixes:

- Some are more or less completely incompatible, like m10400, but they still use the old check_* includes; we should get rid of those, as they are no longer necessary now that we have simd.c.
- Some have a chance but require additional effort, like m11500. We can use the commented-out "#define NEW_SIMD_CODE" to find them.

This change can have a negative impact on -a0 performance for devices that require vectorization, which is mostly CPU devices. New GPUs are all scalar, so they won't be hurt by this. This change also proves that there is no way to efficiently vectorize kernel rules with the new SIMD code, but it enables the addition of rule functions like @ that we have been missing for a long time. This is a TODO.
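
For orientation, a minimal sketch (not the author's exact code) of the unified scalar inner loop this describes, mirroring the loop shape that appears in the refactored -a0 kernels below (il_cnt, il_pos, apply_rules):

for (u32 il_pos = 0; il_pos < il_cnt; il_pos++)
{
  u32 w0[4] = { pw_buf0[0], pw_buf0[1], pw_buf0[2], pw_buf0[3] };
  u32 w1[4] = { pw_buf1[0], pw_buf1[1], pw_buf1[2], pw_buf1[3] };

  // the rule engine stays scalar: apply_rules() returns a plain u32 length,
  // not a vectorized u32x, which is why apply_rules_vect() had to change
  const u32 out_len = apply_rules (rules_buf[il_pos].cmds, w0, w1, pw_len);

  // ... hash the candidate and compare against the targets ...
}

The same il_cnt/il_pos loop shape is shared by the rules, combinator and brute-force attack modes, which is what made the rules_cnt/combs_cnt/bfs_cnt rename possible.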
/**
 * Authors......: Jens Steube <jens.steube@gmail.com>
 * Authors......: Fist0urs <eddy.maaalou@gmail.com>
 *
 * License.....: MIT
 */

#define _KRB5TGS_

#include "include/constants.h"
#include "include/kernel_vendor.h"

#define DGST_R0 0
#define DGST_R1 1
#define DGST_R2 2
#define DGST_R3 3

#include "include/kernel_functions.c"
#include "OpenCL/types_ocl.c"
#include "OpenCL/common.c"
#include "include/rp_kernel.h"
#include "OpenCL/rp.c"

typedef struct
{
  u8 S[256];

  u32 wtf_its_faster;

} RC4_KEY;

static void swap (__local RC4_KEY *rc4_key, const u8 i, const u8 j)
{
  u8 tmp;

  tmp           = rc4_key->S[i];
  rc4_key->S[i] = rc4_key->S[j];
  rc4_key->S[j] = tmp;
}
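
// rc4_init_16(): RC4 key scheduling (KSA) for a 16-byte key. The identity
// permutation is written into S four bytes at a time, then the standard KSA
// swap pass runs once over all 256 entries, cycling through the 16 key bytes
// held in data[0..3].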

static void rc4_init_16 (__local RC4_KEY *rc4_key, const u32 data[4])
{
  u32 v = 0x03020100;
  u32 a = 0x04040404;

  __local u32 *ptr = (__local u32 *) rc4_key->S;

  #pragma unroll
  for (u32 i = 0; i < 64; i++)
  {
    *ptr++ = v; v += a;
  }

  u32 j = 0;

  for (u32 i = 0; i < 16; i++)
  {
    u32 idx = i * 16;

    u32 v;

    v = data[0];

    j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;

    v = data[1];

    j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;

    v = data[2];

    j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;

    v = data[3];

    j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
    j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
  }
}
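
// rc4_next_16(): RC4 PRGA step producing 16 keystream bytes; the keystream is
// XORed against in[0..3] into out[0..3]. The updated j index is returned so the
// caller can keep the stream position across calls (i is advanced by 16 by the
// caller).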

static u8 rc4_next_16 (__local RC4_KEY *rc4_key, u8 i, u8 j, __global u32 in[4], u32 out[4])
{
  #pragma unroll
  for (u32 k = 0; k < 4; k++)
  {
    u32 xor4 = 0;

    u8 idx;

    i += 1;
    j += rc4_key->S[i];

    swap (rc4_key, i, j);

    idx = rc4_key->S[i] + rc4_key->S[j];

    xor4 |= rc4_key->S[idx] << 0;

    i += 1;
    j += rc4_key->S[i];

    swap (rc4_key, i, j);

    idx = rc4_key->S[i] + rc4_key->S[j];

    xor4 |= rc4_key->S[idx] << 8;

    i += 1;
    j += rc4_key->S[i];

    swap (rc4_key, i, j);

    idx = rc4_key->S[i] + rc4_key->S[j];

    xor4 |= rc4_key->S[idx] << 16;

    i += 1;
    j += rc4_key->S[i];

    swap (rc4_key, i, j);

    idx = rc4_key->S[i] + rc4_key->S[j];

    xor4 |= rc4_key->S[idx] << 24;

    out[k] = in[k] ^ xor4;
  }

  return j;
}
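
// md4_transform(): MD4 compression of a single 64-byte block; the 16 message
// words arrive as w0..w3 and the 128-bit state in digest[] is updated in place.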

static void md4_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], const u32 w3[4], u32 digest[4])
{
  u32 a = digest[0];
  u32 b = digest[1];
  u32 c = digest[2];
  u32 d = digest[3];

  MD4_STEP (MD4_Fo, a, b, c, d, w0[0], MD4C00, MD4S00);
  MD4_STEP (MD4_Fo, d, a, b, c, w0[1], MD4C00, MD4S01);
  MD4_STEP (MD4_Fo, c, d, a, b, w0[2], MD4C00, MD4S02);
  MD4_STEP (MD4_Fo, b, c, d, a, w0[3], MD4C00, MD4S03);
  MD4_STEP (MD4_Fo, a, b, c, d, w1[0], MD4C00, MD4S00);
  MD4_STEP (MD4_Fo, d, a, b, c, w1[1], MD4C00, MD4S01);
  MD4_STEP (MD4_Fo, c, d, a, b, w1[2], MD4C00, MD4S02);
  MD4_STEP (MD4_Fo, b, c, d, a, w1[3], MD4C00, MD4S03);
  MD4_STEP (MD4_Fo, a, b, c, d, w2[0], MD4C00, MD4S00);
  MD4_STEP (MD4_Fo, d, a, b, c, w2[1], MD4C00, MD4S01);
  MD4_STEP (MD4_Fo, c, d, a, b, w2[2], MD4C00, MD4S02);
  MD4_STEP (MD4_Fo, b, c, d, a, w2[3], MD4C00, MD4S03);
  MD4_STEP (MD4_Fo, a, b, c, d, w3[0], MD4C00, MD4S00);
  MD4_STEP (MD4_Fo, d, a, b, c, w3[1], MD4C00, MD4S01);
  MD4_STEP (MD4_Fo, c, d, a, b, w3[2], MD4C00, MD4S02);
  MD4_STEP (MD4_Fo, b, c, d, a, w3[3], MD4C00, MD4S03);

  MD4_STEP (MD4_Go, a, b, c, d, w0[0], MD4C01, MD4S10);
  MD4_STEP (MD4_Go, d, a, b, c, w1[0], MD4C01, MD4S11);
  MD4_STEP (MD4_Go, c, d, a, b, w2[0], MD4C01, MD4S12);
  MD4_STEP (MD4_Go, b, c, d, a, w3[0], MD4C01, MD4S13);
  MD4_STEP (MD4_Go, a, b, c, d, w0[1], MD4C01, MD4S10);
  MD4_STEP (MD4_Go, d, a, b, c, w1[1], MD4C01, MD4S11);
  MD4_STEP (MD4_Go, c, d, a, b, w2[1], MD4C01, MD4S12);
  MD4_STEP (MD4_Go, b, c, d, a, w3[1], MD4C01, MD4S13);
  MD4_STEP (MD4_Go, a, b, c, d, w0[2], MD4C01, MD4S10);
  MD4_STEP (MD4_Go, d, a, b, c, w1[2], MD4C01, MD4S11);
  MD4_STEP (MD4_Go, c, d, a, b, w2[2], MD4C01, MD4S12);
  MD4_STEP (MD4_Go, b, c, d, a, w3[2], MD4C01, MD4S13);
  MD4_STEP (MD4_Go, a, b, c, d, w0[3], MD4C01, MD4S10);
  MD4_STEP (MD4_Go, d, a, b, c, w1[3], MD4C01, MD4S11);
  MD4_STEP (MD4_Go, c, d, a, b, w2[3], MD4C01, MD4S12);
  MD4_STEP (MD4_Go, b, c, d, a, w3[3], MD4C01, MD4S13);

  MD4_STEP (MD4_H , a, b, c, d, w0[0], MD4C02, MD4S20);
  MD4_STEP (MD4_H , d, a, b, c, w2[0], MD4C02, MD4S21);
  MD4_STEP (MD4_H , c, d, a, b, w1[0], MD4C02, MD4S22);
  MD4_STEP (MD4_H , b, c, d, a, w3[0], MD4C02, MD4S23);
  MD4_STEP (MD4_H , a, b, c, d, w0[2], MD4C02, MD4S20);
  MD4_STEP (MD4_H , d, a, b, c, w2[2], MD4C02, MD4S21);
  MD4_STEP (MD4_H , c, d, a, b, w1[2], MD4C02, MD4S22);
  MD4_STEP (MD4_H , b, c, d, a, w3[2], MD4C02, MD4S23);
  MD4_STEP (MD4_H , a, b, c, d, w0[1], MD4C02, MD4S20);
  MD4_STEP (MD4_H , d, a, b, c, w2[1], MD4C02, MD4S21);
  MD4_STEP (MD4_H , c, d, a, b, w1[1], MD4C02, MD4S22);
  MD4_STEP (MD4_H , b, c, d, a, w3[1], MD4C02, MD4S23);
  MD4_STEP (MD4_H , a, b, c, d, w0[3], MD4C02, MD4S20);
  MD4_STEP (MD4_H , d, a, b, c, w2[3], MD4C02, MD4S21);
  MD4_STEP (MD4_H , c, d, a, b, w1[3], MD4C02, MD4S22);
  MD4_STEP (MD4_H , b, c, d, a, w3[3], MD4C02, MD4S23);

  digest[0] += a;
  digest[1] += b;
  digest[2] += c;
  digest[3] += d;
}
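
// md5_transform(): MD5 compression of a single 64-byte block, same calling
// convention as md4_transform() above.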

static void md5_transform (const u32 w0[4], const u32 w1[4], const u32 w2[4], const u32 w3[4], u32 digest[4])
{
  u32 a = digest[0];
  u32 b = digest[1];
  u32 c = digest[2];
  u32 d = digest[3];

  u32 w0_t = w0[0];
  u32 w1_t = w0[1];
  u32 w2_t = w0[2];
  u32 w3_t = w0[3];
  u32 w4_t = w1[0];
  u32 w5_t = w1[1];
  u32 w6_t = w1[2];
  u32 w7_t = w1[3];
  u32 w8_t = w2[0];
  u32 w9_t = w2[1];
  u32 wa_t = w2[2];
  u32 wb_t = w2[3];
  u32 wc_t = w3[0];
  u32 wd_t = w3[1];
  u32 we_t = w3[2];
  u32 wf_t = w3[3];

  MD5_STEP (MD5_Fo, a, b, c, d, w0_t, MD5C00, MD5S00);
  MD5_STEP (MD5_Fo, d, a, b, c, w1_t, MD5C01, MD5S01);
  MD5_STEP (MD5_Fo, c, d, a, b, w2_t, MD5C02, MD5S02);
  MD5_STEP (MD5_Fo, b, c, d, a, w3_t, MD5C03, MD5S03);
  MD5_STEP (MD5_Fo, a, b, c, d, w4_t, MD5C04, MD5S00);
  MD5_STEP (MD5_Fo, d, a, b, c, w5_t, MD5C05, MD5S01);
  MD5_STEP (MD5_Fo, c, d, a, b, w6_t, MD5C06, MD5S02);
  MD5_STEP (MD5_Fo, b, c, d, a, w7_t, MD5C07, MD5S03);
  MD5_STEP (MD5_Fo, a, b, c, d, w8_t, MD5C08, MD5S00);
  MD5_STEP (MD5_Fo, d, a, b, c, w9_t, MD5C09, MD5S01);
  MD5_STEP (MD5_Fo, c, d, a, b, wa_t, MD5C0a, MD5S02);
  MD5_STEP (MD5_Fo, b, c, d, a, wb_t, MD5C0b, MD5S03);
  MD5_STEP (MD5_Fo, a, b, c, d, wc_t, MD5C0c, MD5S00);
  MD5_STEP (MD5_Fo, d, a, b, c, wd_t, MD5C0d, MD5S01);
  MD5_STEP (MD5_Fo, c, d, a, b, we_t, MD5C0e, MD5S02);
  MD5_STEP (MD5_Fo, b, c, d, a, wf_t, MD5C0f, MD5S03);

  MD5_STEP (MD5_Go, a, b, c, d, w1_t, MD5C10, MD5S10);
  MD5_STEP (MD5_Go, d, a, b, c, w6_t, MD5C11, MD5S11);
  MD5_STEP (MD5_Go, c, d, a, b, wb_t, MD5C12, MD5S12);
  MD5_STEP (MD5_Go, b, c, d, a, w0_t, MD5C13, MD5S13);
  MD5_STEP (MD5_Go, a, b, c, d, w5_t, MD5C14, MD5S10);
  MD5_STEP (MD5_Go, d, a, b, c, wa_t, MD5C15, MD5S11);
  MD5_STEP (MD5_Go, c, d, a, b, wf_t, MD5C16, MD5S12);
  MD5_STEP (MD5_Go, b, c, d, a, w4_t, MD5C17, MD5S13);
  MD5_STEP (MD5_Go, a, b, c, d, w9_t, MD5C18, MD5S10);
  MD5_STEP (MD5_Go, d, a, b, c, we_t, MD5C19, MD5S11);
  MD5_STEP (MD5_Go, c, d, a, b, w3_t, MD5C1a, MD5S12);
  MD5_STEP (MD5_Go, b, c, d, a, w8_t, MD5C1b, MD5S13);
  MD5_STEP (MD5_Go, a, b, c, d, wd_t, MD5C1c, MD5S10);
  MD5_STEP (MD5_Go, d, a, b, c, w2_t, MD5C1d, MD5S11);
  MD5_STEP (MD5_Go, c, d, a, b, w7_t, MD5C1e, MD5S12);
  MD5_STEP (MD5_Go, b, c, d, a, wc_t, MD5C1f, MD5S13);

  MD5_STEP (MD5_H , a, b, c, d, w5_t, MD5C20, MD5S20);
  MD5_STEP (MD5_H , d, a, b, c, w8_t, MD5C21, MD5S21);
  MD5_STEP (MD5_H , c, d, a, b, wb_t, MD5C22, MD5S22);
  MD5_STEP (MD5_H , b, c, d, a, we_t, MD5C23, MD5S23);
  MD5_STEP (MD5_H , a, b, c, d, w1_t, MD5C24, MD5S20);
  MD5_STEP (MD5_H , d, a, b, c, w4_t, MD5C25, MD5S21);
  MD5_STEP (MD5_H , c, d, a, b, w7_t, MD5C26, MD5S22);
  MD5_STEP (MD5_H , b, c, d, a, wa_t, MD5C27, MD5S23);
  MD5_STEP (MD5_H , a, b, c, d, wd_t, MD5C28, MD5S20);
  MD5_STEP (MD5_H , d, a, b, c, w0_t, MD5C29, MD5S21);
  MD5_STEP (MD5_H , c, d, a, b, w3_t, MD5C2a, MD5S22);
  MD5_STEP (MD5_H , b, c, d, a, w6_t, MD5C2b, MD5S23);
  MD5_STEP (MD5_H , a, b, c, d, w9_t, MD5C2c, MD5S20);
  MD5_STEP (MD5_H , d, a, b, c, wc_t, MD5C2d, MD5S21);
  MD5_STEP (MD5_H , c, d, a, b, wf_t, MD5C2e, MD5S22);
  MD5_STEP (MD5_H , b, c, d, a, w2_t, MD5C2f, MD5S23);

  MD5_STEP (MD5_I , a, b, c, d, w0_t, MD5C30, MD5S30);
  MD5_STEP (MD5_I , d, a, b, c, w7_t, MD5C31, MD5S31);
  MD5_STEP (MD5_I , c, d, a, b, we_t, MD5C32, MD5S32);
  MD5_STEP (MD5_I , b, c, d, a, w5_t, MD5C33, MD5S33);
  MD5_STEP (MD5_I , a, b, c, d, wc_t, MD5C34, MD5S30);
  MD5_STEP (MD5_I , d, a, b, c, w3_t, MD5C35, MD5S31);
  MD5_STEP (MD5_I , c, d, a, b, wa_t, MD5C36, MD5S32);
  MD5_STEP (MD5_I , b, c, d, a, w1_t, MD5C37, MD5S33);
  MD5_STEP (MD5_I , a, b, c, d, w8_t, MD5C38, MD5S30);
  MD5_STEP (MD5_I , d, a, b, c, wf_t, MD5C39, MD5S31);
  MD5_STEP (MD5_I , c, d, a, b, w6_t, MD5C3a, MD5S32);
  MD5_STEP (MD5_I , b, c, d, a, wd_t, MD5C3b, MD5S33);
  MD5_STEP (MD5_I , a, b, c, d, w4_t, MD5C3c, MD5S30);
  MD5_STEP (MD5_I , d, a, b, c, wb_t, MD5C3d, MD5S31);
  MD5_STEP (MD5_I , c, d, a, b, w2_t, MD5C3e, MD5S32);
  MD5_STEP (MD5_I , b, c, d, a, w9_t, MD5C3f, MD5S33);

  digest[0] += a;
  digest[1] += b;
  digest[2] += c;
  digest[3] += d;
}
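
// hmac_md5_pad(): computes the HMAC-MD5 ipad and opad states for the key held
// in w0..w3. Note the second XOR constant is 0x6a because the words were
// already XORed with 0x36, and 0x36 ^ 0x5c == 0x6a, which yields key ^ 0x5c
// for the opad block.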

static void hmac_md5_pad (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 ipad[4], u32 opad[4])
{
  w0[0] = w0[0] ^ 0x36363636;
  w0[1] = w0[1] ^ 0x36363636;
  w0[2] = w0[2] ^ 0x36363636;
  w0[3] = w0[3] ^ 0x36363636;
  w1[0] = w1[0] ^ 0x36363636;
  w1[1] = w1[1] ^ 0x36363636;
  w1[2] = w1[2] ^ 0x36363636;
  w1[3] = w1[3] ^ 0x36363636;
  w2[0] = w2[0] ^ 0x36363636;
  w2[1] = w2[1] ^ 0x36363636;
  w2[2] = w2[2] ^ 0x36363636;
  w2[3] = w2[3] ^ 0x36363636;
  w3[0] = w3[0] ^ 0x36363636;
  w3[1] = w3[1] ^ 0x36363636;
  w3[2] = w3[2] ^ 0x36363636;
  w3[3] = w3[3] ^ 0x36363636;

  ipad[0] = MD5M_A;
  ipad[1] = MD5M_B;
  ipad[2] = MD5M_C;
  ipad[3] = MD5M_D;

  md5_transform (w0, w1, w2, w3, ipad);

  w0[0] = w0[0] ^ 0x6a6a6a6a;
  w0[1] = w0[1] ^ 0x6a6a6a6a;
  w0[2] = w0[2] ^ 0x6a6a6a6a;
  w0[3] = w0[3] ^ 0x6a6a6a6a;
  w1[0] = w1[0] ^ 0x6a6a6a6a;
  w1[1] = w1[1] ^ 0x6a6a6a6a;
  w1[2] = w1[2] ^ 0x6a6a6a6a;
  w1[3] = w1[3] ^ 0x6a6a6a6a;
  w2[0] = w2[0] ^ 0x6a6a6a6a;
  w2[1] = w2[1] ^ 0x6a6a6a6a;
  w2[2] = w2[2] ^ 0x6a6a6a6a;
  w2[3] = w2[3] ^ 0x6a6a6a6a;
  w3[0] = w3[0] ^ 0x6a6a6a6a;
  w3[1] = w3[1] ^ 0x6a6a6a6a;
  w3[2] = w3[2] ^ 0x6a6a6a6a;
  w3[3] = w3[3] ^ 0x6a6a6a6a;

  opad[0] = MD5M_A;
  opad[1] = MD5M_B;
  opad[2] = MD5M_C;
  opad[3] = MD5M_D;

  md5_transform (w0, w1, w2, w3, opad);
}
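
// hmac_md5_run(): finishes an HMAC-MD5 for a message that fits in one block.
// The caller passes the final, already padded message block; it is absorbed
// into the ipad state, then the 16-byte inner digest is hashed under the opad
// state to produce the HMAC in digest[].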

static void hmac_md5_run (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 ipad[4], u32 opad[4], u32 digest[4])
{
  digest[0] = ipad[0];
  digest[1] = ipad[1];
  digest[2] = ipad[2];
  digest[3] = ipad[3];

  md5_transform (w0, w1, w2, w3, digest);

  w0[0] = digest[0];
  w0[1] = digest[1];
  w0[2] = digest[2];
  w0[3] = digest[3];
  w1[0] = 0x80;
  w1[1] = 0;
  w1[2] = 0;
  w1[3] = 0;
  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;
  w3[0] = 0;
  w3[1] = 0;
  w3[2] = (64 + 16) * 8;
  w3[3] = 0;

  digest[0] = opad[0];
  digest[1] = opad[1];
  digest[2] = opad[2];
  digest[3] = opad[3];

  md5_transform (w0, w1, w2, w3, digest);
}
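
// decrypt_and_check(): data[] holds K3, the RC4 key for edata2. The first 32
// decrypted bytes are checked against the expected ASN.1 header as a cheap
// early reject, then edata2 is decrypted again in full and HMAC-MD5 under K2
// is computed over the plaintext and compared with the stored checksum.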

static int decrypt_and_check (__local RC4_KEY *rc4_key, u32 data[4], __global u32 *edata2, const u32 edata2_len, const u32 K2[4], const u32 checksum[4])
{
  rc4_init_16 (rc4_key, data);

  u32 out0[4];
  u32 out1[4];

  u8 i = 0;
  u8 j = 0;

  /*
    The first 8 bytes are the confounder (nonce); after that come ASN.1 structures (DER encoding: type-length-data).

    If the length is >= 128 bytes:
      the length takes 2 bytes, the type is \x63\x82 (encode_krb5_enc_tkt_part) and the data is an ASN.1 sequence \x30\x82
    else:
      the length takes 1 byte, the type is \x63\x81 and the data is an ASN.1 sequence \x30\x81

    The following headers use the same ASN.1 "type-length-data" scheme.
  */
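
  // Concretely (a reading of the checks below, little-endian words): out0[2]
  // holds plaintext bytes 8..11, the start of the ASN.1 data.
  // (out0[2] & 0xff00ffff) == 0x30008163 matches 0x63 0x81 <len> 0x30, and
  // (out0[2] & 0x0000ffff) == 0x00008263 matches the two-byte-length form
  // 0x63 0x82; a candidate is rejected only if neither pattern fits.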

  j = rc4_next_16 (rc4_key, i, j, edata2 + 0, out0); i += 16;

  if (((out0[2] & 0xff00ffff) != 0x30008163) && ((out0[2] & 0x0000ffff) != 0x00008263)) return 0;

  j = rc4_next_16 (rc4_key, i, j, edata2 + 4, out1); i += 16;

  if (((out1[0] & 0x00ffffff) != 0x00000503) && (out1[0] != 0x050307A0)) return 0;

  rc4_init_16 (rc4_key, data);

  i = 0;
  j = 0;

  // init hmac

  u32 w0[4];
  u32 w1[4];
  u32 w2[4];
  u32 w3[4];

  w0[0] = K2[0];
  w0[1] = K2[1];
  w0[2] = K2[2];
  w0[3] = K2[3];
  w1[0] = 0;
  w1[1] = 0;
  w1[2] = 0;
  w1[3] = 0;
  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;
  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = 0;

  u32 ipad[4];
  u32 opad[4];

  hmac_md5_pad (w0, w1, w2, w3, ipad, opad);

  int edata2_left;

  for (edata2_left = edata2_len; edata2_left >= 64; edata2_left -= 64)
  {
    j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4;
    j = rc4_next_16 (rc4_key, i, j, edata2, w1); i += 16; edata2 += 4;
    j = rc4_next_16 (rc4_key, i, j, edata2, w2); i += 16; edata2 += 4;
    j = rc4_next_16 (rc4_key, i, j, edata2, w3); i += 16; edata2 += 4;

    md5_transform (w0, w1, w2, w3, ipad);
  }

  w0[0] = 0;
  w0[1] = 0;
  w0[2] = 0;
  w0[3] = 0;
  w1[0] = 0;
  w1[1] = 0;
  w1[2] = 0;
  w1[3] = 0;
  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;
  w3[0] = 0;
  w3[1] = 0;
  w3[2] = 0;
  w3[3] = 0;

  if (edata2_left < 16)
  {
    j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4;

    truncate_block (w0, edata2_left & 0xf);
    append_0x80_1x4 (w0, edata2_left & 0xf);

    w3[2] = (64 + edata2_len) * 8;
    w3[3] = 0;

    md5_transform (w0, w1, w2, w3, ipad);
  }
  else if (edata2_left < 32)
  {
    j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4;
    j = rc4_next_16 (rc4_key, i, j, edata2, w1); i += 16; edata2 += 4;

    truncate_block (w1, edata2_left & 0xf);
    append_0x80_1x4 (w1, edata2_left & 0xf);

    w3[2] = (64 + edata2_len) * 8;
    w3[3] = 0;

    md5_transform (w0, w1, w2, w3, ipad);
  }
  else if (edata2_left < 48)
  {
    j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4;
    j = rc4_next_16 (rc4_key, i, j, edata2, w1); i += 16; edata2 += 4;
    j = rc4_next_16 (rc4_key, i, j, edata2, w2); i += 16; edata2 += 4;

    truncate_block (w2, edata2_left & 0xf);
    append_0x80_1x4 (w2, edata2_left & 0xf);

    w3[2] = (64 + edata2_len) * 8;
    w3[3] = 0;

    md5_transform (w0, w1, w2, w3, ipad);
  }
  else
  {
    j = rc4_next_16 (rc4_key, i, j, edata2, w0); i += 16; edata2 += 4;
    j = rc4_next_16 (rc4_key, i, j, edata2, w1); i += 16; edata2 += 4;
    j = rc4_next_16 (rc4_key, i, j, edata2, w2); i += 16; edata2 += 4;
    j = rc4_next_16 (rc4_key, i, j, edata2, w3); i += 16; edata2 += 4;

    truncate_block (w3, edata2_left & 0xf);
    append_0x80_1x4 (w3, edata2_left & 0xf);

    if (edata2_left < 56)
    {
      w3[2] = (64 + edata2_len) * 8;
      w3[3] = 0;

      md5_transform (w0, w1, w2, w3, ipad);
    }
    else
    {
      md5_transform (w0, w1, w2, w3, ipad);

      w0[0] = 0;
      w0[1] = 0;
      w0[2] = 0;
      w0[3] = 0;
      w1[0] = 0;
      w1[1] = 0;
      w1[2] = 0;
      w1[3] = 0;
      w2[0] = 0;
      w2[1] = 0;
      w2[2] = 0;
      w2[3] = 0;
      w3[0] = 0;
      w3[1] = 0;
      w3[2] = (64 + edata2_len) * 8;
      w3[3] = 0;

      md5_transform (w0, w1, w2, w3, ipad);
    }
  }

  w0[0] = ipad[0];
  w0[1] = ipad[1];
  w0[2] = ipad[2];
  w0[3] = ipad[3];
  w1[0] = 0x80;
  w1[1] = 0;
  w1[2] = 0;
  w1[3] = 0;
  w2[0] = 0;
  w2[1] = 0;
  w2[2] = 0;
  w2[3] = 0;
  w3[0] = 0;
  w3[1] = 0;
  w3[2] = (64 + 16) * 8;
  w3[3] = 0;

  md5_transform (w0, w1, w2, w3, opad);

  if (checksum[0] != opad[0]) return 0;
  if (checksum[1] != opad[1]) return 0;
  if (checksum[2] != opad[2]) return 0;
  if (checksum[3] != opad[3]) return 0;

  return 1;
}
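
// kerb_prepare(): RC4-HMAC key derivation for the TGS-REP ticket.
//   K  = MD4(UTF-16LE(password))      (the NT hash)
//   K1 = HMAC-MD5(K, 02 00 00 00)     (message type 2, little endian)
//   K2 = K1
//   K3 = HMAC-MD5(K1, checksum)       (returned in digest[], used as RC4 key)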

static void kerb_prepare (const u32 w0[4], const u32 w1[4], const u32 pw_len, const u32 checksum[4], u32 digest[4], u32 K2[4])
{
  /**
   * pads
   */

  u32 w0_t[4];
  u32 w1_t[4];
  u32 w2_t[4];
  u32 w3_t[4];

  w0_t[0] = w0[0];
  w0_t[1] = w0[1];
  w0_t[2] = w0[2];
  w0_t[3] = w0[3];
  w1_t[0] = w1[0];
  w1_t[1] = w1[1];
  w1_t[2] = w1[2];
  w1_t[3] = w1[3];
  w2_t[0] = 0;
  w2_t[1] = 0;
  w2_t[2] = 0;
  w2_t[3] = 0;
  w3_t[0] = 0;
  w3_t[1] = 0;
  w3_t[2] = 0;
  w3_t[3] = 0;

  // K = MD4(little_endian(UNICODE(pwd)))

  append_0x80_2x4 (w0_t, w1_t, pw_len);

  make_unicode (w1_t, w2_t, w3_t);
  make_unicode (w0_t, w0_t, w1_t);

  w3_t[2] = pw_len * 8 * 2;
  w3_t[3] = 0;

  digest[0] = MD4M_A;
  digest[1] = MD4M_B;
  digest[2] = MD4M_C;
  digest[3] = MD4M_D;

  md4_transform (w0_t, w1_t, w2_t, w3_t, digest);

  // K1 = MD5_HMAC(K, 2); with 2 encoded as little endian on 4 bytes (02000000 in hex)

  w0_t[0] = digest[0];
  w0_t[1] = digest[1];
  w0_t[2] = digest[2];
  w0_t[3] = digest[3];
  w1_t[0] = 0;
  w1_t[1] = 0;
  w1_t[2] = 0;
  w1_t[3] = 0;
  w2_t[0] = 0;
  w2_t[1] = 0;
  w2_t[2] = 0;
  w2_t[3] = 0;
  w3_t[0] = 0;
  w3_t[1] = 0;
  w3_t[2] = 0;
  w3_t[3] = 0;

  u32 ipad[4];
  u32 opad[4];

  hmac_md5_pad (w0_t, w1_t, w2_t, w3_t, ipad, opad);

  w0_t[0] = 2;
  w0_t[1] = 0x80;
  w0_t[2] = 0;
  w0_t[3] = 0;
  w1_t[0] = 0;
  w1_t[1] = 0;
  w1_t[2] = 0;
  w1_t[3] = 0;
  w2_t[0] = 0;
  w2_t[1] = 0;
  w2_t[2] = 0;
  w2_t[3] = 0;
  w3_t[0] = 0;
  w3_t[1] = 0;
  w3_t[2] = (64 + 4) * 8;
  w3_t[3] = 0;

  hmac_md5_run (w0_t, w1_t, w2_t, w3_t, ipad, opad, digest);

  // K2 = K1

  K2[0] = digest[0];
  K2[1] = digest[1];
  K2[2] = digest[2];
  K2[3] = digest[3];

  // K3 = MD5_HMAC(K1, checksum)

  w0_t[0] = digest[0];
  w0_t[1] = digest[1];
  w0_t[2] = digest[2];
  w0_t[3] = digest[3];
  w1_t[0] = 0;
  w1_t[1] = 0;
  w1_t[2] = 0;
  w1_t[3] = 0;
  w2_t[0] = 0;
  w2_t[1] = 0;
  w2_t[2] = 0;
  w2_t[3] = 0;
  w3_t[0] = 0;
  w3_t[1] = 0;
  w3_t[2] = 0;
  w3_t[3] = 0;

  hmac_md5_pad (w0_t, w1_t, w2_t, w3_t, ipad, opad);

  w0_t[0] = checksum[0];
  w0_t[1] = checksum[1];
  w0_t[2] = checksum[2];
  w0_t[3] = checksum[3];
  w1_t[0] = 0x80;
  w1_t[1] = 0;
  w1_t[2] = 0;
  w1_t[3] = 0;
  w2_t[0] = 0;
  w2_t[1] = 0;
  w2_t[2] = 0;
  w2_t[3] = 0;
  w3_t[0] = 0;
  w3_t[1] = 0;
  w3_t[2] = (64 + 16) * 8;
  w3_t[3] = 0;

  hmac_md5_run (w0_t, w1_t, w2_t, w3_t, ipad, opad, digest);
}
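
// m13100(): candidate loop helper. For each il_pos the per-iteration word from
// bfs_buf is ORed into w0[0], kerb_prepare() derives K2/K3 and
// decrypt_and_check() verifies the candidate, marking the hash on success.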

static void m13100 (__local RC4_KEY *rc4_keys, u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global krb5tgs_t *krb5tgs_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset)
{
  /**
   * modifier
   */

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);

  /**
   * salt
   */

  u32 checksum[4];

  checksum[0] = krb5tgs_bufs[salt_pos].checksum[0];
  checksum[1] = krb5tgs_bufs[salt_pos].checksum[1];
  checksum[2] = krb5tgs_bufs[salt_pos].checksum[2];
  checksum[3] = krb5tgs_bufs[salt_pos].checksum[3];

  /**
   * loop
   */

  u32 w0l = w0[0];

  for (u32 il_pos = 0; il_pos < il_cnt; il_pos++)
  {
    const u32 w0r = bfs_buf[il_pos].i;

    w0[0] = w0l | w0r;

    u32 digest[4];

    u32 K2[4];

    kerb_prepare (w0, w1, pw_len, checksum, digest, K2);

    u32 tmp[4];

    tmp[0] = digest[0];
    tmp[1] = digest[1];
    tmp[2] = digest[2];
    tmp[3] = digest[3];

    if (decrypt_and_check (&rc4_keys[lid], tmp, krb5tgs_bufs[salt_pos].edata2, krb5tgs_bufs[salt_pos].edata2_len, K2, checksum) == 1)
    {
      mark_hash (plains_buf, hashes_shown, digests_offset, gid, il_pos);

      d_return_buf[lid] = 1;
    }
  }
}
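
// Kernel entry points. The _m04/_s04 suffixes follow hashcat's multi-hash /
// single-hash kernel naming; for this mode both variants run the same full
// decrypt-and-check, so their bodies are identical.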

__kernel void m13100_m04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global krb5tgs_t *krb5tgs_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */

  __local RC4_KEY rc4_keys[64];

  const u32 lid = get_local_id (0);
  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  /**
   * base
   */

  u32 pw_buf0[4];

  pw_buf0[0] = pws[gid].i[ 0];
  pw_buf0[1] = pws[gid].i[ 1];
  pw_buf0[2] = pws[gid].i[ 2];
  pw_buf0[3] = pws[gid].i[ 3];

  u32 pw_buf1[4];

  pw_buf1[0] = pws[gid].i[ 4];
  pw_buf1[1] = pws[gid].i[ 5];
  pw_buf1[2] = pws[gid].i[ 6];
  pw_buf1[3] = pws[gid].i[ 7];

  const u32 pw_len = pws[gid].pw_len;

  /**
   * salt
   */

  u32 checksum[4];

  checksum[0] = krb5tgs_bufs[salt_pos].checksum[0];
  checksum[1] = krb5tgs_bufs[salt_pos].checksum[1];
  checksum[2] = krb5tgs_bufs[salt_pos].checksum[2];
  checksum[3] = krb5tgs_bufs[salt_pos].checksum[3];

  /**
   * loop
   */

  for (u32 il_pos = 0; il_pos < il_cnt; il_pos++)
  {
    u32 w0[4];

    w0[0] = pw_buf0[0];
    w0[1] = pw_buf0[1];
    w0[2] = pw_buf0[2];
    w0[3] = pw_buf0[3];

    u32 w1[4];

    w1[0] = pw_buf1[0];
    w1[1] = pw_buf1[1];
    w1[2] = pw_buf1[2];
    w1[3] = pw_buf1[3];

    u32 w2[4];

    w2[0] = 0;
    w2[1] = 0;
    w2[2] = 0;
    w2[3] = 0;

    u32 w3[4];

    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
    w3[3] = 0;

    const u32 out_len = apply_rules (rules_buf[il_pos].cmds, w0, w1, pw_len);

    /**
     * kerberos
     */

    u32 digest[4];

    u32 K2[4];

    kerb_prepare (w0, w1, pw_len, checksum, digest, K2);

    u32 tmp[4];

    tmp[0] = digest[0];
    tmp[1] = digest[1];
    tmp[2] = digest[2];
    tmp[3] = digest[3];

    if (decrypt_and_check (&rc4_keys[lid], tmp, krb5tgs_bufs[salt_pos].edata2, krb5tgs_bufs[salt_pos].edata2_len, K2, checksum) == 1)
    {
      mark_hash (plains_buf, hashes_shown, digests_offset, gid, il_pos);

      d_return_buf[lid] = 1;
    }
  }
}
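
// The m13100_m08/_m16 and _s08/_s16 entry points below are left as empty
// stubs; password lengths that would require them are presumably not handled
// by this kernel.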

__kernel void m13100_m08 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global krb5tgs_t *krb5tgs_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
}

__kernel void m13100_m16 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global krb5tgs_t *krb5tgs_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
}

__kernel void m13100_s04 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global krb5tgs_t *krb5tgs_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */

  __local RC4_KEY rc4_keys[64];

  const u32 lid = get_local_id (0);
  const u32 gid = get_global_id (0);

  if (gid >= gid_max) return;

  /**
   * base
   */

  u32 pw_buf0[4];

  pw_buf0[0] = pws[gid].i[ 0];
  pw_buf0[1] = pws[gid].i[ 1];
  pw_buf0[2] = pws[gid].i[ 2];
  pw_buf0[3] = pws[gid].i[ 3];

  u32 pw_buf1[4];

  pw_buf1[0] = pws[gid].i[ 4];
  pw_buf1[1] = pws[gid].i[ 5];
  pw_buf1[2] = pws[gid].i[ 6];
  pw_buf1[3] = pws[gid].i[ 7];

  const u32 pw_len = pws[gid].pw_len;

  /**
   * salt
   */

  u32 checksum[4];

  checksum[0] = krb5tgs_bufs[salt_pos].checksum[0];
  checksum[1] = krb5tgs_bufs[salt_pos].checksum[1];
  checksum[2] = krb5tgs_bufs[salt_pos].checksum[2];
  checksum[3] = krb5tgs_bufs[salt_pos].checksum[3];

  /**
   * loop
   */

  for (u32 il_pos = 0; il_pos < il_cnt; il_pos++)
  {
    u32 w0[4];

    w0[0] = pw_buf0[0];
    w0[1] = pw_buf0[1];
    w0[2] = pw_buf0[2];
    w0[3] = pw_buf0[3];

    u32 w1[4];

    w1[0] = pw_buf1[0];
    w1[1] = pw_buf1[1];
    w1[2] = pw_buf1[2];
    w1[3] = pw_buf1[3];

    u32 w2[4];

    w2[0] = 0;
    w2[1] = 0;
    w2[2] = 0;
    w2[3] = 0;

    u32 w3[4];

    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
    w3[3] = 0;

    const u32 out_len = apply_rules (rules_buf[il_pos].cmds, w0, w1, pw_len);

    /**
     * kerberos
     */

    u32 digest[4];

    u32 K2[4];

    kerb_prepare (w0, w1, pw_len, checksum, digest, K2);

    u32 tmp[4];

    tmp[0] = digest[0];
    tmp[1] = digest[1];
    tmp[2] = digest[2];
    tmp[3] = digest[3];

    if (decrypt_and_check (&rc4_keys[lid], tmp, krb5tgs_bufs[salt_pos].edata2, krb5tgs_bufs[salt_pos].edata2_len, K2, checksum) == 1)
    {
      mark_hash (plains_buf, hashes_shown, digests_offset, gid, il_pos);

      d_return_buf[lid] = 1;
    }
  }
}

__kernel void m13100_s08 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global krb5tgs_t *krb5tgs_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
}

__kernel void m13100_s16 (__global pw_t *pws, __global kernel_rule_t *rules_buf, __global comb_t *combs_buf, __global bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global u32 *bitmaps_buf_s1_a, __global u32 *bitmaps_buf_s1_b, __global u32 *bitmaps_buf_s1_c, __global u32 *bitmaps_buf_s1_d, __global u32 *bitmaps_buf_s2_a, __global u32 *bitmaps_buf_s2_b, __global u32 *bitmaps_buf_s2_c, __global u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global digest_t *digests_buf, __global u32 *hashes_shown, __global salt_t *salt_bufs, __global krb5tgs_t *krb5tgs_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
}