mirror of https://github.com/hashcat/hashcat.git synced 2024-11-22 08:08:10 +00:00

RC4: Updated hash-mode 7500, 9710, 9720, 10400 and 10410 to new RC4 crypto library code, improving performance by 20% or more

Jens Steube 2021-05-30 19:53:28 +02:00
parent 2a55fd7f33
commit 4e565efcf9
8 changed files with 101 additions and 858 deletions
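The duplicated per-kernel implementation removed in the hunks below is standard RC4 keyed with 128 bits. As a reference point for the shared code, here is a behavioral sketch in plain C, reconstructed from those removed lines; the names rc4_init_128 and rc4_next_16 are taken from the new call sites in this diff, but the real inc_cipher_rc4.cl versions operate on LOCAL_AS u32 state sized by FIXED_LOCAL_SIZE, so treat this as an illustration only.

#include <stdint.h>

/* swap two state bytes (mirrors the removed swap() helper) */
static void rc4_swap (uint8_t *S, uint8_t a, uint8_t b)
{
  uint8_t t = S[a]; S[a] = S[b]; S[b] = t;
}

/* RC4 key schedule with a 16-byte (128-bit) key; behaviorally equivalent
   to the removed rc4_init_16 (), which unrolls this loop over u32 words */
static void rc4_init_128 (uint8_t *S, const uint8_t key[16])
{
  for (int i = 0; i < 256; i++) S[i] = (uint8_t) i;

  uint8_t j = 0;

  for (int i = 0; i < 256; i++)
  {
    j += S[i] + key[i & 15];

    rc4_swap (S, (uint8_t) i, j);
  }
}

/* generate 16 keystream bytes starting after position i, XOR them into out,
   and return the updated j so the caller can continue the stream;
   decrypt_and_check () chains two calls: (i = 0, j = 0), then (i = 16, j) */
static uint8_t rc4_next_16 (uint8_t *S, uint8_t i, uint8_t j, const uint8_t *in, uint8_t *out)
{
  for (int k = 0; k < 16; k++)
  {
    i += 1;
    j += S[i];

    rc4_swap (S, i, j);

    out[k] = in[k] ^ S[(uint8_t) (S[i] + S[j])];
  }

  return j;
}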

View File

@@ -16,6 +16,7 @@
#include "inc_simd.cl"
#include "inc_hash_md4.cl"
#include "inc_hash_md5.cl"
#include "inc_cipher_rc4.cl"
#endif
typedef struct krb5pa
@@ -28,142 +29,19 @@ typedef struct krb5pa
} krb5pa_t;
typedef struct
DECLSPEC int decrypt_and_check (LOCAL_AS u32 *S, u32 *data, u32 *timestamp_ct)
{
u8 S[256];
u32 wtf_its_faster;
} RC4_KEY;
DECLSPEC void swap (LOCAL_AS RC4_KEY *rc4_key, const u8 i, const u8 j)
{
u8 tmp;
tmp = rc4_key->S[i];
rc4_key->S[i] = rc4_key->S[j];
rc4_key->S[j] = tmp;
}
DECLSPEC void rc4_init_16 (LOCAL_AS RC4_KEY *rc4_key, const u32 *data)
{
u32 v = 0x03020100;
u32 a = 0x04040404;
LOCAL_AS u32 *ptr = (LOCAL_AS u32 *) rc4_key->S;
#ifdef _unroll
#pragma unroll
#endif
for (u32 i = 0; i < 64; i++)
{
*ptr++ = v; v += a;
}
u32 j = 0;
for (u32 i = 0; i < 16; i++)
{
u32 idx = i * 16;
u32 v;
v = data[0];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
v = data[1];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
v = data[2];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
v = data[3];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
}
}
DECLSPEC u8 rc4_next_16 (LOCAL_AS RC4_KEY *rc4_key, u8 i, u8 j, const u32 *in, u32 *out)
{
#ifdef _unroll
#pragma unroll
#endif
for (u32 k = 0; k < 4; k++)
{
u32 xor4 = 0;
u8 idx;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 0;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 8;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 16;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 24;
out[k] = in[k] ^ xor4;
}
return j;
}
DECLSPEC int decrypt_and_check (LOCAL_AS RC4_KEY *rc4_key, u32 *data, u32 *timestamp_ct)
{
rc4_init_16 (rc4_key, data);
rc4_init_128 (S, data);
u32 out[4];
u8 j = 0;
j = rc4_next_16 (rc4_key, 0, j, timestamp_ct + 0, out);
j = rc4_next_16 (S, 0, j, timestamp_ct + 0, out);
if ((out[3] & 0xffff0000) != 0x30320000) return 0;
j = rc4_next_16 (rc4_key, 16, j, timestamp_ct + 4, out);
j = rc4_next_16 (S, 16, j, timestamp_ct + 4, out);
if (((out[0] & 0xff) < '0') || ((out[0] & 0xff) > '9')) return 0; out[0] >>= 8;
if (((out[0] & 0xff) < '0') || ((out[0] & 0xff) > '9')) return 0; out[0] >>= 8;
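A note on the constants in decrypt_and_check above: assuming the usual RC4-HMAC layout, the decrypted buffer holds an 8-byte confounder followed by the DER-encoded PA-ENC-TS-ENC, which places the ASCII timestamp "20YYMMDDHHMMSSZ" at byte offset 14. out[3] packs plaintext bytes 12..15 little-endian, so the mask against 0x30320000 requires byte 14 to be '2' (0x32) and byte 15 to be '0' (0x30), and the repeated digit-range tests then validate the "YYMMDDHHMMSS" characters in the second block.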
@@ -451,9 +329,7 @@ KERNEL_FQ void m07500_m04 (KERN_ATTR_RULES_ESALT (krb5pa_t))
* shared
*/
LOCAL_VK RC4_KEY rc4_keys[64];
LOCAL_AS RC4_KEY *rc4_key = &rc4_keys[lid];
LOCAL_VK u32 S[64 * FIXED_LOCAL_SIZE];
/**
* loop
@@ -483,7 +359,7 @@ KERNEL_FQ void m07500_m04 (KERN_ATTR_RULES_ESALT (krb5pa_t))
tmp[2] = digest[2];
tmp[3] = digest[3];
if (decrypt_and_check (rc4_key, tmp, timestamp_ct) == 1)
if (decrypt_and_check (S, tmp, timestamp_ct) == 1)
{
if (hc_atomic_inc (&hashes_shown[DIGESTS_OFFSET]) == 0)
{
@@ -557,9 +433,7 @@ KERNEL_FQ void m07500_s04 (KERN_ATTR_RULES_ESALT (krb5pa_t))
* shared
*/
LOCAL_VK RC4_KEY rc4_keys[64];
LOCAL_AS RC4_KEY *rc4_key = &rc4_keys[lid];
LOCAL_VK u32 S[64 * FIXED_LOCAL_SIZE];
/**
* loop
@@ -589,7 +463,7 @@ KERNEL_FQ void m07500_s04 (KERN_ATTR_RULES_ESALT (krb5pa_t))
tmp[2] = digest[2];
tmp[3] = digest[3];
if (decrypt_and_check (rc4_key, tmp, timestamp_ct) == 1)
if (decrypt_and_check (S, tmp, timestamp_ct) == 1)
{
if (hc_atomic_inc (&hashes_shown[DIGESTS_OFFSET]) == 0)
{

View File

@@ -15,6 +15,7 @@
#include "inc_rp.cl"
#include "inc_hash_md4.cl"
#include "inc_hash_md5.cl"
#include "inc_cipher_rc4.cl"
#endif
typedef struct krb5pa
@@ -27,142 +28,19 @@ typedef struct krb5pa
} krb5pa_t;
typedef struct
DECLSPEC int decrypt_and_check (LOCAL_AS u32 *S, u32 *data, u32 *timestamp_ct)
{
u8 S[256];
u32 wtf_its_faster;
} RC4_KEY;
DECLSPEC void swap (LOCAL_AS RC4_KEY *rc4_key, const u8 i, const u8 j)
{
u8 tmp;
tmp = rc4_key->S[i];
rc4_key->S[i] = rc4_key->S[j];
rc4_key->S[j] = tmp;
}
DECLSPEC void rc4_init_16 (LOCAL_AS RC4_KEY *rc4_key, const u32 *data)
{
u32 v = 0x03020100;
u32 a = 0x04040404;
LOCAL_AS u32 *ptr = (LOCAL_AS u32 *) rc4_key->S;
#ifdef _unroll
#pragma unroll
#endif
for (u32 i = 0; i < 64; i++)
{
*ptr++ = v; v += a;
}
u32 j = 0;
for (u32 i = 0; i < 16; i++)
{
u32 idx = i * 16;
u32 v;
v = data[0];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
v = data[1];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
v = data[2];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
v = data[3];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
}
}
DECLSPEC u8 rc4_next_16 (LOCAL_AS RC4_KEY *rc4_key, u8 i, u8 j, const u32 *in, u32 *out)
{
#ifdef _unroll
#pragma unroll
#endif
for (u32 k = 0; k < 4; k++)
{
u32 xor4 = 0;
u8 idx;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 0;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 8;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 16;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 24;
out[k] = in[k] ^ xor4;
}
return j;
}
DECLSPEC int decrypt_and_check (LOCAL_AS RC4_KEY *rc4_key, u32 *data, u32 *timestamp_ct)
{
rc4_init_16 (rc4_key, data);
rc4_init_128 (S, data);
u32 out[4];
u8 j = 0;
j = rc4_next_16 (rc4_key, 0, j, timestamp_ct + 0, out);
j = rc4_next_16 (S, 0, j, timestamp_ct + 0, out);
if ((out[3] & 0xffff0000) != 0x30320000) return 0;
j = rc4_next_16 (rc4_key, 16, j, timestamp_ct + 4, out);
j = rc4_next_16 (S, 16, j, timestamp_ct + 4, out);
if (((out[0] & 0xff) < '0') || ((out[0] & 0xff) > '9')) return 0; out[0] >>= 8;
if (((out[0] & 0xff) < '0') || ((out[0] & 0xff) > '9')) return 0; out[0] >>= 8;
@@ -296,9 +174,7 @@ KERNEL_FQ void m07500_mxx (KERN_ATTR_RULES_ESALT (krb5pa_t))
COPY_PW (pws[gid]);
LOCAL_VK RC4_KEY rc4_keys[64];
LOCAL_AS RC4_KEY *rc4_key = &rc4_keys[lid];
LOCAL_VK u32 S[64 * FIXED_LOCAL_SIZE];
u32 checksum[4];
@@ -340,7 +216,7 @@ KERNEL_FQ void m07500_mxx (KERN_ATTR_RULES_ESALT (krb5pa_t))
kerb_prepare (ctx.h, checksum, digest);
if (decrypt_and_check (rc4_key, digest, timestamp_ct) == 1)
if (decrypt_and_check (S, digest, timestamp_ct) == 1)
{
if (hc_atomic_inc (&hashes_shown[DIGESTS_OFFSET]) == 0)
{
@@ -367,9 +243,7 @@ KERNEL_FQ void m07500_sxx (KERN_ATTR_RULES_ESALT (krb5pa_t))
COPY_PW (pws[gid]);
LOCAL_VK RC4_KEY rc4_keys[64];
LOCAL_AS RC4_KEY *rc4_key = &rc4_keys[lid];
LOCAL_VK u32 S[64 * FIXED_LOCAL_SIZE];
u32 checksum[4];
@@ -411,7 +285,7 @@ KERNEL_FQ void m07500_sxx (KERN_ATTR_RULES_ESALT (krb5pa_t))
kerb_prepare (ctx.h, checksum, digest);
if (decrypt_and_check (rc4_key, digest, timestamp_ct) == 1)
if (decrypt_and_check (S, digest, timestamp_ct) == 1)
{
if (hc_atomic_inc (&hashes_shown[DIGESTS_OFFSET]) == 0)
{

View File

@@ -14,6 +14,7 @@
#include "inc_simd.cl"
#include "inc_hash_md4.cl"
#include "inc_hash_md5.cl"
#include "inc_cipher_rc4.cl"
#endif
typedef struct krb5pa
@@ -26,142 +27,19 @@ typedef struct krb5pa
} krb5pa_t;
typedef struct
DECLSPEC int decrypt_and_check (LOCAL_AS u32 *S, u32 *data, u32 *timestamp_ct)
{
u8 S[256];
u32 wtf_its_faster;
} RC4_KEY;
DECLSPEC void swap (LOCAL_AS RC4_KEY *rc4_key, const u8 i, const u8 j)
{
u8 tmp;
tmp = rc4_key->S[i];
rc4_key->S[i] = rc4_key->S[j];
rc4_key->S[j] = tmp;
}
DECLSPEC void rc4_init_16 (LOCAL_AS RC4_KEY *rc4_key, const u32 *data)
{
u32 v = 0x03020100;
u32 a = 0x04040404;
LOCAL_AS u32 *ptr = (LOCAL_AS u32 *) rc4_key->S;
#ifdef _unroll
#pragma unroll
#endif
for (u32 i = 0; i < 64; i++)
{
*ptr++ = v; v += a;
}
u32 j = 0;
for (u32 i = 0; i < 16; i++)
{
u32 idx = i * 16;
u32 v;
v = data[0];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
v = data[1];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
v = data[2];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
v = data[3];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
}
}
DECLSPEC u8 rc4_next_16 (LOCAL_AS RC4_KEY *rc4_key, u8 i, u8 j, const u32 *in, u32 *out)
{
#ifdef _unroll
#pragma unroll
#endif
for (u32 k = 0; k < 4; k++)
{
u32 xor4 = 0;
u8 idx;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 0;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 8;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 16;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 24;
out[k] = in[k] ^ xor4;
}
return j;
}
DECLSPEC int decrypt_and_check (LOCAL_AS RC4_KEY *rc4_key, u32 *data, u32 *timestamp_ct)
{
rc4_init_16 (rc4_key, data);
rc4_init_128 (S, data);
u32 out[4];
u8 j = 0;
j = rc4_next_16 (rc4_key, 0, j, timestamp_ct + 0, out);
j = rc4_next_16 (S, 0, j, timestamp_ct + 0, out);
if ((out[3] & 0xffff0000) != 0x30320000) return 0;
j = rc4_next_16 (rc4_key, 16, j, timestamp_ct + 4, out);
j = rc4_next_16 (S, 16, j, timestamp_ct + 4, out);
if (((out[0] & 0xff) < '0') || ((out[0] & 0xff) > '9')) return 0; out[0] >>= 8;
if (((out[0] & 0xff) < '0') || ((out[0] & 0xff) > '9')) return 0; out[0] >>= 8;
@@ -449,9 +327,7 @@ KERNEL_FQ void m07500_m04 (KERN_ATTR_ESALT (krb5pa_t))
* shared
*/
LOCAL_VK RC4_KEY rc4_keys[64];
LOCAL_AS RC4_KEY *rc4_key = &rc4_keys[lid];
LOCAL_VK u32 S[64 * FIXED_LOCAL_SIZE];
/**
* loop
@@ -531,7 +407,7 @@ KERNEL_FQ void m07500_m04 (KERN_ATTR_ESALT (krb5pa_t))
tmp[2] = digest[2];
tmp[3] = digest[3];
if (decrypt_and_check (rc4_key, tmp, timestamp_ct) == 1)
if (decrypt_and_check (S, tmp, timestamp_ct) == 1)
{
if (hc_atomic_inc (&hashes_shown[DIGESTS_OFFSET]) == 0)
{
@@ -605,9 +481,7 @@ KERNEL_FQ void m07500_s04 (KERN_ATTR_ESALT (krb5pa_t))
* shared
*/
LOCAL_VK RC4_KEY rc4_keys[64];
LOCAL_AS RC4_KEY *rc4_key = &rc4_keys[lid];
LOCAL_VK u32 S[64 * FIXED_LOCAL_SIZE];
/**
* loop
@@ -687,7 +561,7 @@ KERNEL_FQ void m07500_s04 (KERN_ATTR_ESALT (krb5pa_t))
tmp[2] = digest[2];
tmp[3] = digest[3];
if (decrypt_and_check (rc4_key, tmp, timestamp_ct) == 1)
if (decrypt_and_check (S, tmp, timestamp_ct) == 1)
{
if (hc_atomic_inc (&hashes_shown[DIGESTS_OFFSET]) == 0)
{

View File

@@ -13,6 +13,7 @@
#include "inc_common.cl"
#include "inc_hash_md4.cl"
#include "inc_hash_md5.cl"
#include "inc_cipher_rc4.cl"
#endif
typedef struct krb5pa
@@ -25,142 +26,19 @@ typedef struct krb5pa
} krb5pa_t;
typedef struct
DECLSPEC int decrypt_and_check (LOCAL_AS u32 *S, u32 *data, u32 *timestamp_ct)
{
u8 S[256];
u32 wtf_its_faster;
} RC4_KEY;
DECLSPEC void swap (LOCAL_AS RC4_KEY *rc4_key, const u8 i, const u8 j)
{
u8 tmp;
tmp = rc4_key->S[i];
rc4_key->S[i] = rc4_key->S[j];
rc4_key->S[j] = tmp;
}
DECLSPEC void rc4_init_16 (LOCAL_AS RC4_KEY *rc4_key, const u32 *data)
{
u32 v = 0x03020100;
u32 a = 0x04040404;
LOCAL_AS u32 *ptr = (LOCAL_AS u32 *) rc4_key->S;
#ifdef _unroll
#pragma unroll
#endif
for (u32 i = 0; i < 64; i++)
{
*ptr++ = v; v += a;
}
u32 j = 0;
for (u32 i = 0; i < 16; i++)
{
u32 idx = i * 16;
u32 v;
v = data[0];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
v = data[1];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
v = data[2];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
v = data[3];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
}
}
DECLSPEC u8 rc4_next_16 (LOCAL_AS RC4_KEY *rc4_key, u8 i, u8 j, const u32 *in, u32 *out)
{
#ifdef _unroll
#pragma unroll
#endif
for (u32 k = 0; k < 4; k++)
{
u32 xor4 = 0;
u8 idx;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 0;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 8;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 16;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 24;
out[k] = in[k] ^ xor4;
}
return j;
}
DECLSPEC int decrypt_and_check (LOCAL_AS RC4_KEY *rc4_key, u32 *data, u32 *timestamp_ct)
{
rc4_init_16 (rc4_key, data);
rc4_init_128 (S, data);
u32 out[4];
u8 j = 0;
j = rc4_next_16 (rc4_key, 0, j, timestamp_ct + 0, out);
j = rc4_next_16 (S, 0, j, timestamp_ct + 0, out);
if ((out[3] & 0xffff0000) != 0x30320000) return 0;
j = rc4_next_16 (rc4_key, 16, j, timestamp_ct + 4, out);
j = rc4_next_16 (S, 16, j, timestamp_ct + 4, out);
if (((out[0] & 0xff) < '0') || ((out[0] & 0xff) > '9')) return 0; out[0] >>= 8;
if (((out[0] & 0xff) < '0') || ((out[0] & 0xff) > '9')) return 0; out[0] >>= 8;
@@ -292,9 +170,7 @@ KERNEL_FQ void m07500_mxx (KERN_ATTR_ESALT (krb5pa_t))
* base
*/
LOCAL_VK RC4_KEY rc4_keys[64];
LOCAL_AS RC4_KEY *rc4_key = &rc4_keys[lid];
LOCAL_VK u32 S[64 * FIXED_LOCAL_SIZE];
u32 checksum[4];
@@ -336,7 +212,7 @@ KERNEL_FQ void m07500_mxx (KERN_ATTR_ESALT (krb5pa_t))
kerb_prepare (ctx.h, checksum, digest);
if (decrypt_and_check (rc4_key, digest, timestamp_ct) == 1)
if (decrypt_and_check (S, digest, timestamp_ct) == 1)
{
if (hc_atomic_inc (&hashes_shown[DIGESTS_OFFSET]) == 0)
{
@@ -361,9 +237,7 @@ KERNEL_FQ void m07500_sxx (KERN_ATTR_ESALT (krb5pa_t))
* base
*/
LOCAL_VK RC4_KEY rc4_keys[64];
LOCAL_AS RC4_KEY *rc4_key = &rc4_keys[lid];
LOCAL_VK u32 S[64 * FIXED_LOCAL_SIZE];
u32 checksum[4];
@@ -405,7 +279,7 @@ KERNEL_FQ void m07500_sxx (KERN_ATTR_ESALT (krb5pa_t))
kerb_prepare (ctx.h, checksum, digest);
if (decrypt_and_check (rc4_key, digest, timestamp_ct) == 1)
if (decrypt_and_check (S, digest, timestamp_ct) == 1)
{
if (hc_atomic_inc (&hashes_shown[DIGESTS_OFFSET]) == 0)
{

View File

@@ -14,6 +14,7 @@
#include "inc_simd.cl"
#include "inc_hash_md4.cl"
#include "inc_hash_md5.cl"
#include "inc_cipher_rc4.cl"
#endif
typedef struct krb5pa
@@ -26,142 +27,19 @@ typedef struct krb5pa
} krb5pa_t;
typedef struct
DECLSPEC int decrypt_and_check (LOCAL_AS u32 *S, u32 *data, u32 *timestamp_ct)
{
u8 S[256];
u32 wtf_its_faster;
} RC4_KEY;
DECLSPEC void swap (LOCAL_AS RC4_KEY *rc4_key, const u8 i, const u8 j)
{
u8 tmp;
tmp = rc4_key->S[i];
rc4_key->S[i] = rc4_key->S[j];
rc4_key->S[j] = tmp;
}
DECLSPEC void rc4_init_16 (LOCAL_AS RC4_KEY *rc4_key, const u32 *data)
{
u32 v = 0x03020100;
u32 a = 0x04040404;
LOCAL_AS u32 *ptr = (LOCAL_AS u32 *) rc4_key->S;
#ifdef _unroll
#pragma unroll
#endif
for (u32 i = 0; i < 64; i++)
{
*ptr++ = v; v += a;
}
u32 j = 0;
for (u32 i = 0; i < 16; i++)
{
u32 idx = i * 16;
u32 v;
v = data[0];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
v = data[1];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
v = data[2];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
v = data[3];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
}
}
DECLSPEC u8 rc4_next_16 (LOCAL_AS RC4_KEY *rc4_key, u8 i, u8 j, const u32 *in, u32 *out)
{
#ifdef _unroll
#pragma unroll
#endif
for (u32 k = 0; k < 4; k++)
{
u32 xor4 = 0;
u8 idx;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 0;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 8;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 16;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 24;
out[k] = in[k] ^ xor4;
}
return j;
}
DECLSPEC int decrypt_and_check (LOCAL_AS RC4_KEY *rc4_key, u32 *data, u32 *timestamp_ct)
{
rc4_init_16 (rc4_key, data);
rc4_init_128 (S, data);
u32 out[4];
u8 j = 0;
j = rc4_next_16 (rc4_key, 0, j, timestamp_ct + 0, out);
j = rc4_next_16 (S, 0, j, timestamp_ct + 0, out);
if ((out[3] & 0xffff0000) != 0x30320000) return 0;
j = rc4_next_16 (rc4_key, 16, j, timestamp_ct + 4, out);
j = rc4_next_16 (S, 16, j, timestamp_ct + 4, out);
if (((out[0] & 0xff) < '0') || ((out[0] & 0xff) > '9')) return 0; out[0] >>= 8;
if (((out[0] & 0xff) < '0') || ((out[0] & 0xff) > '9')) return 0; out[0] >>= 8;
@@ -393,7 +271,7 @@ DECLSPEC void kerb_prepare (const u32 *w0, const u32 *w1, const u32 pw_len, cons
hmac_md5_run (w0_t, w1_t, w2_t, w3_t, ipad, opad, digest);
}
DECLSPEC void m07500 (LOCAL_AS RC4_KEY *rc4_key, u32 *w0, u32 *w1, u32 *w2, u32 *w3, const u32 pw_len, KERN_ATTR_ESALT (krb5pa_t))
DECLSPEC void m07500 (LOCAL_AS u32 *S, u32 *w0, u32 *w1, u32 *w2, u32 *w3, const u32 pw_len, KERN_ATTR_ESALT (krb5pa_t))
{
/**
* modifier
@@ -473,7 +351,7 @@ DECLSPEC void m07500 (LOCAL_AS RC4_KEY *rc4_key, u32 *w0, u32 *w1, u32 *w2, u32
tmp[2] = digest[2];
tmp[3] = digest[3];
if (decrypt_and_check (rc4_key, tmp, timestamp_ct) == 1)
if (decrypt_and_check (S, tmp, timestamp_ct) == 1)
{
if (hc_atomic_inc (&hashes_shown[DIGESTS_OFFSET]) == 0)
{
@@ -528,11 +406,9 @@ KERNEL_FQ void m07500_m04 (KERN_ATTR_ESALT (krb5pa_t))
* main
*/
LOCAL_VK RC4_KEY rc4_keys[64];
LOCAL_VK u32 S[64 * FIXED_LOCAL_SIZE];
LOCAL_AS RC4_KEY *rc4_key = &rc4_keys[lid];
m07500 (rc4_key, w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_extra0_buf, d_extra1_buf, d_extra2_buf, d_extra3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, SALT_POS, loop_pos, loop_cnt, il_cnt, digests_cnt, DIGESTS_OFFSET, combs_mode, salt_repeat, pws_pos, gid_max);
m07500 (S, w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_extra0_buf, d_extra1_buf, d_extra2_buf, d_extra3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, SALT_POS, loop_pos, loop_cnt, il_cnt, digests_cnt, DIGESTS_OFFSET, combs_mode, salt_repeat, pws_pos, gid_max);
}
KERNEL_FQ void m07500_m08 (KERN_ATTR_ESALT (krb5pa_t))
@@ -580,11 +456,9 @@ KERNEL_FQ void m07500_m08 (KERN_ATTR_ESALT (krb5pa_t))
* main
*/
LOCAL_VK RC4_KEY rc4_keys[64];
LOCAL_VK u32 S[64 * FIXED_LOCAL_SIZE];
LOCAL_AS RC4_KEY *rc4_key = &rc4_keys[lid];
m07500 (rc4_key, w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_extra0_buf, d_extra1_buf, d_extra2_buf, d_extra3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, SALT_POS, loop_pos, loop_cnt, il_cnt, digests_cnt, DIGESTS_OFFSET, combs_mode, salt_repeat, pws_pos, gid_max);
m07500 (S, w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_extra0_buf, d_extra1_buf, d_extra2_buf, d_extra3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, SALT_POS, loop_pos, loop_cnt, il_cnt, digests_cnt, DIGESTS_OFFSET, combs_mode, salt_repeat, pws_pos, gid_max);
}
KERNEL_FQ void m07500_m16 (KERN_ATTR_ESALT (krb5pa_t))
@@ -636,11 +510,9 @@ KERNEL_FQ void m07500_s04 (KERN_ATTR_ESALT (krb5pa_t))
* main
*/
LOCAL_VK RC4_KEY rc4_keys[64];
LOCAL_VK u32 S[64 * FIXED_LOCAL_SIZE];
LOCAL_AS RC4_KEY *rc4_key = &rc4_keys[lid];
m07500 (rc4_key, w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_extra0_buf, d_extra1_buf, d_extra2_buf, d_extra3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, SALT_POS, loop_pos, loop_cnt, il_cnt, digests_cnt, DIGESTS_OFFSET, combs_mode, salt_repeat, pws_pos, gid_max);
m07500 (S, w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_extra0_buf, d_extra1_buf, d_extra2_buf, d_extra3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, SALT_POS, loop_pos, loop_cnt, il_cnt, digests_cnt, DIGESTS_OFFSET, combs_mode, salt_repeat, pws_pos, gid_max);
}
KERNEL_FQ void m07500_s08 (KERN_ATTR_ESALT (krb5pa_t))
@@ -688,11 +560,9 @@ KERNEL_FQ void m07500_s08 (KERN_ATTR_ESALT (krb5pa_t))
* main
*/
LOCAL_VK RC4_KEY rc4_keys[64];
LOCAL_VK u32 S[64 * FIXED_LOCAL_SIZE];
LOCAL_AS RC4_KEY *rc4_key = &rc4_keys[lid];
m07500 (rc4_key, w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_extra0_buf, d_extra1_buf, d_extra2_buf, d_extra3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, SALT_POS, loop_pos, loop_cnt, il_cnt, digests_cnt, DIGESTS_OFFSET, combs_mode, salt_repeat, pws_pos, gid_max);
m07500 (S, w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_extra0_buf, d_extra1_buf, d_extra2_buf, d_extra3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, SALT_POS, loop_pos, loop_cnt, il_cnt, digests_cnt, DIGESTS_OFFSET, combs_mode, salt_repeat, pws_pos, gid_max);
}
KERNEL_FQ void m07500_s16 (KERN_ATTR_ESALT (krb5pa_t))

View File

@@ -13,6 +13,7 @@
#include "inc_common.cl"
#include "inc_hash_md4.cl"
#include "inc_hash_md5.cl"
#include "inc_cipher_rc4.cl"
#endif
typedef struct krb5pa
@@ -25,142 +26,19 @@ typedef struct krb5pa
} krb5pa_t;
typedef struct
DECLSPEC int decrypt_and_check (LOCAL_AS u32 *S, u32 *data, u32 *timestamp_ct)
{
u8 S[256];
u32 wtf_its_faster;
} RC4_KEY;
DECLSPEC void swap (LOCAL_AS RC4_KEY *rc4_key, const u8 i, const u8 j)
{
u8 tmp;
tmp = rc4_key->S[i];
rc4_key->S[i] = rc4_key->S[j];
rc4_key->S[j] = tmp;
}
DECLSPEC void rc4_init_16 (LOCAL_AS RC4_KEY *rc4_key, const u32 *data)
{
u32 v = 0x03020100;
u32 a = 0x04040404;
LOCAL_AS u32 *ptr = (LOCAL_AS u32 *) rc4_key->S;
#ifdef _unroll
#pragma unroll
#endif
for (u32 i = 0; i < 64; i++)
{
*ptr++ = v; v += a;
}
u32 j = 0;
for (u32 i = 0; i < 16; i++)
{
u32 idx = i * 16;
u32 v;
v = data[0];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
v = data[1];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
v = data[2];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
v = data[3];
j += rc4_key->S[idx] + (v >> 0); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 8); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 16); swap (rc4_key, idx, j); idx++;
j += rc4_key->S[idx] + (v >> 24); swap (rc4_key, idx, j); idx++;
}
}
DECLSPEC u8 rc4_next_16 (LOCAL_AS RC4_KEY *rc4_key, u8 i, u8 j, const u32 *in, u32 *out)
{
#ifdef _unroll
#pragma unroll
#endif
for (u32 k = 0; k < 4; k++)
{
u32 xor4 = 0;
u8 idx;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 0;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 8;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 16;
i += 1;
j += rc4_key->S[i];
swap (rc4_key, i, j);
idx = rc4_key->S[i] + rc4_key->S[j];
xor4 |= rc4_key->S[idx] << 24;
out[k] = in[k] ^ xor4;
}
return j;
}
DECLSPEC int decrypt_and_check (LOCAL_AS RC4_KEY *rc4_key, u32 *data, u32 *timestamp_ct)
{
rc4_init_16 (rc4_key, data);
rc4_init_128 (S, data);
u32 out[4];
u8 j = 0;
j = rc4_next_16 (rc4_key, 0, j, timestamp_ct + 0, out);
j = rc4_next_16 (S, 0, j, timestamp_ct + 0, out);
if ((out[3] & 0xffff0000) != 0x30320000) return 0;
j = rc4_next_16 (rc4_key, 16, j, timestamp_ct + 4, out);
j = rc4_next_16 (S, 16, j, timestamp_ct + 4, out);
if (((out[0] & 0xff) < '0') || ((out[0] & 0xff) > '9')) return 0; out[0] >>= 8;
if (((out[0] & 0xff) < '0') || ((out[0] & 0xff) > '9')) return 0; out[0] >>= 8;
@@ -313,9 +191,7 @@ KERNEL_FQ void m07500_mxx (KERN_ATTR_VECTOR_ESALT (krb5pa_t))
w[idx] = pws[gid].i[idx];
}
LOCAL_VK RC4_KEY rc4_keys[64];
LOCAL_AS RC4_KEY *rc4_key = &rc4_keys[lid];
LOCAL_VK u32 S[64 * FIXED_LOCAL_SIZE];
u32 checksum[4];
@@ -361,7 +237,7 @@ KERNEL_FQ void m07500_mxx (KERN_ATTR_VECTOR_ESALT (krb5pa_t))
kerb_prepare (ctx.h, checksum, digest);
if (decrypt_and_check (rc4_key, digest, timestamp_ct) == 1)
if (decrypt_and_check (S, digest, timestamp_ct) == 1)
{
if (hc_atomic_inc (&hashes_shown[DIGESTS_OFFSET]) == 0)
{
@@ -407,9 +283,7 @@ KERNEL_FQ void m07500_sxx (KERN_ATTR_VECTOR_ESALT (krb5pa_t))
w[idx] = pws[gid].i[idx];
}
LOCAL_VK RC4_KEY rc4_keys[64];
LOCAL_AS RC4_KEY *rc4_key = &rc4_keys[lid];
LOCAL_VK u32 S[64 * FIXED_LOCAL_SIZE];
u32 checksum[4];
@@ -455,7 +329,7 @@ KERNEL_FQ void m07500_sxx (KERN_ATTR_VECTOR_ESALT (krb5pa_t))
kerb_prepare (ctx.h, checksum, digest);
if (decrypt_and_check (rc4_key, digest, timestamp_ct) == 1)
if (decrypt_and_check (S, digest, timestamp_ct) == 1)
{
if (hc_atomic_inc (&hashes_shown[DIGESTS_OFFSET]) == 0)
{

View File

@@ -30,7 +30,7 @@
- AES Crypt Plugin: Reduced max password length from 256 to 128 which improved performance by 22%
- RAR3-p (Compressed): Fix workaround in unrar library in AES constant table generation to enable multi-threading support
- CRC32: Prevent decompression of data used in CRC32 calculation on host. This leads to false negatives with TrueCrypt/VeraCrypt keyfiles
- RC4: Update -m 97x0 and -m 104x0 to new RC4 crypto library code, improving performance by 20% or more
- RC4: Updated hash-mode 7500, 9710, 9720, 10400 and 10410 to new RC4 crypto library code, improving performance by 20% or more
##
## Technical

View File

@@ -53,6 +53,44 @@ typedef struct krb5pa
static const char *SIGNATURE_KRB5PA = "$krb5pa$23$";
char *module_jit_build_options (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra, MAYBE_UNUSED const hashes_t *hashes, MAYBE_UNUSED const hc_device_param_t *device_param)
{
char *jit_build_options = NULL;
u32 native_threads = 0;
if (device_param->opencl_device_type & CL_DEVICE_TYPE_CPU)
{
native_threads = 1;
}
else if (device_param->opencl_device_type & CL_DEVICE_TYPE_GPU)
{
if (device_param->opencl_device_vendor_id == VENDOR_ID_INTEL_SDK)
{
native_threads = 8;
}
else if (device_param->opencl_device_vendor_id == VENDOR_ID_AMD)
{
if (device_param->device_local_mem_size < 49152)
{
native_threads = 32;
}
else
{
native_threads = 64;
}
}
else
{
native_threads = 32;
}
}
hc_asprintf (&jit_build_options, "-D FIXED_LOCAL_SIZE=%u -D _unroll", native_threads);
return jit_build_options;
}
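For example, a discrete AMD GPU reporting less than 48 KiB of local memory takes the native_threads = 32 branch above, so the kernels get built with:

-D FIXED_LOCAL_SIZE=32 -D _unroll

Each kernel then declares its shared RC4 state as LOCAL_VK u32 S[64 * FIXED_LOCAL_SIZE], i.e. 64 u32 words (256 bytes) of state per thread for exactly FIXED_LOCAL_SIZE threads.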
u64 module_esalt_size (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra)
{
const u64 esalt_size = (const u64) sizeof (krb5pa_t);
@@ -60,41 +98,6 @@ u64 module_esalt_size (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED
return esalt_size;
}
u32 module_kernel_threads_min (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra)
{
const u32 kernel_threads_min = 64;
return kernel_threads_min;
}
u32 module_kernel_threads_max (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra)
{
const u32 kernel_threads_max = 64;
return kernel_threads_max;
}
char *module_jit_build_options (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra, MAYBE_UNUSED const hashes_t *hashes, MAYBE_UNUSED const hc_device_param_t *device_param)
{
char *jit_build_options = NULL;
// in pure -a 0 mode we reserve pws_t with 64 threads: 64 * (256 + 4) bytes = 16640 bytes.
// the RC4_KEY with 64 threads requires the same: 64 * (256 + 4) bytes = 16640 bytes.
if ((hashconfig->opti_type & OPTI_TYPE_OPTIMIZED_KERNEL) == 0)
{
if (user_options_extra->attack_kern == ATTACK_KERN_STRAIGHT)
{
if (device_param->device_local_mem_size < 49152)
{
hc_asprintf (&jit_build_options, "-D FORCE_DISABLE_SHM");
}
}
}
return jit_build_options;
}
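The arithmetic behind that removed workaround (spelled out here, not in the original comment): with 64 fixed threads, the two per-thread 260-byte buffers came to

64 * (256 + 4) = 16640 bytes (pws_t)
64 * (256 + 4) = 16640 bytes (RC4_KEY)
16640 + 16640 = 33280 bytes, which overflows a 32768-byte (32 KiB) local memory budget,

hence FORCE_DISABLE_SHM on devices reporting under 49152 bytes. The new module_jit_build_options earlier in this file instead lowers the thread count (FIXED_LOCAL_SIZE = 32) on such devices, shrinking the shared RC4 state accordingly.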
int module_hash_decode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED void *digest_buf, MAYBE_UNUSED salt_t *salt, MAYBE_UNUSED void *esalt_buf, MAYBE_UNUSED void *hook_salt_buf, MAYBE_UNUSED hashinfo_t *hash_info, const char *line_buf, MAYBE_UNUSED const int line_len)
{
u32 *digest = (u32 *) digest_buf;
@@ -293,8 +296,8 @@ void module_init (module_ctx_t *module_ctx)
module_ctx->module_kernel_accel_min = MODULE_DEFAULT;
module_ctx->module_kernel_loops_max = MODULE_DEFAULT;
module_ctx->module_kernel_loops_min = MODULE_DEFAULT;
module_ctx->module_kernel_threads_max = module_kernel_threads_max;
module_ctx->module_kernel_threads_min = module_kernel_threads_min;
module_ctx->module_kernel_threads_max = MODULE_DEFAULT;
module_ctx->module_kernel_threads_min = MODULE_DEFAULT;
module_ctx->module_kern_type = module_kern_type;
module_ctx->module_kern_type_dynamic = MODULE_DEFAULT;
module_ctx->module_opti_type = module_opti_type;