1
0
mirror of https://github.com/hashcat/hashcat.git synced 2025-07-23 23:18:21 +00:00

Merge branch 'hashcat:master' into banderlog-patch-1

This commit is contained in:
Borys Kabakov 2025-07-12 13:35:23 +03:00 committed by GitHub
commit 83e865fe26
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
116 changed files with 5118 additions and 33292 deletions

2
.gitignore vendored
View File

@ -1,3 +1,5 @@
.DS_Store
*/.DS_Store
*.exe
*.bin
*.app

View File

@ -569,22 +569,22 @@ DECLSPEC void Cast5Encrypt (PRIVATE_AS const u8 *inBlock, PRIVATE_AS u8 *outBloc
u32 t;
/* Do the work */
_CAST_F1(l, r, 0, 16);
_CAST_F2(r, l, 1, 16);
_CAST_F3(l, r, 2, 16);
_CAST_F1(r, l, 3, 16);
_CAST_F2(l, r, 4, 16);
_CAST_F3(r, l, 5, 16);
_CAST_F1(l, r, 6, 16);
_CAST_F2(r, l, 7, 16);
_CAST_F3(l, r, 8, 16);
_CAST_F1(r, l, 9, 16);
_CAST_F2(l, r, 10, 16);
_CAST_F3(r, l, 11, 16);
_CAST_F1(l, r, 12, 16);
_CAST_F2(r, l, 13, 16);
_CAST_F3(l, r, 14, 16);
_CAST_F1(r, l, 15, 16);
CAST_F1(l, r, 0, 16);
CAST_F2(r, l, 1, 16);
CAST_F3(l, r, 2, 16);
CAST_F1(r, l, 3, 16);
CAST_F2(l, r, 4, 16);
CAST_F3(r, l, 5, 16);
CAST_F1(l, r, 6, 16);
CAST_F2(r, l, 7, 16);
CAST_F3(l, r, 8, 16);
CAST_F1(r, l, 9, 16);
CAST_F2(l, r, 10, 16);
CAST_F3(r, l, 11, 16);
CAST_F1(l, r, 12, 16);
CAST_F2(r, l, 13, 16);
CAST_F3(l, r, 14, 16);
CAST_F1(r, l, 15, 16);
/* Put l,r into outblock */
PUT_UINT32BE(r, outBlock, 0);
@ -599,22 +599,22 @@ DECLSPEC void Cast5Decrypt (PRIVATE_AS const u8 *inBlock, PRIVATE_AS u8 *outBloc
u32 t;
/* Only do full 16 rounds if key length > 80 bits */
_CAST_F1(r, l, 15, 16);
_CAST_F3(l, r, 14, 16);
_CAST_F2(r, l, 13, 16);
_CAST_F1(l, r, 12, 16);
_CAST_F3(r, l, 11, 16);
_CAST_F2(l, r, 10, 16);
_CAST_F1(r, l, 9, 16);
_CAST_F3(l, r, 8, 16);
_CAST_F2(r, l, 7, 16);
_CAST_F1(l, r, 6, 16);
_CAST_F3(r, l, 5, 16);
_CAST_F2(l, r, 4, 16);
_CAST_F1(r, l, 3, 16);
_CAST_F3(l, r, 2, 16);
_CAST_F2(r, l, 1, 16);
_CAST_F1(l, r, 0, 16);
CAST_F1(r, l, 15, 16);
CAST_F3(l, r, 14, 16);
CAST_F2(r, l, 13, 16);
CAST_F1(l, r, 12, 16);
CAST_F3(r, l, 11, 16);
CAST_F2(l, r, 10, 16);
CAST_F1(r, l, 9, 16);
CAST_F3(l, r, 8, 16);
CAST_F2(r, l, 7, 16);
CAST_F1(l, r, 6, 16);
CAST_F3(r, l, 5, 16);
CAST_F2(l, r, 4, 16);
CAST_F1(r, l, 3, 16);
CAST_F3(l, r, 2, 16);
CAST_F2(r, l, 1, 16);
CAST_F1(l, r, 0, 16);
/* Put l,r into outblock */
PUT_UINT32BE(r, outBlock, 0);
PUT_UINT32BE(l, outBlock, 4);
@ -633,8 +633,8 @@ DECLSPEC void Cast5SetKey (PRIVATE_AS CAST_KEY *key, u32 keylength, PRIVATE_AS c
GET_UINT32BE(X[2], userKey, 8);
GET_UINT32BE(X[3], userKey, 12);
#define x(i) GETBYTE(X[i/4], 3-i%4)
#define z(i) GETBYTE(Z[i/4], 3-i%4)
#define x(i) GETBYTE(X[i/4], 3-i%4)
#define z(i) GETBYTE(Z[i/4], 3-i%4)
for (i=0; i<=16; i+=16) {
// this part is copied directly from RFC 2144 (with some search and replace) by Wei Dai
@ -673,11 +673,11 @@ DECLSPEC void Cast5SetKey (PRIVATE_AS CAST_KEY *key, u32 keylength, PRIVATE_AS c
}
u32 data[32];
for (i = 0; i < 16; i++) {
data[i * 2] = K[i];
data[i * 2 + 1] = ((K[i + 16]) + 16) & 0x1f; // here only the lowest 5 bits are set..
}
for (i=16; i<32; i++)
K[i] &= 0x1f;
for (i = 0; i < 16; i++) {
data[i * 2] = K[i];
data[i * 2 + 1] = ((K[i + 16]) + 16) & 0x1f; // here only the lowest 5 bits are set..
}
for (i=16; i<32; i++) K[i] &= 0x1f;
}

View File

@ -1,7 +1,5 @@
#ifndef _OPENCL_CAST_H
#define _OPENCL_CAST_H
#ifndef INC_CIPHER_CAST_H
#define INC_CIPHER_CAST_H
// #include "opencl_misc.h"
#define GET_UINT32BE(n, b, i) \
@ -32,28 +30,26 @@ typedef struct {
#define U8d(x) GETBYTE(x,0)
/* CAST uses three different round functions */
#define _CAST_f1(l, r, km, kr) \
#define CAST_f1(l, r, km, kr) \
t = hc_rotl32_S(km + r, kr); \
l ^= ((s_S[0][U8a(t)] ^ s_S[1][U8b(t)]) - \
s_S[2][U8c(t)]) + s_S[3][U8d(t)];
#define _CAST_f2(l, r, km, kr) \
#define CAST_f2(l, r, km, kr) \
t = hc_rotl32_S(km ^ r, kr); \
l ^= ((s_S[0][U8a(t)] - s_S[1][U8b(t)]) + \
s_S[2][U8c(t)]) ^ s_S[3][U8d(t)];
#define _CAST_f3(l, r, km, kr) \
#define CAST_f3(l, r, km, kr) \
t = hc_rotl32_S(km - r, kr); \
l ^= ((s_S[0][U8a(t)] + s_S[1][U8b(t)]) ^ \
s_S[2][U8c(t)]) - s_S[3][U8d(t)];
#define _CAST_F1(l, r, i, j) _CAST_f1(l, r, K[i], K[i+j])
#define _CAST_F2(l, r, i, j) _CAST_f2(l, r, K[i], K[i+j])
#define _CAST_F3(l, r, i, j) _CAST_f3(l, r, K[i], K[i+j])
#define CAST_F1(l, r, i, j) CAST_f1(l, r, K[i], K[i+j])
#define CAST_F2(l, r, i, j) CAST_f2(l, r, K[i], K[i+j])
#define CAST_F3(l, r, i, j) CAST_f3(l, r, K[i], K[i+j])
/* OpenSSL API compatibility */
#define CAST_set_key(ckey, len, key) Cast5SetKey(ckey, len, key)
#define CAST_ecb_encrypt(in, out, ckey) Cast5Encrypt(in, out, ckey)
#define CAST_ecb_decrypt(in, out, ckey) Cast5Decrypt(in, out, ckey)
#endif /* _OPENCL_CAST_H */
#endif /* INC_CIPHER_CAST_H */

File diff suppressed because it is too large Load Diff

View File

@ -12,24 +12,26 @@
#include "inc_hash_blake2b.h"
#include "inc_hash_argon2.h"
#define LBLOCKSIZE (128 / THREADS_PER_LANE)
DECLSPEC void argon2_initial_block (PRIVATE_AS const u32 *in, const u32 lane, const u32 blocknum, const u32 parallelism, GLOBAL_AS argon2_block_t *blocks)
{
blake2b_ctx_t ctx;
blake2b_init (&ctx);
u64 blake_buf[16] = { 0 };
ctx.m[0] = hl32_to_64 (in[ 0], sizeof(argon2_block_t));
ctx.m[1] = hl32_to_64 (in[ 2], in[ 1]);
ctx.m[2] = hl32_to_64 (in[ 4], in[ 3]);
ctx.m[3] = hl32_to_64 (in[ 6], in[ 5]);
ctx.m[4] = hl32_to_64 (in[ 8], in[ 7]);
ctx.m[5] = hl32_to_64 (in[10], in[ 9]);
ctx.m[6] = hl32_to_64 (in[12], in[11]);
ctx.m[7] = hl32_to_64 (in[14], in[13]);
ctx.m[8] = hl32_to_64 (blocknum, in[15]);
ctx.m[9] = hl32_to_64 (0, lane);
blake_buf[0] = sizeof(argon2_block_t);
blake2b_update (&ctx, (PRIVATE_AS u32 *) blake_buf, 4);
blake2b_update (&ctx, in, 64);
blake_buf[0] = hl32_to_64 (lane, blocknum);
blake2b_update (&ctx, (PRIVATE_AS u32 *) blake_buf, 8);
blake2b_final (&ctx);
blake2b_transform (ctx.h, ctx.m, 76, (u64) BLAKE2B_FINAL);
GLOBAL_AS u64 *out = blocks[(blocknum * parallelism) + lane].values;
@ -38,12 +40,23 @@ DECLSPEC void argon2_initial_block (PRIVATE_AS const u32 *in, const u32 lane, co
out[2] = ctx.h[2];
out[3] = ctx.h[3];
ctx.m[8] = 0;
ctx.m[9] = 0;
for (u32 off = 4; off < 124; off += 4)
{
for (u32 idx = 0; idx < 8; idx++) blake_buf[idx] = ctx.h[idx];
for (u32 idx = 0; idx < 8; idx++) ctx.m[idx] = ctx.h[idx];
blake2b_init (&ctx);
blake2b_transform (ctx.h, blake_buf, 64, (u64) BLAKE2B_FINAL);
ctx.h[0] = BLAKE2B_IV_00 ^ 0x01010040; // default output length: 0x40 = 64 bytes
ctx.h[1] = BLAKE2B_IV_01;
ctx.h[2] = BLAKE2B_IV_02;
ctx.h[3] = BLAKE2B_IV_03;
ctx.h[4] = BLAKE2B_IV_04;
ctx.h[5] = BLAKE2B_IV_05;
ctx.h[6] = BLAKE2B_IV_06;
ctx.h[7] = BLAKE2B_IV_07;
blake2b_transform (ctx.h, ctx.m, 64, (u64) BLAKE2B_FINAL);
out[off + 0] = ctx.h[0];
out[off + 1] = ctx.h[1];
@ -57,39 +70,85 @@ DECLSPEC void argon2_initial_block (PRIVATE_AS const u32 *in, const u32 lane, co
out[127] = ctx.h[7];
}
DECLSPEC void blake2b_update_8 (PRIVATE_AS blake2b_ctx_t *ctx, const u32 w0, const u32 w1, const int len)
{
const int pos = ctx->len & 127;
if (pos == 0)
{
if (ctx->len > 0) // if new block (pos == 0) AND the (old) len is not zero => transform
{
blake2b_transform (ctx->h, ctx->m, ctx->len, BLAKE2B_UPDATE);
}
}
const u64 m = hl32_to_64 (w1, w0);
const u32 s = (pos & 7) * 8;
const u64 m0 = (m << s);
const u64 m1 = (m >> 8) >> (56 - s);
const int idx = pos / 8;
ctx->m[ 0] |= (idx == 0) ? m0 : 0;
ctx->m[ 1] |= (idx == 1) ? m0 : (idx == 0) ? m1 : 0;
ctx->m[ 2] |= (idx == 2) ? m0 : (idx == 1) ? m1 : 0;
ctx->m[ 3] |= (idx == 3) ? m0 : (idx == 2) ? m1 : 0;
ctx->m[ 4] |= (idx == 4) ? m0 : (idx == 3) ? m1 : 0;
ctx->m[ 5] |= (idx == 5) ? m0 : (idx == 4) ? m1 : 0;
ctx->m[ 6] |= (idx == 6) ? m0 : (idx == 5) ? m1 : 0;
ctx->m[ 7] |= (idx == 7) ? m0 : (idx == 6) ? m1 : 0;
ctx->m[ 8] |= (idx == 8) ? m0 : (idx == 7) ? m1 : 0;
ctx->m[ 9] |= (idx == 9) ? m0 : (idx == 8) ? m1 : 0;
ctx->m[10] |= (idx == 10) ? m0 : (idx == 9) ? m1 : 0;
ctx->m[11] |= (idx == 11) ? m0 : (idx == 10) ? m1 : 0;
ctx->m[12] |= (idx == 12) ? m0 : (idx == 11) ? m1 : 0;
ctx->m[13] |= (idx == 13) ? m0 : (idx == 12) ? m1 : 0;
ctx->m[14] |= (idx == 14) ? m0 : (idx == 13) ? m1 : 0;
ctx->m[15] |= (idx == 15) ? m0 : (idx == 14) ? m1 : 0;
if ((pos + len) > 128)
{
const u32 cur_len = ((ctx->len + len) / 128) * 128;
blake2b_transform (ctx->h, ctx->m, cur_len, (u64) BLAKE2B_UPDATE);
for (u32 i = 1; i < 16; i++) ctx->m[i] = 0;
ctx->m[0] = m1;
}
ctx->len += len;
}
DECLSPEC void argon2_initial_hash (GLOBAL_AS const pw_t *pw, GLOBAL_AS const salt_t *salt, PRIVATE_AS const argon2_options_t *options, PRIVATE_AS u64 *blockhash)
{
blake2b_ctx_t ctx;
blake2b_init (&ctx);
u32 option_input[32] = { 0 };
ctx.m[0] = hl32_to_64 (options->digest_len, options->parallelism);
ctx.m[1] = hl32_to_64 (options->iterations, options->memory_usage_in_kib);
ctx.m[2] = hl32_to_64 (options->type, options->version);
ctx.len = 24;
option_input[0] = options->parallelism;
option_input[1] = options->digest_len;
option_input[2] = options->memory_usage_in_kib;
option_input[3] = options->iterations;
option_input[4] = options->version;
option_input[5] = options->type;
const u32 pw_len = pw->pw_len;
blake2b_update (&ctx, option_input, 24);
blake2b_update_8 (&ctx, pw_len, 0, 4);
u32 len_input[32] = { 0 };
for (u32 i = 0, idx = 0; i < pw_len; i += 8, idx += 2)
{
blake2b_update_8 (&ctx, pw->i[idx + 0], pw->i[idx + 1], MIN((pw_len - i), 8));
}
len_input[0] = pw->pw_len;
const u32 salt_len = salt->salt_len;
blake2b_update (&ctx, len_input, 4);
blake2b_update_global (&ctx, pw->i, pw->pw_len);
blake2b_update_8 (&ctx, salt_len, 0, 4);
len_input[0] = salt->salt_len;
blake2b_update (&ctx, len_input, 4);
blake2b_update_global (&ctx, salt->salt_buf, salt->salt_len);
len_input[0] = 0;
blake2b_update (&ctx, len_input, 4); // secret (K)
blake2b_update (&ctx, len_input, 4); // associated data (X)
for (u32 i = 0, idx = 0; i < salt_len; i += 8, idx += 2)
{
blake2b_update_8 (&ctx, salt->salt_buf[idx + 0], salt->salt_buf[idx + 1], MIN((salt_len - i), 8));
}
blake2b_update_8 (&ctx, 0, 0, 8); // secret (K) and associated data (X)
blake2b_final (&ctx);
for (u32 idx = 0; idx < 8; idx++) blockhash[idx] = ctx.h[idx];
@ -110,7 +169,6 @@ DECLSPEC void argon2_init (GLOBAL_AS const pw_t *pw, GLOBAL_AS const salt_t *sal
}
}
// TODO: reconsider 'trunc_mul()'
DECLSPEC u64 trunc_mul (u64 x, u64 y)
{
const u32 xlo = (u32) x;
@ -142,8 +200,6 @@ DECLSPEC inline u32 argon2_ref_address (PRIVATE_AS const argon2_options_t *optio
ref_area += (index - 1);
}
// if ref_area == 0xFFFFFFFF => bug
const u32 j1 = l32_from_64_S (pseudo_random);
ref_index = (ref_area - 1 - hc_umulhi (ref_area, hc_umulhi (j1, j1)));
@ -188,8 +244,36 @@ DECLSPEC int argon2_shift (int idx, int argon2_thread)
return (argon2_thread & 0x0e) | (((argon2_thread & 0x11) + delta + 0x0e) & 0x11);
}
DECLSPEC void argon2_hash_block (u64 R[4], int argon2_thread, LOCAL_AS u64 *shuffle_buf, int argon2_lsz)
DECLSPEC void argon2_hash_block (u64 R[LBLOCKSIZE], int argon2_thread, LOCAL_AS u64 *shuffle_buf, int argon2_lsz)
{
#if THREADS_PER_LANE == 1
u64 v[16];
for (u32 i = 0, offset = 0; i < 8; i++, offset += 16)
{
for (u32 j = 0; j < 16; j++) v[j] = R[offset + j];
ARGON2_P();
for (u32 j = 0; j < 16; j++) R[offset + j] = v[j];
}
for (u32 i = 0, offset = 0; i < 8; i++, offset += 2)
{
for (u32 j = 0, k = offset; j < 16; j += 2, k += 16) {
v[j + 0] = R[k + 0];
v[j + 1] = R[k + 1];
}
ARGON2_P();
for (u32 j = 0, k = offset; j < 16; j += 2, k += 16)
{
R[k + 0] = v[j + 0];
R[k + 1] = v[j + 1];
}
}
#else
for (u32 idx = 1; idx < 4; idx++) R[idx] = hc__shfl_sync (shuffle_buf, FULL_MASK, R[idx], argon2_thread ^ (idx << 2), argon2_thread, argon2_lsz);
transpose_permute_block (R, argon2_thread);
@ -215,49 +299,45 @@ DECLSPEC void argon2_hash_block (u64 R[4], int argon2_thread, LOCAL_AS u64 *shuf
ARGON2_G(R[0], R[1], R[2], R[3]);
for (u32 idx = 1; idx < 4; idx++) R[idx] = hc__shfl_sync (shuffle_buf, FULL_MASK, R[idx], argon2_shift ((4 - idx), argon2_thread), argon2_thread, argon2_lsz);
#endif
}
DECLSPEC void argon2_next_addresses (PRIVATE_AS const argon2_options_t *options, PRIVATE_AS const argon2_pos_t *pos, PRIVATE_AS u32 *addresses, u32 start_index, u32 argon2_thread, LOCAL_AS u64 *shuffle_buf, u32 argon2_lsz)
{
u64 Z[4] = { 0 };
u64 Z[LBLOCKSIZE] = { 0 };
u64 tmp[LBLOCKSIZE] = { 0 };
u64 tmp[4] = { 0 };
tmp[0] = 0;
tmp[1] = 0;
tmp[2] = 0;
tmp[3] = 0;
switch (argon2_thread)
for (u32 i = 0, index = argon2_thread; i < (LBLOCKSIZE / 4); i++, index += THREADS_PER_LANE)
{
case 0: Z[0] = pos->pass; break;
case 1: Z[0] = pos->lane; break;
case 2: Z[0] = pos->slice; break;
case 3: Z[0] = options->memory_block_count; break;
case 4: Z[0] = options->iterations; break;
case 5: Z[0] = options->type; break;
case 6: Z[0] = (start_index / 128) + 1; break;
default: Z[0] = 0; break;
switch (index)
{
case 0: Z[i] = pos->pass; break;
case 1: Z[i] = pos->lane; break;
case 2: Z[i] = pos->slice; break;
case 3: Z[i] = options->memory_block_count; break;
case 4: Z[i] = options->iterations; break;
case 5: Z[i] = options->type; break;
case 6: Z[i] = (start_index / 128) + 1; break;
default: Z[i] = 0; break;
}
tmp[i] = Z[i];
}
tmp[0] = Z[0];
argon2_hash_block (Z, argon2_thread, shuffle_buf, argon2_lsz);
for (u32 idx = 0; idx < (LBLOCKSIZE / 4); idx++) Z[idx] ^= tmp[idx];
for (u32 idx = 0; idx < LBLOCKSIZE; idx++) tmp[idx] = Z[idx];
argon2_hash_block (Z, argon2_thread, shuffle_buf, argon2_lsz);
Z[0] ^= tmp[0];
for (u32 idx = 0; idx < LBLOCKSIZE; idx++) Z[idx] ^= tmp[idx];
for (u32 idx = 0; idx < 4; idx++) tmp[idx] = Z[idx];
argon2_hash_block (Z, argon2_thread, shuffle_buf, argon2_lsz);
for (u32 idx = 0; idx < 4; idx++) Z[idx] ^= tmp[idx];
for (u32 i = 0, index = (start_index + argon2_thread); i < 4; i++, index += THREADS_PER_LANE)
for (u32 i = 0, index = (start_index + argon2_thread); i < LBLOCKSIZE; i++, index += THREADS_PER_LANE)
{
addresses[i] = argon2_ref_address (options, pos, index, Z[i]);
}
// if addresses[0] == 0xFFFFFFFE => bug
}
DECLSPEC u32 index_u32x4 (const u32 array[4], u32 index)
@ -277,20 +357,20 @@ DECLSPEC u32 index_u32x4 (const u32 array[4], u32 index)
return (u32) -1;
}
DECLSPEC GLOBAL_AS argon2_block_t *argon2_get_current_block (GLOBAL_AS argon2_block_t *blocks, PRIVATE_AS const argon2_options_t *options, u32 lane, u32 index_in_lane, u64 R[4], u32 argon2_thread)
DECLSPEC GLOBAL_AS argon2_block_t *argon2_get_current_block (GLOBAL_AS argon2_block_t *blocks, PRIVATE_AS const argon2_options_t *options, u32 lane, u32 index_in_lane, u64 R[LBLOCKSIZE], u32 argon2_thread)
{
// Apply wrap-around to previous block index if the current block is the first block in the lane
const u32 prev_in_lane = (index_in_lane == 0) ? (options->lane_length - 1) : (index_in_lane - 1);
GLOBAL_AS argon2_block_t *prev_block = &blocks[(prev_in_lane * options->parallelism) + lane];
for (u32 idx = 0; idx < 4; idx++) R[idx] = prev_block->values[(idx * THREADS_PER_LANE) + argon2_thread];
for (u32 idx = 0; idx < LBLOCKSIZE; idx++) R[idx] = prev_block->values[(idx * THREADS_PER_LANE) + argon2_thread];
return &blocks[(index_in_lane * options->parallelism) + lane];
}
DECLSPEC void argon2_fill_subsegment (GLOBAL_AS argon2_block_t *blocks, PRIVATE_AS const argon2_options_t *options, PRIVATE_AS const argon2_pos_t *pos, bool indep_addr, const u32 addresses[4],
u32 start_index, u32 end_index, GLOBAL_AS argon2_block_t *cur_block, u64 R[4], u32 argon2_thread, LOCAL_AS u64 *shuffle_buf, u32 argon2_lsz)
DECLSPEC void argon2_fill_subsegment (GLOBAL_AS argon2_block_t *blocks, PRIVATE_AS const argon2_options_t *options, PRIVATE_AS const argon2_pos_t *pos, bool indep_addr, const u32 addresses[LBLOCKSIZE],
u32 start_index, u32 end_index, GLOBAL_AS argon2_block_t *cur_block, u64 R[LBLOCKSIZE], u32 argon2_thread, LOCAL_AS u64 *shuffle_buf, u32 argon2_lsz)
{
for (u32 index = start_index; index < end_index; index++, cur_block += options->parallelism)
{
@ -298,34 +378,40 @@ DECLSPEC void argon2_fill_subsegment (GLOBAL_AS argon2_block_t *blocks, PRIVATE_
if (indep_addr)
{
ref_address = index_u32x4 (addresses, (index / THREADS_PER_LANE) % ARGON2_SYNC_POINTS);
#if THREADS_PER_LANE == 1
ref_address = addresses[(index / THREADS_PER_LANE) % LBLOCKSIZE];
#else
ref_address = index_u32x4 (addresses, (index / THREADS_PER_LANE) % LBLOCKSIZE);
ref_address = hc__shfl_sync (shuffle_buf, FULL_MASK, ref_address, index, argon2_thread, argon2_lsz);
#endif
}
else
{
ref_address = argon2_ref_address (options, pos, index, R[0]);
#if THREADS_PER_LANE != 1
ref_address = hc__shfl_sync (shuffle_buf, FULL_MASK, ref_address, 0, argon2_thread, argon2_lsz);
#endif
}
GLOBAL_AS const argon2_block_t *ref_block = &blocks[ref_address];
u64 tmp[4] = { 0 };
u64 tmp[LBLOCKSIZE] = { 0 };
// First pass is overwrite, next passes are XOR with previous
if ((pos->pass > 0) && (options->version != ARGON2_VERSION_10))
{
for (u32 idx = 0; idx < 4; idx++) tmp[idx] = cur_block->values[(idx * THREADS_PER_LANE) + argon2_thread];
for (u32 idx = 0; idx < LBLOCKSIZE; idx++) tmp[idx] = cur_block->values[(idx * THREADS_PER_LANE) + argon2_thread];
}
for (u32 idx = 0; idx < 4; idx++) R[idx] ^= ref_block->values[(idx * THREADS_PER_LANE) + argon2_thread];
for (u32 idx = 0; idx < LBLOCKSIZE; idx++) R[idx] ^= ref_block->values[(idx * THREADS_PER_LANE) + argon2_thread];
for (u32 idx = 0; idx < 4; idx++) tmp[idx] ^= R[idx];
for (u32 idx = 0; idx < LBLOCKSIZE; idx++) tmp[idx] ^= R[idx];
argon2_hash_block (R, argon2_thread, shuffle_buf, argon2_lsz);
for (u32 idx = 0; idx < 4; idx++) R[idx] ^= tmp[idx];
for (u32 idx = 0; idx < LBLOCKSIZE; idx++) R[idx] ^= tmp[idx];
for (u32 idx = 0; idx < 4; idx++) cur_block->values[(idx * THREADS_PER_LANE) + argon2_thread] = R[idx];
for (u32 idx = 0; idx < LBLOCKSIZE; idx++) cur_block->values[(idx * THREADS_PER_LANE) + argon2_thread] = R[idx];
}
}
@ -335,7 +421,7 @@ DECLSPEC void argon2_fill_segment (GLOBAL_AS argon2_block_t *blocks, PRIVATE_AS
const u32 skip_blocks = (pos->pass == 0) && (pos->slice == 0) ? 2 : 0;
const u32 index_in_lane = (pos->slice * options->segment_length) + skip_blocks;
u64 R[4] = { 0 };
u64 R[LBLOCKSIZE] = { 0 };
GLOBAL_AS argon2_block_t *cur_block = argon2_get_current_block (blocks, options, pos->lane, index_in_lane, R, argon2_thread);
@ -346,7 +432,7 @@ DECLSPEC void argon2_fill_segment (GLOBAL_AS argon2_block_t *blocks, PRIVATE_AS
const u32 start_index = (block_index == 0) ? skip_blocks : block_index;
const u32 end_index = MIN(((start_index | 127) + 1), options->segment_length);
u32 addresses[4] = { 0 };
u32 addresses[LBLOCKSIZE] = { 0 };
argon2_next_addresses (options, pos, addresses, block_index, argon2_thread, shuffle_buf, argon2_lsz);
argon2_fill_subsegment (blocks, options, pos, true, addresses, start_index, end_index, cur_block, R, argon2_thread, shuffle_buf, argon2_lsz);
@ -356,7 +442,7 @@ DECLSPEC void argon2_fill_segment (GLOBAL_AS argon2_block_t *blocks, PRIVATE_AS
}
else
{
u32 addresses[4] = { 0 };
u32 addresses[LBLOCKSIZE] = { 0 };
argon2_fill_subsegment (blocks, options, pos, false, addresses, skip_blocks, options->segment_length, cur_block, R, argon2_thread, shuffle_buf, argon2_lsz);
}
@ -367,26 +453,43 @@ DECLSPEC void argon2_final (GLOBAL_AS argon2_block_t *blocks, PRIVATE_AS const a
const u32 lane_length = options->lane_length;
const u32 lanes = options->parallelism;
argon2_block_t final_block = { };
for (u32 l = 0; l < lanes; l++)
{
for (u32 idx = 0; idx < 128; idx++) final_block.values[idx] ^= blocks[((lane_length - 1) * lanes) + l].values[idx];
}
u32 output_len[32] = { 0 };
output_len[0] = options->digest_len;
blake2b_ctx_t ctx;
blake2b_init (&ctx);
// Override default (0x40) value in BLAKE2b
ctx.h[0] ^= 0x40 ^ options->digest_len;
blake2b_update (&ctx, output_len, 4);
blake2b_update (&ctx, (PRIVATE_AS u32 *) final_block.values, sizeof(final_block));
u32 rem = options->digest_len;
blake2b_final (&ctx);
for (u32 offset = 0; offset < 128; offset += 16)
{
for (u32 l = 0; l < lanes; l++)
{
for (u32 idx = 0; idx < 16; idx++)
{
ctx.m[idx] ^= blocks[((lane_length - 1) * lanes) + l].values[idx + offset];
}
}
for (u32 idx = 0; idx < 16; idx++)
{
const u64 value = ctx.m[idx];
ctx.m[idx] = hl32_to_64 (l32_from_64_S (value), rem);
rem = h32_from_64_S (value);
}
ctx.len += 128;
blake2b_transform (ctx.h, ctx.m, ctx.len, (u64) BLAKE2B_UPDATE);
for (u32 idx = 0; idx < 16; idx++) ctx.m[idx] = 0;
}
ctx.m[0] = hl32_to_64 (0, rem);
blake2b_transform (ctx.h, ctx.m, 1028, (u64) BLAKE2B_FINAL);
for (uint i = 0, idx = 0; i < (options->digest_len / 4); i += 2, idx += 1)
{

View File

@ -1,4 +1,3 @@
/**
* Author......: Netherlands Forensic Institute
* License.....: MIT
@ -12,7 +11,10 @@
#define ARGON2_VERSION_10 0x10
#define ARGON2_VERSION_13 0x13
#ifndef THREADS_PER_LANE
#define THREADS_PER_LANE 32
#endif
#define FULL_MASK 0xffffffff
#define BLAKE2B_OUTBYTES 64

View File

@ -24,7 +24,7 @@ DECLSPEC u64 blake2b_rot16_S (const u64 a)
return out.v64;
#elif (defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1
#elif (defined IS_AMD || defined IS_HIP)
vconv64_t in;
@ -98,7 +98,7 @@ DECLSPEC u64 blake2b_rot24_S (const u64 a)
return out.v64;
#elif (defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1
#elif (defined IS_AMD || defined IS_HIP)
vconv64_t in;

View File

@ -77,7 +77,7 @@ DECLSPEC u32 blake2s_rot08_S (const u32 a)
return out.v32;
#elif (defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1
#elif (defined IS_AMD || defined IS_HIP)
vconv32_t in;

View File

@ -781,7 +781,6 @@ DECLSPEC void append_block8_optimized (const u32 offset, PRIVATE_AS u32 *buf0, P
const int offset_switch = offset / 4;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
const u32 src_r00 = src_r0[0];
const u32 src_r01 = src_r0[1];
const u32 src_r02 = src_r0[2];
@ -882,123 +881,6 @@ DECLSPEC void append_block8_optimized (const u32 offset, PRIVATE_AS u32 *buf0, P
s0 = 0;
break;
}
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
const int offset_mod_4 = offset & 3;
const int offset_minus_4 = 4 - offset_mod_4;
#if defined IS_NV
const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> (offset_minus_4 * 8));
#endif
const u32 src_r00 = src_r0[0];
const u32 src_r01 = src_r0[1];
const u32 src_r02 = src_r0[2];
const u32 src_r03 = src_r0[3];
const u32 src_r10 = src_r1[0];
const u32 src_r11 = src_r1[1];
const u32 src_r12 = src_r1[2];
const u32 src_r13 = src_r1[3];
switch (offset_switch)
{
case 0:
s7 = hc_byte_perm_S (src_r12, src_r13, selector);
s6 = hc_byte_perm_S (src_r11, src_r12, selector);
s5 = hc_byte_perm_S (src_r10, src_r11, selector);
s4 = hc_byte_perm_S (src_r03, src_r10, selector);
s3 = hc_byte_perm_S (src_r02, src_r03, selector);
s2 = hc_byte_perm_S (src_r01, src_r02, selector);
s1 = hc_byte_perm_S (src_r00, src_r01, selector);
s0 = hc_byte_perm_S ( 0, src_r00, selector);
break;
case 1:
s7 = hc_byte_perm_S (src_r11, src_r12, selector);
s6 = hc_byte_perm_S (src_r10, src_r11, selector);
s5 = hc_byte_perm_S (src_r03, src_r10, selector);
s4 = hc_byte_perm_S (src_r02, src_r03, selector);
s3 = hc_byte_perm_S (src_r01, src_r02, selector);
s2 = hc_byte_perm_S (src_r00, src_r01, selector);
s1 = hc_byte_perm_S ( 0, src_r00, selector);
s0 = 0;
break;
case 2:
s7 = hc_byte_perm_S (src_r10, src_r11, selector);
s6 = hc_byte_perm_S (src_r03, src_r10, selector);
s5 = hc_byte_perm_S (src_r02, src_r03, selector);
s4 = hc_byte_perm_S (src_r01, src_r02, selector);
s3 = hc_byte_perm_S (src_r00, src_r01, selector);
s2 = hc_byte_perm_S ( 0, src_r00, selector);
s1 = 0;
s0 = 0;
break;
case 3:
s7 = hc_byte_perm_S (src_r03, src_r10, selector);
s6 = hc_byte_perm_S (src_r02, src_r03, selector);
s5 = hc_byte_perm_S (src_r01, src_r02, selector);
s4 = hc_byte_perm_S (src_r00, src_r01, selector);
s3 = hc_byte_perm_S ( 0, src_r00, selector);
s2 = 0;
s1 = 0;
s0 = 0;
break;
case 4:
s7 = hc_byte_perm_S (src_r02, src_r03, selector);
s6 = hc_byte_perm_S (src_r01, src_r02, selector);
s5 = hc_byte_perm_S (src_r00, src_r01, selector);
s4 = hc_byte_perm_S ( 0, src_r00, selector);
s3 = 0;
s2 = 0;
s1 = 0;
s0 = 0;
break;
case 5:
s7 = hc_byte_perm_S (src_r01, src_r02, selector);
s6 = hc_byte_perm_S (src_r00, src_r01, selector);
s5 = hc_byte_perm_S ( 0, src_r00, selector);
s4 = 0;
s3 = 0;
s2 = 0;
s1 = 0;
s0 = 0;
break;
case 6:
s7 = hc_byte_perm_S (src_r00, src_r01, selector);
s6 = hc_byte_perm_S ( 0, src_r00, selector);
s5 = 0;
s4 = 0;
s3 = 0;
s2 = 0;
s1 = 0;
s0 = 0;
break;
case 7:
s7 = hc_byte_perm_S ( 0, src_r00, selector);
s6 = 0;
s5 = 0;
s4 = 0;
s3 = 0;
s2 = 0;
s1 = 0;
s0 = 0;
break;
}
#endif
buf0[0] = src_l0[0] | s0;
buf0[1] = src_l0[1] | s1;

View File

@ -121,10 +121,6 @@ using namespace metal;
#define IS_GENERIC
#endif
#if defined IS_AMD && HAS_VPERM == 1
#define IS_ROCM
#endif
#define LOCAL_MEM_TYPE_LOCAL 1
#define LOCAL_MEM_TYPE_GLOBAL 2
@ -159,7 +155,7 @@ using namespace metal;
#elif defined IS_CUDA
#define DECLSPEC __device__
#elif defined IS_HIP
#define DECLSPEC __device__
#define DECLSPEC __device__ HC_INLINE
#else
#define DECLSPEC
#endif
@ -190,11 +186,6 @@ using namespace metal;
#define USE_ROTATE
#endif
#ifdef IS_ROCM
#define USE_BITSELECT
#define USE_ROTATE
#endif
#ifdef IS_INTEL_SDK
#ifdef IS_CPU
//#define USE_BITSELECT

View File

@ -32,43 +32,16 @@ DECLSPEC void memcat16 (PRIVATE_AS u32 *block0, PRIVATE_AS u32 *block1, PRIVATE_
u32 tmp3;
u32 tmp4;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
u32 in0 = append[0];
u32 in1 = append[1];
u32 in2 = append[2];
u32 in3 = append[3];
tmp0 = hc_bytealign ( 0, in0, offset);
tmp1 = hc_bytealign (in0, in1, offset);
tmp2 = hc_bytealign (in1, in2, offset);
tmp3 = hc_bytealign (in2, in3, offset);
tmp4 = hc_bytealign (in3, 0, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
const int offset_mod_4 = offset & 3;
const int offset_minus_4 = 4 - offset_mod_4;
#if defined IS_NV
const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> (offset_minus_4 * 8));
#endif
u32 in0 = append[0];
u32 in1 = append[1];
u32 in2 = append[2];
u32 in3 = append[3];
tmp0 = hc_byte_perm ( 0, in0, selector);
tmp1 = hc_byte_perm (in0, in1, selector);
tmp2 = hc_byte_perm (in1, in2, selector);
tmp3 = hc_byte_perm (in2, in3, selector);
tmp4 = hc_byte_perm (in3, 0, selector);
#endif
tmp0 = hc_bytealign_S ( 0, in0, offset);
tmp1 = hc_bytealign_S (in0, in1, offset);
tmp2 = hc_bytealign_S (in1, in2, offset);
tmp3 = hc_bytealign_S (in2, in3, offset);
tmp4 = hc_bytealign_S (in3, 0, offset);
const u32 div = offset / 4;
@ -145,45 +118,17 @@ DECLSPEC void memcat16_x80 (PRIVATE_AS u32 *block0, PRIVATE_AS u32 *block1, PRIV
u32 tmp3;
u32 tmp4;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
u32 in0 = append[0];
u32 in1 = append[1];
u32 in2 = append[2];
u32 in3 = append[3];
u32 in4 = 0x80;
tmp0 = hc_bytealign ( 0, in0, offset);
tmp1 = hc_bytealign (in0, in1, offset);
tmp2 = hc_bytealign (in1, in2, offset);
tmp3 = hc_bytealign (in2, in3, offset);
tmp4 = hc_bytealign (in3, in4, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
const int offset_mod_4 = offset & 3;
const int offset_minus_4 = 4 - offset_mod_4;
#if defined IS_NV
const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> (offset_minus_4 * 8));
#endif
u32 in0 = append[0];
u32 in1 = append[1];
u32 in2 = append[2];
u32 in3 = append[3];
u32 in4 = 0x80;
tmp0 = hc_byte_perm ( 0, in0, selector);
tmp1 = hc_byte_perm (in0, in1, selector);
tmp2 = hc_byte_perm (in1, in2, selector);
tmp3 = hc_byte_perm (in2, in3, selector);
tmp4 = hc_byte_perm (in3, in4, selector);
#endif
tmp0 = hc_bytealign_S ( 0, in0, offset);
tmp1 = hc_bytealign_S (in0, in1, offset);
tmp2 = hc_bytealign_S (in1, in2, offset);
tmp3 = hc_bytealign_S (in2, in3, offset);
tmp4 = hc_bytealign_S (in3, in4, offset);
const u32 div = offset / 4;
@ -258,35 +203,12 @@ DECLSPEC void memcat8 (PRIVATE_AS u32 *block0, PRIVATE_AS u32 *block1, PRIVATE_A
u32 tmp1;
u32 tmp2;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
u32 in0 = append[0];
u32 in1 = append[1];
tmp0 = hc_bytealign ( 0, in0, offset);
tmp1 = hc_bytealign (in0, in1, offset);
tmp2 = hc_bytealign (in1, 0, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
const int offset_mod_4 = offset & 3;
const int offset_minus_4 = 4 - offset_mod_4;
#if defined IS_NV
const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> (offset_minus_4 * 8));
#endif
u32 in0 = append[0];
u32 in1 = append[1];
tmp0 = hc_byte_perm ( 0, in0, selector);
tmp1 = hc_byte_perm (in0, in1, selector);
tmp2 = hc_byte_perm (in1, 0, selector);
#endif
tmp0 = hc_bytealign_S ( 0, in0, offset);
tmp1 = hc_bytealign_S (in0, in1, offset);
tmp2 = hc_bytealign_S (in1, 0, offset);
const u32 div = offset / 4;

View File

@ -31,44 +31,17 @@ DECLSPEC void memcat16 (PRIVATE_AS u32 *block0, PRIVATE_AS u32 *block1, PRIVATE_
u32 tmp3;
u32 tmp4;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
u32 in0 = append[0];
u32 in1 = append[1];
u32 in2 = append[2];
u32 in3 = append[3];
tmp0 = hc_bytealign ( 0, in0, offset);
tmp1 = hc_bytealign (in0, in1, offset);
tmp2 = hc_bytealign (in1, in2, offset);
tmp3 = hc_bytealign (in2, in3, offset);
tmp4 = hc_bytealign (in3, 0, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
const int offset_mod_4 = offset & 3;
const int offset_minus_4 = 4 - offset_mod_4;
#if defined IS_NV
const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> (offset_minus_4 * 8));
#endif
u32 in0 = append[0];
u32 in1 = append[1];
u32 in2 = append[2];
u32 in3 = append[3];
tmp0 = hc_byte_perm ( 0, in0, selector);
tmp1 = hc_byte_perm (in0, in1, selector);
tmp2 = hc_byte_perm (in1, in2, selector);
tmp3 = hc_byte_perm (in2, in3, selector);
tmp4 = hc_byte_perm (in3, 0, selector);
#endif
tmp0 = hc_bytealign_S ( 0, in0, offset);
tmp1 = hc_bytealign_S (in0, in1, offset);
tmp2 = hc_bytealign_S (in1, in2, offset);
tmp3 = hc_bytealign_S (in2, in3, offset);
tmp4 = hc_bytealign_S (in3, 0, offset);
const u32 div = offset / 4;
switch (div)
@ -144,45 +117,17 @@ DECLSPEC void memcat16_x80 (PRIVATE_AS u32 *block0, PRIVATE_AS u32 *block1, PRIV
u32 tmp3;
u32 tmp4;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
u32 in0 = append[0];
u32 in1 = append[1];
u32 in2 = append[2];
u32 in3 = append[3];
u32 in4 = 0x80;
tmp0 = hc_bytealign ( 0, in0, offset);
tmp1 = hc_bytealign (in0, in1, offset);
tmp2 = hc_bytealign (in1, in2, offset);
tmp3 = hc_bytealign (in2, in3, offset);
tmp4 = hc_bytealign (in3, in4, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
const int offset_mod_4 = offset & 3;
const int offset_minus_4 = 4 - offset_mod_4;
#if defined IS_NV
const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> (offset_minus_4 * 8));
#endif
u32 in0 = append[0];
u32 in1 = append[1];
u32 in2 = append[2];
u32 in3 = append[3];
u32 in4 = 0x80;
tmp0 = hc_byte_perm ( 0, in0, selector);
tmp1 = hc_byte_perm (in0, in1, selector);
tmp2 = hc_byte_perm (in1, in2, selector);
tmp3 = hc_byte_perm (in2, in3, selector);
tmp4 = hc_byte_perm (in3, in4, selector);
#endif
tmp0 = hc_bytealign_S ( 0, in0, offset);
tmp1 = hc_bytealign_S (in0, in1, offset);
tmp2 = hc_bytealign_S (in1, in2, offset);
tmp3 = hc_bytealign_S (in2, in3, offset);
tmp4 = hc_bytealign_S (in3, in4, offset);
const u32 div = offset / 4;
@ -257,35 +202,12 @@ DECLSPEC void memcat8 (PRIVATE_AS u32 *block0, PRIVATE_AS u32 *block1, PRIVATE_A
u32 tmp1;
u32 tmp2;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
u32 in0 = append[0];
u32 in1 = append[1];
tmp0 = hc_bytealign ( 0, in0, offset);
tmp1 = hc_bytealign (in0, in1, offset);
tmp2 = hc_bytealign (in1, 0, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
const int offset_mod_4 = offset & 3;
const int offset_minus_4 = 4 - offset_mod_4;
#if defined IS_NV
const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> (offset_minus_4 * 8));
#endif
u32 in0 = append[0];
u32 in1 = append[1];
tmp0 = hc_byte_perm ( 0, in0, selector);
tmp1 = hc_byte_perm (in0, in1, selector);
tmp2 = hc_byte_perm (in1, 0, selector);
#endif
tmp0 = hc_bytealign_S ( 0, in0, offset);
tmp1 = hc_bytealign_S (in0, in1, offset);
tmp2 = hc_bytealign_S (in1, 0, offset);
const u32 div = offset / 4;

View File

@ -231,47 +231,18 @@ DECLSPEC void append_salt (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u3
u32 tmp4;
u32 tmp5;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
u32 in0 = append[0];
u32 in1 = append[1];
u32 in2 = append[2];
u32 in3 = append[3];
u32 in4 = append[4];
tmp0 = hc_bytealign ( 0, in0, offset);
tmp1 = hc_bytealign (in0, in1, offset);
tmp2 = hc_bytealign (in1, in2, offset);
tmp3 = hc_bytealign (in2, in3, offset);
tmp4 = hc_bytealign (in3, in4, offset);
tmp5 = hc_bytealign (in4, 0, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
const int offset_mod_4 = offset & 3;
const int offset_minus_4 = 4 - offset_mod_4;
#if defined IS_NV
const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> (offset_minus_4 * 8));
#endif
u32 in0 = append[0];
u32 in1 = append[1];
u32 in2 = append[2];
u32 in3 = append[3];
u32 in4 = append[4];
tmp0 = hc_byte_perm ( 0, in0, selector);
tmp1 = hc_byte_perm (in0, in1, selector);
tmp2 = hc_byte_perm (in1, in2, selector);
tmp3 = hc_byte_perm (in2, in3, selector);
tmp4 = hc_byte_perm (in3, in4, selector);
tmp5 = hc_byte_perm (in4, 0, selector);
#endif
tmp0 = hc_bytealign_S ( 0, in0, offset);
tmp1 = hc_bytealign_S (in0, in1, offset);
tmp2 = hc_bytealign_S (in1, in2, offset);
tmp3 = hc_bytealign_S (in2, in3, offset);
tmp4 = hc_bytealign_S (in3, in4, offset);
tmp5 = hc_bytealign_S (in4, 0, offset);
const u32 div = offset / 4;

View File

@ -28,43 +28,16 @@ DECLSPEC void memcat16 (PRIVATE_AS u32 *block0, PRIVATE_AS u32 *block1, PRIVATE_
u32 tmp3;
u32 tmp4;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
u32 in0 = append[0];
u32 in1 = append[1];
u32 in2 = append[2];
u32 in3 = append[3];
tmp0 = hc_bytealign ( 0, in0, offset);
tmp1 = hc_bytealign (in0, in1, offset);
tmp2 = hc_bytealign (in1, in2, offset);
tmp3 = hc_bytealign (in2, in3, offset);
tmp4 = hc_bytealign (in3, 0, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
const int offset_mod_4 = offset & 3;
const int offset_minus_4 = 4 - offset_mod_4;
#if defined IS_NV
const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> (offset_minus_4 * 8));
#endif
u32 in0 = append[0];
u32 in1 = append[1];
u32 in2 = append[2];
u32 in3 = append[3];
tmp0 = hc_byte_perm ( 0, in0, selector);
tmp1 = hc_byte_perm (in0, in1, selector);
tmp2 = hc_byte_perm (in1, in2, selector);
tmp3 = hc_byte_perm (in2, in3, selector);
tmp4 = hc_byte_perm (in3, 0, selector);
#endif
tmp0 = hc_bytealign_S ( 0, in0, offset);
tmp1 = hc_bytealign_S (in0, in1, offset);
tmp2 = hc_bytealign_S (in1, in2, offset);
tmp3 = hc_bytealign_S (in2, in3, offset);
tmp4 = hc_bytealign_S (in3, 0, offset);
const u32 div = offset / 4;
@ -140,47 +113,18 @@ DECLSPEC void memcat16_x80 (PRIVATE_AS u32 *block0, PRIVATE_AS u32 *block1, PRIV
u32 tmp2;
u32 tmp3;
u32 tmp4;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
u32 in0 = append[0];
u32 in1 = append[1];
u32 in2 = append[2];
u32 in3 = append[3];
u32 in4 = 0x80;
tmp0 = hc_bytealign ( 0, in0, offset);
tmp1 = hc_bytealign (in0, in1, offset);
tmp2 = hc_bytealign (in1, in2, offset);
tmp3 = hc_bytealign (in2, in3, offset);
tmp4 = hc_bytealign (in3, in4, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
const int offset_mod_4 = offset & 3;
const int offset_minus_4 = 4 - offset_mod_4;
#if defined IS_NV
const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> (offset_minus_4 * 8));
#endif
u32 in0 = append[0];
u32 in1 = append[1];
u32 in2 = append[2];
u32 in3 = append[3];
u32 in4 = 0x80;
tmp0 = hc_byte_perm ( 0, in0, selector);
tmp1 = hc_byte_perm (in0, in1, selector);
tmp2 = hc_byte_perm (in1, in2, selector);
tmp3 = hc_byte_perm (in2, in3, selector);
tmp4 = hc_byte_perm (in3, in4, selector);
#endif
tmp0 = hc_bytealign_S ( 0, in0, offset);
tmp1 = hc_bytealign_S (in0, in1, offset);
tmp2 = hc_bytealign_S (in1, in2, offset);
tmp3 = hc_bytealign_S (in2, in3, offset);
tmp4 = hc_bytealign_S (in3, in4, offset);
const u32 div = offset / 4;
switch (div)
@ -254,35 +198,12 @@ DECLSPEC void memcat8 (PRIVATE_AS u32 *block0, PRIVATE_AS u32 *block1, PRIVATE_A
u32 tmp1;
u32 tmp2;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
u32 in0 = append[0];
u32 in1 = append[1];
tmp0 = hc_bytealign ( 0, in0, offset);
tmp1 = hc_bytealign (in0, in1, offset);
tmp2 = hc_bytealign (in1, 0, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
const int offset_mod_4 = offset & 3;
const int offset_minus_4 = 4 - offset_mod_4;
#if defined IS_NV
const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> (offset_minus_4 * 8));
#endif
u32 in0 = append[0];
u32 in1 = append[1];
tmp0 = hc_byte_perm ( 0, in0, selector);
tmp1 = hc_byte_perm (in0, in1, selector);
tmp2 = hc_byte_perm (in1, 0, selector);
#endif
tmp0 = hc_bytealign_S ( 0, in0, offset);
tmp1 = hc_bytealign_S (in0, in1, offset);
tmp2 = hc_bytealign_S (in1, 0, offset);
const u32 div = offset / 4;

View File

@ -45,30 +45,11 @@ DECLSPEC u32 memcat16 (PRIVATE_AS u32 *block, const u32 offset, PRIVATE_AS const
u32 in2 = append[2];
u32 in3 = append[3];
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
const u32 tmp0 = hc_bytealign_be ( 0, in0, offset);
const u32 tmp1 = hc_bytealign_be (in0, in1, offset);
const u32 tmp2 = hc_bytealign_be (in1, in2, offset);
const u32 tmp3 = hc_bytealign_be (in2, in3, offset);
const u32 tmp4 = hc_bytealign_be (in3, 0, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((offset & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> ((offset & 3) * 8));
#endif
const u32 tmp0 = hc_byte_perm_S (in0, 0, selector);
const u32 tmp1 = hc_byte_perm_S (in1, in0, selector);
const u32 tmp2 = hc_byte_perm_S (in2, in1, selector);
const u32 tmp3 = hc_byte_perm_S (in3, in2, selector);
const u32 tmp4 = hc_byte_perm_S (0, in3, selector);
#endif
const u32 tmp0 = hc_bytealign_be_S ( 0, in0, offset);
const u32 tmp1 = hc_bytealign_be_S (in0, in1, offset);
const u32 tmp2 = hc_bytealign_be_S (in1, in2, offset);
const u32 tmp3 = hc_bytealign_be_S (in2, in3, offset);
const u32 tmp4 = hc_bytealign_be_S (in3, 0, offset);
switch (offset / 4)
{
@ -172,30 +153,11 @@ DECLSPEC u32 memcat16c (PRIVATE_AS u32 *block, const u32 offset, PRIVATE_AS cons
u32 in2 = append[2];
u32 in3 = append[3];
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
const u32 tmp0 = hc_bytealign_be ( 0, in0, offset);
const u32 tmp1 = hc_bytealign_be (in0, in1, offset);
const u32 tmp2 = hc_bytealign_be (in1, in2, offset);
const u32 tmp3 = hc_bytealign_be (in2, in3, offset);
const u32 tmp4 = hc_bytealign_be (in3, 0, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((offset & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> ((offset & 3) * 8));
#endif
const u32 tmp0 = hc_byte_perm_S (in0, 0, selector);
const u32 tmp1 = hc_byte_perm_S (in1, in0, selector);
const u32 tmp2 = hc_byte_perm_S (in2, in1, selector);
const u32 tmp3 = hc_byte_perm_S (in3, in2, selector);
const u32 tmp4 = hc_byte_perm_S (0, in3, selector);
#endif
const u32 tmp0 = hc_bytealign_be_S ( 0, in0, offset);
const u32 tmp1 = hc_bytealign_be_S (in0, in1, offset);
const u32 tmp2 = hc_bytealign_be_S (in1, in2, offset);
const u32 tmp3 = hc_bytealign_be_S (in2, in3, offset);
const u32 tmp4 = hc_bytealign_be_S (in3, 0, offset);
u32 carry[4] = { 0 };
@ -336,32 +298,12 @@ DECLSPEC u32 memcat16s (PRIVATE_AS u32 *block, const u32 offset, PRIVATE_AS cons
u32 in3 = append[3];
u32 in4 = append[4];
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
const u32 tmp0 = hc_bytealign_be ( 0, in0, offset);
const u32 tmp1 = hc_bytealign_be (in0, in1, offset);
const u32 tmp2 = hc_bytealign_be (in1, in2, offset);
const u32 tmp3 = hc_bytealign_be (in2, in3, offset);
const u32 tmp4 = hc_bytealign_be (in3, in4, offset);
const u32 tmp5 = hc_bytealign_be (in4, 0, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((offset & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> ((offset & 3) * 8));
#endif
const u32 tmp0 = hc_byte_perm_S (in0, 0, selector);
const u32 tmp1 = hc_byte_perm_S (in1, in0, selector);
const u32 tmp2 = hc_byte_perm_S (in2, in1, selector);
const u32 tmp3 = hc_byte_perm_S (in3, in2, selector);
const u32 tmp4 = hc_byte_perm_S (in4, in3, selector);
const u32 tmp5 = hc_byte_perm_S (0, in4, selector);
#endif
const u32 tmp0 = hc_bytealign_be_S ( 0, in0, offset);
const u32 tmp1 = hc_bytealign_be_S (in0, in1, offset);
const u32 tmp2 = hc_bytealign_be_S (in1, in2, offset);
const u32 tmp3 = hc_bytealign_be_S (in2, in3, offset);
const u32 tmp4 = hc_bytealign_be_S (in3, in4, offset);
const u32 tmp5 = hc_bytealign_be_S (in4, 0, offset);
switch (offset / 4)
{
@ -477,32 +419,12 @@ DECLSPEC u32 memcat16sc (PRIVATE_AS u32 *block, const u32 offset, PRIVATE_AS con
u32 in3 = append[3];
u32 in4 = append[4];
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
const u32 tmp0 = hc_bytealign_be ( 0, in0, offset);
const u32 tmp1 = hc_bytealign_be (in0, in1, offset);
const u32 tmp2 = hc_bytealign_be (in1, in2, offset);
const u32 tmp3 = hc_bytealign_be (in2, in3, offset);
const u32 tmp4 = hc_bytealign_be (in3, in4, offset);
const u32 tmp5 = hc_bytealign_be (in4, 0, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((offset & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> ((offset & 3) * 8));
#endif
const u32 tmp0 = hc_byte_perm_S (in0, 0, selector);
const u32 tmp1 = hc_byte_perm_S (in1, in0, selector);
const u32 tmp2 = hc_byte_perm_S (in2, in1, selector);
const u32 tmp3 = hc_byte_perm_S (in3, in2, selector);
const u32 tmp4 = hc_byte_perm_S (in4, in3, selector);
const u32 tmp5 = hc_byte_perm_S (0, in4, selector);
#endif
const u32 tmp0 = hc_bytealign_be_S ( 0, in0, offset);
const u32 tmp1 = hc_bytealign_be_S (in0, in1, offset);
const u32 tmp2 = hc_bytealign_be_S (in1, in2, offset);
const u32 tmp3 = hc_bytealign_be_S (in2, in3, offset);
const u32 tmp4 = hc_bytealign_be_S (in3, in4, offset);
const u32 tmp5 = hc_bytealign_be_S (in4, 0, offset);
u32 carry[5] = { 0 };
@ -784,30 +706,11 @@ DECLSPEC u32 memcat20 (PRIVATE_AS u32 *block, const u32 offset, PRIVATE_AS const
u32 in2 = append[2];
u32 in3 = append[3];
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
const u32 tmp0 = hc_bytealign_be_S ( 0, in0, offset);
const u32 tmp1 = hc_bytealign_be_S (in0, in1, offset);
const u32 tmp2 = hc_bytealign_be_S (in1, in2, offset);
const u32 tmp3 = hc_bytealign_be_S (in2, in3, offset);
const u32 tmp4 = hc_bytealign_be_S (in3, 0, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((offset & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> ((offset & 3) * 8));
#endif
const u32 tmp0 = hc_byte_perm_S (in0, 0, selector);
const u32 tmp1 = hc_byte_perm_S (in1, in0, selector);
const u32 tmp2 = hc_byte_perm_S (in2, in1, selector);
const u32 tmp3 = hc_byte_perm_S (in3, in2, selector);
const u32 tmp4 = hc_byte_perm_S (0, in3, selector);
#endif
switch (offset / 4)
{
@ -950,30 +853,11 @@ DECLSPEC u32 memcat20_x80 (PRIVATE_AS u32 *block, const u32 offset, PRIVATE_AS c
u32 in3 = append[3];
u32 in4 = 0x80000000;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
const u32 tmp0 = hc_bytealign_be_S ( 0, in0, offset);
const u32 tmp1 = hc_bytealign_be_S (in0, in1, offset);
const u32 tmp2 = hc_bytealign_be_S (in1, in2, offset);
const u32 tmp3 = hc_bytealign_be_S (in2, in3, offset);
const u32 tmp4 = hc_bytealign_be_S (in3, in4, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((offset & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> ((offset & 3) * 8));
#endif
const u32 tmp0 = hc_byte_perm_S (in0, 0, selector);
const u32 tmp1 = hc_byte_perm_S (in1, in0, selector);
const u32 tmp2 = hc_byte_perm_S (in2, in1, selector);
const u32 tmp3 = hc_byte_perm_S (in3, in2, selector);
const u32 tmp4 = hc_byte_perm_S (in4, in3, selector);
#endif
switch (offset / 4)
{
@ -1116,32 +1000,12 @@ DECLSPEC u32 memcat24 (PRIVATE_AS u32 *block, const u32 offset, PRIVATE_AS const
u32 in3 = append[3];
u32 in4 = append[4];
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
const u32 tmp0 = hc_bytealign_be_S ( 0, in0, offset);
const u32 tmp1 = hc_bytealign_be_S (in0, in1, offset);
const u32 tmp2 = hc_bytealign_be_S (in1, in2, offset);
const u32 tmp3 = hc_bytealign_be_S (in2, in3, offset);
const u32 tmp4 = hc_bytealign_be_S (in3, in4, offset);
const u32 tmp5 = hc_bytealign_be_S (in4, 0, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((offset & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> ((offset & 3) * 8));
#endif
const u32 tmp0 = hc_byte_perm_S (in0, 0, selector);
const u32 tmp1 = hc_byte_perm_S (in1, in0, selector);
const u32 tmp2 = hc_byte_perm_S (in2, in1, selector);
const u32 tmp3 = hc_byte_perm_S (in3, in2, selector);
const u32 tmp4 = hc_byte_perm_S (in4, in3, selector);
const u32 tmp5 = hc_byte_perm_S (0, in4, selector);
#endif
switch (offset / 4)
{

View File

@ -62,16 +62,12 @@ DECLSPEC void m07800m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
* salt
*/
u32 salt_buf[8];
u32 salt_buf[4];
salt_buf[0] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[0]);
salt_buf[1] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[1]);
salt_buf[2] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[2]);
salt_buf[3] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[3]);
salt_buf[4] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[4]);
salt_buf[5] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[5]);
salt_buf[6] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[6]);
salt_buf[7] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[7]);
const u32 salt_len = salt_bufs[SALT_POS_HOST].salt_len;
@ -84,10 +80,10 @@ DECLSPEC void m07800m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
s0[1] = salt_buf[1];
s0[2] = salt_buf[2];
s0[3] = salt_buf[3];
s1[0] = salt_buf[4];
s1[1] = salt_buf[5];
s1[2] = salt_buf[6];
s1[3] = salt_buf[7];
s1[0] = 0;
s1[1] = 0;
s1[2] = 0;
s1[3] = 0;
s2[0] = 0;
s2[1] = 0;
s2[2] = 0;
@ -206,8 +202,8 @@ DECLSPEC void m07800m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
final[ 5] = w1[1];
final[ 6] = w1[2];
final[ 7] = w1[3];
final[ 8] = 0;
final[ 9] = 0;
final[ 8] = w2[0];
final[ 9] = w2[1];
final[10] = 0;
final[11] = 0;
final[12] = 0;
@ -279,16 +275,12 @@ DECLSPEC void m07800s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
* salt
*/
u32 salt_buf[8];
u32 salt_buf[4];
salt_buf[0] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[0]);
salt_buf[1] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[1]);
salt_buf[2] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[2]);
salt_buf[3] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[3]);
salt_buf[4] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[4]);
salt_buf[5] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[5]);
salt_buf[6] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[6]);
salt_buf[7] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[7]);
const u32 salt_len = salt_bufs[SALT_POS_HOST].salt_len;
@ -301,10 +293,10 @@ DECLSPEC void m07800s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
s0[1] = salt_buf[1];
s0[2] = salt_buf[2];
s0[3] = salt_buf[3];
s1[0] = salt_buf[4];
s1[1] = salt_buf[5];
s1[2] = salt_buf[6];
s1[3] = salt_buf[7];
s1[0] = 0;
s1[1] = 0;
s1[2] = 0;
s1[3] = 0;
s2[0] = 0;
s2[1] = 0;
s2[2] = 0;
@ -435,8 +427,8 @@ DECLSPEC void m07800s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
final[ 5] = w1[1];
final[ 6] = w1[2];
final[ 7] = w1[3];
final[ 8] = 0;
final[ 9] = 0;
final[ 8] = w2[0];
final[ 9] = w2[1];
final[10] = 0;
final[11] = 0;
final[12] = 0;
@ -514,8 +506,6 @@ KERNEL_FQ KERNEL_FA void m07800_m04 (KERN_ATTR_BASIC ())
* modifier
*/
//const u64 lid = get_local_id (0);
u32 w0[4];
w0[0] = pws[gid].i[ 0];
@ -569,8 +559,6 @@ KERNEL_FQ KERNEL_FA void m07800_m08 (KERN_ATTR_BASIC ())
* modifier
*/
//const u64 lid = get_local_id (0);
u32 w0[4];
w0[0] = pws[gid].i[ 0];
@ -610,6 +598,55 @@ KERNEL_FQ KERNEL_FA void m07800_m08 (KERN_ATTR_BASIC ())
KERNEL_FQ KERNEL_FA void m07800_m16 (KERN_ATTR_BASIC ())
{
/**
* base
*/
const u64 lid = get_local_id (0);
const u64 gid = get_global_id (0);
const u64 lsz = get_local_size (0);
if (gid >= GID_CNT) return;
/**
* modifier
*/
u32 w0[4];
w0[0] = pws[gid].i[ 0];
w0[1] = pws[gid].i[ 1];
w0[2] = pws[gid].i[ 2];
w0[3] = pws[gid].i[ 3];
u32 w1[4];
w1[0] = pws[gid].i[ 4];
w1[1] = pws[gid].i[ 5];
w1[2] = pws[gid].i[ 6];
w1[3] = pws[gid].i[ 7];
u32 w2[4];
w2[0] = pws[gid].i[ 8];
w2[1] = pws[gid].i[ 9];
w2[2] = pws[gid].i[10];
w2[3] = pws[gid].i[11];
u32 w3[4];
w3[0] = pws[gid].i[12];
w3[1] = pws[gid].i[13];
w3[2] = 0;
w3[3] = 0;
const u32 pw_len = pws[gid].pw_len & 63;
/**
* main
*/
m07800m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_extra0_buf, d_extra1_buf, d_extra2_buf, d_extra3_buf, kernel_param, gid, lid, lsz);
}
KERNEL_FQ KERNEL_FA void m07800_s04 (KERN_ATTR_BASIC ())
@ -628,8 +665,6 @@ KERNEL_FQ KERNEL_FA void m07800_s04 (KERN_ATTR_BASIC ())
* modifier
*/
//const u64 lid = get_local_id (0);
u32 w0[4];
w0[0] = pws[gid].i[ 0];
@ -683,8 +718,6 @@ KERNEL_FQ KERNEL_FA void m07800_s08 (KERN_ATTR_BASIC ())
* modifier
*/
//const u64 lid = get_local_id (0);
u32 w0[4];
w0[0] = pws[gid].i[ 0];
@ -724,4 +757,53 @@ KERNEL_FQ KERNEL_FA void m07800_s08 (KERN_ATTR_BASIC ())
KERNEL_FQ KERNEL_FA void m07800_s16 (KERN_ATTR_BASIC ())
{
/**
* base
*/
const u64 lid = get_local_id (0);
const u64 gid = get_global_id (0);
const u64 lsz = get_local_size (0);
if (gid >= GID_CNT) return;
/**
* modifier
*/
u32 w0[4];
w0[0] = pws[gid].i[ 0];
w0[1] = pws[gid].i[ 1];
w0[2] = pws[gid].i[ 2];
w0[3] = pws[gid].i[ 3];
u32 w1[4];
w1[0] = pws[gid].i[ 4];
w1[1] = pws[gid].i[ 5];
w1[2] = pws[gid].i[ 6];
w1[3] = pws[gid].i[ 7];
u32 w2[4];
w2[0] = pws[gid].i[ 8];
w2[1] = pws[gid].i[ 9];
w2[2] = pws[gid].i[10];
w2[3] = pws[gid].i[11];
u32 w3[4];
w3[0] = pws[gid].i[12];
w3[1] = pws[gid].i[13];
w3[2] = 0;
w3[3] = 0;
const u32 pw_len = pws[gid].pw_len & 63;
/**
* main
*/
m07800s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_extra0_buf, d_extra1_buf, d_extra2_buf, d_extra3_buf, kernel_param, gid, lid, lsz);
}

View File

@ -62,16 +62,12 @@ DECLSPEC void m07801m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
* salt
*/
u32 salt_buf[8];
u32 salt_buf[4];
salt_buf[0] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[0]);
salt_buf[1] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[1]);
salt_buf[2] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[2]);
salt_buf[3] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[3]);
salt_buf[4] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[4]);
salt_buf[5] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[5]);
salt_buf[6] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[6]);
salt_buf[7] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[7]);
const u32 salt_len = salt_bufs[SALT_POS_HOST].salt_len;
@ -84,10 +80,10 @@ DECLSPEC void m07801m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
s0[1] = salt_buf[1];
s0[2] = salt_buf[2];
s0[3] = salt_buf[3];
s1[0] = salt_buf[4];
s1[1] = salt_buf[5];
s1[2] = salt_buf[6];
s1[3] = salt_buf[7];
s1[0] = 0;
s1[1] = 0;
s1[2] = 0;
s1[3] = 0;
s2[0] = 0;
s2[1] = 0;
s2[2] = 0;
@ -206,8 +202,8 @@ DECLSPEC void m07801m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
final[ 5] = w1[1];
final[ 6] = w1[2];
final[ 7] = w1[3];
final[ 8] = 0;
final[ 9] = 0;
final[ 8] = w2[0];
final[ 9] = w2[1];
final[10] = 0;
final[11] = 0;
final[12] = 0;
@ -279,16 +275,12 @@ DECLSPEC void m07801s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
* salt
*/
u32 salt_buf[8];
u32 salt_buf[4];
salt_buf[0] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[0]);
salt_buf[1] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[1]);
salt_buf[2] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[2]);
salt_buf[3] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[3]);
salt_buf[4] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[4]);
salt_buf[5] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[5]);
salt_buf[6] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[6]);
salt_buf[7] = hc_swap32_S (salt_bufs[SALT_POS_HOST].salt_buf[7]);
const u32 salt_len = salt_bufs[SALT_POS_HOST].salt_len;
@ -301,10 +293,10 @@ DECLSPEC void m07801s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
s0[1] = salt_buf[1];
s0[2] = salt_buf[2];
s0[3] = salt_buf[3];
s1[0] = salt_buf[4];
s1[1] = salt_buf[5];
s1[2] = salt_buf[6];
s1[3] = salt_buf[7];
s1[0] = 0;
s1[1] = 0;
s1[2] = 0;
s1[3] = 0;
s2[0] = 0;
s2[1] = 0;
s2[2] = 0;
@ -435,8 +427,8 @@ DECLSPEC void m07801s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
final[ 5] = w1[1];
final[ 6] = w1[2];
final[ 7] = w1[3];
final[ 8] = 0;
final[ 9] = 0;
final[ 8] = w2[0];
final[ 9] = w2[1];
final[10] = 0;
final[11] = 0;
final[12] = 0;
@ -514,8 +506,6 @@ KERNEL_FQ KERNEL_FA void m07801_m04 (KERN_ATTR_BASIC ())
* modifier
*/
//const u64 lid = get_local_id (0);
u32 w0[4];
w0[0] = pws[gid].i[ 0];
@ -569,8 +559,6 @@ KERNEL_FQ KERNEL_FA void m07801_m08 (KERN_ATTR_BASIC ())
* modifier
*/
//const u64 lid = get_local_id (0);
u32 w0[4];
w0[0] = pws[gid].i[ 0];
@ -610,6 +598,55 @@ KERNEL_FQ KERNEL_FA void m07801_m08 (KERN_ATTR_BASIC ())
KERNEL_FQ KERNEL_FA void m07801_m16 (KERN_ATTR_BASIC ())
{
/**
* base
*/
const u64 lid = get_local_id (0);
const u64 gid = get_global_id (0);
const u64 lsz = get_local_size (0);
if (gid >= GID_CNT) return;
/**
* modifier
*/
u32 w0[4];
w0[0] = pws[gid].i[ 0];
w0[1] = pws[gid].i[ 1];
w0[2] = pws[gid].i[ 2];
w0[3] = pws[gid].i[ 3];
u32 w1[4];
w1[0] = pws[gid].i[ 4];
w1[1] = pws[gid].i[ 5];
w1[2] = pws[gid].i[ 6];
w1[3] = pws[gid].i[ 7];
u32 w2[4];
w2[0] = pws[gid].i[ 8];
w2[1] = pws[gid].i[ 9];
w2[2] = pws[gid].i[10];
w2[3] = pws[gid].i[11];
u32 w3[4];
w3[0] = pws[gid].i[12];
w3[1] = pws[gid].i[13];
w3[2] = 0;
w3[3] = 0;
const u32 pw_len = pws[gid].pw_len & 63;
/**
* main
*/
m07801m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_extra0_buf, d_extra1_buf, d_extra2_buf, d_extra3_buf, kernel_param, gid, lid, lsz);
}
KERNEL_FQ KERNEL_FA void m07801_s04 (KERN_ATTR_BASIC ())
@ -628,8 +665,6 @@ KERNEL_FQ KERNEL_FA void m07801_s04 (KERN_ATTR_BASIC ())
* modifier
*/
//const u64 lid = get_local_id (0);
u32 w0[4];
w0[0] = pws[gid].i[ 0];
@ -683,8 +718,6 @@ KERNEL_FQ KERNEL_FA void m07801_s08 (KERN_ATTR_BASIC ())
* modifier
*/
//const u64 lid = get_local_id (0);
u32 w0[4];
w0[0] = pws[gid].i[ 0];
@ -724,4 +757,53 @@ KERNEL_FQ KERNEL_FA void m07801_s08 (KERN_ATTR_BASIC ())
KERNEL_FQ KERNEL_FA void m07801_s16 (KERN_ATTR_BASIC ())
{
/**
* base
*/
const u64 lid = get_local_id (0);
const u64 gid = get_global_id (0);
const u64 lsz = get_local_size (0);
if (gid >= GID_CNT) return;
/**
* modifier
*/
u32 w0[4];
w0[0] = pws[gid].i[ 0];
w0[1] = pws[gid].i[ 1];
w0[2] = pws[gid].i[ 2];
w0[3] = pws[gid].i[ 3];
u32 w1[4];
w1[0] = pws[gid].i[ 4];
w1[1] = pws[gid].i[ 5];
w1[2] = pws[gid].i[ 6];
w1[3] = pws[gid].i[ 7];
u32 w2[4];
w2[0] = pws[gid].i[ 8];
w2[1] = pws[gid].i[ 9];
w2[2] = pws[gid].i[10];
w2[3] = pws[gid].i[11];
u32 w3[4];
w3[0] = pws[gid].i[12];
w3[1] = pws[gid].i[13];
w3[2] = 0;
w3[3] = 0;
const u32 pw_len = pws[gid].pw_len & 63;
/**
* main
*/
m07801s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, esalt_bufs, d_return_buf, d_extra0_buf, d_extra1_buf, d_extra2_buf, d_extra3_buf, kernel_param, gid, lid, lsz);
}

View File

@ -234,34 +234,13 @@ DECLSPEC void make_sc (LOCAL_AS u32 *sc, PRIVATE_AS const u32 *pw, const u32 pw_
u32 i;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
for (i = 0; i < pd; i++) sc[idx++] = pw[i];
sc[idx++] = pw[i]
| hc_bytealign_be (bl[0], 0, pm4);
for (i = 1; i < bd; i++) sc[idx++] = hc_bytealign_be (bl[i], bl[i - 1], pm4);
sc[idx++] = hc_bytealign_be (sc[0], bl[i - 1], pm4);
for (i = 1; i < 4; i++) sc[idx++] = hc_bytealign_be (sc[i], sc[i - 1], pm4);
sc[idx++] = hc_bytealign_be ( 0, sc[i - 1], pm4);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((pm4 & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> ((pm4 & 3) * 8));
#endif
for (i = 0; i < pd; i++) sc[idx++] = pw[i];
sc[idx++] = pw[i]
| hc_byte_perm ( 0, bl[0], selector);
for (i = 1; i < bd; i++) sc[idx++] = hc_byte_perm (bl[i - 1], bl[i], selector);
sc[idx++] = hc_byte_perm (bl[i - 1], sc[0], selector);
for (i = 1; i < 4; i++) sc[idx++] = hc_byte_perm (sc[i - 1], sc[i], selector);
sc[idx++] = hc_byte_perm (sc[i - 1], 0, selector);
#endif
| hc_bytealign_be_S (bl[0], 0, pm4);
for (i = 1; i < bd; i++) sc[idx++] = hc_bytealign_be_S (bl[i], bl[i - 1], pm4);
sc[idx++] = hc_bytealign_be_S (sc[0], bl[i - 1], pm4);
for (i = 1; i < 4; i++) sc[idx++] = hc_bytealign_be_S (sc[i], sc[i - 1], pm4);
sc[idx++] = hc_bytealign_be_S ( 0, sc[i - 1], pm4);
}
}
@ -272,27 +251,10 @@ DECLSPEC void make_pt_with_offset (PRIVATE_AS u32 *pt, const u32 offset, LOCAL_A
const u32 om = m % 4;
const u32 od = m / 4;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
pt[0] = hc_bytealign_be (sc[od + 1], sc[od + 0], om);
pt[1] = hc_bytealign_be (sc[od + 2], sc[od + 1], om);
pt[2] = hc_bytealign_be (sc[od + 3], sc[od + 2], om);
pt[3] = hc_bytealign_be (sc[od + 4], sc[od + 3], om);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((om & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> ((om & 3) * 8));
#endif
pt[0] = hc_byte_perm (sc[od + 0], sc[od + 1], selector);
pt[1] = hc_byte_perm (sc[od + 1], sc[od + 2], selector);
pt[2] = hc_byte_perm (sc[od + 2], sc[od + 3], selector);
pt[3] = hc_byte_perm (sc[od + 3], sc[od + 4], selector);
#endif
pt[0] = hc_bytealign_be_S (sc[od + 1], sc[od + 0], om);
pt[1] = hc_bytealign_be_S (sc[od + 2], sc[od + 1], om);
pt[2] = hc_bytealign_be_S (sc[od + 3], sc[od + 2], om);
pt[3] = hc_bytealign_be_S (sc[od + 4], sc[od + 3], om);
}
DECLSPEC void make_w_with_offset (PRIVATE_AS ctx_t *ctx, const u32 W_len, const u32 offset, LOCAL_AS const u32 *sc, const u32 pwbl_len, PRIVATE_AS u32 *iv, PRIVATE_AS const u32 *ks, SHM_TYPE u32 *s_te0, SHM_TYPE u32 *s_te1, SHM_TYPE u32 *s_te2, SHM_TYPE u32 *s_te3, SHM_TYPE u32 *s_te4)

View File

@ -42,24 +42,8 @@ DECLSPEC void memcat8c_be (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u3
u32 tmp0;
u32 tmp1;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
tmp0 = hc_bytealign_be (0, append, func_len);
tmp1 = hc_bytealign_be (append, 0, func_len);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((func_len & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> ((func_len & 3) * 8));
#endif
tmp0 = hc_byte_perm (append, 0, selector);
tmp1 = hc_byte_perm (0, append, selector);
#endif
tmp0 = hc_bytealign_be_S (0, append, func_len);
tmp1 = hc_bytealign_be_S (append, 0, func_len);
u32 carry = 0;

View File

@ -37,24 +37,8 @@ DECLSPEC void memcat8c_be (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u3
u32 tmp0;
u32 tmp1;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
tmp0 = hc_bytealign_be (0, append, func_len);
tmp1 = hc_bytealign_be (append, 0, func_len);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((func_len & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> ((func_len & 3) * 8));
#endif
tmp0 = hc_byte_perm (append, 0, selector);
tmp1 = hc_byte_perm (0, append, selector);
#endif
tmp0 = hc_bytealign_be_S (0, append, func_len);
tmp1 = hc_bytealign_be_S (append, 0, func_len);
u32 carry = 0;

View File

@ -51,7 +51,6 @@ DECLSPEC void memcat64c_be (PRIVATE_AS u32x *block, const u32 offset, PRIVATE_AS
u32x tmp15;
u32x tmp16;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
tmp00 = hc_bytealign_be ( 0, carry[ 0], offset);
tmp01 = hc_bytealign_be (carry[ 0], carry[ 1], offset);
tmp02 = hc_bytealign_be (carry[ 1], carry[ 2], offset);
@ -69,36 +68,6 @@ DECLSPEC void memcat64c_be (PRIVATE_AS u32x *block, const u32 offset, PRIVATE_AS
tmp14 = hc_bytealign_be (carry[13], carry[14], offset);
tmp15 = hc_bytealign_be (carry[14], carry[15], offset);
tmp16 = hc_bytealign_be (carry[15], 0, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((offset & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> ((offset & 3) * 8));
#endif
tmp00 = hc_byte_perm (carry[ 0], 0, selector);
tmp01 = hc_byte_perm (carry[ 1], carry[ 0], selector);
tmp02 = hc_byte_perm (carry[ 2], carry[ 1], selector);
tmp03 = hc_byte_perm (carry[ 3], carry[ 2], selector);
tmp04 = hc_byte_perm (carry[ 4], carry[ 3], selector);
tmp05 = hc_byte_perm (carry[ 5], carry[ 4], selector);
tmp06 = hc_byte_perm (carry[ 6], carry[ 5], selector);
tmp07 = hc_byte_perm (carry[ 7], carry[ 6], selector);
tmp08 = hc_byte_perm (carry[ 8], carry[ 7], selector);
tmp09 = hc_byte_perm (carry[ 9], carry[ 8], selector);
tmp10 = hc_byte_perm (carry[10], carry[ 9], selector);
tmp11 = hc_byte_perm (carry[11], carry[10], selector);
tmp12 = hc_byte_perm (carry[12], carry[11], selector);
tmp13 = hc_byte_perm (carry[13], carry[12], selector);
tmp14 = hc_byte_perm (carry[14], carry[13], selector);
tmp15 = hc_byte_perm (carry[15], carry[14], selector);
tmp16 = hc_byte_perm ( 0, carry[15], selector);
#endif
carry[ 0] = 0;
carry[ 1] = 0;

View File

@ -49,7 +49,6 @@ DECLSPEC void memcat64c_be (PRIVATE_AS u32x *block, const u32 offset, PRIVATE_AS
u32x tmp15;
u32x tmp16;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
tmp00 = hc_bytealign_be ( 0, carry[ 0], offset);
tmp01 = hc_bytealign_be (carry[ 0], carry[ 1], offset);
tmp02 = hc_bytealign_be (carry[ 1], carry[ 2], offset);
@ -67,36 +66,6 @@ DECLSPEC void memcat64c_be (PRIVATE_AS u32x *block, const u32 offset, PRIVATE_AS
tmp14 = hc_bytealign_be (carry[13], carry[14], offset);
tmp15 = hc_bytealign_be (carry[14], carry[15], offset);
tmp16 = hc_bytealign_be (carry[15], 0, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((offset & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> ((offset & 3) * 8));
#endif
tmp00 = hc_byte_perm (carry[ 0], 0, selector);
tmp01 = hc_byte_perm (carry[ 1], carry[ 0], selector);
tmp02 = hc_byte_perm (carry[ 2], carry[ 1], selector);
tmp03 = hc_byte_perm (carry[ 3], carry[ 2], selector);
tmp04 = hc_byte_perm (carry[ 4], carry[ 3], selector);
tmp05 = hc_byte_perm (carry[ 5], carry[ 4], selector);
tmp06 = hc_byte_perm (carry[ 6], carry[ 5], selector);
tmp07 = hc_byte_perm (carry[ 7], carry[ 6], selector);
tmp08 = hc_byte_perm (carry[ 8], carry[ 7], selector);
tmp09 = hc_byte_perm (carry[ 9], carry[ 8], selector);
tmp10 = hc_byte_perm (carry[10], carry[ 9], selector);
tmp11 = hc_byte_perm (carry[11], carry[10], selector);
tmp12 = hc_byte_perm (carry[12], carry[11], selector);
tmp13 = hc_byte_perm (carry[13], carry[12], selector);
tmp14 = hc_byte_perm (carry[14], carry[13], selector);
tmp15 = hc_byte_perm (carry[15], carry[14], selector);
tmp16 = hc_byte_perm ( 0, carry[15], selector);
#endif
carry[ 0] = 0;
carry[ 1] = 0;

View File

@ -48,7 +48,6 @@ DECLSPEC void memcat64c_be (PRIVATE_AS u32x *block, const u32 offset, PRIVATE_AS
u32x tmp15;
u32x tmp16;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
tmp00 = hc_bytealign_be ( 0, carry[ 0], offset);
tmp01 = hc_bytealign_be (carry[ 0], carry[ 1], offset);
tmp02 = hc_bytealign_be (carry[ 1], carry[ 2], offset);
@ -66,36 +65,6 @@ DECLSPEC void memcat64c_be (PRIVATE_AS u32x *block, const u32 offset, PRIVATE_AS
tmp14 = hc_bytealign_be (carry[13], carry[14], offset);
tmp15 = hc_bytealign_be (carry[14], carry[15], offset);
tmp16 = hc_bytealign_be (carry[15], 0, offset);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((offset & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> ((offset & 3) * 8));
#endif
tmp00 = hc_byte_perm (carry[ 0], 0, selector);
tmp01 = hc_byte_perm (carry[ 1], carry[ 0], selector);
tmp02 = hc_byte_perm (carry[ 2], carry[ 1], selector);
tmp03 = hc_byte_perm (carry[ 3], carry[ 2], selector);
tmp04 = hc_byte_perm (carry[ 4], carry[ 3], selector);
tmp05 = hc_byte_perm (carry[ 5], carry[ 4], selector);
tmp06 = hc_byte_perm (carry[ 6], carry[ 5], selector);
tmp07 = hc_byte_perm (carry[ 7], carry[ 6], selector);
tmp08 = hc_byte_perm (carry[ 8], carry[ 7], selector);
tmp09 = hc_byte_perm (carry[ 9], carry[ 8], selector);
tmp10 = hc_byte_perm (carry[10], carry[ 9], selector);
tmp11 = hc_byte_perm (carry[11], carry[10], selector);
tmp12 = hc_byte_perm (carry[12], carry[11], selector);
tmp13 = hc_byte_perm (carry[13], carry[12], selector);
tmp14 = hc_byte_perm (carry[14], carry[13], selector);
tmp15 = hc_byte_perm (carry[15], carry[14], selector);
tmp16 = hc_byte_perm ( 0, carry[15], selector);
#endif
carry[ 0] = 0;
carry[ 1] = 0;

View File

@ -392,7 +392,15 @@ KERNEL_FQ KERNEL_FA void m16600_m04 (KERN_ATTR_RULES_ESALT (electrum_wallet_t))
if (salt_type == 2)
{
if ((u8) (out[0] >> 0) != 'x') continue;
u8 version = (u8) (out[0] >> 0);
// https://github.com/spesmilo/electrum-docs/blob/master/xpub_version_bytes.rst
// Does not include testnet addresses
if (version != 'x' &&
version != 'y' &&
version != 'Y' &&
version != 'z' &&
version != 'Z' ) continue;
if ((u8) (out[0] >> 8) != 'p') continue;
if ((u8) (out[0] >> 16) != 'r') continue;
if ((u8) (out[0] >> 24) != 'v') continue;
@ -804,7 +812,15 @@ KERNEL_FQ KERNEL_FA void m16600_s04 (KERN_ATTR_RULES_ESALT (electrum_wallet_t))
if (salt_type == 2)
{
if ((u8) (out[0] >> 0) != 'x') continue;
u8 version = (u8) (out[0] >> 0);
// https://github.com/spesmilo/electrum-docs/blob/master/xpub_version_bytes.rst
// Does not include testnet addresses
if (version != 'x' &&
version != 'y' &&
version != 'Y' &&
version != 'z' &&
version != 'Z' ) continue;
if ((u8) (out[0] >> 8) != 'p') continue;
if ((u8) (out[0] >> 16) != 'r') continue;
if ((u8) (out[0] >> 24) != 'v') continue;

View File

@ -206,7 +206,15 @@ KERNEL_FQ KERNEL_FA void m16600_mxx (KERN_ATTR_RULES_ESALT (electrum_wallet_t))
if (salt_type == 2)
{
if ((u8) (out[0] >> 0) != 'x') continue;
u8 version = (u8) (out[0] >> 0);
// https://github.com/spesmilo/electrum-docs/blob/master/xpub_version_bytes.rst
// Does not include testnet addresses
if (version != 'x' &&
version != 'y' &&
version != 'Y' &&
version != 'z' &&
version != 'Z' ) continue;
if ((u8) (out[0] >> 8) != 'p') continue;
if ((u8) (out[0] >> 16) != 'r') continue;
if ((u8) (out[0] >> 24) != 'v') continue;
@ -424,7 +432,15 @@ KERNEL_FQ KERNEL_FA void m16600_sxx (KERN_ATTR_RULES_ESALT (electrum_wallet_t))
if (salt_type == 2)
{
if ((u8) (out[0] >> 0) != 'x') continue;
u8 version = (u8) (out[0] >> 0);
// https://github.com/spesmilo/electrum-docs/blob/master/xpub_version_bytes.rst
// Does not include testnet addresses
if (version != 'x' &&
version != 'y' &&
version != 'Y' &&
version != 'z' &&
version != 'Z' ) continue;
if ((u8) (out[0] >> 8) != 'p') continue;
if ((u8) (out[0] >> 16) != 'r') continue;
if ((u8) (out[0] >> 24) != 'v') continue;

View File

@ -448,7 +448,15 @@ KERNEL_FQ KERNEL_FA void m16600_m04 (KERN_ATTR_ESALT (electrum_wallet_t))
if (salt_type == 2)
{
if ((u8) (out[0] >> 0) != 'x') continue;
u8 version = (u8) (out[0] >> 0);
// https://github.com/spesmilo/electrum-docs/blob/master/xpub_version_bytes.rst
// Does not include testnet addresses
if (version != 'x' &&
version != 'y' &&
version != 'Y' &&
version != 'z' &&
version != 'Z' ) continue;
if ((u8) (out[0] >> 8) != 'p') continue;
if ((u8) (out[0] >> 16) != 'r') continue;
if ((u8) (out[0] >> 24) != 'v') continue;
@ -918,7 +926,15 @@ KERNEL_FQ KERNEL_FA void m16600_s04 (KERN_ATTR_ESALT (electrum_wallet_t))
if (salt_type == 2)
{
if ((u8) (out[0] >> 0) != 'x') continue;
u8 version = (u8) (out[0] >> 0);
// https://github.com/spesmilo/electrum-docs/blob/master/xpub_version_bytes.rst
// Does not include testnet addresses
if (version != 'x' &&
version != 'y' &&
version != 'Y' &&
version != 'z' &&
version != 'Z' ) continue;
if ((u8) (out[0] >> 8) != 'p') continue;
if ((u8) (out[0] >> 16) != 'r') continue;
if ((u8) (out[0] >> 24) != 'v') continue;

View File

@ -202,7 +202,15 @@ KERNEL_FQ KERNEL_FA void m16600_mxx (KERN_ATTR_ESALT (electrum_wallet_t))
if (salt_type == 2)
{
if ((u8) (out[0] >> 0) != 'x') continue;
u8 version = (u8) (out[0] >> 0);
// https://github.com/spesmilo/electrum-docs/blob/master/xpub_version_bytes.rst
// Does not include testnet addresses
if (version != 'x' &&
version != 'y' &&
version != 'Y' &&
version != 'z' &&
version != 'Z' ) continue;
if ((u8) (out[0] >> 8) != 'p') continue;
if ((u8) (out[0] >> 16) != 'r') continue;
if ((u8) (out[0] >> 24) != 'v') continue;
@ -418,7 +426,15 @@ KERNEL_FQ KERNEL_FA void m16600_sxx (KERN_ATTR_ESALT (electrum_wallet_t))
if (salt_type == 2)
{
if ((u8) (out[0] >> 0) != 'x') continue;
u8 version = (u8) (out[0] >> 0);
// https://github.com/spesmilo/electrum-docs/blob/master/xpub_version_bytes.rst
// Does not include testnet addresses
if (version != 'x' &&
version != 'y' &&
version != 'Y' &&
version != 'z' &&
version != 'Z' ) continue;
if ((u8) (out[0] >> 8) != 'p') continue;
if ((u8) (out[0] >> 16) != 'r') continue;
if ((u8) (out[0] >> 24) != 'v') continue;

View File

@ -167,7 +167,15 @@ DECLSPEC void m16600 (SHM_TYPE u32a *s_te0, SHM_TYPE u32a *s_te1, SHM_TYPE u32a
if (salt_type == 2)
{
if ((u8) (out[0] >> 0) != 'x') continue;
u8 version = (u8) (out[0] >> 0);
// https://github.com/spesmilo/electrum-docs/blob/master/xpub_version_bytes.rst
// Does not include testnet addresses
if (version != 'x' &&
version != 'y' &&
version != 'Y' &&
version != 'z' &&
version != 'Z' ) continue;
if ((u8) (out[0] >> 8) != 'p') continue;
if ((u8) (out[0] >> 16) != 'r') continue;
if ((u8) (out[0] >> 24) != 'v') continue;

View File

@ -215,7 +215,15 @@ KERNEL_FQ KERNEL_FA void m16600_mxx (KERN_ATTR_VECTOR_ESALT (electrum_wallet_t))
if (salt_type == 2)
{
if ((u8) (out[0] >> 0) != 'x') continue;
u8 version = (u8) (out[0] >> 0);
// https://github.com/spesmilo/electrum-docs/blob/master/xpub_version_bytes.rst
// Does not include testnet addresses
if (version != 'x' &&
version != 'y' &&
version != 'Y' &&
version != 'z' &&
version != 'Z' ) continue;
if ((u8) (out[0] >> 8) != 'p') continue;
if ((u8) (out[0] >> 16) != 'r') continue;
if ((u8) (out[0] >> 24) != 'v') continue;
@ -444,7 +452,15 @@ KERNEL_FQ KERNEL_FA void m16600_sxx (KERN_ATTR_VECTOR_ESALT (electrum_wallet_t))
if (salt_type == 2)
{
if ((u8) (out[0] >> 0) != 'x') continue;
u8 version = (u8) (out[0] >> 0);
// https://github.com/spesmilo/electrum-docs/blob/master/xpub_version_bytes.rst
// Does not include testnet addresses
if (version != 'x' &&
version != 'y' &&
version != 'Y' &&
version != 'z' &&
version != 'Z' ) continue;
if ((u8) (out[0] >> 8) != 'p') continue;
if ((u8) (out[0] >> 16) != 'r') continue;
if ((u8) (out[0] >> 24) != 'v') continue;

View File

@ -42,31 +42,6 @@ typedef struct gpg_tmp
} gpg_tmp_t;
DECLSPEC u32 hc_bytealign_le_S (const u32 a, const u32 b, const int c)
{
const int c_mod_4 = c & 3;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
const u32 r = l32_from_64_S ((v64_from_v32ab_S (b, a) >> (c_mod_4 * 8)));
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> (c_mod_4 * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> (c_mod_4 * 8));
#endif
const u32 r = hc_byte_perm (b, a, selector);
#endif
return r;
}
DECLSPEC void memcat_le_S (PRIVATE_AS u32 *block, const u32 offset, PRIVATE_AS const u32 *append, u32 len)
{
const u32 start_index = (offset - 1) >> 2;
@ -74,11 +49,11 @@ DECLSPEC void memcat_le_S (PRIVATE_AS u32 *block, const u32 offset, PRIVATE_AS c
const int off_mod_4 = offset & 3;
const int off_minus_4 = 4 - off_mod_4;
block[start_index] |= hc_bytealign_le_S (append[0], 0, off_minus_4);
block[start_index] |= hc_bytealign_be_S (append[0], 0, off_minus_4);
for (u32 idx = 1; idx < count; idx++)
{
block[start_index + idx] = hc_bytealign_le_S (append[idx], append[idx - 1], off_minus_4);
block[start_index + idx] = hc_bytealign_be_S (append[idx], append[idx - 1], off_minus_4);
}
}
@ -188,11 +163,11 @@ DECLSPEC int check_decoded_data (PRIVATE_AS u32 *decoded_data, const u32 decoded
u32 expected_sha1[5];
expected_sha1[0] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 1], decoded_data[sha1_u32_off + 0], sha1_byte_off);
expected_sha1[1] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 2], decoded_data[sha1_u32_off + 1], sha1_byte_off);
expected_sha1[2] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 3], decoded_data[sha1_u32_off + 2], sha1_byte_off);
expected_sha1[3] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 4], decoded_data[sha1_u32_off + 3], sha1_byte_off);
expected_sha1[4] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 5], decoded_data[sha1_u32_off + 4], sha1_byte_off);
expected_sha1[0] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 1], decoded_data[sha1_u32_off + 0], sha1_byte_off);
expected_sha1[1] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 2], decoded_data[sha1_u32_off + 1], sha1_byte_off);
expected_sha1[2] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 3], decoded_data[sha1_u32_off + 2], sha1_byte_off);
expected_sha1[3] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 4], decoded_data[sha1_u32_off + 3], sha1_byte_off);
expected_sha1[4] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 5], decoded_data[sha1_u32_off + 4], sha1_byte_off);
memzero_le_S (decoded_data, sha1_byte_off, 384 * sizeof(u32));

View File

@ -47,31 +47,6 @@ typedef struct gpg_tmp
} gpg_tmp_t;
DECLSPEC u32 hc_bytealign_le_S (const u32 a, const u32 b, const int c)
{
const int c_mod_4 = c & 3;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
const u32 r = l32_from_64_S ((v64_from_v32ab_S (b, a) >> (c_mod_4 * 8)));
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> (c_mod_4 * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> (c_mod_4 * 8));
#endif
const u32 r = hc_byte_perm (b, a, selector);
#endif
return r;
}
DECLSPEC void memcat_le_S (PRIVATE_AS u32 *block, const u32 offset, PRIVATE_AS const u32 *append, u32 len)
{
const u32 start_index = (offset - 1) >> 2;
@ -79,11 +54,11 @@ DECLSPEC void memcat_le_S (PRIVATE_AS u32 *block, const u32 offset, PRIVATE_AS c
const int off_mod_4 = offset & 3;
const int off_minus_4 = 4 - off_mod_4;
block[start_index] |= hc_bytealign_le_S (append[0], 0, off_minus_4);
block[start_index] |= hc_bytealign_be_S (append[0], 0, off_minus_4);
for (u32 idx = 1; idx < count; idx++)
{
block[start_index + idx] = hc_bytealign_le_S (append[idx], append[idx - 1], off_minus_4);
block[start_index + idx] = hc_bytealign_be_S (append[idx], append[idx - 1], off_minus_4);
}
}
@ -193,11 +168,11 @@ DECLSPEC int check_decoded_data (PRIVATE_AS u32 *decoded_data, const u32 decoded
u32 expected_sha1[5];
expected_sha1[0] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 1], decoded_data[sha1_u32_off + 0], sha1_byte_off);
expected_sha1[1] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 2], decoded_data[sha1_u32_off + 1], sha1_byte_off);
expected_sha1[2] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 3], decoded_data[sha1_u32_off + 2], sha1_byte_off);
expected_sha1[3] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 4], decoded_data[sha1_u32_off + 3], sha1_byte_off);
expected_sha1[4] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 5], decoded_data[sha1_u32_off + 4], sha1_byte_off);
expected_sha1[0] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 1], decoded_data[sha1_u32_off + 0], sha1_byte_off);
expected_sha1[1] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 2], decoded_data[sha1_u32_off + 1], sha1_byte_off);
expected_sha1[2] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 3], decoded_data[sha1_u32_off + 2], sha1_byte_off);
expected_sha1[3] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 4], decoded_data[sha1_u32_off + 3], sha1_byte_off);
expected_sha1[4] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 5], decoded_data[sha1_u32_off + 4], sha1_byte_off);
memzero_le_S (decoded_data, sha1_byte_off, 384 * sizeof(u32));

View File

@ -43,31 +43,6 @@ typedef struct gpg_tmp
} gpg_tmp_t;
DECLSPEC u32 hc_bytealign_le_S (const u32 a, const u32 b, const int c)
{
const int c_mod_4 = c & 3;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
const u32 r = l32_from_64_S ((v64_from_v32ab_S (b, a) >> (c_mod_4 * 8)));
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> (c_mod_4 * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> (c_mod_4 * 8));
#endif
const u32 r = hc_byte_perm (b, a, selector);
#endif
return r;
}
DECLSPEC void memcat_le_S (PRIVATE_AS u32 *block, const u32 offset, PRIVATE_AS const u32 *append, u32 len)
{
const u32 start_index = (offset - 1) >> 2;
@ -75,11 +50,11 @@ DECLSPEC void memcat_le_S (PRIVATE_AS u32 *block, const u32 offset, PRIVATE_AS c
const int off_mod_4 = offset & 3;
const int off_minus_4 = 4 - off_mod_4;
block[start_index] |= hc_bytealign_le_S (append[0], 0, off_minus_4);
block[start_index] |= hc_bytealign_be_S (append[0], 0, off_minus_4);
for (u32 idx = 1; idx < count; idx++)
{
block[start_index + idx] = hc_bytealign_le_S (append[idx], append[idx - 1], off_minus_4);
block[start_index + idx] = hc_bytealign_be_S (append[idx], append[idx - 1], off_minus_4);
}
}
@ -189,11 +164,11 @@ DECLSPEC int check_decoded_data (PRIVATE_AS u32 *decoded_data, const u32 decoded
u32 expected_sha1[5];
expected_sha1[0] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 1], decoded_data[sha1_u32_off + 0], sha1_byte_off);
expected_sha1[1] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 2], decoded_data[sha1_u32_off + 1], sha1_byte_off);
expected_sha1[2] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 3], decoded_data[sha1_u32_off + 2], sha1_byte_off);
expected_sha1[3] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 4], decoded_data[sha1_u32_off + 3], sha1_byte_off);
expected_sha1[4] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 5], decoded_data[sha1_u32_off + 4], sha1_byte_off);
expected_sha1[0] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 1], decoded_data[sha1_u32_off + 0], sha1_byte_off);
expected_sha1[1] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 2], decoded_data[sha1_u32_off + 1], sha1_byte_off);
expected_sha1[2] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 3], decoded_data[sha1_u32_off + 2], sha1_byte_off);
expected_sha1[3] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 4], decoded_data[sha1_u32_off + 3], sha1_byte_off);
expected_sha1[4] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 5], decoded_data[sha1_u32_off + 4], sha1_byte_off);
memzero_le_S (decoded_data, sha1_byte_off, 384 * sizeof(u32));

View File

@ -43,31 +43,6 @@ typedef struct gpg_tmp
} gpg_tmp_t;
DECLSPEC u32 hc_bytealign_le_S (const u32 a, const u32 b, const int c)
{
const int c_mod_4 = c & 3;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
const u32 r = l32_from_64_S ((v64_from_v32ab_S (b, a) >> (c_mod_4 * 8)));
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> (c_mod_4 * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> (c_mod_4 * 8));
#endif
const u32 r = hc_byte_perm (b, a, selector);
#endif
return r;
}
DECLSPEC void memcat_le_S (PRIVATE_AS u32 *block, const u32 offset, PRIVATE_AS const u32 *append, u32 len)
{
const u32 start_index = (offset - 1) >> 2;
@ -75,11 +50,11 @@ DECLSPEC void memcat_le_S (PRIVATE_AS u32 *block, const u32 offset, PRIVATE_AS c
const int off_mod_4 = offset & 3;
const int off_minus_4 = 4 - off_mod_4;
block[start_index] |= hc_bytealign_le_S (append[0], 0, off_minus_4);
block[start_index] |= hc_bytealign_be_S (append[0], 0, off_minus_4);
for (u32 idx = 1; idx < count; idx++)
{
block[start_index + idx] = hc_bytealign_le_S (append[idx], append[idx - 1], off_minus_4);
block[start_index + idx] = hc_bytealign_be_S (append[idx], append[idx - 1], off_minus_4);
}
}
@ -168,11 +143,11 @@ DECLSPEC int check_decoded_data (PRIVATE_AS u32 *decoded_data, const u32 decoded
u32 expected_sha1[5];
expected_sha1[0] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 1], decoded_data[sha1_u32_off + 0], sha1_byte_off);
expected_sha1[1] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 2], decoded_data[sha1_u32_off + 1], sha1_byte_off);
expected_sha1[2] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 3], decoded_data[sha1_u32_off + 2], sha1_byte_off);
expected_sha1[3] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 4], decoded_data[sha1_u32_off + 3], sha1_byte_off);
expected_sha1[4] = hc_bytealign_le_S (decoded_data[sha1_u32_off + 5], decoded_data[sha1_u32_off + 4], sha1_byte_off);
expected_sha1[0] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 1], decoded_data[sha1_u32_off + 0], sha1_byte_off);
expected_sha1[1] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 2], decoded_data[sha1_u32_off + 1], sha1_byte_off);
expected_sha1[2] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 3], decoded_data[sha1_u32_off + 2], sha1_byte_off);
expected_sha1[3] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 4], decoded_data[sha1_u32_off + 3], sha1_byte_off);
expected_sha1[4] = hc_bytealign_be_S (decoded_data[sha1_u32_off + 5], decoded_data[sha1_u32_off + 4], sha1_byte_off);

View File

@ -37,7 +37,7 @@ typedef struct md5_double_salt
} md5_double_salt_t;
KERNEL_FQ KERNEL_FA void m21310_mxx (KERN_ATTR_ESALT (md5_double_salt))
KERNEL_FQ KERNEL_FA void m21310_mxx (KERN_ATTR_ESALT (md5_double_salt_t))
{
/**
* modifier
@ -149,7 +149,7 @@ KERNEL_FQ KERNEL_FA void m21310_mxx (KERN_ATTR_ESALT (md5_double_salt))
}
}
KERNEL_FQ KERNEL_FA void m21310_sxx (KERN_ATTR_ESALT (md5_double_salt))
KERNEL_FQ KERNEL_FA void m21310_sxx (KERN_ATTR_ESALT (md5_double_salt_t))
{
/**
* modifier

View File

@ -37,7 +37,7 @@ typedef struct md5_double_salt
} md5_double_salt_t;
KERNEL_FQ KERNEL_FA void m21310_mxx (KERN_ATTR_VECTOR_ESALT (md5_double_salt))
KERNEL_FQ KERNEL_FA void m21310_mxx (KERN_ATTR_VECTOR_ESALT (md5_double_salt_t))
{
/**
* modifier
@ -168,7 +168,7 @@ KERNEL_FQ KERNEL_FA void m21310_mxx (KERN_ATTR_VECTOR_ESALT (md5_double_salt))
}
}
KERNEL_FQ KERNEL_FA void m21310_sxx (KERN_ATTR_VECTOR_ESALT (md5_double_salt))
KERNEL_FQ KERNEL_FA void m21310_sxx (KERN_ATTR_VECTOR_ESALT (md5_double_salt_t))
{
/**
* modifier

View File

@ -655,12 +655,12 @@ KERNEL_FQ KERNEL_FA void m21800_comp (KERN_ATTR_TMPS_ESALT (electrum_tmp_t, elec
if ((entropy >= MIN_ENTROPY) && (entropy <= MAX_ENTROPY))
{
if (hc_atomic_inc (&hashes_shown[DIGESTS_OFFSET_HOST]) == 0)
if (hc_atomic_inc (&hashes_shown[digest_cur]) == 0)
{
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, DIGESTS_OFFSET_HOST + 0, gid, 0, 0, 0);
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, digest_cur, gid, 0, 0, 0);
}
return;
//return;
}
}
}
@ -676,11 +676,11 @@ KERNEL_FQ KERNEL_FA void m21800_comp (KERN_ATTR_TMPS_ESALT (electrum_tmp_t, elec
((tmp[0] == 0x7b) && (tmp[1] == 0x0d) && (tmp[2] == 0x0a) && (tmp[3] == 0x20) &&
(tmp[4] == 0x20) && (tmp[5] == 0x20) && (tmp[6] == 0x20) && (tmp[7] == 0x22)))
{
if (hc_atomic_inc (&hashes_shown[DIGESTS_OFFSET_HOST]) == 0)
if (hc_atomic_inc (&hashes_shown[digest_cur]) == 0)
{
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, DIGESTS_OFFSET_HOST + 0, gid, 0, 0, 0);
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, digest_cur, gid, 0, 0, 0);
}
return;
//return;
}
}

View File

@ -145,24 +145,8 @@ DECLSPEC void memcat8c_be (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u3
u32 tmp0;
u32 tmp1;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
tmp0 = hc_bytealign_be (0, append, func_len);
tmp1 = hc_bytealign_be (append, 0, func_len);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((func_len & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> ((func_len & 3) * 8));
#endif
tmp0 = hc_byte_perm (append, 0, selector);
tmp1 = hc_byte_perm (0, append, selector);
#endif
tmp0 = hc_bytealign_be_S (0, append, func_len);
tmp1 = hc_bytealign_be_S (append, 0, func_len);
u32 carry = 0;

View File

@ -58,24 +58,8 @@ DECLSPEC void memcat8c_be (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u3
u32 tmp0;
u32 tmp1;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
tmp0 = hc_bytealign_be (0, append, func_len);
tmp1 = hc_bytealign_be (append, 0, func_len);
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((func_len & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S (0x0706050403020100UL >> ((func_len & 3) * 8));
#endif
tmp0 = hc_byte_perm (append, 0, selector);
tmp1 = hc_byte_perm (0, append, selector);
#endif
tmp0 = hc_bytealign_be_S (0, append, func_len);
tmp1 = hc_bytealign_be_S (append, 0, func_len);
u32 carry = 0;

View File

@ -368,7 +368,7 @@ KERNEL_FQ KERNEL_FA void m26610_comp (KERN_ATTR_TMPS_ESALT (pbkdf2_sha256_tmp_t,
AES_GCM_decrypt (key, J0, ct, 32, pt, s_te0, s_te1, s_te2, s_te3, s_te4);
const int correct = is_valid_printable_32 (pt[0])
int correct = is_valid_printable_32 (pt[0])
+ is_valid_printable_32 (pt[1])
+ is_valid_printable_32 (pt[2])
+ is_valid_printable_32 (pt[3])
@ -379,6 +379,37 @@ KERNEL_FQ KERNEL_FA void m26610_comp (KERN_ATTR_TMPS_ESALT (pbkdf2_sha256_tmp_t,
if (correct != 8) return;
u32 ct2[8];
ct2[0] = pbkdf2_sha256_aes_gcm->ct_buf[8]; // third block of ciphertext
ct2[1] = pbkdf2_sha256_aes_gcm->ct_buf[9];
ct2[2] = pbkdf2_sha256_aes_gcm->ct_buf[10];
ct2[3] = pbkdf2_sha256_aes_gcm->ct_buf[11];
ct2[4] = pbkdf2_sha256_aes_gcm->ct_buf[12]; // fourth block of ciphertext
ct2[5] = pbkdf2_sha256_aes_gcm->ct_buf[13];
ct2[6] = pbkdf2_sha256_aes_gcm->ct_buf[14];
ct2[7] = pbkdf2_sha256_aes_gcm->ct_buf[15];
// Only a single increment as the previous AES_GCM_DECRYPT already does one for us
J0[3]++;
u32 pt2[8] = { 0 };
AES_GCM_decrypt (key, J0, ct2, 32, pt2, s_te0, s_te1, s_te2, s_te3, s_te4);
correct = is_valid_printable_32 (pt2[0])
+ is_valid_printable_32 (pt2[1])
+ is_valid_printable_32 (pt2[2])
+ is_valid_printable_32 (pt2[3])
+ is_valid_printable_32 (pt2[4])
+ is_valid_printable_32 (pt2[5])
+ is_valid_printable_32 (pt2[6])
+ is_valid_printable_32 (pt2[7]);
// We need to check a third and fourth block to avoid extremely rare false-positives. See:
// https://github.com/hashcat/hashcat/issues/4121
if (correct != 8) return;
/*
const int pt_len = 28; // not using 32 byte but 28 because our UTF8 allows up to 4 byte per character and since we decrypt 32 byte
// only we can't guarantee it is not in the middle of a UTF8 byte stream at that point

View File

@ -29,7 +29,6 @@ DECLSPEC void shift_buffer_by_offset (PRIVATE_AS u32 *w0, const u32 offset)
{
const int offset_switch = offset / 4;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
switch (offset_switch)
{
case 0:
@ -67,56 +66,6 @@ DECLSPEC void shift_buffer_by_offset (PRIVATE_AS u32 *w0, const u32 offset)
w0[0] = 0;
break;
}
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((offset & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S(0x0706050403020100UL >> ((offset & 3) * 8));
#endif
switch (offset_switch)
{
case 0:
w0[3] = hc_byte_perm_S (w0[3], w0[2], selector);
w0[2] = hc_byte_perm_S (w0[2], w0[1], selector);
w0[1] = hc_byte_perm_S (w0[1], w0[0], selector);
w0[0] = hc_byte_perm_S (w0[0], 0, selector);
break;
case 1:
w0[3] = hc_byte_perm_S (w0[2], w0[1], selector);
w0[2] = hc_byte_perm_S (w0[1], w0[0], selector);
w0[1] = hc_byte_perm_S (w0[0], 0, selector);
w0[0] = 0;
break;
case 2:
w0[3] = hc_byte_perm_S (w0[1], w0[0], selector);
w0[2] = hc_byte_perm_S (w0[0], 0, selector);
w0[1] = 0;
w0[0] = 0;
break;
case 3:
w0[3] = hc_byte_perm_S (w0[0], 0, selector);
w0[2] = 0;
w0[1] = 0;
w0[0] = 0;
break;
default:
w0[3] = 0;
w0[2] = 0;
w0[1] = 0;
w0[0] = 0;
break;
}
#endif
}
DECLSPEC void aes256_scrt_format (PRIVATE_AS u32 *aes_ks, PRIVATE_AS u32 *pw, const u32 pw_len, PRIVATE_AS u32 *hash, PRIVATE_AS u32 *out, SHM_TYPE u32 *s_te0, SHM_TYPE u32 *s_te1, SHM_TYPE u32 *s_te2, SHM_TYPE u32 *s_te3, SHM_TYPE u32 *s_te4)

View File

@ -31,7 +31,6 @@ DECLSPEC void shift_buffer_by_offset (PRIVATE_AS u32 *w0, const u32 offset)
{
const int offset_switch = offset / 4;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
switch (offset_switch)
{
case 0:
@ -69,56 +68,6 @@ DECLSPEC void shift_buffer_by_offset (PRIVATE_AS u32 *w0, const u32 offset)
w0[0] = 0;
break;
}
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((offset & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S(0x0706050403020100UL >> ((offset & 3) * 8));
#endif
switch (offset_switch)
{
case 0:
w0[3] = hc_byte_perm_S (w0[3], w0[2], selector);
w0[2] = hc_byte_perm_S (w0[2], w0[1], selector);
w0[1] = hc_byte_perm_S (w0[1], w0[0], selector);
w0[0] = hc_byte_perm_S (w0[0], 0, selector);
break;
case 1:
w0[3] = hc_byte_perm_S (w0[2], w0[1], selector);
w0[2] = hc_byte_perm_S (w0[1], w0[0], selector);
w0[1] = hc_byte_perm_S (w0[0], 0, selector);
w0[0] = 0;
break;
case 2:
w0[3] = hc_byte_perm_S (w0[1], w0[0], selector);
w0[2] = hc_byte_perm_S (w0[0], 0, selector);
w0[1] = 0;
w0[0] = 0;
break;
case 3:
w0[3] = hc_byte_perm_S (w0[0], 0, selector);
w0[2] = 0;
w0[1] = 0;
w0[0] = 0;
break;
default:
w0[3] = 0;
w0[2] = 0;
w0[1] = 0;
w0[0] = 0;
break;
}
#endif
}
DECLSPEC void aes256_scrt_format (PRIVATE_AS u32 *aes_ks, PRIVATE_AS u32 *pw, const u32 pw_len, PRIVATE_AS u32 *hash, PRIVATE_AS u32 *out, SHM_TYPE u32 *s_te0, SHM_TYPE u32 *s_te1, SHM_TYPE u32 *s_te2, SHM_TYPE u32 *s_te3, SHM_TYPE u32 *s_te4)

View File

@ -26,7 +26,6 @@ DECLSPEC void shift_buffer_by_offset (PRIVATE_AS u32 *w0, const u32 offset)
{
const int offset_switch = offset / 4;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
switch (offset_switch)
{
case 0:
@ -64,56 +63,6 @@ DECLSPEC void shift_buffer_by_offset (PRIVATE_AS u32 *w0, const u32 offset)
w0[0] = 0;
break;
}
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((offset & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S(0x0706050403020100UL >> ((offset & 3) * 8));
#endif
switch (offset_switch)
{
case 0:
w0[3] = hc_byte_perm_S (w0[3], w0[2], selector);
w0[2] = hc_byte_perm_S (w0[2], w0[1], selector);
w0[1] = hc_byte_perm_S (w0[1], w0[0], selector);
w0[0] = hc_byte_perm_S (w0[0], 0, selector);
break;
case 1:
w0[3] = hc_byte_perm_S (w0[2], w0[1], selector);
w0[2] = hc_byte_perm_S (w0[1], w0[0], selector);
w0[1] = hc_byte_perm_S (w0[0], 0, selector);
w0[0] = 0;
break;
case 2:
w0[3] = hc_byte_perm_S (w0[1], w0[0], selector);
w0[2] = hc_byte_perm_S (w0[0], 0, selector);
w0[1] = 0;
w0[0] = 0;
break;
case 3:
w0[3] = hc_byte_perm_S (w0[0], 0, selector);
w0[2] = 0;
w0[1] = 0;
w0[0] = 0;
break;
default:
w0[3] = 0;
w0[2] = 0;
w0[1] = 0;
w0[0] = 0;
break;
}
#endif
}
DECLSPEC void aes256_scrt_format (PRIVATE_AS u32 *aes_ks, PRIVATE_AS u32 *pw, const u32 pw_len, PRIVATE_AS u32 *hash, PRIVATE_AS u32 *out, SHM_TYPE u32 *s_te0, SHM_TYPE u32 *s_te1, SHM_TYPE u32 *s_te2, SHM_TYPE u32 *s_te3, SHM_TYPE u32 *s_te4)

View File

@ -31,7 +31,6 @@ DECLSPEC void shift_buffer_by_offset (PRIVATE_AS u32 *w0, const u32 offset)
{
const int offset_switch = offset / 4;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
switch (offset_switch)
{
case 0:
@ -69,56 +68,6 @@ DECLSPEC void shift_buffer_by_offset (PRIVATE_AS u32 *w0, const u32 offset)
w0[0] = 0;
break;
}
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((offset & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S(0x0706050403020100UL >> ((offset & 3) * 8));
#endif
switch (offset_switch)
{
case 0:
w0[3] = hc_byte_perm_S (w0[3], w0[2], selector);
w0[2] = hc_byte_perm_S (w0[2], w0[1], selector);
w0[1] = hc_byte_perm_S (w0[1], w0[0], selector);
w0[0] = hc_byte_perm_S (w0[0], 0, selector);
break;
case 1:
w0[3] = hc_byte_perm_S (w0[2], w0[1], selector);
w0[2] = hc_byte_perm_S (w0[1], w0[0], selector);
w0[1] = hc_byte_perm_S (w0[0], 0, selector);
w0[0] = 0;
break;
case 2:
w0[3] = hc_byte_perm_S (w0[1], w0[0], selector);
w0[2] = hc_byte_perm_S (w0[0], 0, selector);
w0[1] = 0;
w0[0] = 0;
break;
case 3:
w0[3] = hc_byte_perm_S (w0[0], 0, selector);
w0[2] = 0;
w0[1] = 0;
w0[0] = 0;
break;
default:
w0[3] = 0;
w0[2] = 0;
w0[1] = 0;
w0[0] = 0;
break;
}
#endif
}
DECLSPEC void aes256_scrt_format (PRIVATE_AS u32 *aes_ks, PRIVATE_AS u32 *pw, const u32 pw_len, PRIVATE_AS u32 *hash, PRIVATE_AS u32 *out, SHM_TYPE u32 *s_te0, SHM_TYPE u32 *s_te1, SHM_TYPE u32 *s_te2, SHM_TYPE u32 *s_te3, SHM_TYPE u32 *s_te4)

View File

@ -28,7 +28,6 @@ DECLSPEC void shift_buffer_by_offset (PRIVATE_AS u32 *w0, const u32 offset)
{
const int offset_switch = offset / 4;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
switch (offset_switch)
{
case 0:
@ -66,56 +65,6 @@ DECLSPEC void shift_buffer_by_offset (PRIVATE_AS u32 *w0, const u32 offset)
w0[0] = 0;
break;
}
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((offset & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S(0x0706050403020100UL >> ((offset & 3) * 8));
#endif
switch (offset_switch)
{
case 0:
w0[3] = hc_byte_perm_S (w0[3], w0[2], selector);
w0[2] = hc_byte_perm_S (w0[2], w0[1], selector);
w0[1] = hc_byte_perm_S (w0[1], w0[0], selector);
w0[0] = hc_byte_perm_S (w0[0], 0, selector);
break;
case 1:
w0[3] = hc_byte_perm_S (w0[2], w0[1], selector);
w0[2] = hc_byte_perm_S (w0[1], w0[0], selector);
w0[1] = hc_byte_perm_S (w0[0], 0, selector);
w0[0] = 0;
break;
case 2:
w0[3] = hc_byte_perm_S (w0[1], w0[0], selector);
w0[2] = hc_byte_perm_S (w0[0], 0, selector);
w0[1] = 0;
w0[0] = 0;
break;
case 3:
w0[3] = hc_byte_perm_S (w0[0], 0, selector);
w0[2] = 0;
w0[1] = 0;
w0[0] = 0;
break;
default:
w0[3] = 0;
w0[2] = 0;
w0[1] = 0;
w0[0] = 0;
break;
}
#endif
}
DECLSPEC void aes256_scrt_format (PRIVATE_AS u32 *aes_ks, PRIVATE_AS u32 *pw, const u32 pw_len, PRIVATE_AS u32 *hash, PRIVATE_AS u32 *out, SHM_TYPE u32 *s_te0, SHM_TYPE u32 *s_te1, SHM_TYPE u32 *s_te2, SHM_TYPE u32 *s_te3, SHM_TYPE u32 *s_te4)

View File

@ -31,7 +31,6 @@ DECLSPEC void shift_buffer_by_offset (PRIVATE_AS u32 *w0, const u32 offset)
{
const int offset_switch = offset / 4;
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 0) || defined IS_GENERIC
switch (offset_switch)
{
case 0:
@ -69,56 +68,6 @@ DECLSPEC void shift_buffer_by_offset (PRIVATE_AS u32 *w0, const u32 offset)
w0[0] = 0;
break;
}
#endif
#if ((defined IS_AMD || defined IS_HIP) && HAS_VPERM == 1) || defined IS_NV
#if defined IS_NV
const int selector = (0x76543210 >> ((offset & 3) * 4)) & 0xffff;
#endif
#if (defined IS_AMD || defined IS_HIP)
const int selector = l32_from_64_S(0x0706050403020100UL >> ((offset & 3) * 8));
#endif
switch (offset_switch)
{
case 0:
w0[3] = hc_byte_perm_S (w0[3], w0[2], selector);
w0[2] = hc_byte_perm_S (w0[2], w0[1], selector);
w0[1] = hc_byte_perm_S (w0[1], w0[0], selector);
w0[0] = hc_byte_perm_S (w0[0], 0, selector);
break;
case 1:
w0[3] = hc_byte_perm_S (w0[2], w0[1], selector);
w0[2] = hc_byte_perm_S (w0[1], w0[0], selector);
w0[1] = hc_byte_perm_S (w0[0], 0, selector);
w0[0] = 0;
break;
case 2:
w0[3] = hc_byte_perm_S (w0[1], w0[0], selector);
w0[2] = hc_byte_perm_S (w0[0], 0, selector);
w0[1] = 0;
w0[0] = 0;
break;
case 3:
w0[3] = hc_byte_perm_S (w0[0], 0, selector);
w0[2] = 0;
w0[1] = 0;
w0[0] = 0;
break;
default:
w0[3] = 0;
w0[2] = 0;
w0[1] = 0;
w0[0] = 0;
break;
}
#endif
}
DECLSPEC void aes256_scrt_format (PRIVATE_AS u32 *aes_ks, PRIVATE_AS u32 *pw, const u32 pw_len, PRIVATE_AS u32 *hash, PRIVATE_AS u32 *out, SHM_TYPE u32 *s_te0, SHM_TYPE u32 *s_te1, SHM_TYPE u32 *s_te2, SHM_TYPE u32 *s_te3, SHM_TYPE u32 *s_te4)

View File

@ -107,7 +107,7 @@ DECLSPEC u32 base64_encode_three_bytes_better (u32 in)
return out;
}
DECLSPEC void base64_encode_sha256 (u32 *out, const u32 *in)
DECLSPEC void base64_encode_sha256 (PRIVATE_AS u32 *out, PRIVATE_AS const u32 *in)
{
out[0] = base64_encode_three_bytes_better( (in[0] >> 8));
out[1] = base64_encode_three_bytes_better((in[0] << 16) | (in[1] >> 16));

View File

@ -41,7 +41,7 @@ CONSTANT_VK u32 newdes_rotor[256] =
0x3a, 0x37, 0x03, 0xf4, 0x61, 0xc5, 0xee, 0xe3, 0x76, 0x31, 0x4f, 0xe6, 0xdf, 0xa5, 0x99, 0x3b,
};
DECLSPEC void new_des (u32 *block, u32 *newdes_key)
DECLSPEC void new_des (PRIVATE_AS u32 *block, PRIVATE_AS u32 *newdes_key)
{
#define B0 (*(block+0))
#define B1 (*(block+1))
@ -71,7 +71,7 @@ DECLSPEC void new_des (u32 *block, u32 *newdes_key)
B7 = B7 ^ newdes_rotor[B3 ^ *(newdes_key++)];
}
DECLSPEC void key_expansion (const u8 *sha1sum, u32 *result)
DECLSPEC void key_expansion (PRIVATE_AS const u8 *sha1sum, PRIVATE_AS u32 *result)
{
for (int count = 0; count < 15; count++)
{
@ -143,7 +143,7 @@ KERNEL_FQ KERNEL_FA void m32700_init (KERN_ATTR_TMPS (sha1_tmp_t))
// Crate a NewDES key
u32 newdes_key32[60];
key_expansion ((const u8 *) ctx.h, newdes_key32);
key_expansion ((PRIVATE_AS const u8 *) ctx.h, newdes_key32);
for (int i = 0; i < 60; i++)
{
@ -182,7 +182,7 @@ KERNEL_FQ KERNEL_FA void m32700_loop (KERN_ATTR_TMPS (sha1_tmp_t))
}
// Run 1000 iterations of NewDES on the derived salt
for (int i = 0; i < LOOP_CNT; i++)
for (u32 i = 0; i < LOOP_CNT; i++)
{
new_des (salt32, newdes_key32);
}

335
OpenCL/m32800_a0-pure.cl Normal file
View File

@ -0,0 +1,335 @@
/**
* Author......: See docs/credits.txt
* License.....: MIT
*/
//#define NEW_SIMD_CODE
#ifdef KERNEL_STATIC
#include M2S(INCLUDE_PATH/inc_vendor.h)
#include M2S(INCLUDE_PATH/inc_types.h)
#include M2S(INCLUDE_PATH/inc_platform.cl)
#include M2S(INCLUDE_PATH/inc_common.cl)
#include M2S(INCLUDE_PATH/inc_rp.h)
#include M2S(INCLUDE_PATH/inc_rp.cl)
#include M2S(INCLUDE_PATH/inc_scalar.cl)
#include M2S(INCLUDE_PATH/inc_hash_md5.cl)
#endif
#if VECT_SIZE == 1
#define uint_to_hex_lower8(i) make_u32x (l_bin2asc[(i)])
#elif VECT_SIZE == 2
#define uint_to_hex_lower8(i) make_u32x (l_bin2asc[(i).s0], l_bin2asc[(i).s1])
#elif VECT_SIZE == 4
#define uint_to_hex_lower8(i) make_u32x (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3])
#elif VECT_SIZE == 8
#define uint_to_hex_lower8(i) make_u32x (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3], l_bin2asc[(i).s4], l_bin2asc[(i).s5], l_bin2asc[(i).s6], l_bin2asc[(i).s7])
#elif VECT_SIZE == 16
#define uint_to_hex_lower8(i) make_u32x (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3], l_bin2asc[(i).s4], l_bin2asc[(i).s5], l_bin2asc[(i).s6], l_bin2asc[(i).s7], l_bin2asc[(i).s8], l_bin2asc[(i).s9], l_bin2asc[(i).sa], l_bin2asc[(i).sb], l_bin2asc[(i).sc], l_bin2asc[(i).sd], l_bin2asc[(i).se], l_bin2asc[(i).sf])
#endif
// esalt layout shared with the host module for -m 32800. Two independent
// salts are used at different stages of the scheme below.
// NOTE(review): field order and array sizes must match the host-side
// struct definition exactly -- do not reorder.
typedef struct md5_double_salt
{
  u32 salt1_buf[64]; // salt #1 bytes, packed into little-endian u32 words
  int salt1_len;     // salt #1 length in bytes
  u32 salt2_buf[64]; // salt #2 bytes, packed into little-endian u32 words
  int salt2_len;     // salt #2 length in bytes

} md5_double_salt_t;

// -m 32800, attack mode a0 (wordlist + rules), multi-hash kernel.
// Per candidate password pw it computes:
//   md5( hex( md5( hex( md5( pw . salt1 ) ) ) ) . salt2 )
// where hex() is 32-char lowercase ASCII hex of the previous digest,
// and compares the result against all loaded digests.
KERNEL_FQ KERNEL_FA void m32800_mxx (KERN_ATTR_RULES_ESALT (md5_double_salt_t))
{
  /**
   * modifier
   */

  const u64 gid = get_global_id (0);
  const u64 lid = get_local_id (0);
  const u64 lsz = get_local_size (0);

  /**
   * bin2asc lowercase table: maps a byte value to its two ASCII hex
   * characters (low nibble in bits 8..15, high nibble in bits 0..7),
   * cooperatively filled by the work-group
   */

  LOCAL_VK u32 l_bin2asc[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    const u32 i0 = (i >> 0) & 15;
    const u32 i1 = (i >> 4) & 15;

    l_bin2asc[i] = ((i0 < 10) ? '0' + i0 : 'a' - 10 + i0) << 8
                 | ((i1 < 10) ? '0' + i1 : 'a' - 10 + i1) << 0;
  }

  SYNC_THREADS ();

  if (gid >= GID_CNT) return;

  /**
   * base
   */

  COPY_PW (pws[gid]);

  // copy both salts from the esalt buffer into private memory

  const u32 salt1_len = esalt_bufs[DIGESTS_OFFSET_HOST].salt1_len;

  u32 salt1_buf[64] = { 0 };

  for (u32 i = 0, idx = 0; i < salt1_len; i += 4, idx += 1)
  {
    salt1_buf[idx] = esalt_bufs[DIGESTS_OFFSET_HOST].salt1_buf[idx];
  }

  const u32 salt2_len = esalt_bufs[DIGESTS_OFFSET_HOST].salt2_len;

  u32 salt2_buf[64] = { 0 };

  for (u32 i = 0, idx = 0; i < salt2_len; i += 4, idx += 1)
  {
    salt2_buf[idx] = esalt_bufs[DIGESTS_OFFSET_HOST].salt2_buf[idx];
  }

  /**
   * loop
   */

  for (u32 il_pos = 0; il_pos < IL_CNT; il_pos++)
  {
    pw_t tmp = PASTE_PW;

    tmp.pw_len = apply_rules (rules_buf[il_pos].cmds, tmp.i, tmp.pw_len);

    // inner hash: md5 (pw . salt1)

    md5_ctx_t ctx0;

    md5_init (&ctx0);

    md5_update (&ctx0, tmp.i, tmp.pw_len);

    md5_update (&ctx0, salt1_buf, salt1_len);

    md5_final (&ctx0);

    u32 a = ctx0.h[0];
    u32 b = ctx0.h[1];
    u32 c = ctx0.h[2];
    u32 d = ctx0.h[3];

    // second pass: md5 of the 32-char lowercase hex of the inner digest

    md5_ctx_t ctx;

    md5_init (&ctx);

    ctx.w0[0] = uint_to_hex_lower8 ((a >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((a >>  8) & 255) << 16;
    ctx.w0[1] = uint_to_hex_lower8 ((a >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((a >> 24) & 255) << 16;
    ctx.w0[2] = uint_to_hex_lower8 ((b >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((b >>  8) & 255) << 16;
    ctx.w0[3] = uint_to_hex_lower8 ((b >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((b >> 24) & 255) << 16;
    ctx.w1[0] = uint_to_hex_lower8 ((c >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((c >>  8) & 255) << 16;
    ctx.w1[1] = uint_to_hex_lower8 ((c >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((c >> 24) & 255) << 16;
    ctx.w1[2] = uint_to_hex_lower8 ((d >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((d >>  8) & 255) << 16;
    ctx.w1[3] = uint_to_hex_lower8 ((d >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((d >> 24) & 255) << 16;

    ctx.len = 32; // 32 hex characters buffered above

    md5_final (&ctx);

    a = ctx.h[0];
    b = ctx.h[1];
    c = ctx.h[2];
    d = ctx.h[3];

    // final pass: md5 (hex (previous digest) . salt2)

    md5_init (&ctx);

    ctx.w0[0] = uint_to_hex_lower8 ((a >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((a >>  8) & 255) << 16;
    ctx.w0[1] = uint_to_hex_lower8 ((a >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((a >> 24) & 255) << 16;
    ctx.w0[2] = uint_to_hex_lower8 ((b >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((b >>  8) & 255) << 16;
    ctx.w0[3] = uint_to_hex_lower8 ((b >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((b >> 24) & 255) << 16;
    ctx.w1[0] = uint_to_hex_lower8 ((c >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((c >>  8) & 255) << 16;
    ctx.w1[1] = uint_to_hex_lower8 ((c >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((c >> 24) & 255) << 16;
    ctx.w1[2] = uint_to_hex_lower8 ((d >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((d >>  8) & 255) << 16;
    ctx.w1[3] = uint_to_hex_lower8 ((d >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((d >> 24) & 255) << 16;

    ctx.len = 32;

    md5_update (&ctx, salt2_buf, salt2_len);

    md5_final (&ctx);

    const u32 r0 = ctx.h[DGST_R0];
    const u32 r1 = ctx.h[DGST_R1];
    const u32 r2 = ctx.h[DGST_R2];
    const u32 r3 = ctx.h[DGST_R3];

    COMPARE_M_SCALAR (r0, r1, r2, r3);
  }
}

// -m 32800, attack mode a0 (wordlist + rules), single-hash kernel.
// Same construction as m32800_mxx but compares against one target
// digest loaded into search[].
KERNEL_FQ KERNEL_FA void m32800_sxx (KERN_ATTR_RULES_ESALT (md5_double_salt_t))
{
  /**
   * modifier
   */

  const u64 gid = get_global_id (0);
  const u64 lid = get_local_id (0);
  const u64 lsz = get_local_size (0);

  /**
   * bin2asc lowercase table (byte value -> two ASCII hex characters)
   */

  LOCAL_VK u32 l_bin2asc[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    const u32 i0 = (i >> 0) & 15;
    const u32 i1 = (i >> 4) & 15;

    l_bin2asc[i] = ((i0 < 10) ? '0' + i0 : 'a' - 10 + i0) << 8
                 | ((i1 < 10) ? '0' + i1 : 'a' - 10 + i1) << 0;
  }

  SYNC_THREADS ();

  if (gid >= GID_CNT) return;

  /**
   * digest
   */

  const u32 search[4] =
  {
    digests_buf[DIGESTS_OFFSET_HOST].digest_buf[DGST_R0],
    digests_buf[DIGESTS_OFFSET_HOST].digest_buf[DGST_R1],
    digests_buf[DIGESTS_OFFSET_HOST].digest_buf[DGST_R2],
    digests_buf[DIGESTS_OFFSET_HOST].digest_buf[DGST_R3]
  };

  /**
   * base
   */

  COPY_PW (pws[gid]);

  const u32 salt1_len = esalt_bufs[DIGESTS_OFFSET_HOST].salt1_len;

  u32 salt1_buf[64] = { 0 };

  for (u32 i = 0, idx = 0; i < salt1_len; i += 4, idx += 1)
  {
    salt1_buf[idx] = esalt_bufs[DIGESTS_OFFSET_HOST].salt1_buf[idx];
  }

  const u32 salt2_len = esalt_bufs[DIGESTS_OFFSET_HOST].salt2_len;

  u32 salt2_buf[64] = { 0 };

  for (u32 i = 0, idx = 0; i < salt2_len; i += 4, idx += 1)
  {
    salt2_buf[idx] = esalt_bufs[DIGESTS_OFFSET_HOST].salt2_buf[idx];
  }

  /**
   * loop
   */

  for (u32 il_pos = 0; il_pos < IL_CNT; il_pos++)
  {
    pw_t tmp = PASTE_PW;

    tmp.pw_len = apply_rules (rules_buf[il_pos].cmds, tmp.i, tmp.pw_len);

    // inner hash: md5 (pw . salt1)

    md5_ctx_t ctx0;

    md5_init (&ctx0);

    md5_update (&ctx0, tmp.i, tmp.pw_len);

    md5_update (&ctx0, salt1_buf, salt1_len);

    md5_final (&ctx0);

    u32 a = ctx0.h[0];
    u32 b = ctx0.h[1];
    u32 c = ctx0.h[2];
    u32 d = ctx0.h[3];

    // second pass: md5 of the 32-char lowercase hex of the inner digest

    md5_ctx_t ctx;

    md5_init (&ctx);

    ctx.w0[0] = uint_to_hex_lower8 ((a >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((a >>  8) & 255) << 16;
    ctx.w0[1] = uint_to_hex_lower8 ((a >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((a >> 24) & 255) << 16;
    ctx.w0[2] = uint_to_hex_lower8 ((b >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((b >>  8) & 255) << 16;
    ctx.w0[3] = uint_to_hex_lower8 ((b >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((b >> 24) & 255) << 16;
    ctx.w1[0] = uint_to_hex_lower8 ((c >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((c >>  8) & 255) << 16;
    ctx.w1[1] = uint_to_hex_lower8 ((c >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((c >> 24) & 255) << 16;
    ctx.w1[2] = uint_to_hex_lower8 ((d >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((d >>  8) & 255) << 16;
    ctx.w1[3] = uint_to_hex_lower8 ((d >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((d >> 24) & 255) << 16;

    ctx.len = 32;

    md5_final (&ctx);

    a = ctx.h[0];
    b = ctx.h[1];
    c = ctx.h[2];
    d = ctx.h[3];

    // final pass: md5 (hex (previous digest) . salt2)

    md5_init (&ctx);

    ctx.w0[0] = uint_to_hex_lower8 ((a >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((a >>  8) & 255) << 16;
    ctx.w0[1] = uint_to_hex_lower8 ((a >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((a >> 24) & 255) << 16;
    ctx.w0[2] = uint_to_hex_lower8 ((b >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((b >>  8) & 255) << 16;
    ctx.w0[3] = uint_to_hex_lower8 ((b >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((b >> 24) & 255) << 16;
    ctx.w1[0] = uint_to_hex_lower8 ((c >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((c >>  8) & 255) << 16;
    ctx.w1[1] = uint_to_hex_lower8 ((c >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((c >> 24) & 255) << 16;
    ctx.w1[2] = uint_to_hex_lower8 ((d >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((d >>  8) & 255) << 16;
    ctx.w1[3] = uint_to_hex_lower8 ((d >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((d >> 24) & 255) << 16;

    ctx.len = 32;

    md5_update (&ctx, salt2_buf, salt2_len);

    md5_final (&ctx);

    const u32 r0 = ctx.h[DGST_R0];
    const u32 r1 = ctx.h[DGST_R1];
    const u32 r2 = ctx.h[DGST_R2];
    const u32 r3 = ctx.h[DGST_R3];

    COMPARE_S_SCALAR (r0, r1, r2, r3);
  }
}

329
OpenCL/m32800_a1-pure.cl Normal file
View File

@ -0,0 +1,329 @@
/**
* Author......: See docs/credits.txt
* License.....: MIT
*/
//#define NEW_SIMD_CODE
#ifdef KERNEL_STATIC
#include M2S(INCLUDE_PATH/inc_vendor.h)
#include M2S(INCLUDE_PATH/inc_types.h)
#include M2S(INCLUDE_PATH/inc_platform.cl)
#include M2S(INCLUDE_PATH/inc_common.cl)
#include M2S(INCLUDE_PATH/inc_scalar.cl)
#include M2S(INCLUDE_PATH/inc_hash_md5.cl)
#endif
#if VECT_SIZE == 1
#define uint_to_hex_lower8(i) make_u32x (l_bin2asc[(i)])
#elif VECT_SIZE == 2
#define uint_to_hex_lower8(i) make_u32x (l_bin2asc[(i).s0], l_bin2asc[(i).s1])
#elif VECT_SIZE == 4
#define uint_to_hex_lower8(i) make_u32x (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3])
#elif VECT_SIZE == 8
#define uint_to_hex_lower8(i) make_u32x (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3], l_bin2asc[(i).s4], l_bin2asc[(i).s5], l_bin2asc[(i).s6], l_bin2asc[(i).s7])
#elif VECT_SIZE == 16
#define uint_to_hex_lower8(i) make_u32x (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3], l_bin2asc[(i).s4], l_bin2asc[(i).s5], l_bin2asc[(i).s6], l_bin2asc[(i).s7], l_bin2asc[(i).s8], l_bin2asc[(i).s9], l_bin2asc[(i).sa], l_bin2asc[(i).sb], l_bin2asc[(i).sc], l_bin2asc[(i).sd], l_bin2asc[(i).se], l_bin2asc[(i).sf])
#endif
// esalt layout shared with the host module for -m 32800 (see module
// source). Field order and array sizes must match the host side exactly.
typedef struct md5_double_salt
{
  u32 salt1_buf[64]; // salt #1 bytes, packed into little-endian u32 words
  int salt1_len;     // salt #1 length in bytes
  u32 salt2_buf[64]; // salt #2 bytes, packed into little-endian u32 words
  int salt2_len;     // salt #2 length in bytes

} md5_double_salt_t;

// -m 32800, attack mode a1 (combinator), multi-hash kernel.
// Candidate = left word (pws) . right word (combs_buf); computes
//   md5( hex( md5( hex( md5( candidate . salt1 ) ) ) ) . salt2 )
// and compares against all loaded digests.
KERNEL_FQ KERNEL_FA void m32800_mxx (KERN_ATTR_ESALT (md5_double_salt_t))
{
  /**
   * modifier
   */

  const u64 gid = get_global_id (0);
  const u64 lid = get_local_id (0);
  const u64 lsz = get_local_size (0);

  /*
   * bin2asc lowercase table (byte value -> two ASCII hex characters)
   */

  LOCAL_VK u32 l_bin2asc[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    const u32 i0 = (i >> 0) & 15;
    const u32 i1 = (i >> 4) & 15;

    l_bin2asc[i] = ((i0 < 10) ? '0' + i0 : 'a' - 10 + i0) << 8
                 | ((i1 < 10) ? '0' + i1 : 'a' - 10 + i1) << 0;
  }

  SYNC_THREADS ();

  if (gid >= GID_CNT) return;

  // copy both salts from the esalt buffer into private memory

  const u32 salt1_len = esalt_bufs[DIGESTS_OFFSET_HOST].salt1_len;

  u32 salt1_buf[64] = { 0 };

  for (u32 i = 0, idx = 0; i < salt1_len; i += 4, idx += 1)
  {
    salt1_buf[idx] = esalt_bufs[DIGESTS_OFFSET_HOST].salt1_buf[idx];
  }

  const u32 salt2_len = esalt_bufs[DIGESTS_OFFSET_HOST].salt2_len;

  u32 salt2_buf[64] = { 0 };

  for (u32 i = 0, idx = 0; i < salt2_len; i += 4, idx += 1)
  {
    salt2_buf[idx] = esalt_bufs[DIGESTS_OFFSET_HOST].salt2_buf[idx];
  }

  /**
   * base: hash the left word once; each loop iteration forks this context
   */

  md5_ctx_t ctx0;

  md5_init (&ctx0);

  md5_update_global (&ctx0, pws[gid].i, pws[gid].pw_len);

  /**
   * loop
   */

  for (u32 il_pos = 0; il_pos < IL_CNT; il_pos++)
  {
    // inner hash: md5 (left . right . salt1)

    md5_ctx_t ctx1 = ctx0;

    md5_update_global (&ctx1, combs_buf[il_pos].i, combs_buf[il_pos].pw_len);

    md5_update (&ctx1, salt1_buf, salt1_len);

    md5_final (&ctx1);

    u32 a = ctx1.h[0];
    u32 b = ctx1.h[1];
    u32 c = ctx1.h[2];
    u32 d = ctx1.h[3];

    // second pass: md5 of the 32-char lowercase hex of the inner digest

    md5_ctx_t ctx;

    md5_init (&ctx);

    ctx.w0[0] = uint_to_hex_lower8 ((a >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((a >>  8) & 255) << 16;
    ctx.w0[1] = uint_to_hex_lower8 ((a >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((a >> 24) & 255) << 16;
    ctx.w0[2] = uint_to_hex_lower8 ((b >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((b >>  8) & 255) << 16;
    ctx.w0[3] = uint_to_hex_lower8 ((b >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((b >> 24) & 255) << 16;
    ctx.w1[0] = uint_to_hex_lower8 ((c >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((c >>  8) & 255) << 16;
    ctx.w1[1] = uint_to_hex_lower8 ((c >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((c >> 24) & 255) << 16;
    ctx.w1[2] = uint_to_hex_lower8 ((d >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((d >>  8) & 255) << 16;
    ctx.w1[3] = uint_to_hex_lower8 ((d >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((d >> 24) & 255) << 16;

    ctx.len = 32; // 32 hex characters buffered above

    md5_final (&ctx);

    a = ctx.h[0];
    b = ctx.h[1];
    c = ctx.h[2];
    d = ctx.h[3];

    // final pass: md5 (hex (previous digest) . salt2)

    md5_init (&ctx);

    ctx.w0[0] = uint_to_hex_lower8 ((a >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((a >>  8) & 255) << 16;
    ctx.w0[1] = uint_to_hex_lower8 ((a >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((a >> 24) & 255) << 16;
    ctx.w0[2] = uint_to_hex_lower8 ((b >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((b >>  8) & 255) << 16;
    ctx.w0[3] = uint_to_hex_lower8 ((b >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((b >> 24) & 255) << 16;
    ctx.w1[0] = uint_to_hex_lower8 ((c >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((c >>  8) & 255) << 16;
    ctx.w1[1] = uint_to_hex_lower8 ((c >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((c >> 24) & 255) << 16;
    ctx.w1[2] = uint_to_hex_lower8 ((d >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((d >>  8) & 255) << 16;
    ctx.w1[3] = uint_to_hex_lower8 ((d >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((d >> 24) & 255) << 16;

    ctx.len = 32;

    md5_update (&ctx, salt2_buf, salt2_len);

    md5_final (&ctx);

    const u32 r0 = ctx.h[DGST_R0];
    const u32 r1 = ctx.h[DGST_R1];
    const u32 r2 = ctx.h[DGST_R2];
    const u32 r3 = ctx.h[DGST_R3];

    COMPARE_M_SCALAR (r0, r1, r2, r3);
  }
}

// -m 32800, attack mode a1 (combinator), single-hash kernel.
// Same construction as m32800_mxx but compares against one target
// digest loaded into search[].
KERNEL_FQ KERNEL_FA void m32800_sxx (KERN_ATTR_ESALT (md5_double_salt_t))
{
  /**
   * modifier
   */

  const u64 gid = get_global_id (0);
  const u64 lid = get_local_id (0);
  const u64 lsz = get_local_size (0);

  /*
   * bin2asc lowercase table (byte value -> two ASCII hex characters)
   */

  LOCAL_VK u32 l_bin2asc[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    const u32 i0 = (i >> 0) & 15;
    const u32 i1 = (i >> 4) & 15;

    l_bin2asc[i] = ((i0 < 10) ? '0' + i0 : 'a' - 10 + i0) << 8
                 | ((i1 < 10) ? '0' + i1 : 'a' - 10 + i1) << 0;
  }

  SYNC_THREADS ();

  if (gid >= GID_CNT) return;

  /**
   * digest
   */

  const u32 search[4] =
  {
    digests_buf[DIGESTS_OFFSET_HOST].digest_buf[DGST_R0],
    digests_buf[DIGESTS_OFFSET_HOST].digest_buf[DGST_R1],
    digests_buf[DIGESTS_OFFSET_HOST].digest_buf[DGST_R2],
    digests_buf[DIGESTS_OFFSET_HOST].digest_buf[DGST_R3]
  };

  const u32 salt1_len = esalt_bufs[DIGESTS_OFFSET_HOST].salt1_len;

  u32 salt1_buf[64] = { 0 };

  for (u32 i = 0, idx = 0; i < salt1_len; i += 4, idx += 1)
  {
    salt1_buf[idx] = esalt_bufs[DIGESTS_OFFSET_HOST].salt1_buf[idx];
  }

  const u32 salt2_len = esalt_bufs[DIGESTS_OFFSET_HOST].salt2_len;

  u32 salt2_buf[64] = { 0 };

  for (u32 i = 0, idx = 0; i < salt2_len; i += 4, idx += 1)
  {
    salt2_buf[idx] = esalt_bufs[DIGESTS_OFFSET_HOST].salt2_buf[idx];
  }

  /**
   * base: hash the left word once; each loop iteration forks this context
   */

  md5_ctx_t ctx0;

  md5_init (&ctx0);

  md5_update_global (&ctx0, pws[gid].i, pws[gid].pw_len);

  /**
   * loop
   */

  for (u32 il_pos = 0; il_pos < IL_CNT; il_pos++)
  {
    // inner hash: md5 (left . right . salt1)

    md5_ctx_t ctx1 = ctx0;

    md5_update_global (&ctx1, combs_buf[il_pos].i, combs_buf[il_pos].pw_len);

    md5_update (&ctx1, salt1_buf, salt1_len);

    md5_final (&ctx1);

    u32 a = ctx1.h[0];
    u32 b = ctx1.h[1];
    u32 c = ctx1.h[2];
    u32 d = ctx1.h[3];

    // second pass: md5 of the 32-char lowercase hex of the inner digest

    md5_ctx_t ctx;

    md5_init (&ctx);

    ctx.w0[0] = uint_to_hex_lower8 ((a >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((a >>  8) & 255) << 16;
    ctx.w0[1] = uint_to_hex_lower8 ((a >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((a >> 24) & 255) << 16;
    ctx.w0[2] = uint_to_hex_lower8 ((b >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((b >>  8) & 255) << 16;
    ctx.w0[3] = uint_to_hex_lower8 ((b >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((b >> 24) & 255) << 16;
    ctx.w1[0] = uint_to_hex_lower8 ((c >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((c >>  8) & 255) << 16;
    ctx.w1[1] = uint_to_hex_lower8 ((c >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((c >> 24) & 255) << 16;
    ctx.w1[2] = uint_to_hex_lower8 ((d >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((d >>  8) & 255) << 16;
    ctx.w1[3] = uint_to_hex_lower8 ((d >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((d >> 24) & 255) << 16;

    ctx.len = 32;

    md5_final (&ctx);

    a = ctx.h[0];
    b = ctx.h[1];
    c = ctx.h[2];
    d = ctx.h[3];

    // final pass: md5 (hex (previous digest) . salt2)

    md5_init (&ctx);

    ctx.w0[0] = uint_to_hex_lower8 ((a >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((a >>  8) & 255) << 16;
    ctx.w0[1] = uint_to_hex_lower8 ((a >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((a >> 24) & 255) << 16;
    ctx.w0[2] = uint_to_hex_lower8 ((b >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((b >>  8) & 255) << 16;
    ctx.w0[3] = uint_to_hex_lower8 ((b >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((b >> 24) & 255) << 16;
    ctx.w1[0] = uint_to_hex_lower8 ((c >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((c >>  8) & 255) << 16;
    ctx.w1[1] = uint_to_hex_lower8 ((c >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((c >> 24) & 255) << 16;
    ctx.w1[2] = uint_to_hex_lower8 ((d >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((d >>  8) & 255) << 16;
    ctx.w1[3] = uint_to_hex_lower8 ((d >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((d >> 24) & 255) << 16;

    ctx.len = 32;

    md5_update (&ctx, salt2_buf, salt2_len);

    md5_final (&ctx);

    const u32 r0 = ctx.h[DGST_R0];
    const u32 r1 = ctx.h[DGST_R1];
    const u32 r2 = ctx.h[DGST_R2];
    const u32 r3 = ctx.h[DGST_R3];

    COMPARE_S_SCALAR (r0, r1, r2, r3);
  }
}

355
OpenCL/m32800_a3-pure.cl Normal file
View File

@ -0,0 +1,355 @@
/**
* Author......: See docs/credits.txt
* License.....: MIT
*/
#define NEW_SIMD_CODE
#ifdef KERNEL_STATIC
#include M2S(INCLUDE_PATH/inc_vendor.h)
#include M2S(INCLUDE_PATH/inc_types.h)
#include M2S(INCLUDE_PATH/inc_platform.cl)
#include M2S(INCLUDE_PATH/inc_common.cl)
#include M2S(INCLUDE_PATH/inc_simd.cl)
#include M2S(INCLUDE_PATH/inc_hash_md5.cl)
#endif
#if VECT_SIZE == 1
#define uint_to_hex_lower8(i) make_u32x (l_bin2asc[(i)])
#elif VECT_SIZE == 2
#define uint_to_hex_lower8(i) make_u32x (l_bin2asc[(i).s0], l_bin2asc[(i).s1])
#elif VECT_SIZE == 4
#define uint_to_hex_lower8(i) make_u32x (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3])
#elif VECT_SIZE == 8
#define uint_to_hex_lower8(i) make_u32x (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3], l_bin2asc[(i).s4], l_bin2asc[(i).s5], l_bin2asc[(i).s6], l_bin2asc[(i).s7])
#elif VECT_SIZE == 16
#define uint_to_hex_lower8(i) make_u32x (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3], l_bin2asc[(i).s4], l_bin2asc[(i).s5], l_bin2asc[(i).s6], l_bin2asc[(i).s7], l_bin2asc[(i).s8], l_bin2asc[(i).s9], l_bin2asc[(i).sa], l_bin2asc[(i).sb], l_bin2asc[(i).sc], l_bin2asc[(i).sd], l_bin2asc[(i).se], l_bin2asc[(i).sf])
#endif
// esalt layout shared with the host module for -m 32800 (see module
// source). Field order and array sizes must match the host side exactly.
typedef struct md5_double_salt
{
  u32 salt1_buf[64]; // salt #1 bytes, packed into little-endian u32 words
  int salt1_len;     // salt #1 length in bytes
  u32 salt2_buf[64]; // salt #2 bytes, packed into little-endian u32 words
  int salt2_len;     // salt #2 length in bytes

} md5_double_salt_t;

// -m 32800, attack mode a3 (brute-force / mask), multi-hash kernel.
// SIMD variant: u32x lanes process VECT_SIZE candidates per iteration;
// the first word of the password is varied via words_buf_r. Computes
//   md5( hex( md5( hex( md5( pw . salt1 ) ) ) ) . salt2 )
// and compares against all loaded digests.
KERNEL_FQ KERNEL_FA void m32800_mxx (KERN_ATTR_VECTOR_ESALT (md5_double_salt_t))
{
  /**
   * modifier
   */

  const u64 gid = get_global_id (0);
  const u64 lid = get_local_id (0);
  const u64 lsz = get_local_size (0);

  /*
   * bin2asc lowercase table (byte value -> two ASCII hex characters)
   */

  LOCAL_VK u32 l_bin2asc[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    const u32 i0 = (i >> 0) & 15;
    const u32 i1 = (i >> 4) & 15;

    l_bin2asc[i] = ((i0 < 10) ? '0' + i0 : 'a' - 10 + i0) << 8
                 | ((i1 < 10) ? '0' + i1 : 'a' - 10 + i1) << 0;
  }

  SYNC_THREADS ();

  if (gid >= GID_CNT) return;

  /**
   * base
   */

  const u32 pw_len = pws[gid].pw_len;

  u32x w[64] = { 0 };

  for (u32 i = 0, idx = 0; i < pw_len; i += 4, idx += 1)
  {
    w[idx] = pws[gid].i[idx];
  }

  // copy both salts from the esalt buffer into private memory

  const u32 salt1_len = esalt_bufs[DIGESTS_OFFSET_HOST].salt1_len;

  u32x salt1_buf[64] = { 0 };

  for (u32 i = 0, idx = 0; i < salt1_len; i += 4, idx += 1)
  {
    salt1_buf[idx] = esalt_bufs[DIGESTS_OFFSET_HOST].salt1_buf[idx];
  }

  const u32 salt2_len = esalt_bufs[DIGESTS_OFFSET_HOST].salt2_len;

  u32x salt2_buf[64] = { 0 };

  for (u32 i = 0, idx = 0; i < salt2_len; i += 4, idx += 1)
  {
    salt2_buf[idx] = esalt_bufs[DIGESTS_OFFSET_HOST].salt2_buf[idx];
  }

  /**
   * loop
   */

  u32x w0l = w[0];

  for (u32 il_pos = 0; il_pos < IL_CNT; il_pos += VECT_SIZE)
  {
    // merge the per-lane candidate bytes into the first password word

    const u32x w0r = words_buf_r[il_pos / VECT_SIZE];

    const u32x w0 = w0l | w0r;

    w[0] = w0;

    // inner hash: md5 (pw . salt1)

    md5_ctx_vector_t ctx0;

    md5_init_vector (&ctx0);

    md5_update_vector (&ctx0, w, pw_len);

    md5_update_vector (&ctx0, salt1_buf, salt1_len);

    md5_final_vector (&ctx0);

    u32x a = ctx0.h[0];
    u32x b = ctx0.h[1];
    u32x c = ctx0.h[2];
    u32x d = ctx0.h[3];

    // second pass: md5 of the 32-char lowercase hex of the inner digest

    md5_ctx_vector_t ctx;

    md5_init_vector (&ctx);

    ctx.w0[0] = uint_to_hex_lower8 ((a >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((a >>  8) & 255) << 16;
    ctx.w0[1] = uint_to_hex_lower8 ((a >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((a >> 24) & 255) << 16;
    ctx.w0[2] = uint_to_hex_lower8 ((b >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((b >>  8) & 255) << 16;
    ctx.w0[3] = uint_to_hex_lower8 ((b >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((b >> 24) & 255) << 16;
    ctx.w1[0] = uint_to_hex_lower8 ((c >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((c >>  8) & 255) << 16;
    ctx.w1[1] = uint_to_hex_lower8 ((c >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((c >> 24) & 255) << 16;
    ctx.w1[2] = uint_to_hex_lower8 ((d >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((d >>  8) & 255) << 16;
    ctx.w1[3] = uint_to_hex_lower8 ((d >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((d >> 24) & 255) << 16;

    ctx.len = 32; // 32 hex characters buffered above

    md5_final_vector (&ctx);

    a = ctx.h[0];
    b = ctx.h[1];
    c = ctx.h[2];
    d = ctx.h[3];

    // final pass: md5 (hex (previous digest) . salt2)

    md5_init_vector (&ctx);

    ctx.w0[0] = uint_to_hex_lower8 ((a >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((a >>  8) & 255) << 16;
    ctx.w0[1] = uint_to_hex_lower8 ((a >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((a >> 24) & 255) << 16;
    ctx.w0[2] = uint_to_hex_lower8 ((b >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((b >>  8) & 255) << 16;
    ctx.w0[3] = uint_to_hex_lower8 ((b >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((b >> 24) & 255) << 16;
    ctx.w1[0] = uint_to_hex_lower8 ((c >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((c >>  8) & 255) << 16;
    ctx.w1[1] = uint_to_hex_lower8 ((c >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((c >> 24) & 255) << 16;
    ctx.w1[2] = uint_to_hex_lower8 ((d >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((d >>  8) & 255) << 16;
    ctx.w1[3] = uint_to_hex_lower8 ((d >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((d >> 24) & 255) << 16;

    ctx.len = 32;

    md5_update_vector (&ctx, salt2_buf, salt2_len);

    md5_final_vector (&ctx);

    const u32x r0 = ctx.h[DGST_R0];
    const u32x r1 = ctx.h[DGST_R1];
    const u32x r2 = ctx.h[DGST_R2];
    const u32x r3 = ctx.h[DGST_R3];

    COMPARE_M_SIMD (r0, r1, r2, r3);
  }
}

// -m 32800, attack mode a3 (brute-force / mask), single-hash kernel.
// Same SIMD construction as m32800_mxx but compares against one target
// digest loaded into search[].
KERNEL_FQ KERNEL_FA void m32800_sxx (KERN_ATTR_VECTOR_ESALT (md5_double_salt_t))
{
  /**
   * modifier
   */

  const u64 gid = get_global_id (0);
  const u64 lid = get_local_id (0);
  const u64 lsz = get_local_size (0);

  /*
   * bin2asc lowercase table (byte value -> two ASCII hex characters)
   */

  LOCAL_VK u32 l_bin2asc[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    const u32 i0 = (i >> 0) & 15;
    const u32 i1 = (i >> 4) & 15;

    l_bin2asc[i] = ((i0 < 10) ? '0' + i0 : 'a' - 10 + i0) << 8
                 | ((i1 < 10) ? '0' + i1 : 'a' - 10 + i1) << 0;
  }

  SYNC_THREADS ();

  if (gid >= GID_CNT) return;

  /**
   * digest
   */

  const u32 search[4] =
  {
    digests_buf[DIGESTS_OFFSET_HOST].digest_buf[DGST_R0],
    digests_buf[DIGESTS_OFFSET_HOST].digest_buf[DGST_R1],
    digests_buf[DIGESTS_OFFSET_HOST].digest_buf[DGST_R2],
    digests_buf[DIGESTS_OFFSET_HOST].digest_buf[DGST_R3]
  };

  /**
   * base
   */

  const u32 pw_len = pws[gid].pw_len;

  u32x w[64] = { 0 };

  for (u32 i = 0, idx = 0; i < pw_len; i += 4, idx += 1)
  {
    w[idx] = pws[gid].i[idx];
  }

  const u32 salt1_len = esalt_bufs[DIGESTS_OFFSET_HOST].salt1_len;

  u32x salt1_buf[64] = { 0 };

  for (u32 i = 0, idx = 0; i < salt1_len; i += 4, idx += 1)
  {
    salt1_buf[idx] = esalt_bufs[DIGESTS_OFFSET_HOST].salt1_buf[idx];
  }

  const u32 salt2_len = esalt_bufs[DIGESTS_OFFSET_HOST].salt2_len;

  u32x salt2_buf[64] = { 0 };

  for (u32 i = 0, idx = 0; i < salt2_len; i += 4, idx += 1)
  {
    salt2_buf[idx] = esalt_bufs[DIGESTS_OFFSET_HOST].salt2_buf[idx];
  }

  /**
   * loop
   */

  u32x w0l = w[0];

  for (u32 il_pos = 0; il_pos < IL_CNT; il_pos += VECT_SIZE)
  {
    // merge the per-lane candidate bytes into the first password word

    const u32x w0r = words_buf_r[il_pos / VECT_SIZE];

    const u32x w0 = w0l | w0r;

    w[0] = w0;

    // inner hash: md5 (pw . salt1)

    md5_ctx_vector_t ctx0;

    md5_init_vector (&ctx0);

    md5_update_vector (&ctx0, w, pw_len);

    md5_update_vector (&ctx0, salt1_buf, salt1_len);

    md5_final_vector (&ctx0);

    u32x a = ctx0.h[0];
    u32x b = ctx0.h[1];
    u32x c = ctx0.h[2];
    u32x d = ctx0.h[3];

    // second pass: md5 of the 32-char lowercase hex of the inner digest

    md5_ctx_vector_t ctx;

    md5_init_vector (&ctx);

    ctx.w0[0] = uint_to_hex_lower8 ((a >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((a >>  8) & 255) << 16;
    ctx.w0[1] = uint_to_hex_lower8 ((a >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((a >> 24) & 255) << 16;
    ctx.w0[2] = uint_to_hex_lower8 ((b >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((b >>  8) & 255) << 16;
    ctx.w0[3] = uint_to_hex_lower8 ((b >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((b >> 24) & 255) << 16;
    ctx.w1[0] = uint_to_hex_lower8 ((c >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((c >>  8) & 255) << 16;
    ctx.w1[1] = uint_to_hex_lower8 ((c >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((c >> 24) & 255) << 16;
    ctx.w1[2] = uint_to_hex_lower8 ((d >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((d >>  8) & 255) << 16;
    ctx.w1[3] = uint_to_hex_lower8 ((d >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((d >> 24) & 255) << 16;

    ctx.len = 32;

    md5_final_vector (&ctx);

    a = ctx.h[0];
    b = ctx.h[1];
    c = ctx.h[2];
    d = ctx.h[3];

    // final pass: md5 (hex (previous digest) . salt2)

    md5_init_vector (&ctx);

    ctx.w0[0] = uint_to_hex_lower8 ((a >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((a >>  8) & 255) << 16;
    ctx.w0[1] = uint_to_hex_lower8 ((a >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((a >> 24) & 255) << 16;
    ctx.w0[2] = uint_to_hex_lower8 ((b >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((b >>  8) & 255) << 16;
    ctx.w0[3] = uint_to_hex_lower8 ((b >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((b >> 24) & 255) << 16;
    ctx.w1[0] = uint_to_hex_lower8 ((c >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((c >>  8) & 255) << 16;
    ctx.w1[1] = uint_to_hex_lower8 ((c >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((c >> 24) & 255) << 16;
    ctx.w1[2] = uint_to_hex_lower8 ((d >>  0) & 255) <<  0
              | uint_to_hex_lower8 ((d >>  8) & 255) << 16;
    ctx.w1[3] = uint_to_hex_lower8 ((d >> 16) & 255) <<  0
              | uint_to_hex_lower8 ((d >> 24) & 255) << 16;

    ctx.len = 32;

    md5_update_vector (&ctx, salt2_buf, salt2_len);

    md5_final_vector (&ctx);

    const u32x r0 = ctx.h[DGST_R0];
    const u32x r1 = ctx.h[DGST_R1];
    const u32x r2 = ctx.h[DGST_R2];
    const u32x r3 = ctx.h[DGST_R3];

    COMPARE_S_SIMD (r0, r1, r2, r3);
  }
}

View File

@ -1155,64 +1155,205 @@ KERNEL_FQ KERNEL_FA void m33800_init2 (KERN_ATTR_TMPS (bcrypt_tmp_t))
/**
 * m33800_loop2 - second bcrypt expansion loop kernel.
 *
 * NOTE(review): this text appears to come from a rendered diff. The
 * #if/#else forwarding call below and the full inline body that follows
 * it duplicate each other; upstream the inline body was replaced by the
 * forwarding call to m33800_loop — TODO confirm against the repository
 * before building from this text.
 *
 * The loop2 phase is algorithmically identical to the first loop; only
 * the "password" material prepared in init2 differs (see comment below).
 */
KERNEL_FQ KERNEL_FA void m33800_loop2 (KERN_ATTR_TMPS (bcrypt_tmp_t))
{
  // the second loop is the same as the first one, only different "password" in init2

  // CUDA/HIP builds use the g_-prefixed global buffers; other backends
  // (OpenCL/Metal) use the plain names. Both paths forward to m33800_loop.
  #if defined IS_CUDA || defined IS_HIP
  m33800_loop(
    pws,
    g_rules_buf,
    combs_buf,
    g_bfs_buf,
    tmps,
    hooks,
    bitmaps_buf_s1_a,
    bitmaps_buf_s1_b,
    bitmaps_buf_s1_c,
    bitmaps_buf_s1_d,
    bitmaps_buf_s2_a,
    bitmaps_buf_s2_b,
    bitmaps_buf_s2_c,
    bitmaps_buf_s2_d,
    plains_buf,
    digests_buf,
    hashes_shown,
    salt_bufs,
    esalt_bufs,
    d_return_buf,
    d_extra0_buf,
    d_extra1_buf,
    d_extra2_buf,
    d_extra3_buf,
    kernel_param
  );
  #else
  m33800_loop(
    pws,
    rules_buf,
    combs_buf,
    bfs_buf,
    tmps,
    hooks,
    bitmaps_buf_s1_a,
    bitmaps_buf_s1_b,
    bitmaps_buf_s1_c,
    bitmaps_buf_s1_d,
    bitmaps_buf_s2_a,
    bitmaps_buf_s2_b,
    bitmaps_buf_s2_c,
    bitmaps_buf_s2_d,
    plains_buf,
    digests_buf,
    hashes_shown,
    salt_bufs,
    esalt_bufs,
    d_return_buf,
    d_extra0_buf,
    d_extra1_buf,
    d_extra2_buf,
    d_extra3_buf,
    kernel_param
  );
  #endif

  // NOTE(review): everything from here to the closing brace repeats the
  // body of m33800_loop inline; presumably these are the diff's removed
  // lines — verify before use.

  /**
   * base
   */

  const u64 gid = get_global_id (0);
  const u64 lid = get_local_id (0);

  if (gid >= GID_CNT) return;

  // load per-candidate expanded key (E) and P-array state from tmps

  u32 E[18];

  for (u32 i = 0; i < 18; i++)
  {
    E[i] = tmps[gid].E[i];
  }

  u32 P[18];

  for (u32 i = 0; i < 18; i++)
  {
    P[i] = tmps[gid].P[i];
  }

  // Blowfish S-boxes live in local memory, one 4x256-word set per work-item
  #ifdef DYNAMIC_LOCAL
  // from host
  #else
  LOCAL_VK u32 S0_all[FIXED_LOCAL_SIZE][256];
  LOCAL_VK u32 S1_all[FIXED_LOCAL_SIZE][256];
  LOCAL_VK u32 S2_all[FIXED_LOCAL_SIZE][256];
  LOCAL_VK u32 S3_all[FIXED_LOCAL_SIZE][256];
  #endif

  // with BCRYPT_AVOID_BANK_CONFLICTS the four boxes are carved out of one
  // flat buffer S (declared elsewhere); otherwise each lane indexes its row
  #ifdef BCRYPT_AVOID_BANK_CONFLICTS
  LOCAL_AS u32 *S0 = S + (FIXED_LOCAL_SIZE * 256 * 0);
  LOCAL_AS u32 *S1 = S + (FIXED_LOCAL_SIZE * 256 * 1);
  LOCAL_AS u32 *S2 = S + (FIXED_LOCAL_SIZE * 256 * 2);
  LOCAL_AS u32 *S3 = S + (FIXED_LOCAL_SIZE * 256 * 3);
  #else
  LOCAL_AS u32 *S0 = S0_all[lid];
  LOCAL_AS u32 *S1 = S1_all[lid];
  LOCAL_AS u32 *S2 = S2_all[lid];
  LOCAL_AS u32 *S3 = S3_all[lid];
  #endif

  // seed local S-boxes from the per-candidate state in tmps
  for (u32 i = 0; i < 256; i++)
  {
    SET_KEY32 (S0, i, tmps[gid].S0[i]);
    SET_KEY32 (S1, i, tmps[gid].S1[i]);
    SET_KEY32 (S2, i, tmps[gid].S2[i]);
    SET_KEY32 (S3, i, tmps[gid].S3[i]);
  }

  /**
   * salt
   */

  u32 salt_buf[4];

  salt_buf[0] = salt_bufs[SALT_POS_HOST].salt_buf[0];
  salt_buf[1] = salt_bufs[SALT_POS_HOST].salt_buf[1];
  salt_buf[2] = salt_bufs[SALT_POS_HOST].salt_buf[2];
  salt_buf[3] = salt_bufs[SALT_POS_HOST].salt_buf[3];

  /**
   * main loop
   */

  u32 L0;
  u32 R0;

  // each iteration performs one bcrypt cost round: key the cipher with E,
  // rebuild P and all four S-boxes, then repeat the rebuild keyed with salt
  for (u32 i = 0; i < LOOP_CNT; i++)
  {
    // mix the expanded key into the P-array
    // (inner loops intentionally shadow the outer i)
    for (u32 i = 0; i < 18; i++)
    {
      P[i] ^= E[i];
    }

    L0 = 0;
    R0 = 0;

    // regenerate the 18-entry P-array by chained encryption of the zero block
    for (u32 i = 0; i < 9; i++)
    {
      BF_ENCRYPT (L0, R0);

      P[i * 2 + 0] = L0;
      P[i * 2 + 1] = R0;
    }

    // regenerate each 256-entry S-box, two words per encryption
    for (u32 i = 0; i < 256; i += 2)
    {
      BF_ENCRYPT (L0, R0);

      SET_KEY32 (S0, i + 0, L0);
      SET_KEY32 (S0, i + 1, R0);
    }

    for (u32 i = 0; i < 256; i += 2)
    {
      BF_ENCRYPT (L0, R0);

      SET_KEY32 (S1, i + 0, L0);
      SET_KEY32 (S1, i + 1, R0);
    }

    for (u32 i = 0; i < 256; i += 2)
    {
      BF_ENCRYPT (L0, R0);

      SET_KEY32 (S2, i + 0, L0);
      SET_KEY32 (S2, i + 1, R0);
    }

    for (u32 i = 0; i < 256; i += 2)
    {
      BF_ENCRYPT (L0, R0);

      SET_KEY32 (S3, i + 0, L0);
      SET_KEY32 (S3, i + 1, R0);
    }

    // second half of the cost round: mix the 128-bit salt into P (cyclically)
    P[ 0] ^= salt_buf[0];
    P[ 1] ^= salt_buf[1];
    P[ 2] ^= salt_buf[2];
    P[ 3] ^= salt_buf[3];
    P[ 4] ^= salt_buf[0];
    P[ 5] ^= salt_buf[1];
    P[ 6] ^= salt_buf[2];
    P[ 7] ^= salt_buf[3];
    P[ 8] ^= salt_buf[0];
    P[ 9] ^= salt_buf[1];
    P[10] ^= salt_buf[2];
    P[11] ^= salt_buf[3];
    P[12] ^= salt_buf[0];
    P[13] ^= salt_buf[1];
    P[14] ^= salt_buf[2];
    P[15] ^= salt_buf[3];
    P[16] ^= salt_buf[0];
    P[17] ^= salt_buf[1];

    L0 = 0;
    R0 = 0;

    // and rebuild P and the S-boxes again with the salted key schedule
    for (u32 i = 0; i < 9; i++)
    {
      BF_ENCRYPT (L0, R0);

      P[i * 2 + 0] = L0;
      P[i * 2 + 1] = R0;
    }

    for (u32 i = 0; i < 256; i += 2)
    {
      BF_ENCRYPT (L0, R0);

      SET_KEY32 (S0, i + 0, L0);
      SET_KEY32 (S0, i + 1, R0);
    }

    for (u32 i = 0; i < 256; i += 2)
    {
      BF_ENCRYPT (L0, R0);

      SET_KEY32 (S1, i + 0, L0);
      SET_KEY32 (S1, i + 1, R0);
    }

    for (u32 i = 0; i < 256; i += 2)
    {
      BF_ENCRYPT (L0, R0);

      SET_KEY32 (S2, i + 0, L0);
      SET_KEY32 (S2, i + 1, R0);
    }

    for (u32 i = 0; i < 256; i += 2)
    {
      BF_ENCRYPT (L0, R0);

      SET_KEY32 (S3, i + 0, L0);
      SET_KEY32 (S3, i + 1, R0);
    }
  }

  // store the updated P-array and S-boxes back to tmps for the next kernel
  for (u32 i = 0; i < 18; i++)
  {
    tmps[gid].P[i] = P[i];
  }

  for (u32 i = 0; i < 256; i++)
  {
    tmps[gid].S0[i] = GET_KEY32 (S0, i);
    tmps[gid].S1[i] = GET_KEY32 (S1, i);
    tmps[gid].S2[i] = GET_KEY32 (S2, i);
    tmps[gid].S3[i] = GET_KEY32 (S3, i);
  }
}
KERNEL_FQ KERNEL_FA void m33800_comp (KERN_ATTR_TMPS (bcrypt_tmp_t))

View File

@ -58,6 +58,7 @@
- Added hash-mode: md5(md5($salt).md5(md5($pass)))
- Added hash-mode: Domain Cached Credentials 2 (DCC2), MS Cache 2, (NT)
- Added hash-mode: Domain Cached Credentials (DCC), MS Cache (NT)
- Added hash-mode: md5(md5(md5($pass.$salt1)).$salt2)
- Added hash-mode: md5(md5(md5($pass).$salt1).$salt2)
- Added hash-mode: md5(md5(md5($pass)).$salt)
- Added hash-mode: md5(sha1($pass.$salt))
@ -72,9 +73,10 @@
- Added new feature (-Y) that creates N virtual instances for each device in your system at the cost of N times the device memory consumption
- Added options --benchmark-min and --benchmark-max to set a hash-mode range to be used during the benchmark
- Added option --total-candidates to provide the total candidate count for an attack insteda of the internal "--keyspace" value
- Added option --total-candidates to provide the total candidate count for an attack instead of the internal "--keyspace" value
- Added option --backend-devices-keepfree to configure X percentage of device memory available to keep free
- Added display of password length minimum and maximum in the Kernel.Feature status line
- Added the 'edge' test type to test.pl and the corresponding tool test_edge.sh
##
## Performance
@ -89,6 +91,8 @@
- Added verification of token buffer length when using TOKEN_ATTR_FIXED_LENGTH
- Fixed a bug in all SCRYPT-based hash modes with Apple Metal
- Fixed buffer overflow on module_26600.c / module_hash_encode()
- Fixed bug in module_constraints and kernel for hash-mode 7801
- Fixed bug in module_constraints and kernel for hash-mode 7800
- Fixed bug in 18400 module_hash_encode
- Fixed bug in 23800/unrar with Apple Silicon
- Fixed bug in 26900 module_hash_encode
@ -104,6 +108,7 @@
- Fixed build failed for 18400 with Apple Metal
- Fixed build failed for 18600 with Apple Metal
- Fixed build failed for 4410 with vector width > 1
- Fixed build failed for aarch64 (es: rpi)
- Fixed clang identification in src/Makefile
- Fixed build failure for almost all hash modes that make use of hc_swap64 and/or hc_swap64_S with Apple Metal / Apple Silicon
- Fixed debug mode 5 by adding the missing colon between original-word and finding-rule
@ -178,6 +183,7 @@
- Makefile: prevent make failure with Apple Silicon in case of partial rebuild
- Makefile: updated MACOSX_DEPLOYMENT_TARGET to 15.0
- Rules: Rename best64.rule to best66.rule and remove the unknown section from it
- Kernel: Renamed multiple defines in CAST cipher to fit expected naming convention of the C++ language standard
* changes v6.2.5 -> v6.2.6

View File

@ -27,7 +27,8 @@ Gabriele "matrix" Gristina <matrix@hashcat.net> (@gm4tr1x)
* Universal binary on Apple Silicon
* Hardware monitor initial code base and maintenance
* Test suite initial code base and maintenance
* Makefile initial code base
* Edge case testing suite
* Makefile initial code base and maintenance
* Multithreading initial code base
* MultiGPU initial code base
* Benchmarks initial code base

View File

@ -1,6 +1,6 @@
The MIT License (MIT)
Copyright (c) 2015-2023 Jens Steube
Copyright (c) 2015-2025 Jens Steube
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@ -97,6 +97,7 @@ NVIDIA GPUs require "NVIDIA Driver" (440.64 or later) and "CUDA Toolkit" (9.0 or
- md5(md5(md5($pass)))
- md5(md5(md5($pass)).$salt)
- md5(md5(md5($pass).$salt1).$salt2)
- md5(md5(md5($pass.$salt1)).$salt2)
- md5(sha1($pass))
- md5(sha1($pass).$salt)
- md5(sha1($pass).md5($pass).sha1($pass))

View File

@ -18,15 +18,16 @@
#include <termios.h>
#if defined (__APPLE__)
#include <sys/ioctl.h>
#include <sys/sysctl.h>
#endif // __APPLE__
#endif // _WIN
#if !defined (_WIN) && !defined (__CYGWIN__) && !defined (__MSYS__)
#if defined (_POSIX)
#include <sys/utsname.h>
#if !defined (__linux__)
#include <sys/sysctl.h>
#endif // ! __linux__
#endif // ! _WIN && | __CYGWIN__ && ! __MSYS__
#if !defined (__APPLE__)
#include <sys/sysinfo.h>
#endif // ! __APPLE__
#endif // _POSIX
void welcome_screen (hashcat_ctx_t *hashcat_ctx, const char *version_tag);
void goodbye_screen (hashcat_ctx_t *hashcat_ctx, const time_t proc_start, const time_t proc_stop);

View File

@ -85,9 +85,12 @@ AR := /usr/bin/ar
SED := /usr/bin/sed
SED_IN_PLACE := -i ""
DARWIN_VERSION := $(shell uname -r | cut -d. -f1)
IS_APPLE_SILICON := $(shell lipo /bin/zsh -verify_arch arm64e && echo 1 || echo 0)
IS_APPLE_SILICON := $(shell [ "$$(sysctl -in hw.optional.arm64 2>/dev/null)" = "1" ] && echo 1 || echo 0)
endif
IS_AARCH64 := $(shell [ "$$(arch 2>/dev/null)" = "aarch64" ] && echo 1 || echo 0)
IS_ARM := $(or $(filter 1,$(IS_APPLE_SILICON)),$(filter 1,$(IS_AARCH64)))
ifneq (,$(filter $(UNAME),FreeBSD NetBSD))
CC := cc
CXX := c++
@ -236,6 +239,10 @@ ifneq ($(CC),clang)
CFLAGS_UNRAR += -Wno-class-memaccess
CFLAGS_UNRAR += -Wno-misleading-indentation
CFLAGS_UNRAR += -Wno-format-overflow
else
ifeq ($(IS_APPLE_SILICON),0)
CFLAGS_UNRAR += -Wno-nontrivial-memcall
endif
endif
CFLAGS_UNRAR += -Wno-missing-braces
CFLAGS_UNRAR += -Wno-unused-variable
@ -376,8 +383,6 @@ LFLAGS_NATIVE += -lpthread
LFLAGS_NATIVE += -liconv
ifeq ($(IS_APPLE_SILICON),1)
CFLAGS_NATIVE += -DSSE2NEON_SUPPRESS_WARNINGS
CFLAGS_NATIVE += -I$(DEPS_SSE2NEON)
CFLAGS_NATIVE += -arch arm64
CFLAGS_NATIVE += -arch x86_64
ifeq ($(SHARED),1)
@ -388,6 +393,11 @@ endif
endif # Darwin
ifeq ($(IS_ARM),1)
CFLAGS_NATIVE += -DSSE2NEON_SUPPRESS_WARNINGS
CFLAGS_NATIVE += -I$(DEPS_SSE2NEON)
endif
ifeq ($(UNAME),CYGWIN)
CFLAGS_NATIVE := $(CFLAGS)
CFLAGS_NATIVE += -DWITH_HWMON
@ -834,12 +844,12 @@ CFLAGS_LZMA_WIN += -Wno-misleading-indentation
CFLAGS_UNRAR_WIN += -Wno-misleading-indentation
CFLAGS_UNRAR_WIN += -Wno-class-memaccess
endif
ifeq ($(IS_APPLE_SILICON),1)
ifeq ($(IS_ARM),1)
CFLAGS_CROSS_LINUX += -DSSE2NEON_SUPPRESS_WARNINGS
CFLAGS_CROSS_LINUX += -I$(DEPS_SSE2NEON)
endif
endif
##
## Targets

View File

@ -334,6 +334,14 @@ static int autotune (hashcat_ctx_t *hashcat_ctx, hc_device_param_t *device_param
// v7 autotuner is a lot more straight forward
// we start with some purely theoretical values as a base, then move on to some meassured tests
/* This causes more problems than it solves.
* In theory, it's fine to boost accel early to improve accuracy, and it does,
* but on the other hand, it prevents increasing the thread count due to high runtime.
* For longer runtimes, we want to prioritize more threads over higher accel.
* This change also has some downsides for algorithms that actually benefit
* from higher accel and fewer threads (e.g., 7800, 14900). But those are easy to manage
* by limiting thread count, or better, by setting them to OPTS_TYPE_NATIVE_THREADS.
if (hashconfig->attack_exec == ATTACK_EXEC_INSIDE_KERNEL)
{
if (kernel_accel_min < kernel_accel_max)
@ -348,6 +356,7 @@ static int autotune (hashcat_ctx_t *hashcat_ctx, hc_device_param_t *device_param
}
}
}
*/
if (kernel_threads_min < kernel_threads_max)
{

View File

@ -10217,11 +10217,17 @@ int backend_session_begin (hashcat_ctx_t *hashcat_ctx)
const u64 device_available_mem_new = device_available_mem_sav - (device_available_mem_sav * 0.34);
event_log_warning (hashcat_ctx, "* Device #%u: This system does not offer any reliable method to query actual free memory. Estimated base: %" PRIu64, device_id + 1, device_available_mem_sav);
event_log_warning (hashcat_ctx, " Assuming normal desktop activity, reducing estimate by 34%%: %" PRIu64, device_available_mem_new);
event_log_warning (hashcat_ctx, " This can hurt performance drastically, especially on memory-heavy algorithms.");
event_log_warning (hashcat_ctx, " You can adjust this percentage using --backend-devices-keepfree");
event_log_warning (hashcat_ctx, NULL);
if (user_options->quiet == false)
{
if (user_options->machine_readable == false)
{
event_log_warning (hashcat_ctx, "* Device #%u: This system does not offer any reliable method to query actual free memory. Estimated base: %" PRIu64, device_id + 1, device_available_mem_sav);
event_log_warning (hashcat_ctx, " Assuming normal desktop activity, reducing estimate by 34%%: %" PRIu64, device_available_mem_new);
event_log_warning (hashcat_ctx, " This can hurt performance drastically, especially on memory-heavy algorithms.");
event_log_warning (hashcat_ctx, " You can adjust this percentage using --backend-devices-keepfree");
event_log_warning (hashcat_ctx, NULL);
}
}
device_param->device_available_mem = device_available_mem_new;
}
@ -16323,10 +16329,11 @@ int backend_session_begin (hashcat_ctx_t *hashcat_ctx)
// let's add some extra space just to be sure.
// now depends on the kernel-accel value (where scrypt and similar benefits), but also hard minimum 64mb and maximum 1024mb limit
// let's see if we still need this now that we have low-level API to report free memory
// we don't want these get too big. if a plugin requires really a lot of memory, the extra buffer should be used instead.
if (size_pws > device_param->device_maxmem_alloc) memory_limit_hit = 1;
if (size_tmps > device_param->device_maxmem_alloc) memory_limit_hit = 1;
if (size_hooks > device_param->device_maxmem_alloc) memory_limit_hit = 1;
if (size_pws > device_param->device_maxmem_alloc / 4) memory_limit_hit = 1;
if (size_tmps > device_param->device_maxmem_alloc / 4) memory_limit_hit = 1;
if (size_hooks > device_param->device_maxmem_alloc / 4) memory_limit_hit = 1;
// work around, for some reason apple opencl can't have buffers larger 2^31
// typically runs into trap 6

View File

@ -336,7 +336,7 @@ static int resolve_pyenv_libpath (char *out_buf, const size_t out_sz)
return -1;
}
static bool init_python (hc_python_lib_t *python)
static bool init_python (hc_python_lib_t *python, user_options_t *user_options)
{
char pythondll_path[PATH_MAX];
@ -526,7 +526,13 @@ static bool init_python (hc_python_lib_t *python)
}
else
{
printf ("Loaded python library from: %s\n\n", pythondll_path);
if (user_options->quiet == false)
{
if (user_options->machine_readable == false)
{
printf ("Loaded python library from: %s\n\n", pythondll_path);
}
}
}
#define HC_LOAD_FUNC_PYTHON(ptr,name,pythonname,type,libname,noerr) \
@ -694,7 +700,7 @@ void *platform_init (user_options_t *user_options)
python_interpreter->python = python;
if (init_python (python) == false) return NULL;
if (init_python (python, user_options) == false) return NULL;
python->Py_Initialize ();
@ -714,9 +720,15 @@ void *platform_init (user_options_t *user_options)
unit_t *unit_buf = &python_interpreter->units_buf[0];
#if defined (_WIN) || defined (__CYGWIN__) || defined (__APPLE__)
fprintf (stderr, "Attention!!! Falling back to single-threaded mode.\n");
fprintf (stderr, " Windows and MacOS ds not support multiprocessing module cleanly!\n");
fprintf (stderr, " For multithreading on Windows and MacOS, please use -m 72000 instead.\n\n");
if (user_options->quiet == false)
{
if (user_options->machine_readable == false)
{
fprintf (stderr, "Attention!!! Falling back to single-threaded mode.\n");
fprintf (stderr, " Windows and MacOS ds not support multiprocessing module cleanly!\n");
fprintf (stderr, " For multithreading on Windows and MacOS, please use -m 72000 instead.\n\n");
}
}
#endif
python_interpreter->source_filename = (user_options->bridge_parameter1 == NULL) ? DEFAULT_SOURCE_FILENAME : user_options->bridge_parameter1;

View File

@ -330,7 +330,7 @@ static int resolve_pyenv_libpath (char *out_buf, const size_t out_sz)
return -1;
}
static bool init_python (hc_python_lib_t *python)
static bool init_python (hc_python_lib_t *python, user_options_t *user_options)
{
char pythondll_path[PATH_MAX];
@ -525,18 +525,30 @@ static bool init_python (hc_python_lib_t *python)
}
else
{
printf ("Loaded python library from: %s\n\n", pythondll_path);
if (user_options->quiet == false)
{
if (user_options->machine_readable == false)
{
printf ("Loaded python library from: %s\n\n", pythondll_path);
}
}
}
#if defined (_WIN) || defined (__CYGWIN__) || defined (__APPLE__)
#else
fprintf (stderr, "Attention!!! The 'free-threaded' python library has some major downsides.\n");
fprintf (stderr, " The main purpose of this module is to give Windows and macOS users a multithreading option.\n");
fprintf (stderr, " It seems to be a lot slower, and relevant modules such as `cffi` are incompatibile.\n");
fprintf (stderr, " Since your are on Linux we highly recommend to stick to multiprocessing module.\n");
fprintf (stderr, " Maybe 'free-threaded' mode will become more mature in the future.\n");
fprintf (stderr, " For now, we high recommend to stick to -m 73000 instead.\n\n");
if (user_options->quiet == false)
{
if (user_options->machine_readable == false)
{
fprintf (stderr, "Attention!!! The 'free-threaded' python library has some major downsides.\n");
fprintf (stderr, " The main purpose of this module is to give Windows and macOS users a multithreading option.\n");
fprintf (stderr, " It seems to be a lot slower, and relevant modules such as `cffi` are incompatibile.\n");
fprintf (stderr, " Since your are on Linux we highly recommend to stick to multiprocessing module.\n");
fprintf (stderr, " Maybe 'free-threaded' mode will become more mature in the future.\n");
fprintf (stderr, " For now, we high recommend to stick to -m 73000 instead.\n\n");
}
}
#endif
#define HC_LOAD_FUNC_PYTHON(ptr,name,pythonname,type,libname,noerr) \
@ -696,7 +708,7 @@ void *platform_init (user_options_t *user_options)
python_interpreter->python = python;
if (init_python (python) == false) return NULL;
if (init_python (python, user_options) == false) return NULL;
python->Py_Initialize ();

View File

@ -96,6 +96,16 @@ int combinator_ctx_init (hashcat_ctx_t *hashcat_ctx)
return -1;
}
if (rc1 == -2)
{
event_log_error (hashcat_ctx, "Error reading wordlist: %s", dictfile1);
hc_fclose (&fp1);
hc_fclose (&fp2);
return -1;
}
if (words1_cnt == 0)
{
event_log_error (hashcat_ctx, "%s: empty file.", dictfile1);
@ -122,6 +132,13 @@ int combinator_ctx_init (hashcat_ctx_t *hashcat_ctx)
return -1;
}
if (rc2 == -2)
{
event_log_error (hashcat_ctx, "Error reading wordlist: %s", dictfile2);
return -1;
}
if (words2_cnt == 0)
{
event_log_error (hashcat_ctx, "%s: empty file.", dictfile2);
@ -199,6 +216,16 @@ int combinator_ctx_init (hashcat_ctx_t *hashcat_ctx)
return -1;
}
if (rc1 == -2)
{
event_log_error (hashcat_ctx, "Error reading wordlist: %s", dictfile1);
hc_fclose (&fp1);
hc_fclose (&fp2);
return -1;
}
if (words1_cnt == 0)
{
event_log_error (hashcat_ctx, "%s: empty file.", dictfile1);
@ -225,6 +252,13 @@ int combinator_ctx_init (hashcat_ctx_t *hashcat_ctx)
return -1;
}
if (rc2 == -2)
{
event_log_error (hashcat_ctx, "Error reading wordlist: %s", dictfile2);
return -1;
}
if (words2_cnt == 0)
{
event_log_error (hashcat_ctx, "%s: empty file.", dictfile2);
@ -330,6 +364,16 @@ int combinator_ctx_init (hashcat_ctx_t *hashcat_ctx)
return -1;
}
if (rc1 == -2)
{
event_log_error (hashcat_ctx, "Error reading wordlist: %s", dictfile1);
hc_fclose (&fp1);
hc_fclose (&fp2);
return -1;
}
if (words1_cnt == 0)
{
event_log_error (hashcat_ctx, "%s: empty file.", dictfile1);
@ -356,6 +400,13 @@ int combinator_ctx_init (hashcat_ctx_t *hashcat_ctx)
return -1;
}
if (rc2 == -2)
{
event_log_error (hashcat_ctx, "Error reading wordlist: %s", dictfile2);
return -1;
}
if (words2_cnt == 0)
{
event_log_error (hashcat_ctx, "%s: empty file.", dictfile2);
@ -412,6 +463,13 @@ int combinator_ctx_init (hashcat_ctx_t *hashcat_ctx)
return -1;
}
if (rc == -2)
{
event_log_error (hashcat_ctx, "Error reading wordlist: %s", dictfile);
return -1;
}
combinator_ctx->combs_cnt = words_cnt;
combinator_ctx->combs_mode = COMBINATOR_MODE_BASE_LEFT;
}

View File

@ -410,6 +410,17 @@ size_t hc_fread (void *ptr, size_t size, size_t nmemb, HCFILE *fp)
else if (fp->gfp)
{
n = gzfread (ptr, size, nmemb, fp->gfp);
// Double check to make sure that it successfully read 0 bytes instead of erroring
if (n == 0)
{
int errnum;
gzerror (fp->gfp, &errnum);
if (errnum != Z_OK)
{
return (size_t) -1;
}
}
}
else if (fp->ufp)
{
@ -579,7 +590,18 @@ int hc_fseek (HCFILE *fp, off_t offset, int whence)
}
else if (fp->xfp)
{
/* TODO */
/* XZ files are compressed streams, seeking is limited */
if (offset == 0 && whence == SEEK_SET)
{
/* Rewind to beginning */
hc_rewind(fp);
r = 0;
}
else
{
/* Arbitrary seeking not supported for compressed XZ files */
r = -1;
}
}
return r;

View File

@ -795,7 +795,7 @@ static int outer_loop (hashcat_ctx_t *hashcat_ctx)
{
if ((mask_ctx->masks_cnt > 1) || (straight_ctx->dicts_cnt > 1))
{
event_log_error (hashcat_ctx, "Use of --skip/--limit is not supported with --increment, mask files, or --stdout.");
event_log_error (hashcat_ctx, "Use of --skip/--limit is not supported with --increment, mask files, multiple dictionaries, or --stdout.");
return -1;
}

View File

@ -273,6 +273,12 @@ int hashconfig_init (hashcat_ctx_t *hashcat_ctx)
CHECK_MANDATORY (module_ctx->module_hash_encode);
}
// check deep comp kernel requirements
if (hashconfig->opts_type & OPTS_TYPE_DEEP_COMP_KERNEL)
{
CHECK_MANDATORY (module_ctx->module_deep_comp_kernel);
}
#undef CHECK_MANDATORY
if (user_options->keyboard_layout_mapping)

View File

@ -143,7 +143,12 @@ char *argon2_module_jit_build_options (MAYBE_UNUSED const hashconfig_t *hashconf
char *jit_build_options = NULL;
//hc_asprintf (&jit_build_options, "-D ARGON2_PARALLELISM=%u -D ARGON2_TMP_ELEM=%u", options[0].parallelism, options[0].memory_block_count);
//hc_asprintf (&jit_build_options, "-D ARGON2_PARALLELISM=%u", options[0].parallelism);
if (device_param->opencl_device_type & CL_DEVICE_TYPE_CPU)
{
hc_asprintf (&jit_build_options, "-D THREADS_PER_LANE=1");
}
return jit_build_options;
}

View File

@ -84,7 +84,7 @@ int module_hash_decode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSE
token.attr[0] = TOKEN_ATTR_FIXED_LENGTH
| TOKEN_ATTR_VERIFY_HEX;
token.len_min[1] = 0;
token.len_min[1] = 1;
token.len_max[1] = 30;
token.attr[1] = TOKEN_ATTR_VERIFY_LENGTH;

View File

@ -25,6 +25,7 @@ static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_NOT_ITERATED;
static const u64 OPTS_TYPE = OPTS_TYPE_STOCK_MODULE
| OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_NATIVE_THREADS
| OPTS_TYPE_PT_UPPER
| OPTS_TYPE_ST_UPPER;
static const u32 SALT_TYPE = SALT_TYPE_EMBEDDED;

View File

@ -25,6 +25,7 @@ static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_NOT_ITERATED;
static const u64 OPTS_TYPE = OPTS_TYPE_STOCK_MODULE
| OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_NATIVE_THREADS
| OPTS_TYPE_PT_UPPER
| OPTS_TYPE_ST_UPPER;
static const u32 SALT_TYPE = SALT_TYPE_EMBEDDED;

View File

@ -24,6 +24,7 @@ static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_NOT_ITERATED;
static const u64 OPTS_TYPE = OPTS_TYPE_STOCK_MODULE
| OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_NATIVE_THREADS
| OPTS_TYPE_ST_ADD80
| OPTS_TYPE_ST_UPPER;
static const u32 SALT_TYPE = SALT_TYPE_EMBEDDED;

View File

@ -24,6 +24,7 @@ static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_NOT_ITERATED;
static const u64 OPTS_TYPE = OPTS_TYPE_STOCK_MODULE
| OPTS_TYPE_PT_GENERATE_BE
| OPTS_TYPE_NATIVE_THREADS
| OPTS_TYPE_ST_ADD80
| OPTS_TYPE_ST_UPPER;
static const u32 SALT_TYPE = SALT_TYPE_EMBEDDED;

View File

@ -22,6 +22,7 @@ static const u64 KERN_TYPE = 9000;
static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE;
static const u64 OPTS_TYPE = OPTS_TYPE_STOCK_MODULE
| OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_NATIVE_THREADS
| OPTS_TYPE_BINARY_HASHFILE
| OPTS_TYPE_AUTODETECT_DISABLE
| OPTS_TYPE_DYNAMIC_SHARED;

View File

@ -27,6 +27,7 @@ static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_REGISTER_LIMIT;
static const u64 OPTS_TYPE = OPTS_TYPE_STOCK_MODULE
| OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_NATIVE_THREADS
| OPTS_TYPE_BINARY_HASHFILE
| OPTS_TYPE_LOOP_EXTENDED
| OPTS_TYPE_MP_MULTI_DISABLE

View File

@ -27,6 +27,7 @@ static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_REGISTER_LIMIT;
static const u64 OPTS_TYPE = OPTS_TYPE_STOCK_MODULE
| OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_NATIVE_THREADS
| OPTS_TYPE_BINARY_HASHFILE
| OPTS_TYPE_LOOP_EXTENDED
| OPTS_TYPE_MP_MULTI_DISABLE

View File

@ -27,6 +27,7 @@ static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_REGISTER_LIMIT;
static const u64 OPTS_TYPE = OPTS_TYPE_STOCK_MODULE
| OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_NATIVE_THREADS
| OPTS_TYPE_BINARY_HASHFILE
| OPTS_TYPE_LOOP_EXTENDED
| OPTS_TYPE_MP_MULTI_DISABLE

View File

@ -27,6 +27,7 @@ static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_REGISTER_LIMIT;
static const u64 OPTS_TYPE = OPTS_TYPE_STOCK_MODULE
| OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_NATIVE_THREADS
| OPTS_TYPE_BINARY_HASHFILE
| OPTS_TYPE_LOOP_EXTENDED
| OPTS_TYPE_MP_MULTI_DISABLE

View File

@ -27,6 +27,7 @@ static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_REGISTER_LIMIT;
static const u64 OPTS_TYPE = OPTS_TYPE_STOCK_MODULE
| OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_NATIVE_THREADS
| OPTS_TYPE_BINARY_HASHFILE
| OPTS_TYPE_LOOP_EXTENDED
| OPTS_TYPE_MP_MULTI_DISABLE

View File

@ -27,6 +27,7 @@ static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_REGISTER_LIMIT;
static const u64 OPTS_TYPE = OPTS_TYPE_STOCK_MODULE
| OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_NATIVE_THREADS
| OPTS_TYPE_BINARY_HASHFILE
| OPTS_TYPE_LOOP_EXTENDED
| OPTS_TYPE_MP_MULTI_DISABLE

View File

@ -26,6 +26,7 @@ static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_SLOW_HASH_SIMD_LOOP2;
static const u64 OPTS_TYPE = OPTS_TYPE_STOCK_MODULE
| OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_NATIVE_THREADS
| OPTS_TYPE_ST_HEX
| OPTS_TYPE_MP_MULTI_DISABLE
| OPTS_TYPE_INIT2

View File

@ -23,6 +23,7 @@ static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_REGISTER_LIMIT;
static const u64 OPTS_TYPE = OPTS_TYPE_STOCK_MODULE
| OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_NATIVE_THREADS
| OPTS_TYPE_SUGGEST_KG;
static const u32 SALT_TYPE = SALT_TYPE_EMBEDDED;
static const char *BENCHMARK_MASK = "?b?b?b?b?bxxxxx";

View File

@ -23,6 +23,7 @@ static const u64 KERN_TYPE = 19500;
static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_RAW_HASH;
static const u64 OPTS_TYPE = OPTS_TYPE_STOCK_MODULE
| OPTS_TYPE_NATIVE_THREADS
| OPTS_TYPE_PT_GENERATE_BE;
static const u32 SALT_TYPE = SALT_TYPE_GENERIC;
static const char *ST_PASS = "hashcat";

View File

@ -64,8 +64,8 @@ int module_hash_decode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSE
token.attr[0] = TOKEN_ATTR_FIXED_LENGTH
| TOKEN_ATTR_VERIFY_HEX;
token.len_min[1] = SALT_MIN;
token.len_max[1] = SALT_MAX;
token.len_min[1] = ((SALT_MIN * 8) / 6) + 0;
token.len_max[1] = ((SALT_MAX * 8) / 6) + 3;
token.attr[1] = TOKEN_ATTR_VERIFY_LENGTH
| TOKEN_ATTR_VERIFY_BASE64A;

View File

@ -22,6 +22,7 @@ static const u64 KERN_TYPE = 23900;
static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE;
static const u64 OPTS_TYPE = OPTS_TYPE_STOCK_MODULE
| OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_NATIVE_THREADS
| OPTS_TYPE_ST_HEX;
static const u32 SALT_TYPE = SALT_TYPE_EMBEDDED;
static const char *ST_PASS = "hashcat";

View File

@ -30,6 +30,8 @@ static const char *ST_PASS = "hashcat1";
// hash generated using with python3 tools/metamask2hashcat.py --vault tools/2hashcat_tests/metamask2hashcat.json
static const char *ST_HASH = "$metamask$jfGI3TXguhb8GPnKSXFrMzRk2NCEc131Gt5G3kZr5+s=$h+BoIf2CQ5BEjaIOShFE7g==$R95fzGt4UQ0uwrcrVYnIi4UcSlWn9wlmer+//526ZDwYAp50K82F1u1oacYcdjjhuEvbZnWk/uBG00UkgLLlO3WbINljqmu2QWdDEwjTgo/qWR6MU9d/82rxNiONHQE8UrZ8SV+htVr6XIB0ze3aCV0E+fwI93EeP79ZeDxuOEhuHoiYT0bHWMv5nA48AdluG4DbOo7SrDAWBVCBsEdXsOfYsS3/TIh0a/iFCMX4uhxY2824JwcWp4H36SFWyBYMZCJ3/U4DYFbbjWZtGRthoJlIik5BJq4FLu3Y1jEgza0AWlAvu4MKTEqrYSpUIghfxf1a1f+kPvxsHNq0as0kRwCXu09DObbdsiggbmeoBkxMZiFq0d9ar/3Gon0r3hfc3c124Wlivzbzu1JcZ3wURhLSsUS7b5cfG86aXHJkxmQDA5urBz6lw3bsIvlEUB2ErkQy/zD+cPwCG1Rs/WKt7KNh45lppCUkHccbf+xlpdc8OfUwj01Xp7BdH8LMR7Vx1C4hZCvSdtURVl0VaAMxHDX0MjRkwmqS";
static const u32 ROUNDS_METAMASK = 10000;
u32 module_attack_exec (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return ATTACK_EXEC; }
u32 module_dgst_pos0 (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS0; }
u32 module_dgst_pos1 (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS1; }
@ -145,9 +147,10 @@ int module_hash_decode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSE
| TOKEN_ATTR_VERIFY_SIGNATURE;
token.sep[1] = '$';
token.len[1] = 44;
token.attr[1] = TOKEN_ATTR_FIXED_LENGTH
| TOKEN_ATTR_VERIFY_BASE64A;
token.len_min[1] = 0;
token.len_max[1] = 60;
token.attr[1] = TOKEN_ATTR_VERIFY_LENGTH
| TOKEN_ATTR_OPTIONAL_ROUNDS;
token.sep[2] = '$';
token.len[2] = 24;
@ -170,7 +173,12 @@ int module_hash_decode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSE
// iter
salt->salt_iter = 10000 - 1;
salt->salt_iter = ROUNDS_METAMASK - 1;
if (token.opt_len != -1)
{
salt->salt_iter = hc_strtoul ((const char *) token.opt_buf + 7, NULL, 10) - 1; // 7 = "rounds="
}
// salt
@ -313,14 +321,20 @@ int module_hash_encode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSE
base64_encode (int_to_base64, (const u8 *) tmp_buf, (const int) metamask->ct_len+16, ct_buf);
u8 *out_buf = (u8 *) line_buf;
int out_len = snprintf ((char *) out_buf, line_size, "%s%s$%s$%s",
SIGNATURE_METAMASK_WALLET,
salt_buf,
iv_buf,
ct_buf);
return out_len;
if (salt->salt_iter + 1 != ROUNDS_METAMASK)
return snprintf ((char *) out_buf, line_size, "%srounds=%d$%s$%s$%s",
SIGNATURE_METAMASK_WALLET,
salt->salt_iter + 1,
salt_buf,
iv_buf,
ct_buf);
else
return snprintf ((char *) out_buf, line_size, "%s%s$%s$%s",
SIGNATURE_METAMASK_WALLET,
salt_buf,
iv_buf,
ct_buf);
}
void module_init (module_ctx_t *module_ctx)

View File

@ -29,6 +29,8 @@ static const u32 SALT_TYPE = SALT_TYPE_EMBEDDED;
static const char *ST_PASS = "hashcat1";
static const char *ST_HASH = "$metamask-short$jfGI3TXguhb8GPnKSXFrMzRk2NCEc131Gt5G3kZr5+s=$h+BoIf2CQ5BEjaIOShFE7g==$R95fzGt4UQ0uwrcrVYnIi4UcSlWn9wlmer+//526ZDwYAp50K82F1u1oacYcdjjhuEvbZnWk/uBG00UkgLLlOw==";
static const u32 ROUNDS_METAMASK = 10000;
u32 module_attack_exec (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return ATTACK_EXEC; }
u32 module_dgst_pos0 (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS0; }
u32 module_dgst_pos1 (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS1; }
@ -147,9 +149,10 @@ int module_hash_decode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSE
| TOKEN_ATTR_VERIFY_SIGNATURE;
token.sep[1] = '$';
token.len[1] = 44;
token.attr[1] = TOKEN_ATTR_FIXED_LENGTH
| TOKEN_ATTR_VERIFY_BASE64A;
token.len_min[1] = 0;
token.len_max[1] = 60;
token.attr[1] = TOKEN_ATTR_VERIFY_LENGTH
| TOKEN_ATTR_OPTIONAL_ROUNDS;
token.sep[2] = '$';
token.len[2] = 24;
@ -168,7 +171,12 @@ int module_hash_decode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSE
// iter
salt->salt_iter = 10000 - 1;
salt->salt_iter = ROUNDS_METAMASK - 1;
if (token.opt_len != -1)
{
salt->salt_iter = hc_strtoul ((const char *) token.opt_buf + 7, NULL, 10) - 1; // 7 = "rounds="
}
// salt
@ -293,13 +301,19 @@ int module_hash_encode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSE
u8 *out_buf = (u8 *) line_buf;
int out_len = snprintf ((char *) out_buf, line_size, "%s%s$%s$%s",
SIGNATURE_METAMASK_WALLET,
salt_buf,
iv_buf,
ct_buf);
return out_len;
if (salt->salt_iter + 1 != ROUNDS_METAMASK)
return snprintf ((char *) out_buf, line_size, "%srounds=%d$%s$%s$%s",
SIGNATURE_METAMASK_WALLET,
salt->salt_iter + 1,
salt_buf,
iv_buf,
ct_buf);
else
return snprintf ((char *) out_buf, line_size, "%s%s$%s$%s",
SIGNATURE_METAMASK_WALLET,
salt_buf,
iv_buf,
ct_buf);
}
void module_init (module_ctx_t *module_ctx)

View File

@ -23,6 +23,7 @@ static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_SLOW_HASH_SIMD_LOOP;
static const u64 OPTS_TYPE = OPTS_TYPE_STOCK_MODULE
| OPTS_TYPE_DEEP_COMP_KERNEL
| OPTS_TYPE_PT_GENERATE_LE;
static const u32 SALT_TYPE = SALT_TYPE_EMBEDDED;
static const char *ST_PASS = "hashcat";
@ -63,6 +64,11 @@ typedef struct krb5db_17_tmp
static const char *SIGNATURE_KRB5DB = "$krb5db$17$";
u32 module_deep_comp_kernel (MAYBE_UNUSED const hashes_t *hashes, MAYBE_UNUSED const u32 salt_pos, MAYBE_UNUSED const u32 digest_pos)
{
return KERN_RUN_3;
}
u64 module_tmp_size (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra)
{
const u64 tmp_size = (const u64) sizeof (krb5db_17_tmp_t);
@ -254,7 +260,7 @@ void module_init (module_ctx_t *module_ctx)
module_ctx->module_bridge_name = MODULE_DEFAULT;
module_ctx->module_bridge_type = MODULE_DEFAULT;
module_ctx->module_build_plain_postprocess = MODULE_DEFAULT;
module_ctx->module_deep_comp_kernel = MODULE_DEFAULT;
module_ctx->module_deep_comp_kernel = module_deep_comp_kernel;
module_ctx->module_deprecated_notice = MODULE_DEFAULT;
module_ctx->module_dgst_pos0 = module_dgst_pos0;
module_ctx->module_dgst_pos1 = module_dgst_pos1;

View File

@ -23,6 +23,7 @@ static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
| OPTI_TYPE_NOT_ITERATED
| OPTI_TYPE_SLOW_HASH_SIMD_LOOP;
static const u64 OPTS_TYPE = OPTS_TYPE_STOCK_MODULE
| OPTS_TYPE_DEEP_COMP_KERNEL
| OPTS_TYPE_PT_GENERATE_LE;
static const u32 SALT_TYPE = SALT_TYPE_EMBEDDED;
static const char *ST_PASS = "hashcat";
@ -63,6 +64,11 @@ typedef struct krb5db_18_tmp
static const char *SIGNATURE_KRB5DB = "$krb5db$18$";
u32 module_deep_comp_kernel (MAYBE_UNUSED const hashes_t *hashes, MAYBE_UNUSED const u32 salt_pos, MAYBE_UNUSED const u32 digest_pos)
{
return KERN_RUN_3;
}
u64 module_tmp_size (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra)
{
const u64 tmp_size = (const u64) sizeof (krb5db_18_tmp_t);
@ -263,7 +269,7 @@ void module_init (module_ctx_t *module_ctx)
module_ctx->module_bridge_name = MODULE_DEFAULT;
module_ctx->module_bridge_type = MODULE_DEFAULT;
module_ctx->module_build_plain_postprocess = MODULE_DEFAULT;
module_ctx->module_deep_comp_kernel = MODULE_DEFAULT;
module_ctx->module_deep_comp_kernel = module_deep_comp_kernel;
module_ctx->module_deprecated_notice = MODULE_DEFAULT;
module_ctx->module_dgst_pos0 = module_dgst_pos0;
module_ctx->module_dgst_pos1 = module_dgst_pos1;

View File

@ -245,6 +245,8 @@ int module_hash_encode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSE
u32_to_hex (byte_swap_32 (encdatavault->keychain[i]), (u8 *) tmp_buf + j);
}
tmp_buf[32 * 8] = 0;
const int line_len = snprintf (line_buf, line_size, "%s%u$%u$%08x%08x$%08x%08x$32$%08x%08x%08x%08x%08x%08x%08x%08x$%u$%s",
SIGNATURE_ENCDATAVAULT,
encdatavault->version,

View File

@ -207,6 +207,8 @@ int module_hash_encode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSE
u32_to_hex (byte_swap_32 (encdatavault->keychain[i]), (u8 *) tmp_buf + j);
}
tmp_buf[32 * 8] = 0;
const int line_len = snprintf (line_buf, line_size, "%s%u$%u$%08x%08x$%08x%08x$%s",
SIGNATURE_ENCDATAVAULT,
encdatavault->version,

277
src/modules/module_32800.c Normal file
View File

@ -0,0 +1,277 @@
/**
* Author......: See docs/credits.txt
* License.....: MIT
*/
#include "common.h"
#include "types.h"
#include "modules.h"
#include "bitops.h"
#include "convert.h"
#include "shared.h"
#include "emu_inc_hash_md5.h"
// Module configuration for hash-mode 32800: md5(md5(md5($pass.$salt1)).$salt2)
static const u32   ATTACK_EXEC    = ATTACK_EXEC_INSIDE_KERNEL;
// DGST_POS0..3 give the digest word order used for sorting/comparison
static const u32   DGST_POS0      = 0;
static const u32   DGST_POS1      = 3;
static const u32   DGST_POS2      = 2;
static const u32   DGST_POS3      = 1;
static const u32   DGST_SIZE      = DGST_SIZE_4_4; // 4 x u32 = raw MD5 digest
static const u32   HASH_CATEGORY  = HASH_CATEGORY_RAW_HASH_SALTED;
static const char *HASH_NAME      = "md5(md5(md5($pass.$salt1)).$salt2)";
static const u64   KERN_TYPE      = 32800;
static const u32   OPTI_TYPE      = OPTI_TYPE_ZERO_BYTE;
static const u64   OPTS_TYPE      = OPTS_TYPE_STOCK_MODULE
                                  | OPTS_TYPE_PT_GENERATE_LE
                                  | OPTS_TYPE_PT_ADD80
                                  | OPTS_TYPE_PT_ADDBITS14;
static const u32   SALT_TYPE      = SALT_TYPE_GENERIC;
// Self-test vector: ST_HASH must crack to ST_PASS
static const char *ST_PASS        = "hashcat";
static const char *ST_HASH        = "2c749af6c65cf3e82e5837e3056727f5:59331674906582121215362940957615121466283616005471:17254656838978443692786064919357750120910718779182716907569266";
// Trivial accessors exposing the static configuration constants above to the
// module interface; all parameters are unused.
u32         module_attack_exec    (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return ATTACK_EXEC;    }
u32         module_dgst_pos0      (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS0;      }
u32         module_dgst_pos1      (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS1;      }
u32         module_dgst_pos2      (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS2;      }
u32         module_dgst_pos3      (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS3;      }
u32         module_dgst_size      (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_SIZE;      }
u32         module_hash_category  (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return HASH_CATEGORY;  }
const char *module_hash_name      (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return HASH_NAME;      }
u64         module_kern_type      (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return KERN_TYPE;      }
u32         module_opti_type      (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return OPTI_TYPE;      }
u64         module_opts_type      (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return OPTS_TYPE;      }
u32         module_salt_type      (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return SALT_TYPE;      }
const char *module_st_hash        (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return ST_HASH;        }
const char *module_st_pass        (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return ST_PASS;        }
// Per-hash esalt: both salts are kept in full because the algorithm is
// md5(md5(md5($pass.$salt1)).$salt2) and each salt is applied at a
// different stage.
typedef struct md5_double_salt
{
  u32 salt1_buf[64]; // salt1 bytes (up to 256 bytes)
  int salt1_len;     // salt1 length in bytes
  u32 salt2_buf[64]; // salt2 bytes (up to 256 bytes)
  int salt2_len;     // salt2 length in bytes

} md5_double_salt_t;
u64 module_esalt_size (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra)
{
  // size of the per-hash esalt blob holding both salts
  return (u64) sizeof (md5_double_salt_t);
}
// Parse one hash line "md5hex:salt1:salt2" into digest/salt/esalt.
// Both salts may be raw bytes or (with OPTS_TYPE_ST_HEX) hex encoded.
// Returns PARSER_OK or a PARSER_* error code.
int module_hash_decode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED void *digest_buf, MAYBE_UNUSED salt_t *salt, MAYBE_UNUSED void *esalt_buf, MAYBE_UNUSED void *hook_salt_buf, MAYBE_UNUSED hashinfo_t *hash_info, const char *line_buf, MAYBE_UNUSED const int line_len)
{
  u32 *digest = (u32 *) digest_buf;

  md5_double_salt_t *md5_double_salt = (md5_double_salt_t *) esalt_buf;

  hc_token_t token;

  memset (&token, 0, sizeof (hc_token_t));

  // token 0: 32 hex chars of the MD5 digest
  token.token_cnt  = 3;

  token.sep[0]     = hashconfig->separator;
  token.len[0]     = 32;
  token.attr[0]    = TOKEN_ATTR_FIXED_LENGTH
                   | TOKEN_ATTR_VERIFY_HEX;

  // token 1: salt1, variable length
  token.sep[1]     = hashconfig->separator;
  token.len_min[1] = SALT_MIN;
  token.len_max[1] = SALT_MAX;
  token.attr[1]    = TOKEN_ATTR_VERIFY_LENGTH;

  // token 2: salt2, variable length
  token.sep[2]     = hashconfig->separator;
  token.len_min[2] = SALT_MIN;
  token.len_max[2] = SALT_MAX;
  token.attr[2]    = TOKEN_ATTR_VERIFY_LENGTH;

  if (hashconfig->opts_type & OPTS_TYPE_ST_HEX)
  {
    // hex-encoded salts take twice as many characters
    token.len_min[1] *= 2;
    token.len_max[1] *= 2;

    token.attr[1] |= TOKEN_ATTR_VERIFY_HEX;

    token.len_min[2] *= 2;
    token.len_max[2] *= 2;

    token.attr[2] |= TOKEN_ATTR_VERIFY_HEX;
  }

  const int rc_tokenizer = input_tokenizer ((const u8 *) line_buf, line_len, &token);

  if (rc_tokenizer != PARSER_OK) return (rc_tokenizer);

  const u8 *hash_pos = token.buf[0];

  digest[0] = hex_to_u32 (hash_pos +  0);
  digest[1] = hex_to_u32 (hash_pos +  8);
  digest[2] = hex_to_u32 (hash_pos + 16);
  digest[3] = hex_to_u32 (hash_pos + 24);

  if (hashconfig->opti_type & OPTI_TYPE_OPTIMIZED_KERNEL)
  {
    // optimized kernels work with the MD5 initial values subtracted out
    digest[0] -= MD5M_A;
    digest[1] -= MD5M_B;
    digest[2] -= MD5M_C;
    digest[3] -= MD5M_D;
  }

  const bool parse_rc1 = generic_salt_decode (hashconfig, token.buf[1], token.len[1], (u8 *) md5_double_salt->salt1_buf, &md5_double_salt->salt1_len);

  if (parse_rc1 == false) return (PARSER_SALT_LENGTH);

  const bool parse_rc2 = generic_salt_decode (hashconfig, token.buf[2], token.len[2], (u8 *) md5_double_salt->salt2_buf, &md5_double_salt->salt2_len);

  if (parse_rc2 == false) return (PARSER_SALT_LENGTH);

  // make salt sorter happy: the generic salt_t buffer gets a fixed-size
  // fingerprint (md5 of both salts) instead of the variable-length salts

  md5_ctx_t md5_ctx;

  md5_init   (&md5_ctx);
  md5_update (&md5_ctx, md5_double_salt->salt1_buf, md5_double_salt->salt1_len);
  md5_update (&md5_ctx, md5_double_salt->salt2_buf, md5_double_salt->salt2_len);
  md5_final  (&md5_ctx);

  salt->salt_buf[0] = md5_ctx.h[0];
  salt->salt_buf[1] = md5_ctx.h[1];
  salt->salt_buf[2] = md5_ctx.h[2];
  salt->salt_buf[3] = md5_ctx.h[3];

  salt->salt_len = 16;

  return (PARSER_OK);
}
// Re-encode digest + esalt back into the "md5hex:salt1:salt2" line format.
// Returns the number of bytes written to line_buf.
int module_hash_encode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const void *digest_buf, MAYBE_UNUSED const salt_t *salt, MAYBE_UNUSED const void *esalt_buf, MAYBE_UNUSED const void *hook_salt_buf, MAYBE_UNUSED const hashinfo_t *hash_info, char *line_buf, MAYBE_UNUSED const int line_size)
{
  const u32 *digest = (const u32 *) digest_buf;

  const md5_double_salt_t *md5_double_salt = (const md5_double_salt_t *) esalt_buf;

  // we can not change anything in the original buffer, otherwise destroying sorting
  // therefore create some local buffer

  u32 tmp[4];

  tmp[0] = digest[0];
  tmp[1] = digest[1];
  tmp[2] = digest[2];
  tmp[3] = digest[3];

  if (hashconfig->opti_type & OPTI_TYPE_OPTIMIZED_KERNEL)
  {
    // undo the initial-value subtraction done at decode time
    tmp[0] += MD5M_A;
    tmp[1] += MD5M_B;
    tmp[2] += MD5M_C;
    tmp[3] += MD5M_D;
  }

  u8 *out_buf = (u8 *) line_buf;

  int out_len = 0;

  u32_to_hex (tmp[0], out_buf + out_len); out_len += 8;
  u32_to_hex (tmp[1], out_buf + out_len); out_len += 8;
  u32_to_hex (tmp[2], out_buf + out_len); out_len += 8;
  u32_to_hex (tmp[3], out_buf + out_len); out_len += 8;

  out_buf[out_len] = hashconfig->separator;

  out_len += 1;

  out_len += generic_salt_encode (hashconfig, (const u8 *) md5_double_salt->salt1_buf, md5_double_salt->salt1_len, out_buf + out_len);

  out_buf[out_len] = hashconfig->separator;

  out_len += 1;

  out_len += generic_salt_encode (hashconfig, (const u8 *) md5_double_salt->salt2_buf, md5_double_salt->salt2_len, out_buf + out_len);

  return out_len;
}
// Register this module's callbacks with the hashcat core.  Entries set to
// MODULE_DEFAULT keep the generic framework behavior; the rest point at the
// functions defined above.
void module_init (module_ctx_t *module_ctx)
{
  module_ctx->module_context_size             = MODULE_CONTEXT_SIZE_CURRENT;
  module_ctx->module_interface_version        = MODULE_INTERFACE_VERSION_CURRENT;

  module_ctx->module_attack_exec              = module_attack_exec;
  module_ctx->module_benchmark_esalt          = MODULE_DEFAULT;
  module_ctx->module_benchmark_hook_salt      = MODULE_DEFAULT;
  module_ctx->module_benchmark_mask           = MODULE_DEFAULT;
  module_ctx->module_benchmark_charset        = MODULE_DEFAULT;
  module_ctx->module_benchmark_salt           = MODULE_DEFAULT;
  module_ctx->module_bridge_name              = MODULE_DEFAULT;
  module_ctx->module_bridge_type              = MODULE_DEFAULT;
  module_ctx->module_build_plain_postprocess  = MODULE_DEFAULT;
  module_ctx->module_deep_comp_kernel         = MODULE_DEFAULT;
  module_ctx->module_deprecated_notice        = MODULE_DEFAULT;
  module_ctx->module_dgst_pos0                = module_dgst_pos0;
  module_ctx->module_dgst_pos1                = module_dgst_pos1;
  module_ctx->module_dgst_pos2                = module_dgst_pos2;
  module_ctx->module_dgst_pos3                = module_dgst_pos3;
  module_ctx->module_dgst_size                = module_dgst_size;
  module_ctx->module_dictstat_disable         = MODULE_DEFAULT;
  module_ctx->module_esalt_size               = module_esalt_size;
  module_ctx->module_extra_buffer_size        = MODULE_DEFAULT;
  module_ctx->module_extra_tmp_size           = MODULE_DEFAULT;
  module_ctx->module_extra_tuningdb_block     = MODULE_DEFAULT;
  module_ctx->module_forced_outfile_format    = MODULE_DEFAULT;
  module_ctx->module_hash_binary_count        = MODULE_DEFAULT;
  module_ctx->module_hash_binary_parse        = MODULE_DEFAULT;
  module_ctx->module_hash_binary_save         = MODULE_DEFAULT;
  module_ctx->module_hash_decode_postprocess  = MODULE_DEFAULT;
  module_ctx->module_hash_decode_potfile      = MODULE_DEFAULT;
  module_ctx->module_hash_decode_zero_hash    = MODULE_DEFAULT;
  module_ctx->module_hash_decode              = module_hash_decode;
  module_ctx->module_hash_encode_status       = MODULE_DEFAULT;
  module_ctx->module_hash_encode_potfile      = MODULE_DEFAULT;
  module_ctx->module_hash_encode              = module_hash_encode;
  module_ctx->module_hash_init_selftest       = MODULE_DEFAULT;
  module_ctx->module_hash_mode                = MODULE_DEFAULT;
  module_ctx->module_hash_category            = module_hash_category;
  module_ctx->module_hash_name                = module_hash_name;
  module_ctx->module_hashes_count_min         = MODULE_DEFAULT;
  module_ctx->module_hashes_count_max         = MODULE_DEFAULT;
  module_ctx->module_hlfmt_disable            = MODULE_DEFAULT;
  module_ctx->module_hook_extra_param_size    = MODULE_DEFAULT;
  module_ctx->module_hook_extra_param_init    = MODULE_DEFAULT;
  module_ctx->module_hook_extra_param_term    = MODULE_DEFAULT;
  module_ctx->module_hook12                   = MODULE_DEFAULT;
  module_ctx->module_hook23                   = MODULE_DEFAULT;
  module_ctx->module_hook_salt_size           = MODULE_DEFAULT;
  module_ctx->module_hook_size                = MODULE_DEFAULT;
  module_ctx->module_jit_build_options        = MODULE_DEFAULT;
  module_ctx->module_jit_cache_disable        = MODULE_DEFAULT;
  module_ctx->module_kernel_accel_max         = MODULE_DEFAULT;
  module_ctx->module_kernel_accel_min         = MODULE_DEFAULT;
  module_ctx->module_kernel_loops_max         = MODULE_DEFAULT;
  module_ctx->module_kernel_loops_min         = MODULE_DEFAULT;
  module_ctx->module_kernel_threads_max       = MODULE_DEFAULT;
  module_ctx->module_kernel_threads_min       = MODULE_DEFAULT;
  module_ctx->module_kern_type                = module_kern_type;
  module_ctx->module_kern_type_dynamic        = MODULE_DEFAULT;
  module_ctx->module_opti_type                = module_opti_type;
  module_ctx->module_opts_type                = module_opts_type;
  module_ctx->module_outfile_check_disable    = MODULE_DEFAULT;
  module_ctx->module_outfile_check_nocomp     = MODULE_DEFAULT;
  module_ctx->module_potfile_custom_check     = MODULE_DEFAULT;
  module_ctx->module_potfile_disable          = MODULE_DEFAULT;
  module_ctx->module_potfile_keep_all_hashes  = MODULE_DEFAULT;
  module_ctx->module_pwdump_column            = MODULE_DEFAULT;
  module_ctx->module_pw_max                   = MODULE_DEFAULT;
  module_ctx->module_pw_min                   = MODULE_DEFAULT;
  module_ctx->module_salt_max                 = MODULE_DEFAULT;
  module_ctx->module_salt_min                 = MODULE_DEFAULT;
  module_ctx->module_salt_type                = module_salt_type;
  module_ctx->module_separator                = MODULE_DEFAULT;
  module_ctx->module_st_hash                  = module_st_hash;
  module_ctx->module_st_pass                  = module_st_pass;
  module_ctx->module_tmp_size                 = MODULE_DEFAULT;
  module_ctx->module_unstable_warning         = MODULE_DEFAULT;
  module_ctx->module_warmup_disable           = MODULE_DEFAULT;
}

View File

@ -35,6 +35,8 @@ static const char *const ST_0013 = "Error";
static const char *const ST_0014 = "Aborted (Finish)";
static const char *const ST_0015 = "Running (Quit after attack requested)";
static const char *const ST_0016 = "Autodetect";
static const char *const ST_0017 = "Paused (Checkpoint Quit requested)";
static const char *const ST_0018 = "Paused (Quit after attack requested)";
static const char *const ST_9999 = "Unknown! Bug!";
static const char UNITS[7] = { ' ', 'k', 'M', 'G', 'T', 'P', 'E' };
@ -262,8 +264,6 @@ const char *status_get_status_string (const hashcat_ctx_t *hashcat_ctx)
const int devices_status = status_ctx->devices_status;
// special case: running but checkpoint quit requested
if (devices_status == STATUS_RUNNING)
{
if (status_ctx->checkpoint_shutdown == true)
@ -276,6 +276,18 @@ const char *status_get_status_string (const hashcat_ctx_t *hashcat_ctx)
return ST_0015;
}
}
else if (devices_status == STATUS_PAUSED)
{
if (status_ctx->checkpoint_shutdown == true)
{
return ST_0017;
}
if (status_ctx->finish_shutdown == true)
{
return ST_0018;
}
}
switch (devices_status)
{

View File

@ -91,6 +91,13 @@ int straight_ctx_update_loop (hashcat_ctx_t *hashcat_ctx)
return -1;
}
if (rc == -2)
{
event_log_error (hashcat_ctx, "Error reading wordlist: %s", straight_ctx->dict);
return -1;
}
if (status_ctx->words_cnt == 0)
{
logfile_sub_msg ("STOP");
@ -125,6 +132,13 @@ int straight_ctx_update_loop (hashcat_ctx_t *hashcat_ctx)
return -1;
}
if (rc == -2)
{
event_log_error (hashcat_ctx, "Error reading wordlist: %s", combinator_ctx->dict1);
return -1;
}
}
else if (combinator_ctx->combs_mode == COMBINATOR_MODE_BASE_RIGHT)
{
@ -147,6 +161,13 @@ int straight_ctx_update_loop (hashcat_ctx_t *hashcat_ctx)
return -1;
}
if (rc == -2)
{
event_log_error (hashcat_ctx, "Error reading wordlist: %s", combinator_ctx->dict2);
return -1;
}
}
if (status_ctx->words_cnt == 0)
@ -194,6 +215,13 @@ int straight_ctx_update_loop (hashcat_ctx_t *hashcat_ctx)
return -1;
}
if (rc == -2)
{
event_log_error (hashcat_ctx, "Error reading wordlist: %s", straight_ctx->dict);
return -1;
}
if (status_ctx->words_cnt == 0)
{
logfile_sub_msg ("STOP");
@ -234,6 +262,13 @@ int straight_ctx_update_loop (hashcat_ctx_t *hashcat_ctx)
return -1;
}
if (rc == -2)
{
event_log_error (hashcat_ctx, "Error reading wordlist: %s", straight_ctx->dict);
return -1;
}
if ((status_ctx->words_cnt / straight_ctx->kernel_rules_cnt) != hashes->salts_cnt)
{
event_log_error (hashcat_ctx, "Number of words in wordlist '%s' is not in sync with number of unique salts", straight_ctx->dict);

View File

@ -1253,7 +1253,7 @@ void backend_info (hashcat_ctx_t *hashcat_ctx)
printf ("\"SystemInfo\": { ");
}
#if defined (_WIN) || defined (__CYGWIN__) || defined (__MSYS__)
#if defined (_WIN)
// Get Windows system information
SYSTEM_INFO sysinfo;
OSVERSIONINFO osvi;
@ -1311,6 +1311,7 @@ void backend_info (hashcat_ctx_t *hashcat_ctx)
printf ("\"Model\": \"%s\" } ", "N/A");
printf ("}, ");
}
#else
struct utsname utsbuf;
@ -1320,7 +1321,7 @@ void backend_info (hashcat_ctx_t *hashcat_ctx)
char *hw_model_buf = NULL;
#if !defined (__linux__)
#if !defined (__linux__) && !defined (__CYGWIN__) && !defined (__MSYS__)
size_t hw_model_len = 0;
@ -2830,7 +2831,7 @@ void status_display_status_json (hashcat_ctx_t *hashcat_ctx)
printf (",");
}
printf (" { \"device_id\": %02u,", device_id + 1);
printf (" { \"device_id\": %u,", device_id + 1);
char *device_name_json_encoded = (char *) hcmalloc (strlen (device_info->device_name) * 2);

View File

@ -214,8 +214,6 @@ int bypass (hashcat_ctx_t *hashcat_ctx)
{
status_ctx_t *status_ctx = hashcat_ctx->status_ctx;
if (status_ctx->devices_status != STATUS_RUNNING) return -1;
status_ctx->devices_status = STATUS_BYPASS;
status_ctx->run_main_level1 = true;
@ -262,8 +260,6 @@ int stop_at_checkpoint (hashcat_ctx_t *hashcat_ctx)
{
status_ctx_t *status_ctx = hashcat_ctx->status_ctx;
if (status_ctx->devices_status != STATUS_RUNNING) return -1;
// this feature only makes sense if --restore-disable was not specified
restore_ctx_t *restore_ctx = hashcat_ctx->restore_ctx;
@ -305,8 +301,6 @@ int finish_after_attack (hashcat_ctx_t *hashcat_ctx)
{
status_ctx_t *status_ctx = hashcat_ctx->status_ctx;
if (status_ctx->devices_status != STATUS_RUNNING) return -1;
// Enable or Disable
if (status_ctx->finish_shutdown == false)

View File

@ -1301,6 +1301,11 @@ int user_options_sanity (hashcat_ctx_t *hashcat_ctx)
}
}
if (user_options->benchmark_all == true)
{
user_options->benchmark = true;
}
if (user_options->benchmark == true)
{
// sanity checks based on automatically overwritten configuration variables by

View File

@ -60,6 +60,11 @@ int load_segment (hashcat_ctx_t *hashcat_ctx, HCFILE *fp)
wl_data->cnt = hc_fread (wl_data->buf, 1, wl_data->incr - 1000, fp);
if (wl_data->cnt == (size_t) -1)
{
return -1;
}
wl_data->buf[wl_data->cnt] = 0;
if (wl_data->cnt == 0) return 0;
@ -339,7 +344,12 @@ void get_next_word (hashcat_ctx_t *hashcat_ctx, HCFILE *fp, char **out_buf, u32
return;
}
load_segment (hashcat_ctx, fp);
if (load_segment (hashcat_ctx, fp) == -1)
{
event_log_error (hashcat_ctx, "Error reading file!\n");
return;
}
get_next_word (hashcat_ctx, fp, out_buf, out_len);
}
@ -559,9 +569,12 @@ int count_words (hashcat_ctx_t *hashcat_ctx, HCFILE *fp, const char *dictfile, u
u64 cnt2 = 0;
while (!hc_feof (fp))
{
load_segment (hashcat_ctx, fp);
{
if (load_segment (hashcat_ctx, fp) == -1)
{
return -2;
}
comp += wl_data->cnt;
u64 i = 0;

View File

@ -0,0 +1 @@
{"data":"G2Qsp8BtQNz9E1vFlSebykcGkps5iJ53fBjJV8GIE7eolAQpz5m25MTZv0b0t2tpqXvfgEYKYZKsoLvuWWRHqj2ommt+U0l6fEGPY6lTn5PYzDtMKNpPWHKCIS/QYhwEA9/X5RtyIwDL6VqcRp6owV+/icmRlQa+TI5buHWZ+99Z1kBbpTDVYLBwhMRsODM1vYizQDg0vFIo3cQDtpRWUqpAKXhFcpgRD+9grS3pP/zdlIUn//87DZ3ue6Sn6WFOe08EvuY8sYZqTiN3GxcfESOltNbZJGcedMubt/jGsk+qIwUWC/f456UgeX9DN7i2pQBBI+L7qkY5v1WT5Y0i8uho1c2M2G8M9miO8HSm/j4bpMN1J6lPtjFhfAzEPS1go1w2vTmOtr1y+2A4M6HEOcxIrPJ8lUUH9pcN7Xpd+u/tQv8BYFxd6RlNYKLyA6OChbF+TD5Zz6oFZQtkprXqzZUFOlxeWJ373kHMISZtXOx44YGaiT2178fXgHFXavw=","iv":"N1aDRjt2ZD5x15Q1X9zVUw==","keyMetadata":{"algorithm":"PBKDF2","params":{"iterations":600000}},"salt":"MBdUsmTcBHGCASECYr3gmD8XaJROwjhOegSWweCFhco="}

432
tools/apfs2hashcat.py Normal file
View File

@ -0,0 +1,432 @@
#!/usr/bin/env python3
# For extracting APFS hashes to be cracked by hashcat modes 18300 ($fvde$2$) or 16700 ($fvde$1$).
# Usage: `python3 apfs2hashcat.py <apfs_image_file> -o <_apfs_container_offset>`
# The argument -o is optional. The script will attempt to read the partition table to find the location of APFS container(s). In the case that the partition table is missing or you want to specify a particular APFS container, use -o to provide the offset to the start of the container.
import argparse
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
KNOWN_RECOVERY_HASHES = ['64C0C6EB-0000-AA11-AA11-00306543ECAC', 'D92A1CEC-18B6-D64E-BD8D-50F361C27507']
TAG_DICT = {'unk_80' : {'tag' : b'\x80', 'expected_len' : 1},
'uuid' : {'tag' : b'\x81', 'expected_len' : 0x10},
'unk_82' : {'tag' : b'\x82'},
'wrapped_kek' : {'tag' : b'\x83', 'expected_len' : 0x28},
'iterations' : {'tag' : b'\x84'},
'salt' : {'tag' : b'\x85', 'expected_len' : 0x10}}
HEX_APFS_CONTAINER_GUID = '7C3457EF-0000-11AA-AA11-00306543ECAC'
AES_XTS_SECTOR_SIZE = 512
EFI_PARTITION_HEADER = b'EFI PART'
def uint_to_int(b):
    """Interpret *b* as a little-endian unsigned integer.

    Args:
        b: bytes/bytearray holding the value, least-significant byte first.

    Returns:
        The non-negative integer value; 0 for empty input.
    """
    # int.from_bytes replaces the reverse-then-hex round trip and also
    # handles empty input (returns 0 instead of raising ValueError).
    return int.from_bytes(b, 'little')
def findall(p, s):
    """Yield the start index of every (possibly overlapping) occurrence of p in s."""
    start = 0
    while True:
        hit = s.find(p, start)
        if hit == -1:
            return
        yield hit
        start = hit + 1
def hex_to_guid(hex_str):
    """Render a 32-char on-disk GUID hex string as canonical uppercase GUID text.

    The first three GUID fields are stored little-endian on disk, the last
    two big-endian, so only the first three groups are byte-reversed here.
    """
    raw = bytes.fromhex(hex_str)
    groups = [
        raw[0:4][::-1],    # field 1: little-endian on disk
        raw[4:6][::-1],    # field 2: little-endian on disk
        raw[6:8][::-1],    # field 3: little-endian on disk
        raw[8:10],         # field 4: stored as-is
        raw[10:16],        # field 5: stored as-is
    ]
    return '-'.join(g.hex() for g in groups).upper()
def parse_partition_entry(partition_entry):
    """Split a 0x80-byte GPT partition entry into (partition GUID, type GUID, first-LBA bytes)."""
    type_guid = partition_entry[:0x10]         # partition type GUID (bytes 0x00-0x0f)
    unique_guid = partition_entry[0x10:0x20]   # unique partition GUID
    first_lba = partition_entry[0x20:0x28]     # starting LBA, little-endian u64 (raw bytes)
    # end LBA at [0x28:0x30] is not needed
    return unique_guid, type_guid, first_lba
# get main_start by multiplying apfs partition start lba by block size
def parse_partition_table(fp):
    """Parse the GPT of the image open at *fp*.

    Probes for the 'EFI PART' signature at the start of sector 1, assuming a
    512-byte and then a 4096-byte sector size.

    Returns:
        {partition GUID hex: {'start': byte offset, 'partition_type': type GUID hex}};
        an empty dict when no GPT signature is found.
    """
    # determine whether sector size is 0x200 or 0x1000
    sector_size = 0x0

    # look for EFI PART at start of sector 1
    fp.seek(0x200)
    if fp.read(0x8) == EFI_PARTITION_HEADER:
        sector_size = 0x200
    else:
        fp.seek(0x1000)
        if fp.read(0x8) == EFI_PARTITION_HEADER:
            sector_size = 0x1000

    if not sector_size:
        # BUGFIX: previously execution fell through after this message with
        # sector_size == 0, producing nonsense offsets; bail out instead.
        print(f"[!] Invalid sector size {sector_size} (not 512 or 4096 bytes). Exiting.")
        return {}

    print("[+] Identified sector size:", sector_size)

    fp.seek(2 * sector_size)  # partition entries begin at sector 2

    partition_dict = {}
    while True:
        partition_entry = fp.read(0x80)
        # an all-zero entry (or EOF) terminates the table
        if not any(partition_entry):
            break
        part_GUID, type_GUID, start = parse_partition_entry(partition_entry)
        partition_dict[part_GUID.hex()] = {
            'start': uint_to_int(start) * sector_size,
            'partition_type': type_GUID.hex(),
        }
    return partition_dict
def AES_XTS_decrypt_sector(uuid, tweak, ct):
    """Decrypt one AES-XTS sector of ct with the given tweak.

    NOTE(review): key=uuid+uuid means both XTS key halves are the 16-byte
    volume UUID — this appears to be deliberate for the APFS/FVDE keybag
    wrapping; confirm against the APFS reference before reusing elsewhere.
    """
    decryptor = Cipher(
        algorithms.AES(key=uuid+uuid),
        modes.XTS(tweak=tweak),
    ).decryptor()
    pt = decryptor.update(ct) + decryptor.finalize()
    return pt
def AES_decrypt(data, start_offset, block_size, uuid):
    """Decrypt one block of *data* with AES-XTS, 512-byte sector by sector.

    The tweak for each sector is its running sector number: start_offset
    (in blocks) scaled to 512-byte sectors, incremented per sector.
    """
    cs_factor = block_size // 0x200  # = 8 for block_size=4096
    uno = start_offset * cs_factor
    pt = b''
    for offset in range(0, block_size, AES_XTS_SECTOR_SIZE):
        ct = data[offset:offset + AES_XTS_SECTOR_SIZE]
        tweak = hex(uno)[2:].zfill(32)  # 32 so that the key is the correct length (16 bytes)
        tweak = bytearray.fromhex(tweak)[::-1]  # little-endian tweak bytes
        pt += AES_XTS_decrypt_sector(uuid, tweak, ct)
        uno += 1
    return pt
def TLV(full_kek_blob, tag, starting_index):
    """Parse one tag-length-value record at starting_index.

    *tag* names an entry in TAG_DICT; the record only matches when the byte
    at starting_index equals that tag's marker byte (and, when the tag has
    an expected_len, the length byte matches it too).

    Returns (value, index_after_record) on a match, (-1, starting_index)
    otherwise.
    """
    # expected tag should follow if this is the correct TLV)
    if full_kek_blob[starting_index:starting_index+1] != TAG_DICT[tag]['tag']:
        return -1, starting_index
    # check for expected len for further confirmation
    length = uint_to_int(full_kek_blob[starting_index+1:starting_index+2])
    expected_len = TAG_DICT[tag].get('expected_len')  # use .get() since not all tags have an expected len
    if expected_len:
        if length != expected_len:
            return -1, starting_index
    next_starting_index = starting_index+2+length
    value = full_kek_blob[starting_index+2:next_starting_index]
    return value, next_starting_index
def TLV_iterate(starting_index, pt, hash_set, volume_uuid):
    """Try to parse one full run of KEK TLV records at starting_index.

    On success the derived "$fvde$..." password hash is added to hash_set.

    Returns:
        (next_starting_index, hash_set) on every path.  When a tag/length
        check fails the index advances by one byte so the caller can resync.
    """
    for tag in TAG_DICT:
        value, starting_index = TLV(pt, tag, starting_index)
        # i.e. if fails length check
        if value == -1:
            return starting_index + 1, hash_set
        TAG_DICT[tag]['value'] = value

    aes_type = TAG_DICT['unk_82']['value']
    wrapped_kek = TAG_DICT['wrapped_kek']['value']
    iterations = TAG_DICT['iterations']['value']
    salt = TAG_DICT['salt']['value']

    aes_type = uint_to_int(aes_type[0:4])
    # FVDE - AES128
    if aes_type == 2:
        aes_hash_value = 1
        wrapped_kek = wrapped_kek[:0x18]  # shorter kek value, this removes zeros
    # APFS - AES256
    elif aes_type == 16 or aes_type == 0:
        aes_hash_value = 2
    else:
        print("[!] AES type not recognised, continuing...")
        # BUGFIX: return the usual (index, hash_set) tuple instead of a bare
        # None so callers that unpack the result keep working.
        return starting_index, hash_set

    password_hash = f"$fvde${aes_hash_value}${len(salt)}${salt.hex()}${int(iterations.hex(),16)}${wrapped_kek.hex()}"
    hash_set.add(password_hash)
    print(f"\nFound password hash: {password_hash} (vol uuid: {volume_uuid.hex()})")

    kek_uuid = hex_to_guid(TAG_DICT['uuid']['value'].hex())
    if kek_uuid in KNOWN_RECOVERY_HASHES:
        print(f"[!] Warning! Recognised UUID {kek_uuid}... possible recovery hash\n")
    return starting_index, hash_set
def parse_block(block):
    """Pull (transaction id, object type, magic) out of a raw APFS block header."""
    # uint_to_int inlined: little-endian bytes -> int
    nx_xid = int(block[16:24][::-1].hex(), 16)
    obj_type = int(block[24:26][::-1].hex(), 16)
    magic = block[0x20:0x24]
    return nx_xid, obj_type, magic
def parse_apsb_block(block):
    """Extract (object type, magic, volume UUID, encryption flags, volume name) from an APSB block."""
    # uint_to_int inlined: little-endian bytes -> int
    obj_type = int(block[24:26][::-1].hex(), 16)
    magic = block[0x20:0x24]
    uuid = block[240:256]       # volume UUID
    encryption = int(block[264:272][::-1].hex(), 16)  # presumably fs flags — confirm against APFS spec
    name = block[704:960]       # volume name, NUL padded
    return obj_type, magic, uuid, encryption, name
def parse_keybag_entry(uuid, pt):
    """Scan the decrypted keybag *pt* for entries keyed by *uuid*.

    Returns the keydata of the first tag-3 (volume unlock record) entry,
    or None when no such entry exists.
    """
    pos = pt.find(uuid)
    while pos != -1:
        # entry layout: uuid(16) | tag(2 LE) | keylen(2 LE) | padding(4) | keydata
        ke_tag = int(pt[pos + 16:pos + 18][::-1].hex(), 16)
        ke_keylen = int(pt[pos + 18:pos + 20][::-1].hex(), 16)
        # only tag 3 is needed for constructing the hash
        if ke_tag == 3:
            assert pt[pos + 20:pos + 24] == b'\x00\x00\x00\x00'
            return pt[pos + 24:pos + 24 + ke_keylen]
        pos = pt.find(uuid, pos + 1)
    return None
def get_fs_oids(csb_body):
    """Collect the non-zero filesystem (volume) object ids from the superblock's oid array."""
    # uint_to_int inlined: little-endian bytes -> int
    max_file_systems = int(csb_body[0x94:0x98][::-1].hex(), 16)
    fs_oids = set()
    for idx in range(max_file_systems):
        raw = csb_body[0x98 + 8 * idx:0x98 + 8 * idx + 8]
        oid = int(raw[::-1].hex(), 16)
        if oid:  # zero slots mean "no volume"
            fs_oids.add(oid)
    return fs_oids
def parse_csb(csb):
    """Pull the fields we need out of a container superblock (NXSB) copy.

    Returns (block_size, uuid, keylocker_paddr, omap_oid, fs_oids,
    xp_desc_base, xp_desc_blocks).  Offsets below are relative to the
    'NXSB' magic (the 0x20-byte object header is stripped first).
    """
    csb_body = csb[0x20:0x568]
    header = csb_body[:4]  # 'NXSB'
    assert header == b'NXSB'
    block_size = uint_to_int(csb_body[4:8])  # default is 4096
    uuid = csb_body[0x28:0x38]  # used as key for unwrapping
    omap_oid = uint_to_int(csb_body[0x80:0x88])  # omap_oid to locate the omap to find volume offsets
    fs_oids = get_fs_oids(csb_body)
    # locate container's keybag using nx_keylocker field
    keylocker_paddr = uint_to_int(csb_body[0x4f0:0x4f8])
    # block info for iterating to find most recent csb
    # NOTE(review): these slices read 3 and 4 bytes where the on-disk fields
    # are 4 and 8 bytes wide — possibly deliberate to drop flag/high bits,
    # but confirm against the Apple File System Reference.
    xp_desc_blocks = uint_to_int(csb_body[0x48:0x4b])
    xp_desc_base = uint_to_int(csb_body[0x50:0x54])
    return block_size, uuid, keylocker_paddr, omap_oid, fs_oids, xp_desc_base, xp_desc_blocks
def get_offset_from_oid(oid, apfs_start, block_size):
    """Translate a physical object id into an absolute byte offset in the image."""
    return (oid * block_size) + apfs_start
def parse_tree(tree, fs_oids, block_size):
    """Walk the omap B-tree node and resolve each known fs oid to its volume block address.

    Returns a list of physical block addresses for oids that appear in
    fs_oids; entries whose key oid is unknown are reported and skipped.
    """
    volume_addresses = []
    # get key data from TOC:
    table_space_offset = uint_to_int(tree[0x28:0x2a])
    table_space_len = uint_to_int(tree[0x2a:0x2c])
    start_of_key_area = table_space_offset + table_space_len + 0x38  # 0x38 = header + entries
    # b-tree structure is header (0x20 bytes) -> ToC -> keys -> free space -> values -> btree_info (0x28 bytes)
    end_of_value_area = block_size - 0x28
    tree_data = tree[0x38:]
    for m in range(len(fs_oids)):
        # each ToC entry is two u16s: key offset and data offset
        data_start = m * 4
        key_offset = uint_to_int(tree_data[data_start:data_start + 2])  # key offset is from the start of the key area downwards
        data_offset = uint_to_int(tree_data[data_start + 2:data_start + 4])  # data offset is from the end of the data area upwards
        # get to key area
        key_start = key_offset + start_of_key_area
        key_oid = uint_to_int(tree[key_start:key_start + 0x8])
        if key_oid not in fs_oids:
            print(f"Found key_oid {key_oid} in omap but not present in fs map. Skipping this volume")
        else:
            val_end = end_of_value_area - data_offset
            # value layout: flags/size first, physical address at +0x8
            data_paddr = uint_to_int(tree[val_end + 0x8:val_end + 0x10])
            volume_addresses.append(data_paddr)
    return volume_addresses
def get_volumes(fp, block_size, apfs_start, tree, fs_oids):
    """Read each candidate volume superblock and keep the real APSB volumes.

    Returns {volume uuid (bytes): {'start': block address, 'name': raw name bytes}}.
    """
    volume_addresses = parse_tree(tree, fs_oids, block_size)
    volumes_dict = dict()
    for v in volume_addresses:
        fp.seek(apfs_start + block_size * v)
        block_start = fp.read(block_size)
        obj_type, magic, uuid, encryption, name = parse_apsb_block(block_start)
        # object type 13 plus 'APSB' magic identifies a volume superblock
        if obj_type == 13 and magic == b'APSB':
            volumes_dict[uuid] = {'start':v, 'name':name}
    print()
    print("[+] The following volumes are present:")
    for u in volumes_dict:
        print(f"{u.hex()} ({volumes_dict[u]['name'].decode()}) at {hex(volumes_dict[u]['start'] * block_size + apfs_start)}")
    return volumes_dict
def decrypt_volume_keybag(fp, volume_keybag_addr, block_size, apfs_struct_start, volume_uuid):
    """Read and AES-XTS-decrypt a volume's keybag block.

    volume_keybag_addr arrives as raw bytes; the three lines below are an
    indirect way of taking its first 4 bytes as a little-endian integer.
    """
    volume_keybag_addr = volume_keybag_addr[:4].hex().zfill(8)
    volume_keybag_addr = bytearray.fromhex(volume_keybag_addr)[::-1]
    volume_keybag_addr = int(volume_keybag_addr.hex(),16)
    offset = block_size * volume_keybag_addr + apfs_struct_start
    fp.seek(offset)
    encrypted_keybag = fp.read(block_size)
    pt = AES_decrypt(encrypted_keybag, volume_keybag_addr, block_size, volume_uuid)
    return pt
def get_apfs_containers(fp):
    """Return the start offsets of every APFS-container partition in the image."""
    partitions = parse_partition_table(fp)
    return [partitions[entry]['start']
            for entry in partitions
            if hex_to_guid(partitions[entry]['partition_type']) == HEX_APFS_CONTAINER_GUID]
def get_tree(fp, omap_oid, apfs_struct_start, block_size):
    # Follow the object map oid to its b-tree root block and return the raw block data.
    omap_offset = get_offset_from_oid(omap_oid, apfs_struct_start, block_size)
    fp.seek(omap_offset + 0x30) # location for tree_oid
    tree_oid = fp.read(0x10)
    tree_oid = uint_to_int(tree_oid)
    tree_offset = get_offset_from_oid(tree_oid, apfs_struct_start, block_size)
    fp.seek(tree_offset)
    # NOTE(review): reads a fixed 0x1000 bytes rather than block_size — matches the
    # default 4 KiB APFS block size, but may truncate containers with larger blocks; confirm.
    tree = fp.read(0x1000)
    return tree
def get_container_keybag(fp, apfs_struct_start, block_size, keylocker_paddr):
    """Read the raw (still encrypted) container keybag block from the image."""
    # The keybag occupies one whole block at physical address keylocker_paddr.
    fp.seek(apfs_struct_start + block_size * keylocker_paddr)
    return fp.read(block_size)
def find_valid_csb(fp, block_size, xp_desc_base, xp_desc_blocks, apfs_start):
    """Scan the checkpoint descriptor area and return the physical address of the
    container superblock carrying the highest (most recent) transaction id."""
    best_xid = 0
    best_paddr = 0
    for paddr in range(xp_desc_base, xp_desc_base + xp_desc_blocks):
        # The xid field sits at offset 0x10 within each candidate block.
        fp.seek(block_size * paddr + apfs_start + 0x10)
        xid = uint_to_int(fp.read(0x8))
        if xid >= best_xid:
            best_xid = xid
            best_paddr = paddr
    print(f"[+] Found valid csb with xid {best_xid} at {hex(best_paddr)}")
    return best_paddr
def main():
    # Entry point: locate APFS containers, decrypt their keybags, and emit hashes.
    p = argparse.ArgumentParser()
    p.add_argument('filename')
    p.add_argument('-o', '--offset', help='[OPTIONAL] offset for APFS volume - may be necessary if partition table is not present')
    args = p.parse_args()
    filename = args.filename
    with open(filename, 'rb') as fp:
        # An explicit offset bypasses partition-table parsing entirely.
        if args.offset:
            apfs_offset = int(args.offset)
            apfs_containers = [apfs_offset]
        else:
            apfs_containers = get_apfs_containers(fp)
        if apfs_containers == []:
            print("[!] APFS volume GUID not found, exiting.")
            exit()
        for apfs_struct_start in apfs_containers:
            print(f"[+] APFS container starts at {hex(apfs_struct_start)}")
            fp.seek(apfs_struct_start)
            csb = fp.read(0x568)
            # read the first csb for initial info - then use this to iterate through all csbs and find the most recent one
            block_size, uuid, keylocker_paddr, omap_oid, fs_oids, xp_desc_base, xp_desc_blocks = parse_csb(csb)
            valid_csb_paddr = find_valid_csb(fp, block_size, xp_desc_base, xp_desc_blocks, apfs_struct_start)
            fp.seek(valid_csb_paddr * block_size + apfs_struct_start)
            valid_csb = fp.read(block_size)
            # Re-parse container metadata from the most recent (highest-xid) csb.
            block_size, uuid, keylocker_paddr, omap_oid, fs_oids, xp_desc_base, xp_desc_blocks = parse_csb(valid_csb)
            encrypted_keybag = get_container_keybag(fp, apfs_struct_start, block_size, keylocker_paddr)
            # Unwrap container keybag using AES-XTS with container UUID as key
            starting_pt = AES_decrypt(encrypted_keybag, keylocker_paddr, block_size, uuid)
            # find all volumes to iterate through
            tree = get_tree(fp, omap_oid, apfs_struct_start, block_size)
            volumes_dict = get_volumes(fp, block_size, apfs_struct_start, tree, fs_oids)
            hash_set = set()
            for volume_uuid in volumes_dict:
                # find entry in container's keybag matching volume UUID and has KB_TAG_VOLUME_UNLOCK_RECORDS = 3. Its keydata is location of volume keybag.
                volume_keybag_addr = parse_keybag_entry(volume_uuid, starting_pt)
                # continue if encrypted keybag not found
                if not volume_keybag_addr:
                    continue
                # unwrap volume keybag using volume uuid AES-XTS
                pt = decrypt_volume_keybag(fp, volume_keybag_addr, block_size, apfs_struct_start, volume_uuid)
                # parse TLV for 80 first
                index_iterator = findall(TAG_DICT['unk_80']['tag'], pt)
                for starting_index in index_iterator:
                    starting_index, hash_set = TLV_iterate(starting_index, pt, hash_set, volume_uuid)
    print()
    print("[+] All hashes found.")
    return
# Script entry point.
if __name__ == "__main__":
    main()

233
tools/bitlocker2hashcat.py Normal file
View File

@ -0,0 +1,233 @@
# Construct a hash for use with hashcat mode 22100
# Usage: python3 bitlocker2hashcat.py <bitlocker_image> -o <bitlocker_partition_offset>
# Hashcat supports modes $bitlocker$0$ and $bitlocker$1$ and therefore this script will output hashes that relate to a VMK protected by a user password only.
# It is not possible to create a hash for VMKs protected by a TPM, and is infeasible to attempt to crack a hash of the recovery password.
# Refs: https://github.com/libyal/libbde/blob/main/documentation/BitLocker%20Drive%20Encryption%20(BDE)%20format.asciidoc#encryption_methods
import argparse
BITLOCKER_SIGNATURE = '-FVE-FS-'
BITLOCKER_TO_GO_SIGNATURE = 'MSWIN4.1'
# Volume GUIDs identifying the BitLocker variant (per libbde's BDE format docs).
# Fixed: the BitLocker To Go GUID was truncated to 35 hex digits
# ('...F6A339E3D01'), so it could never match the 36-char output of hex_to_guid().
BITLOCKER_GUIDS = {'4967D63B-2E29-4AD8-8399-F6A339E3D001' : 'BitLocker', '4967D63B-2E29-4AD8-8399-F6A339E3D011' : 'BitLocker To Go', '92A84D3B-DD80-4D0E-9E4E-B1E3284EAED8' : 'BitLocker Used Disk Space Only'}
# VMK key-protection type codes, as stored in the VMK metadata entry.
PROTECTION_TYPES = {0x0: 'VMK protected with clear key', 0x100: 'VMK protected with TPM', 0x200: 'VMK protected with startup key', 0x500: 'VMK protected with TPM and PIN', 0x800: 'VMK protected with recovery password', 0x2000: 'VMK protected with password'}
# FVE metadata entry-type and value-type codes used while walking the metadata block.
FVE_ENTRY_TYPES = {0x0: 'None', 0x2: 'VMK', 0x3: 'FVEK', 0x4: 'Validation', 0x6: 'Startup key', 0x7: 'Computer description', 0xb: 'FVEK backup', 0xf: 'Volume header block'}
FVE_VALUE_TYPES = {0x0: 'Erased', 0x1: 'Key', 0x2: 'UTF-16 string', 0x3: 'Stretch key', 0x4: 'Use key', 0x5: 'AES-CCM encrypted key', 0x6: 'TPM encoded key', 0x7: 'Validation', 0x8: 'VMK', 0x9: 'External key', 0xa: 'Update', 0xb: 'Error', 0xf: 'Offset and size'}
ITERATION_COUNT = 0x100000  # fixed iteration count of BitLocker's key-stretching KDF
BITLOCKER_HASH_VERSIONS = [0,1] # 0,1 both supported on hashcat
HASHCAT_HASH = []  # accumulates generated $bitlocker$ hash strings for final output
def guid_to_hex(guid):
    """Convert a dashed GUID string to its on-disk hex layout.

    The first three groups are stored little-endian (byte-reversed); the last
    two groups keep their textual order.
    """
    def _rev_bytes(group):
        # Reverse the byte order of one hex group.
        return ''.join(reversed([group[i:i + 2] for i in range(0, len(group), 2)]))

    parts = guid.split('-')
    return _rev_bytes(parts[0]) + _rev_bytes(parts[1]) + _rev_bytes(parts[2]) + parts[3] + parts[4]
def hex_to_guid(hex_str):
    """Inverse of guid_to_hex: rebuild the canonical upper-case dashed GUID.

    The first three groups are byte-reversed back to textual order; the last
    two groups are copied as-is.
    """
    def _rev_bytes(group):
        # Reverse the byte order of one hex group.
        return ''.join(reversed([group[i:i + 2] for i in range(0, len(group), 2)]))

    groups = [
        _rev_bytes(hex_str[0:8]),
        _rev_bytes(hex_str[8:12]),
        _rev_bytes(hex_str[12:16]),
        hex_str[16:20],
        hex_str[20:],
    ]
    return '-'.join(groups).upper()
def uint_to_int(b):
    """Interpret bytes as a little-endian unsigned integer.

    Deliberately raises ValueError on empty input (int('', 16)): the metadata
    walker relies on that to stop when it runs off the end of a block.
    """
    return int(bytes(reversed(b)).hex(), 16)
def parse_FVEK(fvek_data):
    """Split an AES-CCM encrypted FVEK blob into (nonce, mac, encrypted data).

    Layout: 12-byte nonce, 16-byte MAC, remainder is ciphertext.
    """
    print("\nParsing FVEK...")
    nonce, mac, enc_data = fvek_data[:12], fvek_data[12:28], fvek_data[28:]
    for label, value in (("Mac", mac), ("Nonce", nonce), ("Encrypted data", enc_data)):
        print(f"{label}: {value.hex()}")
    return nonce, mac, enc_data
def parse_stretch_key(data):
    """Extract the salt plus the nested AES-CCM key material from a stretch-key entry.

    Returns (salt, nonce, mac, enc_data).
    """
    print("\nParsing stretch key...")
    encryption_method = hex(uint_to_int(data[0:4]))
    salt = data[4:20]
    print("Encryption method:", encryption_method)
    print("Salt:", salt.hex())
    # The remainder is a nested FVE metadata entry wrapping an AES-CCM key.
    _, entry_payload, _ = parse_fve_metadata_entry(0, data[20:])
    nonce, mac, enc_data = parse_aes_ccm_encrypted_key(entry_payload)
    return salt, nonce, mac, enc_data
def generate_hashcat_hash(salt, nonce, mac, enc_data):
    """Build $bitlocker$ hash strings (one per supported version) and record them."""
    print("\nFound hashcat hash!")
    payload = mac + enc_data
    for version in BITLOCKER_HASH_VERSIONS:
        line = (f"$bitlocker${version}${len(salt)}${salt.hex()}${ITERATION_COUNT}"
                f"${len(nonce)}${nonce.hex()}${len(payload)}${payload.hex()}")
        print(line)
        HASHCAT_HASH.append(line)
def parse_aes_ccm_encrypted_key(data):
    """AES-CCM key entries share the FVEK layout; delegate and pass the tuple through."""
    print("Parsing AES CCM key...")
    return parse_FVEK(data)
def parse_description(data):
    """Decode and print the UTF-16 computer-description string."""
    print("\nParsing description...")
    description = data.decode('utf-16')
    print(f"Info: {description}")
def parse_volume_header_block(data):
    """Print the offset/size pair stored in a volume header block entry."""
    print("\nParsing volume header block...")
    header_offset = uint_to_int(data[0:8])
    header_size = uint_to_int(data[8:16])
    print(f"Block offset: {hex(header_offset)}")
    print(f"Block size: {header_size}")
def parse_VMK(VMK_data):
    """Parse a VMK metadata entry; for password-protected VMKs (0x2000), walk the
    nested properties and emit hashcat hashes from the stretch key + AES-CCM key.

    Fixes:
    - `salt` previously raised NameError if an AES-CCM (0x5) entry appeared
      before the stretch-key (0x3) entry; now guarded.
    - a zero-sized/corrupt entry previously spun forever; now detected and aborted.
    - typo: strech_nonce -> stretch_nonce.
    """
    print("\nParsing VMK...")
    guid = hex_to_guid(VMK_data[:16].hex())
    protection_type = uint_to_int(VMK_data[26:28])
    properties = VMK_data[28:]
    print("GUID:", guid)
    print(f"Protection type: {hex(protection_type)} = {PROTECTION_TYPES.get(protection_type)}")
    # only try parse properties if correct protection type
    if protection_type == 0x2000:
        salt = None  # set once the stretch-key (0x3) entry has been parsed
        current_pos = 28
        while current_pos < len(properties):
            prev_pos = current_pos
            current_pos, data, value_type = parse_fve_metadata_entry(current_pos, VMK_data[current_pos:])
            if current_pos <= prev_pos:
                # Zero-sized entry: bail out instead of looping forever.
                break
            if value_type == 0x3:
                salt, stretch_nonce, stretch_mac, stretch_enc_data = parse_stretch_key(data)
            if value_type == 0x5:
                nonce, mac, enc_data = parse_aes_ccm_encrypted_key(data)
                if salt is None:
                    print("[!] AES-CCM key found before stretch key; skipping")
                    continue
                generate_hashcat_hash(salt, nonce, mac, enc_data)
    return
def parse_fve_metadata_block(block):
    """Walk an FVE metadata block and dispatch each metadata entry by value type.

    Entries follow the 48-byte metadata header (itself at offset 64); the walk
    starts at offset 112 and continues until metadata_size is reached or the
    block runs out of data.

    Fixes:
    - the bare `try/except: return` that previously masked the end-of-block
      condition is replaced by an explicit bounds check;
    - a no-progress guard prevents an infinite loop on a zero-sized entry.
    """
    print('\nParsing FVE block...')
    signature = block[0:8]  # read for completeness; not validated here
    fve_metadata_header = block[64:64+48]
    metadata_size = parse_fve_metadata_header(fve_metadata_header)
    entry_size = uint_to_int(block[112:114])
    current_pos = 112
    while current_pos < metadata_size:
        prev_pos = current_pos
        current_pos, data, value_type = parse_fve_metadata_entry(current_pos, block[current_pos:current_pos+entry_size])
        if current_pos <= prev_pos:
            # Zero-sized/corrupt entry: stop rather than loop forever.
            return
        if value_type == 0x2:
            parse_description(data)
        if value_type == 0x5:
            parse_aes_ccm_encrypted_key(data)
        if value_type == 0x8:
            parse_VMK(data)
        if value_type == 0xf:
            parse_volume_header_block(data)
        # Next entry's size prefix must fit inside the block; otherwise we are done.
        if current_pos + 2 > len(block):
            return
        entry_size = uint_to_int(block[current_pos:current_pos+2])
def parse_fve_metadata_entry(current_pos, block):
    """Decode one FVE metadata entry header.

    Returns (position advanced past this entry, payload bytes, value type).
    """
    print("\nParsing FVE metadata entry...")
    entry_size = uint_to_int(block[0:2])
    entry_type = uint_to_int(block[2:4])
    value_type = uint_to_int(block[4:6])
    version = hex(uint_to_int(block[6:8]))  # parsed for completeness; not printed
    data = block[8:entry_size]
    print(f"Entry size: {entry_size}")
    print(f"Entry type: {hex(entry_type)} = {FVE_ENTRY_TYPES.get(entry_type)}")
    print(f"Value type: {hex(value_type)} = {FVE_VALUE_TYPES.get(value_type)}")
    return current_pos + entry_size, data, value_type
def parse_fve_metadata_header(block):
    """Print the FVE metadata header fields and return the total metadata size."""
    print("\nParsing FVE metadata header...")
    metadata_size = uint_to_int(block[0:4])
    volume_guid = hex_to_guid(block[16:32].hex())
    nonce_counter = uint_to_int(block[32:36])  # read for completeness; not printed
    encryption_method = hex(uint_to_int(block[36:40]))
    print(f"Metadata size: {metadata_size}")
    print(f"Volume GUID: {volume_guid}")
    print(f"Encryption method: {encryption_method}")
    return metadata_size
def main():
    """Locate the FVE metadata in a BitLocker image and print hashcat-ready hashes.

    Fixes the user-facing typo "specifiying" -> "specifying" and hoists the
    repeated header.decode('latin-1') into a local.
    """
    p = argparse.ArgumentParser()
    p.add_argument('image_path', help="Path to encrypted BitLocker image")
    p.add_argument('-o', '--offset', default=0, type=int, help='Offset in image where BitLocker partition starts')
    args = p.parse_args()
    bitlocker_partition = args.image_path
    bitlocker_offset = args.offset
    with open(bitlocker_partition, 'rb') as fp:
        fp.seek(bitlocker_offset)
        boot_entry_point = fp.read(3)  # x86 jump instruction; unused
        header = fp.read(8)
        signature = header.decode('latin-1')
        if signature not in [BITLOCKER_SIGNATURE, BITLOCKER_TO_GO_SIGNATURE]:
            print("[!] Supplied image path is not a BitLocker partition. Try specifying the offset of the BitLocker partition with -o")
            exit()
        print(f'[+] BitLocker signature found: {header.decode()}')
        sector_size = uint_to_int(fp.read(2))
        # The volume GUID sits at a different offset for BitLocker To Go images.
        if signature == BITLOCKER_SIGNATURE:
            guid_offset = 0xa0
        if signature == BITLOCKER_TO_GO_SIGNATURE:
            guid_offset = 0x1a8
        fp.seek(guid_offset + bitlocker_offset)
        volume_guid = fp.read(16)
        print(f'[+] Identified volume GUID: {hex_to_guid(volume_guid.hex())} = {BITLOCKER_GUIDS.get(hex_to_guid(volume_guid.hex()))}')
        # get FVE metadata block addresses (three redundant copies follow the GUID)
        FVE_metadata_offsets = [hex(uint_to_int(fp.read(8)) + bitlocker_offset) for _ in range(3)]
        print(f'[+] FVE metadata info found at offsets {FVE_metadata_offsets}')
        # all metadata blocks should be the same
        for f in FVE_metadata_offsets:
            fp.seek(int(f, 16))
            FVE_metadata_block = fp.read(2048)
            parse_fve_metadata_block(FVE_metadata_block)
            break
    if HASHCAT_HASH == []:
        print("\nNo hashes associated with the user password found. Exiting...")
    else:
        print("\nThe following hashcat hashes were found:")
        for bitlocker_hash in HASHCAT_HASH:
            print(bitlocker_hash)
    return
# Script entry point.
if __name__ == "__main__":
    main()

75
tools/gitea2hashcat.py Executable file
View File

@ -0,0 +1,75 @@
#!/usr/bin/python3
# Converts gitea PBKDF2-HMAC-SHA256 hashes into a format hashcat can use
# written by unix-ninja
import argparse
import base64
import sys
def convert_hash(hash_string):
    """Convert a Gitea SALT+HASH pair into hashcat's PBKDF2-HMAC-SHA256 format.

    Accepts ':' or '|' as delimiter; the shorter hex blob is treated as the
    salt (on a tie, the original order is kept). Returns the formatted hash
    string, or None (after printing a warning) when the input is malformed.
    """
    normalized = hash_string.replace('|', ':')
    try:
        left, right = normalized.split(":")
    except ValueError:
        print(f"[-] Invalid input format: {normalized}")
        return None
    try:
        left_bytes = bytes.fromhex(left)
        right_bytes = bytes.fromhex(right)
    except ValueError:
        print(f"[-] Invalid hex input: {normalized}")
        return None
    # Smaller blob is the salt; equal lengths keep the input order.
    if len(left_bytes) > len(right_bytes):
        salt_bytes, hash_bytes = right_bytes, left_bytes
    else:
        salt_bytes, hash_bytes = left_bytes, right_bytes
    salt_b64 = base64.b64encode(salt_bytes).decode('utf-8')
    hash_b64 = base64.b64encode(hash_bytes).decode('utf-8')
    return f"sha256:50000:{salt_b64}:{hash_b64}"
def main():
    # Read SALT+HASH pairs from argv or stdin and print hashcat-compatible lines.
    parser = argparse.ArgumentParser(description="Convert Gitea SALT+HASH strings to a hashcat-compatible format.",
        formatter_class=argparse.RawTextHelpFormatter,
        epilog="""Example:
gitea2hashcat.py <salt1>:<hash1> <hash2>|<salt2> ... or pipe input from stdin.
You can also dump output straight from sqlite into this script:
sqlite3 gitea.db 'select salt,passwd from user;' | gitea2hashcat.py""")
    parser.add_argument('hashes', nargs='*', help='SALT+HASH strings to convert')
    args = parser.parse_args()
    # Tell the user which hashcat mode consumes the emitted hashes.
    print("[+] Run the output hashes through hashcat mode 10900 (PBKDF2-HMAC-SHA256)")
    print()
    if args.hashes:
        # Process command-line arguments
        for hash_string in args.hashes:
            converted_hash = convert_hash(hash_string)
            if converted_hash:
                print(converted_hash)
    else:
        # Process input from stdin
        for line in sys.stdin:
            hash_string = line.strip() # Remove leading/trailing whitespace
            converted_hash = convert_hash(hash_string)
            if converted_hash:
                print(converted_hash)
# Script entry point.
if __name__ == "__main__":
    main()

Some files were not shown because too many files have changed in this diff Show More