Metal: added support for vectors up to 4

Branch: pull/3166/head
Author: Gabriele Gristina, 2 years ago
Parent: 4392da95c9
Commit: b3d3b31c3e
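The change repeated throughout this commit is wrapping scalar constants in make_u32x / make_u64x wherever they are mixed with u32x / u64x vector operands. Outside Metal the macro expands to a C-style vector cast such as (u32x); Metal Shading Language wants constructor syntax for its vector types, so the macro is redefined there to a plain constructor call (see the inc_types.h hunk below). A minimal sketch of the mechanism, assuming VECT_SIZE == 4 so that u32x is a four-lane vector (uint4 under Metal):

// Sketch only, mirroring the inc_types.h hunk below; not a literal excerpt.
#ifdef IS_METAL
#define make_u32x u32x        // make_u32x (K) -> u32x (K), constructor call
#else
#define make_u32x (u32x)      // make_u32x (K) -> (u32x) (K), vector cast
#endif

u32x a;                       // four lanes when VECT_SIZE == 4
a += make_u32x (0x67452301);  // the constant is broadcast to every lane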

@ -601,9 +601,9 @@ DECLSPEC void blake2b_transform_vector (PRIVATE_AS u64x *h, PRIVATE_AS const u64
v[ 9] = BLAKE2B_IV_01;
v[10] = BLAKE2B_IV_02;
v[11] = BLAKE2B_IV_03;
v[12] = BLAKE2B_IV_04 ^ t0;
v[12] = make_u64x (BLAKE2B_IV_04) ^ t0;
v[13] = BLAKE2B_IV_05; // ^ t1;
v[14] = BLAKE2B_IV_06 ^ f0;
v[14] = make_u64x (BLAKE2B_IV_06) ^ f0;
v[15] = BLAKE2B_IV_07; // ^ f1;
BLAKE2B_ROUND_VECTOR ( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
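Note that only the lanes mixing a u64x value (t0, f0) into a scalar IV constant get the wrapper; plain scalar-to-vector assignments such as v[13] = BLAKE2B_IV_05 broadcast fine on all targets and are left untouched. A sketch of the distinction, assuming u64x is a vector type:

u64x v13 = BLAKE2B_IV_05;                  // assignment broadcasts; fine as-is
u64x v12 = make_u64x (BLAKE2B_IV_04) ^ t0; // mixed scalar ^ vector expression
                                           // needs the wrapper under Metal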

@ -31,7 +31,7 @@
#define MD4_STEP(f,a,b,c,d,x,K,s) \
{ \
a += K; \
a += make_u32x (K); \
a = hc_add3 (a, x, f (b, c, d)); \
a = hc_rotl32 (a, s); \
}

@ -36,7 +36,7 @@
#define MD5_STEP(f,a,b,c,d,x,K,s) \
{ \
a += K; \
a += make_u32x (K); \
a = hc_add3 (a, x, f (b, c, d)); \
a = hc_rotl32 (a, s); \
a += b; \

@ -32,7 +32,7 @@
#define RIPEMD160_STEP(f,a,b,c,d,e,x,K,s) \
{ \
a += K; \
a += make_u32x (K); \
a += x; \
a += f (b, c, d); \
a = hc_rotl32 (a, s); \
@ -54,7 +54,7 @@
#define RIPEMD160_STEP_WORKAROUND_BUG(f,a,b,c,d,e,x,K,s) \
{ \
a += K; \
a += make_u32x (K); \
a += x; \
a += f (b, c, d); \
a = ROTATE_LEFT_WORKAROUND_BUG (a, s); \

@ -28,7 +28,7 @@
#define SHA1_STEP(f,a,b,c,d,e,x) \
{ \
e += K; \
e += make_u32x (K); \
e = hc_add3 (e, x, f (b, c, d)); \
e += hc_rotl32 (a, 5u); \
b = hc_rotl32 (b, 30u); \

@ -41,7 +41,7 @@
#define SHA224_STEP(F0,F1,a,b,c,d,e,f,g,h,x,K) \
{ \
h = hc_add3 (h, K, x); \
h = hc_add3 (h, make_u32x (K), x); \
h = hc_add3 (h, SHA224_S3 (e), F1 (e,f,g)); \
d += h; \
h = hc_add3 (h, SHA224_S2 (a), F0 (a,b,c)); \

@ -41,7 +41,7 @@
#define SHA256_STEP(F0,F1,a,b,c,d,e,f,g,h,x,K) \
{ \
h = hc_add3 (h, K, x); \
h = hc_add3 (h, make_u32x (K), x); \
h = hc_add3 (h, SHA256_S3 (e), F1 (e,f,g)); \
d += h; \
h = hc_add3 (h, SHA256_S2 (a), F0 (a,b,c)); \

@ -44,7 +44,7 @@
#define SHA384_STEP(F0,F1,a,b,c,d,e,f,g,h,x,K) \
{ \
h += K; \
h += make_u64x (K); \
h += x; \
h += SHA384_S1 (e); \
h += F0 (e, f, g); \

@ -44,7 +44,7 @@
#define SHA512_STEP(F0,F1,a,b,c,d,e,f,g,h,x,K) \
{ \
h += K; \
h += make_u64x (K); \
h += x; \
h += SHA512_S1 (e); \
h += F0 (e, f, g); \

@ -326,46 +326,6 @@ DECLSPEC u32 hc_atomic_or (volatile GLOBAL_AS u32 *p, volatile const u32 val)
return atomic_fetch_or_explicit (pd, val, memory_order_relaxed);
}
DECLSPEC u32x rotl32 (const u32x a, const int n)
{
return ((a << n) | ((a >> (32 - n))));
}
DECLSPEC u32x rotr32 (const u32x a, const int n)
{
return ((a >> n) | ((a << (32 - n))));
}
DECLSPEC u32 rotl32_S (const u32 a, const int n)
{
return ((a << n) | ((a >> (32 - n))));
}
DECLSPEC u32 rotr32_S (const u32 a, const int n)
{
return ((a >> n) | ((a << (32 - n))));
}
DECLSPEC u64x rotl64 (const u64x a, const int n)
{
return ((a << n) | ((a >> (64 - n))));
}
DECLSPEC u64x rotr64 (const u64x a, const int n)
{
return ((a >> n) | ((a << (64 - n))));
}
DECLSPEC u64 rotl64_S (const u64 a, const int n)
{
return ((a << n) | ((a >> (64 - n))));
}
DECLSPEC u64 rotr64_S (const u64 a, const int n)
{
return ((a >> n) | ((a << (64 - n))));
}
#define FIXED_THREAD_COUNT(n)
#define SYNC_THREADS() threadgroup_barrier (mem_flags::mem_threadgroup)
#endif // IS_METAL
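Per the hunk header (-326,46 +326,6), the Metal-only rotate helpers above are removed; with USE_ROTATE now defined for Metal (see the inc_vendor.h hunk further down), rotations presumably flow through the shared hc_rotl32 / hc_rotr32 path and the rotate () builtin rather than these open-coded shifts. Both forms are equivalent; a scalar sketch, assuming 0 < n < 32:

// Sketch: the shift-or form the removed helpers used.
DECLSPEC u32 rotl32_sketch (const u32 a, const int n)
{
  return (a << n) | (a >> (32 - n)); // same result as rotate (a, (u32) n)
}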

@ -17,11 +17,11 @@
{ \
if (((h0) == search[0]) && ((h1) == search[1]) && ((h2) == search[2]) && ((h3) == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos, 0, 0); \
} \
} \
}
@ -37,13 +37,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp0, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos, 0, 0); \
} \
@ -66,9 +66,9 @@
{ \
if (((h0).s0 == search[0]) && ((h1).s0 == search[1]) && ((h2).s0 == search[2]) && ((h3).s0 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 0) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 0, 0, 0); \
} \
@ -76,9 +76,9 @@
\
if (((h0).s1 == search[0]) && ((h1).s1 == search[1]) && ((h2).s1 == search[2]) && ((h3).s1 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 1) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 1, 0, 0); \
} \
@ -97,13 +97,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp0, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 0) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 0, 0, 0); \
} \
@ -117,13 +117,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp1, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 1) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 1, 0, 0); \
} \
@ -144,9 +144,9 @@
{ \
if (((h0).s0 == search[0]) && ((h1).s0 == search[1]) && ((h2).s0 == search[2]) && ((h3).s0 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 0) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 0, 0, 0); \
} \
@ -154,9 +154,9 @@
\
if (((h0).s1 == search[0]) && ((h1).s1 == search[1]) && ((h2).s1 == search[2]) && ((h3).s1 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 1) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 1, 0, 0); \
} \
@ -164,9 +164,9 @@
\
if (((h0).s2 == search[0]) && ((h1).s2 == search[1]) && ((h2).s2 == search[2]) && ((h3).s2 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 2) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 2, 0, 0); \
} \
@ -174,9 +174,9 @@
\
if (((h0).s3 == search[0]) && ((h1).s3 == search[1]) && ((h2).s3 == search[2]) && ((h3).s3 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 3) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 3, 0, 0); \
} \
@ -197,13 +197,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp0, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 0) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 0, 0, 0); \
} \
@ -217,13 +217,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp1, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 1) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 1, 0, 0); \
} \
@ -237,13 +237,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp2, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 2) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 2, 0, 0); \
} \
@ -257,13 +257,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp3, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 3) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 3, 0, 0); \
} \
@ -284,9 +284,9 @@
{ \
if (((h0).s0 == search[0]) && ((h1).s0 == search[1]) && ((h2).s0 == search[2]) && ((h3).s0 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 0) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 0, 0, 0); \
} \
@ -294,9 +294,9 @@
\
if (((h0).s1 == search[0]) && ((h1).s1 == search[1]) && ((h2).s1 == search[2]) && ((h3).s1 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 1) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 1, 0, 0); \
} \
@ -304,9 +304,9 @@
\
if (((h0).s2 == search[0]) && ((h1).s2 == search[1]) && ((h2).s2 == search[2]) && ((h3).s2 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 2) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 2, 0, 0); \
} \
@ -314,18 +314,18 @@
\
if (((h0).s3 == search[0]) && ((h1).s3 == search[1]) && ((h2).s3 == search[2]) && ((h3).s3 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 3) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 3, 0, 0); \
} \
} \
if (((h0).s4 == search[0]) && ((h1).s4 == search[1]) && ((h2).s4 == search[2]) && ((h3).s4 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 4) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 4, 0, 0); \
} \
@ -333,9 +333,9 @@
\
if (((h0).s5 == search[0]) && ((h1).s5 == search[1]) && ((h2).s5 == search[2]) && ((h3).s5 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 5) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 5, 0, 0); \
} \
@ -343,9 +343,9 @@
\
if (((h0).s6 == search[0]) && ((h1).s6 == search[1]) && ((h2).s6 == search[2]) && ((h3).s6 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 6) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 6, 0, 0); \
} \
@ -353,9 +353,9 @@
\
if (((h0).s7 == search[0]) && ((h1).s7 == search[1]) && ((h2).s7 == search[2]) && ((h3).s7 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 7) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 7, 0, 0); \
} \
@ -380,13 +380,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp0, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 0) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 0, 0, 0); \
} \
@ -400,13 +400,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp1, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 1) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 1, 0, 0); \
} \
@ -420,13 +420,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp2, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 2) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 2, 0, 0); \
} \
@ -440,13 +440,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp3, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 3) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 3, 0, 0); \
} \
@ -459,13 +459,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp4, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 4) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 4, 0, 0); \
} \
@ -479,13 +479,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp5, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 5) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 5, 0, 0); \
} \
@ -499,13 +499,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp6, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 6) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 6, 0, 0); \
} \
@ -519,13 +519,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp7, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 7) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 7, 0, 0); \
} \
@ -546,9 +546,9 @@
{ \
if (((h0).s0 == search[0]) && ((h1).s0 == search[1]) && ((h2).s0 == search[2]) && ((h3).s0 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 0) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 0, 0, 0); \
} \
@ -556,9 +556,9 @@
\
if (((h0).s1 == search[0]) && ((h1).s1 == search[1]) && ((h2).s1 == search[2]) && ((h3).s1 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 1) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 1, 0, 0); \
} \
@ -566,9 +566,9 @@
\
if (((h0).s2 == search[0]) && ((h1).s2 == search[1]) && ((h2).s2 == search[2]) && ((h3).s2 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 2) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 2, 0, 0); \
} \
@ -576,18 +576,18 @@
\
if (((h0).s3 == search[0]) && ((h1).s3 == search[1]) && ((h2).s3 == search[2]) && ((h3).s3 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 3) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 3, 0, 0); \
} \
} \
if (((h0).s4 == search[0]) && ((h1).s4 == search[1]) && ((h2).s4 == search[2]) && ((h3).s4 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 4) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 4, 0, 0); \
} \
@ -595,9 +595,9 @@
\
if (((h0).s5 == search[0]) && ((h1).s5 == search[1]) && ((h2).s5 == search[2]) && ((h3).s5 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 5) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 5, 0, 0); \
} \
@ -605,9 +605,9 @@
\
if (((h0).s6 == search[0]) && ((h1).s6 == search[1]) && ((h2).s6 == search[2]) && ((h3).s6 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 6) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 6, 0, 0); \
} \
@ -615,9 +615,9 @@
\
if (((h0).s7 == search[0]) && ((h1).s7 == search[1]) && ((h2).s7 == search[2]) && ((h3).s7 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 7) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 7, 0, 0); \
} \
@ -625,9 +625,9 @@
\
if (((h0).s8 == search[0]) && ((h1).s8 == search[1]) && ((h2).s8 == search[2]) && ((h3).s8 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 8) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 8, 0, 0); \
} \
@ -635,9 +635,9 @@
\
if (((h0).s9 == search[0]) && ((h1).s9 == search[1]) && ((h2).s9 == search[2]) && ((h3).s9 == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 9) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 9, 0, 0); \
} \
@ -645,9 +645,9 @@
\
if (((h0).sa == search[0]) && ((h1).sa == search[1]) && ((h2).sa == search[2]) && ((h3).sa == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 10) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 10, 0, 0); \
} \
@ -655,9 +655,9 @@
\
if (((h0).sb == search[0]) && ((h1).sb == search[1]) && ((h2).sb == search[2]) && ((h3).sb == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 11) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 11, 0, 0); \
} \
@ -665,9 +665,9 @@
\
if (((h0).sc == search[0]) && ((h1).sc == search[1]) && ((h2).sc == search[2]) && ((h3).sc == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 12) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 12, 0, 0); \
} \
@ -675,9 +675,9 @@
\
if (((h0).sd == search[0]) && ((h1).sd == search[1]) && ((h2).sd == search[2]) && ((h3).sd == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 13) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 13, 0, 0); \
} \
@ -685,9 +685,9 @@
\
if (((h0).se == search[0]) && ((h1).se == search[1]) && ((h2).se == search[2]) && ((h3).se == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 14) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 14, 0, 0); \
} \
@ -695,9 +695,9 @@
\
if (((h0).sf == search[0]) && ((h1).sf == search[1]) && ((h2).sf == search[2]) && ((h3).sf == search[3])) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + 0; \
\
if (vector_accessible (il_pos, IL_CNT, 15) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, final_hash_pos, gid, il_pos + 15, 0, 0); \
} \
@ -730,13 +730,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp00, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 0) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 0, 0, 0); \
} \
@ -750,13 +750,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp01, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 1) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 1, 0, 0); \
} \
@ -770,13 +770,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp02, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 2) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 2, 0, 0); \
} \
@ -790,13 +790,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp03, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 3) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 3, 0, 0); \
} \
@ -810,13 +810,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp04, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 4) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 4, 0, 0); \
} \
@ -830,13 +830,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp05, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 5) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 5, 0, 0); \
} \
@ -850,13 +850,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp06, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 6) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 6, 0, 0); \
} \
@ -870,13 +870,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp07, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 7) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 7, 0, 0); \
} \
@ -890,13 +890,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp08, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 8) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 8, 0, 0); \
} \
@ -910,13 +910,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp09, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 9) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 9, 0, 0); \
} \
@ -930,13 +930,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp10, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 10) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 10, 0, 0); \
} \
@ -950,13 +950,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp11, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 11) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 11, 0, 0); \
} \
@ -970,13 +970,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp12, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 12) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 12, 0, 0); \
} \
@ -990,13 +990,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp13, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 13) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 13, 0, 0); \
} \
@ -1010,13 +1010,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp14, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 14) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 14, 0, 0); \
} \
@ -1030,13 +1030,13 @@
BITMAP_SHIFT1, \
BITMAP_SHIFT2)) \
{ \
int digest_pos = find_hash (digest_tp15, DIGESTS_CNT, &digests_buf[DIGESTS_OFFSET_HOST]); \
\
if (digest_pos != -1) \
{ \
const u32 final_hash_pos = DIGESTS_OFFSET_HOST + digest_pos; \
\
if (vector_accessible (il_pos, IL_CNT, 15) && (hc_atomic_inc (&hashes_shown[final_hash_pos]) == 0)) \
{ \
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, digest_pos, final_hash_pos, gid, il_pos + 15, 0, 0); \
} \
@ -1162,21 +1162,21 @@
#elif VECT_SIZE == 2
#define VECTOR_ELEMENT(v, n) (n == 0 ? (v).s0 : (v).s1)
#elif VECT_SIZE == 4
#define VECTOR_ELEMENT(v, n) \
(n < 2 ? \
(n == 0 ? (v).s0 : (v).s1) : \
(n == 2 ? (v).s2 : (v).s3) \
)
#elif VECT_SIZE == 8
#define VECTOR_ELEMENT(v, n) \
(n < 4 ? \
(n < 2 ? \
(n == 0 ? (v).s0 : (v).s1) : \
(n == 2 ? (v).s2 : (v).s3) \
) : (n < 6 ? \
(n == 4 ? (v).s4 : (v).s5) : \
(n == 6 ? (v).s6 : (v).s7) \
) \
)
#elif VECT_SIZE == 16
#define VECTOR_ELEMENT(v, n) \

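VECTOR_ELEMENT resolves a lane index n to the matching .sN component through nested ternaries, since these dialects do not allow indexing a vector component with a variable. An illustrative use, assuming VECT_SIZE == 4 and a u32x digest word h0:

// Expands to (2 < 2 ? (2 == 0 ? (h0).s0 : (h0).s1)
//                    : (2 == 2 ? (h0).s2 : (h0).s3))
const u32 lane2 = VECTOR_ELEMENT (h0, 2);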
@ -109,6 +109,7 @@ typedef u64 u64x;
#define make_u64x (u64)
#else
#if defined IS_CUDA || defined IS_HIP
#if VECT_SIZE == 2
@ -871,10 +872,17 @@ typedef VTYPE(ushort, VECT_SIZE) u16x;
typedef VTYPE(uint, VECT_SIZE) u32x;
typedef VTYPE(ullong, VECT_SIZE) u64x;
#ifndef IS_METAL
#define make_u8x (u8x)
#define make_u16x (u16x)
#define make_u32x (u32x)
#define make_u64x (u64x)
#else
#define make_u8x u8x
#define make_u16x u16x
#define make_u32x u32x
#define make_u64x u64x
#endif
#endif
#endif
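Together with the scalar fallback earlier in the file (#define make_u64x (u64) when VECT_SIZE == 1), this lets kernel code write make_u32x (K) / make_u64x (K) unconditionally: the same token expands to a scalar cast, a vector cast, or a constructor call depending on the target. Illustrative expansions, assuming VECT_SIZE == 4 on the vector targets:

// VECT_SIZE == 1:       make_u64x (K) -> (u64) (K)    scalar cast
// OpenCL / CUDA / HIP:  make_u64x (K) -> (u64x) (K)   vector cast
// Metal:                make_u64x (K) -> u64x (K)     ulong4 constructor
const u64x v12 = make_u64x (BLAKE2B_IV_04) ^ t0;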

@ -197,4 +197,14 @@ using namespace metal;
//#define USE_SWIZZLE
#endif
#ifdef IS_METAL
#define USE_ROTATE
// Metal supports max VECT_SIZE = 4
#define s0 x
#define s1 y
#define s2 z
#define s3 w
#endif
#endif // _INC_VENDOR_H
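Metal names vector components .x / .y / .z / .w where OpenCL uses .s0-.s3, so these token defines let every existing .sN accessor compile unchanged; they are also why Metal tops out at VECT_SIZE = 4, since there is no fifth named component to map .s4 onto. Rewriting the bare tokens s0-s3 is a blunt instrument, but presumably safe here because the kernels only use those names as component accessors. Illustrative, under Metal:

u32x v = make_u32x (0); // uint4
v.s0 = 1;               // preprocesses to v.x = 1
v.s3 = 4;               // preprocesses to v.w = 4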

@ -184,11 +184,11 @@ KERNEL_FQ void m00300_m04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
w0_t = a;
w1_t = b;
@ -507,11 +507,11 @@ KERNEL_FQ void m00300_s04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
w0_t = a;
w1_t = b;

@ -240,11 +240,11 @@ KERNEL_FQ void m00300_m04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
w0_t = a;
w1_t = b;
@ -621,11 +621,11 @@ KERNEL_FQ void m00300_s04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
w0_t = a;
w1_t = b;

@ -251,11 +251,11 @@ DECLSPEC void m00300m (PRIVATE_AS u32 *w, const u32 pw_len, KERN_ATTR_FUNC_VECTO
SHA1_STEP (SHA1_F1 , c, d, e, a, b, (c_78s ^ w0s07 ^ w0s08 ^ w0s15 ^ w0s18 ^ w0s20));
SHA1_STEP (SHA1_F1 , b, c, d, e, a, (c_79s ^ w0s08 ^ w0s22));
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
u32x w0_t = a;
u32x w1_t = b;
@ -635,11 +635,11 @@ DECLSPEC void m00300s (PRIVATE_AS u32 *w, const u32 pw_len, KERN_ATTR_FUNC_VECTO
SHA1_STEP (SHA1_F1 , c, d, e, a, b, (c_78s ^ w0s07 ^ w0s08 ^ w0s15 ^ w0s18 ^ w0s20));
SHA1_STEP (SHA1_F1 , b, c, d, e, a, (c_79s ^ w0s08 ^ w0s22));
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
u32x w0_t = a;
u32x w1_t = b;

@ -145,10 +145,10 @@ KERNEL_FQ void m01100_m04 (KERN_ATTR_RULES ())
MD4_STEP (MD4_H , c, d, a, b, w1[3], MD4C02, MD4S22);
MD4_STEP (MD4_H , b, c, d, a, w3[3], MD4C02, MD4S23);
a += MD4M_A;
b += MD4M_B;
c += MD4M_C;
d += MD4M_D;
a += make_u32x (MD4M_A);
b += make_u32x (MD4M_B);
c += make_u32x (MD4M_C);
d += make_u32x (MD4M_D);
w0[0] = a;
w0[1] = b;
@ -376,10 +376,10 @@ KERNEL_FQ void m01100_s04 (KERN_ATTR_RULES ())
MD4_STEP (MD4_H , c, d, a, b, w1[3], MD4C02, MD4S22);
MD4_STEP (MD4_H , b, c, d, a, w3[3], MD4C02, MD4S23);
a += MD4M_A;
b += MD4M_B;
c += MD4M_C;
d += MD4M_D;
a += make_u32x (MD4M_A);
b += make_u32x (MD4M_B);
c += make_u32x (MD4M_C);
d += make_u32x (MD4M_D);
w0[0] = a;
w0[1] = b;

@ -205,10 +205,10 @@ KERNEL_FQ void m01100_m04 (KERN_ATTR_BASIC ())
MD4_STEP (MD4_H , c, d, a, b, w1[3], MD4C02, MD4S22);
MD4_STEP (MD4_H , b, c, d, a, w3[3], MD4C02, MD4S23);
a += MD4M_A;
b += MD4M_B;
c += MD4M_C;
d += MD4M_D;
a += make_u32x (MD4M_A);
b += make_u32x (MD4M_B);
c += make_u32x (MD4M_C);
d += make_u32x (MD4M_D);
w0[0] = a;
w0[1] = b;
@ -498,10 +498,10 @@ KERNEL_FQ void m01100_s04 (KERN_ATTR_BASIC ())
MD4_STEP (MD4_H , c, d, a, b, w1[3], MD4C02, MD4S22);
MD4_STEP (MD4_H , b, c, d, a, w3[3], MD4C02, MD4S23);
a += MD4M_A;
b += MD4M_B;
c += MD4M_C;
d += MD4M_D;
a += make_u32x (MD4M_A);
b += make_u32x (MD4M_B);
c += make_u32x (MD4M_C);
d += make_u32x (MD4M_D);
w0[0] = a;
w0[1] = b;

@ -159,10 +159,10 @@ DECLSPEC void m01100m (LOCAL_AS salt_t *s_salt_buf, PRIVATE_AS u32 *w, const u32
MD4_STEP0(MD4_H , c, d, a, b, H_w7c02, MD4S22);
MD4_STEP0(MD4_H , b, c, d, a, H_wfc02, MD4S23);
a += MD4M_A;
b += MD4M_B;
c += MD4M_C;
d += MD4M_D;
a += make_u32x (MD4M_A);
b += make_u32x (MD4M_B);
c += make_u32x (MD4M_C);
d += make_u32x (MD4M_D);
u32x w0_t[4];
u32x w1_t[4];
@ -403,10 +403,10 @@ DECLSPEC void m01100s (LOCAL_AS salt_t *s_salt_buf, PRIVATE_AS u32 *w, const u32
MD4_STEP0(MD4_H , c, d, a, b, H_w7c02, MD4S22);
MD4_STEP0(MD4_H , b, c, d, a, H_wfc02, MD4S23);
a += MD4M_A;
b += MD4M_B;
c += MD4M_C;
d += MD4M_D;
a += make_u32x (MD4M_A);
b += make_u32x (MD4M_B);
c += make_u32x (MD4M_C);
d += make_u32x (MD4M_D);
u32x w0_t[4];
u32x w1_t[4];

@ -196,10 +196,10 @@ KERNEL_FQ void m02610_m04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -492,10 +492,10 @@ KERNEL_FQ void m02610_s04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;

@ -253,10 +253,10 @@ KERNEL_FQ void m02610_m04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -608,10 +608,10 @@ KERNEL_FQ void m02610_s04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;

@ -173,10 +173,10 @@ DECLSPEC void m02610m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -440,10 +440,10 @@ DECLSPEC void m02610s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;

@ -196,10 +196,10 @@ KERNEL_FQ void m02710_m04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -300,10 +300,10 @@ KERNEL_FQ void m02710_m04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
const u32x r_a = a + MD5M_A;
const u32x r_b = b + MD5M_B;
const u32x r_c = c + MD5M_C;
const u32x r_d = d + MD5M_D;
const u32x r_a = a + make_u32x (MD5M_A);
const u32x r_b = b + make_u32x (MD5M_B);
const u32x r_c = c + make_u32x (MD5M_C);
const u32x r_d = d + make_u32x (MD5M_D);
const u32x r_14 = (32 + salt_len) * 8;
@ -577,10 +577,10 @@ KERNEL_FQ void m02710_s04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -681,10 +681,10 @@ KERNEL_FQ void m02710_s04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
const u32x r_a = a + MD5M_A;
const u32x r_b = b + MD5M_B;
const u32x r_c = c + MD5M_C;
const u32x r_d = d + MD5M_D;
const u32x r_a = a + make_u32x (MD5M_A);
const u32x r_b = b + make_u32x (MD5M_B);
const u32x r_c = c + make_u32x (MD5M_C);
const u32x r_d = d + make_u32x (MD5M_D);
const u32x r_14 = (32 + salt_len) * 8;

@ -253,10 +253,10 @@ KERNEL_FQ void m02710_m04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -357,10 +357,10 @@ KERNEL_FQ void m02710_m04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
const u32x r_a = a + MD5M_A;
const u32x r_b = b + MD5M_B;
const u32x r_c = c + MD5M_C;
const u32x r_d = d + MD5M_D;
const u32x r_a = a + make_u32x (MD5M_A);
const u32x r_b = b + make_u32x (MD5M_B);
const u32x r_c = c + make_u32x (MD5M_C);
const u32x r_d = d + make_u32x (MD5M_D);
const u32x r_14 = (32 + salt_len) * 8;
@ -693,10 +693,10 @@ KERNEL_FQ void m02710_s04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -797,10 +797,10 @@ KERNEL_FQ void m02710_s04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
const u32x r_a = a + MD5M_A;
const u32x r_b = b + MD5M_B;
const u32x r_c = c + MD5M_C;
const u32x r_d = d + MD5M_D;
const u32x r_a = a + make_u32x (MD5M_A);
const u32x r_b = b + make_u32x (MD5M_B);
const u32x r_c = c + make_u32x (MD5M_C);
const u32x r_d = d + make_u32x (MD5M_D);
const u32x r_14 = (32 + salt_len) * 8;

@ -173,10 +173,10 @@ DECLSPEC void m02710m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -277,10 +277,10 @@ DECLSPEC void m02710m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
const u32x r_a = a + MD5M_A;
const u32x r_b = b + MD5M_B;
const u32x r_c = c + MD5M_C;
const u32x r_d = d + MD5M_D;
const u32x r_a = a + make_u32x (MD5M_A);
const u32x r_b = b + make_u32x (MD5M_B);
const u32x r_c = c + make_u32x (MD5M_C);
const u32x r_d = d + make_u32x (MD5M_D);
const u32x r_14 = (32 + salt_len) * 8;
@ -525,10 +525,10 @@ DECLSPEC void m02710s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -629,10 +629,10 @@ DECLSPEC void m02710s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
const u32x r_a = a + MD5M_A;
const u32x r_b = b + MD5M_B;
const u32x r_c = c + MD5M_C;
const u32x r_d = d + MD5M_D;
const u32x r_a = a + make_u32x (MD5M_A);
const u32x r_b = b + make_u32x (MD5M_B);
const u32x r_c = c + make_u32x (MD5M_C);
const u32x r_d = d + make_u32x (MD5M_D);
const u32x r_14 = (32 + salt_len) * 8;

@ -194,10 +194,10 @@ KERNEL_FQ void m02810_m04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = salt_buf0[0];
w0[1] = salt_buf0[1];
@ -298,10 +298,10 @@ KERNEL_FQ void m02810_m04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
const u32x r_a = a + MD5M_A;
const u32x r_b = b + MD5M_B;
const u32x r_c = c + MD5M_C;
const u32x r_d = d + MD5M_D;
const u32x r_a = a + make_u32x (MD5M_A);
const u32x r_b = b + make_u32x (MD5M_B);
const u32x r_c = c + make_u32x (MD5M_C);
const u32x r_d = d + make_u32x (MD5M_D);
const u32x r_00 = 0x80;
const u32x r_14 = 64 * 8;
@ -574,10 +574,10 @@ KERNEL_FQ void m02810_s04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = salt_buf0[0];
w0[1] = salt_buf0[1];
@ -678,10 +678,10 @@ KERNEL_FQ void m02810_s04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
const u32x r_a = a + MD5M_A;
const u32x r_b = b + MD5M_B;
const u32x r_c = c + MD5M_C;
const u32x r_d = d + MD5M_D;
const u32x r_a = a + make_u32x (MD5M_A);
const u32x r_b = b + make_u32x (MD5M_B);
const u32x r_c = c + make_u32x (MD5M_C);
const u32x r_d = d + make_u32x (MD5M_D);
const u32x r_00 = 0x80;
const u32x r_14 = 64 * 8;

@ -251,10 +251,10 @@ KERNEL_FQ void m02810_m04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = salt_buf0[0];
w0[1] = salt_buf0[1];
@ -355,10 +355,10 @@ KERNEL_FQ void m02810_m04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
const u32x r_a = a + MD5M_A;
const u32x r_b = b + MD5M_B;
const u32x r_c = c + MD5M_C;
const u32x r_d = d + MD5M_D;
const u32x r_a = a + make_u32x (MD5M_A);
const u32x r_b = b + make_u32x (MD5M_B);
const u32x r_c = c + make_u32x (MD5M_C);
const u32x r_d = d + make_u32x (MD5M_D);
const u32x r_00 = 0x80;
const u32x r_14 = 64 * 8;
@ -690,10 +690,10 @@ KERNEL_FQ void m02810_s04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = salt_buf0[0];
w0[1] = salt_buf0[1];
@ -794,10 +794,10 @@ KERNEL_FQ void m02810_s04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
const u32x r_a = a + MD5M_A;
const u32x r_b = b + MD5M_B;
const u32x r_c = c + MD5M_C;
const u32x r_d = d + MD5M_D;
const u32x r_a = a + make_u32x (MD5M_A);
const u32x r_b = b + make_u32x (MD5M_B);
const u32x r_c = c + make_u32x (MD5M_C);
const u32x r_d = d + make_u32x (MD5M_D);
const u32x r_00 = 0x80;
const u32x r_14 = 64 * 8;

@ -171,10 +171,10 @@ DECLSPEC void m02810m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = salt_buf0[0];
w0_t[1] = salt_buf0[1];
@ -275,10 +275,10 @@ DECLSPEC void m02810m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
const u32x r_a = a + MD5M_A;
const u32x r_b = b + MD5M_B;
const u32x r_c = c + MD5M_C;
const u32x r_d = d + MD5M_D;
const u32x r_a = a + make_u32x (MD5M_A);
const u32x r_b = b + make_u32x (MD5M_B);
const u32x r_c = c + make_u32x (MD5M_C);
const u32x r_d = d + make_u32x (MD5M_D);
const u32x r_00 = 0x80;
const u32x r_14 = 64 * 8;
@ -522,10 +522,10 @@ DECLSPEC void m02810s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = salt_buf0[0];
w0_t[1] = salt_buf0[1];
@ -626,10 +626,10 @@ DECLSPEC void m02810s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
const u32x r_a = a + MD5M_A;
const u32x r_b = b + MD5M_B;
const u32x r_c = c + MD5M_C;
const u32x r_d = d + MD5M_D;
const u32x r_a = a + make_u32x (MD5M_A);
const u32x r_b = b + make_u32x (MD5M_B);
const u32x r_c = c + make_u32x (MD5M_C);
const u32x r_d = d + make_u32x (MD5M_D);
const u32x r_00 = 0x80;
const u32x r_14 = 64 * 8;

@ -196,10 +196,10 @@ KERNEL_FQ void m03500_m04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -302,10 +302,10 @@ KERNEL_FQ void m03500_m04 (KERN_ATTR_RULES ())
// STEP 3
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -598,10 +598,10 @@ KERNEL_FQ void m03500_s04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -704,10 +704,10 @@ KERNEL_FQ void m03500_s04 (KERN_ATTR_RULES ())
// STEP 3
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;

@ -253,10 +253,10 @@ KERNEL_FQ void m03500_m04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -359,10 +359,10 @@ KERNEL_FQ void m03500_m04 (KERN_ATTR_BASIC ())
// STEP 3
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -714,10 +714,10 @@ KERNEL_FQ void m03500_s04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -820,10 +820,10 @@ KERNEL_FQ void m03500_s04 (KERN_ATTR_BASIC ())
// STEP 3
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;

@ -173,10 +173,10 @@ DECLSPEC void m03500m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -279,10 +279,10 @@ DECLSPEC void m03500m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
// STEP 3
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -546,10 +546,10 @@ DECLSPEC void m03500s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -652,10 +652,10 @@ DECLSPEC void m03500s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
// STEP 3
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;

@ -198,10 +198,10 @@ KERNEL_FQ void m03710_m04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -525,10 +525,10 @@ KERNEL_FQ void m03710_s04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;

@ -255,10 +255,10 @@ KERNEL_FQ void m03710_m04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -641,10 +641,10 @@ KERNEL_FQ void m03710_s04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;

@ -175,10 +175,10 @@ DECLSPEC void m03710m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -473,10 +473,10 @@ DECLSPEC void m03710s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;

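The w0[0] = uint_to_hex_lower8 (...) context lines recurring throughout these hunks are unchanged by the patch: they re-encode each byte of the digest just computed as two lowercase ASCII hex characters and pack four characters per 32-bit word, because these modes hash the hex string of the inner digest a second time. A scalar model of that packing follows (hashcat's kernels use a shared lookup table; the helper name and table below are illustrative, not the real implementation). The uint_to_hex_upper8 variant in the m04310 hunks differs only in using an uppercase table.

#include <stdio.h>

// two ASCII hex digits of one byte, packed into bits 0..15
static unsigned int uint_to_hex_lower8_demo (const unsigned int b)
{
  const char *tbl = "0123456789abcdef";

  return (unsigned int) tbl[(b >> 4) & 15] << 0
       | (unsigned int) tbl[(b >> 0) & 15] << 8;
}

int main (void)
{
  const unsigned int a = 0x12345678; // stand-in digest word

  const unsigned int w0 = uint_to_hex_lower8_demo ((a >> 0) & 255) <<  0
                        | uint_to_hex_lower8_demo ((a >> 8) & 255) << 16;

  printf ("%08x\n", w0); // 0x36353837 == "7856" in little-endian bytes

  return 0;
}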
@ -194,10 +194,10 @@ KERNEL_FQ void m03910_m04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -298,10 +298,10 @@ KERNEL_FQ void m03910_m04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
const u32x r_a = a + MD5M_A;
const u32x r_b = b + MD5M_B;
const u32x r_c = c + MD5M_C;
const u32x r_d = d + MD5M_D;
const u32x r_a = a + make_u32x (MD5M_A);
const u32x r_b = b + make_u32x (MD5M_B);
const u32x r_c = c + make_u32x (MD5M_C);
const u32x r_d = d + make_u32x (MD5M_D);
const u32x r_00 = 0x80;
const u32x r_14 = 64 * 8;
@ -574,10 +574,10 @@ KERNEL_FQ void m03910_s04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -678,10 +678,10 @@ KERNEL_FQ void m03910_s04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
const u32x r_a = a + MD5M_A;
const u32x r_b = b + MD5M_B;
const u32x r_c = c + MD5M_C;
const u32x r_d = d + MD5M_D;
const u32x r_a = a + make_u32x (MD5M_A);
const u32x r_b = b + make_u32x (MD5M_B);
const u32x r_c = c + make_u32x (MD5M_C);
const u32x r_d = d + make_u32x (MD5M_D);
const u32x r_00 = 0x80;
const u32x r_14 = 64 * 8;

@ -251,10 +251,10 @@ KERNEL_FQ void m03910_m04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -355,10 +355,10 @@ KERNEL_FQ void m03910_m04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
const u32x r_a = a + MD5M_A;
const u32x r_b = b + MD5M_B;
const u32x r_c = c + MD5M_C;
const u32x r_d = d + MD5M_D;
const u32x r_a = a + make_u32x (MD5M_A);
const u32x r_b = b + make_u32x (MD5M_B);
const u32x r_c = c + make_u32x (MD5M_C);
const u32x r_d = d + make_u32x (MD5M_D);
const u32x r_00 = 0x80;
const u32x r_14 = 64 * 8;
@ -690,10 +690,10 @@ KERNEL_FQ void m03910_s04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -794,10 +794,10 @@ KERNEL_FQ void m03910_s04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
const u32x r_a = a + MD5M_A;
const u32x r_b = b + MD5M_B;
const u32x r_c = c + MD5M_C;
const u32x r_d = d + MD5M_D;
const u32x r_a = a + make_u32x (MD5M_A);
const u32x r_b = b + make_u32x (MD5M_B);
const u32x r_c = c + make_u32x (MD5M_C);
const u32x r_d = d + make_u32x (MD5M_D);
const u32x r_00 = 0x80;
const u32x r_14 = 64 * 8;

@ -171,10 +171,10 @@ DECLSPEC void m03910m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -275,10 +275,10 @@ DECLSPEC void m03910m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
const u32x r_a = a + MD5M_A;
const u32x r_b = b + MD5M_B;
const u32x r_c = c + MD5M_C;
const u32x r_d = d + MD5M_D;
const u32x r_a = a + make_u32x (MD5M_A);
const u32x r_b = b + make_u32x (MD5M_B);
const u32x r_c = c + make_u32x (MD5M_C);
const u32x r_d = d + make_u32x (MD5M_D);
const u32x r_00 = 0x80;
const u32x r_14 = 64 * 8;
@ -522,10 +522,10 @@ DECLSPEC void m03910s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -626,10 +626,10 @@ DECLSPEC void m03910s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
const u32x r_a = a + MD5M_A;
const u32x r_b = b + MD5M_B;
const u32x r_c = c + MD5M_C;
const u32x r_d = d + MD5M_D;
const u32x r_a = a + make_u32x (MD5M_A);
const u32x r_b = b + make_u32x (MD5M_B);
const u32x r_c = c + make_u32x (MD5M_C);
const u32x r_d = d + make_u32x (MD5M_D);
const u32x r_00 = 0x80;
const u32x r_14 = 64 * 8;

@ -224,10 +224,10 @@ KERNEL_FQ void m04010_m04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -577,10 +577,10 @@ KERNEL_FQ void m04010_s04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;

@ -280,10 +280,10 @@ KERNEL_FQ void m04010_m04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -691,10 +691,10 @@ KERNEL_FQ void m04010_s04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;

@ -195,10 +195,10 @@ DECLSPEC void m04010m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -513,10 +513,10 @@ DECLSPEC void m04010s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;

@ -269,10 +269,10 @@ KERNEL_FQ void m04110_m04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -667,10 +667,10 @@ KERNEL_FQ void m04110_s04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;

@ -327,10 +327,10 @@ KERNEL_FQ void m04110_m04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -785,10 +785,10 @@ KERNEL_FQ void m04110_s04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;

@ -223,10 +223,10 @@ DECLSPEC void m04110m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -569,10 +569,10 @@ DECLSPEC void m04110s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;

@ -196,10 +196,10 @@ KERNEL_FQ void m04310_m04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_upper8 ((a >> 0) & 255) << 0
| uint_to_hex_upper8 ((a >> 8) & 255) << 16;
@ -492,10 +492,10 @@ KERNEL_FQ void m04310_s04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_upper8 ((a >> 0) & 255) << 0
| uint_to_hex_upper8 ((a >> 8) & 255) << 16;

@ -253,10 +253,10 @@ KERNEL_FQ void m04310_m04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_upper8 ((a >> 0) & 255) << 0
| uint_to_hex_upper8 ((a >> 8) & 255) << 16;
@ -608,10 +608,10 @@ KERNEL_FQ void m04310_s04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_upper8 ((a >> 0) & 255) << 0
| uint_to_hex_upper8 ((a >> 8) & 255) << 16;

@ -173,10 +173,10 @@ DECLSPEC void m04310m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_upper8 ((a >> 0) & 255) << 0
| uint_to_hex_upper8 ((a >> 8) & 255) << 16;
@ -440,10 +440,10 @@ DECLSPEC void m04310s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_upper8 ((a >> 0) & 255) << 0
| uint_to_hex_upper8 ((a >> 8) & 255) << 16;

@ -214,11 +214,11 @@ KERNEL_FQ void m04400_m04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* md5
@ -536,11 +536,11 @@ KERNEL_FQ void m04400_s04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* md5

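From the m04400 hunks onward the same broadcast fix covers the SHA1-based modes, where finalization adds five constants instead of four; SHA1M_A through SHA1M_E are the standard SHA-1 initial values. A minimal scalar model of the add these hunks touch (the kernels perform the identical arithmetic independently in every vector lane; the _demo name is illustrative):

static void sha1_finalize_demo (unsigned int h[5],
                                const unsigned int a, const unsigned int b,
                                const unsigned int c, const unsigned int d,
                                const unsigned int e)
{
  h[0] = a + 0x67452301u; // SHA1M_A
  h[1] = b + 0xefcdab89u; // SHA1M_B
  h[2] = c + 0x98badcfeu; // SHA1M_C
  h[3] = d + 0x10325476u; // SHA1M_D
  h[4] = e + 0xc3d2e1f0u; // SHA1M_E
}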
@ -270,11 +270,11 @@ KERNEL_FQ void m04400_m04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* md5
@ -650,11 +650,11 @@ KERNEL_FQ void m04400_s04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* md5

@ -168,11 +168,11 @@ DECLSPEC void m04400m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* md5
@ -438,11 +438,11 @@ DECLSPEC void m04400s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* md5

@ -213,11 +213,11 @@ KERNEL_FQ void m04500_m04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* 2nd SHA1
@ -568,11 +568,11 @@ KERNEL_FQ void m04500_s04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* 2nd SHA1

@ -269,11 +269,11 @@ KERNEL_FQ void m04500_m04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* 2nd SHA1
@ -682,11 +682,11 @@ KERNEL_FQ void m04500_s04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* 2nd SHA1

@ -167,11 +167,11 @@ DECLSPEC void m04500m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* 2nd SHA1
@ -470,11 +470,11 @@ DECLSPEC void m04500s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* 2nd SHA1

@ -312,11 +312,11 @@ KERNEL_FQ void m04510_m04 (KERN_ATTR_RULES ())
SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
// Update sha1 state
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* 2nd SHA1
@ -1327,11 +1327,11 @@ KERNEL_FQ void m04510_s04 (KERN_ATTR_RULES ())
SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
// Update sha1 state
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* 2nd SHA1

@ -367,11 +367,11 @@ KERNEL_FQ void m04510_m04 (KERN_ATTR_BASIC ())
SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
// Update sha1 state
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* 2nd SHA1
@ -1439,11 +1439,11 @@ KERNEL_FQ void m04510_s04 (KERN_ATTR_BASIC ())
SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
// Update sha1 state
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* 2nd SHA1

@ -266,11 +266,11 @@ DECLSPEC void m04510m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
// Update sha1 state
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* 2nd SHA1
@ -1229,11 +1229,11 @@ DECLSPEC void m04510s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
// Update sha1 state
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* 2nd SHA1

@ -241,11 +241,11 @@ KERNEL_FQ void m04520_m04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
u32x t0[4];
u32x t1[4];
@ -424,11 +424,11 @@ KERNEL_FQ void m04520_m04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
t0[0] = c0[0];
t0[1] = c0[1];
@ -821,11 +821,11 @@ KERNEL_FQ void m04520_s04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
u32x t0[4];
u32x t1[4];
@ -1004,11 +1004,11 @@ KERNEL_FQ void m04520_s04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
t0[0] = c0[0];
t0[1] = c0[1];

@ -297,11 +297,11 @@ KERNEL_FQ void m04520_m04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
u32x t0[4];
u32x t1[4];
@ -480,11 +480,11 @@ KERNEL_FQ void m04520_m04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
t0[0] = c0[0];
t0[1] = c0[1];
@ -935,11 +935,11 @@ KERNEL_FQ void m04520_s04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
u32x t0[4];
u32x t1[4];
@ -1118,11 +1118,11 @@ KERNEL_FQ void m04520_s04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
t0[0] = c0[0];
t0[1] = c0[1];

@ -195,11 +195,11 @@ DECLSPEC void m04520m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
u32x t0[4];
u32x t1[4];
@ -378,11 +378,11 @@ DECLSPEC void m04520m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
t0[0] = c0[0];
t0[1] = c0[1];
@ -723,11 +723,11 @@ DECLSPEC void m04520s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
u32x t0[4];
u32x t1[4];
@ -906,11 +906,11 @@ DECLSPEC void m04520s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
t0[0] = c0[0];
t0[1] = c0[1];

@ -174,10 +174,10 @@ KERNEL_FQ void m04700_m04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
/*
* sha1
@ -485,10 +485,10 @@ KERNEL_FQ void m04700_s04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
/*
* sha1

@ -227,10 +227,10 @@ KERNEL_FQ void m04700_m04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
/*
* sha1
@ -593,10 +593,10 @@ KERNEL_FQ void m04700_s04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
/*
* sha1

@ -142,10 +142,10 @@ DECLSPEC void m04700m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w2_t, MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w9_t, MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
/*
* sha1
@ -415,10 +415,10 @@ DECLSPEC void m04700s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w2_t, MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w9_t, MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
/*
* sha1

@ -202,10 +202,10 @@ KERNEL_FQ void m04710_m04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
// sha1
@ -918,10 +918,10 @@ KERNEL_FQ void m04710_s04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
// sha1

@ -255,10 +255,10 @@ KERNEL_FQ void m04710_m04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
// sha1
@ -1026,10 +1026,10 @@ KERNEL_FQ void m04710_s04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
// sha1

@ -170,10 +170,10 @@ DECLSPEC void m04710m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w2_t, MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w9_t, MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
// sha1
@ -848,10 +848,10 @@ DECLSPEC void m04710s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w2_t, MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w9_t, MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
// sha1

@ -314,11 +314,11 @@ KERNEL_FQ void m05000_m04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* 2nd SHA1
@ -770,11 +770,11 @@ KERNEL_FQ void m05000_s04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* 2nd SHA1

@ -372,11 +372,11 @@ KERNEL_FQ void m05000_m04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* 2nd SHA1
@ -888,11 +888,11 @@ KERNEL_FQ void m05000_s04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* 2nd SHA1

@ -286,11 +286,11 @@ DECLSPEC void m05000m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* 2nd SHA1
@ -708,11 +708,11 @@ DECLSPEC void m05000s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* 2nd SHA1

@ -143,10 +143,10 @@ KERNEL_FQ void m05100_m04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
u32x z = 0;
@ -303,10 +303,10 @@ KERNEL_FQ void m05100_s04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
u32x z = 0;

@ -196,10 +196,10 @@ KERNEL_FQ void m05100_m04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
u32x z = 0;
@ -411,10 +411,10 @@ KERNEL_FQ void m05100_s04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
u32x z = 0;

@ -133,10 +133,10 @@ DECLSPEC void m05100m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, t0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, t2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
u32x z = 0;
@ -277,10 +277,10 @@ DECLSPEC void m05100s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, t0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, t2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
u32x z = 0;

@ -664,15 +664,15 @@ KERNEL_FQ void m05500_m04 (KERN_ATTR_RULES ())
MD4_STEP (MD4_H , a, b, c, d, w0_t[3], MD4C02, MD4S20);
MD4_STEP (MD4_H , d, a, b, c, w2_t[3], MD4C02, MD4S21);
if (MATCHES_NONE_VS (((d + MD4M_D) >> 16), s2)) continue;
if (MATCHES_NONE_VS (((d + make_u32x (MD4M_D)) >> 16), s2)) continue;
MD4_STEP (MD4_H , c, d, a, b, w1_t[3], MD4C02, MD4S22);
MD4_STEP (MD4_H , b, c, d, a, w3_t[3], MD4C02, MD4S23);
a += MD4M_A;
b += MD4M_B;
c += MD4M_C;
d += MD4M_D;
a += make_u32x (MD4M_A);
b += make_u32x (MD4M_B);
c += make_u32x (MD4M_C);
d += make_u32x (MD4M_D);
/**
* DES1
@ -893,15 +893,15 @@ KERNEL_FQ void m05500_s04 (KERN_ATTR_RULES ())
MD4_STEP (MD4_H , a, b, c, d, w0_t[3], MD4C02, MD4S20);
MD4_STEP (MD4_H , d, a, b, c, w2_t[3], MD4C02, MD4S21);
if (MATCHES_NONE_VS (((d + MD4M_D) >> 16), s2)) continue;
if (MATCHES_NONE_VS (((d + make_u32x (MD4M_D)) >> 16), s2)) continue;
MD4_STEP (MD4_H , c, d, a, b, w1_t[3], MD4C02, MD4S22);
MD4_STEP (MD4_H , b, c, d, a, w3_t[3], MD4C02, MD4S23);
a += MD4M_A;
b += MD4M_B;
c += MD4M_C;
d += MD4M_D;
a += make_u32x (MD4M_A);
b += make_u32x (MD4M_B);
c += make_u32x (MD4M_C);
d += make_u32x (MD4M_D);
/**
* DES1

@ -715,15 +715,15 @@ KERNEL_FQ void m05500_m04 (KERN_ATTR_BASIC ())
MD4_STEP (MD4_H , a, b, c, d, w0_t[3], MD4C02, MD4S20);
MD4_STEP (MD4_H , d, a, b, c, w2_t[3], MD4C02, MD4S21);
if (MATCHES_NONE_VS (((d + MD4M_D) >> 16), s2)) continue;
if (MATCHES_NONE_VS (((d + make_u32x (MD4M_D)) >> 16), s2)) continue;
MD4_STEP (MD4_H , c, d, a, b, w1_t[3], MD4C02, MD4S22);
MD4_STEP (MD4_H , b, c, d, a, w3_t[3], MD4C02, MD4S23);
a += MD4M_A;
b += MD4M_B;
c += MD4M_C;
d += MD4M_D;
a += make_u32x (MD4M_A);
b += make_u32x (MD4M_B);
c += make_u32x (MD4M_C);
d += make_u32x (MD4M_D);
/**
* DES1
@ -1002,15 +1002,15 @@ KERNEL_FQ void m05500_s04 (KERN_ATTR_BASIC ())
MD4_STEP (MD4_H , a, b, c, d, w0_t[3], MD4C02, MD4S20);
MD4_STEP (MD4_H , d, a, b, c, w2_t[3], MD4C02, MD4S21);
if (MATCHES_NONE_VS (((d + MD4M_D) >> 16), s2)) continue;
if (MATCHES_NONE_VS (((d + make_u32x (MD4M_D)) >> 16), s2)) continue;
MD4_STEP (MD4_H , c, d, a, b, w1_t[3], MD4C02, MD4S22);
MD4_STEP (MD4_H , b, c, d, a, w3_t[3], MD4C02, MD4S23);
a += MD4M_A;
b += MD4M_B;
c += MD4M_C;
d += MD4M_D;
a += make_u32x (MD4M_A);
b += make_u32x (MD4M_B);
c += make_u32x (MD4M_C);
d += make_u32x (MD4M_D);
/**
* DES1

@ -602,15 +602,15 @@ DECLSPEC void m05500m (SHM_TYPE u32 (*s_SPtrans)[64], SHM_TYPE u32 (*s_skb)[64],
MD4_STEP (MD4_H , a, b, c, d, w0_t[3], MD4C02, MD4S20);
MD4_STEP (MD4_H , d, a, b, c, w2_t[3], MD4C02, MD4S21);
if (MATCHES_NONE_VS (((d + MD4M_D) >> 16), s2)) continue;
if (MATCHES_NONE_VS (((d + make_u32x (MD4M_D)) >> 16), s2)) continue;
MD4_STEP (MD4_H , c, d, a, b, w1_t[3], MD4C02, MD4S22);
MD4_STEP (MD4_H , b, c, d, a, w3_t[3], MD4C02, MD4S23);
a += MD4M_A;
b += MD4M_B;
c += MD4M_C;
d += MD4M_D;
a += make_u32x (MD4M_A);
b += make_u32x (MD4M_B);
c += make_u32x (MD4M_C);
d += make_u32x (MD4M_D);
/**
* DES1
@ -768,15 +768,15 @@ DECLSPEC void m05500s (SHM_TYPE u32 (*s_SPtrans)[64], SHM_TYPE u32 (*s_skb)[64],
MD4_STEP (MD4_H , a, b, c, d, w0_t[3], MD4C02, MD4S20);
MD4_STEP (MD4_H , d, a, b, c, w2_t[3], MD4C02, MD4S21);
if (MATCHES_NONE_VS (((d + MD4M_D) >> 16), s2)) continue;
if (MATCHES_NONE_VS (((d + make_u32x (MD4M_D)) >> 16), s2)) continue;
MD4_STEP (MD4_H , c, d, a, b, w1_t[3], MD4C02, MD4S22);
MD4_STEP (MD4_H , b, c, d, a, w3_t[3], MD4C02, MD4S23);
a += MD4M_A;
b += MD4M_B;
c += MD4M_C;
d += MD4M_D;
a += make_u32x (MD4M_A);
b += make_u32x (MD4M_B);
c += make_u32x (MD4M_C);
d += make_u32x (MD4M_D);
/**
* DES1

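The m05500 (NetNTLMv1) hunks above are the one place in this patch where the constant is folded into a comparison rather than a plain add: two MD4 steps before the end, the kernel already knows what the final d will be, forms (d + MD4M_D) speculatively, and rejects candidates whose top 16 bits cannot match the target before doing any DES work -- so that speculative add needs the same explicit broadcast. A hedged scalar model of such an all-lanes early reject, with the 4-lane vector represented as an array (MD4M_D is the standard MD4 initial value; the _demo name is illustrative):

// Hypothetical model of a MATCHES_NONE_VS-style check: returns 1 (skip
// this candidate batch) only when no lane's top 16 bits can match.

static int matches_none_vs_demo (const unsigned int d[4],
                                 const unsigned int s2)
{
  for (int lane = 0; lane < 4; lane++)
  {
    // broadcast add of MD4M_D, then compare the high half only
    if (((d[lane] + 0x10325476u) >> 16) == s2) return 0;
  }

  return 1; // no possible match: the kernel 'continue's past the DES stage
}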
@ -138,10 +138,10 @@ KERNEL_FQ void m09900_m04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
u32x r_a = a;
u32x r_b = b;
@ -384,10 +384,10 @@ KERNEL_FQ void m09900_s04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
u32x r_a = a;
u32x r_b = b;

@ -196,10 +196,10 @@ KERNEL_FQ void m09900_m04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
u32x r_a = a;
u32x r_b = b;
@ -502,10 +502,10 @@ KERNEL_FQ void m09900_s04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
u32x r_a = a;
u32x r_b = b;

@ -205,10 +205,10 @@ DECLSPEC void m09900m (PRIVATE_AS u32 *w, const u32 pw_len, KERN_ATTR_FUNC_VECTO
MD5_STEP0(MD5_I , c, d, a, b, I_w2c3e, MD5S32);
MD5_STEP0(MD5_I , b, c, d, a, I_w9c3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
u32x r_a = a;
u32x r_b = b;
@ -495,10 +495,10 @@ DECLSPEC void m09900s (PRIVATE_AS u32 *w, const u32 pw_len, KERN_ATTR_FUNC_VECTO
MD5_STEP0(MD5_I , c, d, a, b, I_w2c3e, MD5S32);
MD5_STEP0(MD5_I , b, c, d, a, I_w9c3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
u32x r_a = a;
u32x r_b = b;

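The m09900 hunks above and the m11000 hunks below snapshot the finalized state into r_a through r_d: these modes compress more than one 64-byte block, and each block is chained by adding back the state it started from -- the fixed MD5M_* IV for the first block (the add being patched here), the saved r_a..r_d for later ones. A compact sketch of that Merkle-Damgard feedback, where compress_demo () is a do-nothing placeholder for the 64 inlined MD5_STEP rounds and not a real hashcat function:

typedef struct { unsigned int a, b, c, d; } md5_state_demo;

// stand-in for the 64 MD5_STEP rounds; a real version mixes w into s
static void compress_demo (md5_state_demo *s, const unsigned int w[16])
{
  (void) s; (void) w;
}

static void md5_two_blocks_demo (md5_state_demo *s,
                                 const unsigned int w1[16],
                                 const unsigned int w2[16])
{
  md5_state_demo r = *s;    // r_a..r_d: the state entering block 1 (the IV)

  compress_demo (s, w1);

  s->a += r.a; s->b += r.b; // feedback adds the saved entering state,
  s->c += r.c; s->d += r.d; // which only equals the IV for the first block

  r = *s;                   // save again for block 2

  compress_demo (s, w2);

  s->a += r.a; s->b += r.b;
  s->c += r.c; s->d += r.d;
}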
@ -199,10 +199,10 @@ KERNEL_FQ void m11000_m04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
u32x r_a = a;
u32x r_b = b;
@ -508,10 +508,10 @@ KERNEL_FQ void m11000_s04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
u32x r_a = a;
u32x r_b = b;

@ -255,10 +255,10 @@ KERNEL_FQ void m11000_m04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
u32x r_a = a;
u32x r_b = b;
@ -622,10 +622,10 @@ KERNEL_FQ void m11000_s04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
u32x r_a = a;
u32x r_b = b;

@ -170,10 +170,10 @@ DECLSPEC void m11000m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
u32x r_a = a;
u32x r_b = b;
@ -444,10 +444,10 @@ DECLSPEC void m11000s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
u32x r_a = a;
u32x r_b = b;

@ -240,10 +240,10 @@ KERNEL_FQ void m11100_m04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -586,10 +586,10 @@ KERNEL_FQ void m11100_s04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;

@ -298,10 +298,10 @@ KERNEL_FQ void m11100_m04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -704,10 +704,10 @@ KERNEL_FQ void m11100_s04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;

@ -214,10 +214,10 @@ DECLSPEC void m11100m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;
@ -528,10 +528,10 @@ DECLSPEC void m11100s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w0_t[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2_t[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
w0_t[0] = uint_to_hex_lower8 ((a >> 0) & 255) << 0
| uint_to_hex_lower8 ((a >> 8) & 255) << 16;

@ -196,11 +196,11 @@ KERNEL_FQ void m11200_m04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
const u32x plain_sha1_a = a + SHA1M_A;
const u32x plain_sha1_b = b + SHA1M_B;
const u32x plain_sha1_c = c + SHA1M_C;
const u32x plain_sha1_d = d + SHA1M_D;
const u32x plain_sha1_e = e + SHA1M_E;
const u32x plain_sha1_a = a + make_u32x (SHA1M_A);
const u32x plain_sha1_b = b + make_u32x (SHA1M_B);
const u32x plain_sha1_c = c + make_u32x (SHA1M_C);
const u32x plain_sha1_d = d + make_u32x (SHA1M_D);
const u32x plain_sha1_e = e + make_u32x (SHA1M_E);
/**
* sha1 (sha1 ($pass))
@ -325,11 +325,11 @@ KERNEL_FQ void m11200_m04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* sha1 ($salt . sha1 (sha1 ($pass)))
@ -454,11 +454,11 @@ KERNEL_FQ void m11200_m04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
a ^= plain_sha1_a;
b ^= plain_sha1_b;
@ -670,11 +670,11 @@ KERNEL_FQ void m11200_s04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
const u32x plain_sha1_a = a + SHA1M_A;
const u32x plain_sha1_b = b + SHA1M_B;
const u32x plain_sha1_c = c + SHA1M_C;
const u32x plain_sha1_d = d + SHA1M_D;
const u32x plain_sha1_e = e + SHA1M_E;
const u32x plain_sha1_a = a + make_u32x (SHA1M_A);
const u32x plain_sha1_b = b + make_u32x (SHA1M_B);
const u32x plain_sha1_c = c + make_u32x (SHA1M_C);
const u32x plain_sha1_d = d + make_u32x (SHA1M_D);
const u32x plain_sha1_e = e + make_u32x (SHA1M_E);
/**
* sha1 (sha1 ($pass))
@ -799,11 +799,11 @@ KERNEL_FQ void m11200_s04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* sha1 ($salt . sha1 (sha1 ($pass)))
@ -928,11 +928,11 @@ KERNEL_FQ void m11200_s04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
a ^= plain_sha1_a;
b ^= plain_sha1_b;

@ -252,11 +252,11 @@ KERNEL_FQ void m11200_m04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
const u32x plain_sha1_a = a + SHA1M_A;
const u32x plain_sha1_b = b + SHA1M_B;
const u32x plain_sha1_c = c + SHA1M_C;
const u32x plain_sha1_d = d + SHA1M_D;
const u32x plain_sha1_e = e + SHA1M_E;
const u32x plain_sha1_a = a + make_u32x (SHA1M_A);
const u32x plain_sha1_b = b + make_u32x (SHA1M_B);
const u32x plain_sha1_c = c + make_u32x (SHA1M_C);
const u32x plain_sha1_d = d + make_u32x (SHA1M_D);
const u32x plain_sha1_e = e + make_u32x (SHA1M_E);
/**
* sha1 (sha1 ($pass))
@ -381,11 +381,11 @@ KERNEL_FQ void m11200_m04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* sha1 ($salt . sha1 (sha1 ($pass)))
@ -510,11 +510,11 @@ KERNEL_FQ void m11200_m04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
a ^= plain_sha1_a;
b ^= plain_sha1_b;
@ -784,11 +784,11 @@ KERNEL_FQ void m11200_s04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
const u32x plain_sha1_a = a + SHA1M_A;
const u32x plain_sha1_b = b + SHA1M_B;
const u32x plain_sha1_c = c + SHA1M_C;
const u32x plain_sha1_d = d + SHA1M_D;
const u32x plain_sha1_e = e + SHA1M_E;
const u32x plain_sha1_a = a + make_u32x (SHA1M_A);
const u32x plain_sha1_b = b + make_u32x (SHA1M_B);
const u32x plain_sha1_c = c + make_u32x (SHA1M_C);
const u32x plain_sha1_d = d + make_u32x (SHA1M_D);
const u32x plain_sha1_e = e + make_u32x (SHA1M_E);
/**
* sha1 (sha1 ($pass))
@ -913,11 +913,11 @@ KERNEL_FQ void m11200_s04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* sha1 ($salt . sha1 (sha1 ($pass)))
@ -1042,11 +1042,11 @@ KERNEL_FQ void m11200_s04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
a ^= plain_sha1_a;
b ^= plain_sha1_b;

@ -167,11 +167,11 @@ DECLSPEC void m11200m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
const u32x plain_sha1_a = a + SHA1M_A;
const u32x plain_sha1_b = b + SHA1M_B;
const u32x plain_sha1_c = c + SHA1M_C;
const u32x plain_sha1_d = d + SHA1M_D;
const u32x plain_sha1_e = e + SHA1M_E;
const u32x plain_sha1_a = a + make_u32x (SHA1M_A);
const u32x plain_sha1_b = b + make_u32x (SHA1M_B);
const u32x plain_sha1_c = c + make_u32x (SHA1M_C);
const u32x plain_sha1_d = d + make_u32x (SHA1M_D);
const u32x plain_sha1_e = e + make_u32x (SHA1M_E);
/**
* sha1 (sha1 ($pass))
@ -296,11 +296,11 @@ DECLSPEC void m11200m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* sha1 ($salt . sha1 (sha1 ($pass)))
@ -425,11 +425,11 @@ DECLSPEC void m11200m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
a ^= plain_sha1_a;
b ^= plain_sha1_b;
@ -606,11 +606,11 @@ DECLSPEC void m11200s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
const u32x plain_sha1_a = a + SHA1M_A;
const u32x plain_sha1_b = b + SHA1M_B;
const u32x plain_sha1_c = c + SHA1M_C;
const u32x plain_sha1_d = d + SHA1M_D;
const u32x plain_sha1_e = e + SHA1M_E;
const u32x plain_sha1_a = a + make_u32x (SHA1M_A);
const u32x plain_sha1_b = b + make_u32x (SHA1M_B);
const u32x plain_sha1_c = c + make_u32x (SHA1M_C);
const u32x plain_sha1_d = d + make_u32x (SHA1M_D);
const u32x plain_sha1_e = e + make_u32x (SHA1M_E);
/**
* sha1 (sha1 ($pass))
@ -735,11 +735,11 @@ DECLSPEC void m11200s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* sha1 ($salt . sha1 (sha1 ($pass)))
@ -864,11 +864,11 @@ DECLSPEC void m11200s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
a ^= plain_sha1_a;
b ^= plain_sha1_b;

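The SHA-1 kernels in these hunks repeat the same feed-forward with five registers. For reference, SHA1M_A..SHA1M_E correspond to SHA-1's initial hash value from FIPS 180-4 (the first four words coincide with MD5's):

// SHA-1 initial hash value (FIPS 180-4, H0..H4).
#define SHA1M_A 0x67452301u
#define SHA1M_B 0xefcdab89u
#define SHA1M_C 0x98badcfeu
#define SHA1M_D 0x10325476u
#define SHA1M_E 0xc3d2e1f0u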
@ -232,11 +232,11 @@ KERNEL_FQ void m12600_m04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* sha256
@ -584,11 +584,11 @@ KERNEL_FQ void m12600_s04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* sha256

@ -288,11 +288,11 @@ KERNEL_FQ void m12600_m04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* sha256
@ -698,11 +698,11 @@ KERNEL_FQ void m12600_s04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* sha256

@ -186,11 +186,11 @@ DECLSPEC void m12600m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* sha256
@ -486,11 +486,11 @@ DECLSPEC void m12600s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
/**
* sha256

@ -184,10 +184,10 @@ KERNEL_FQ void m13300_m04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
e += SHA1M_E;
d += SHA1M_D;
c += SHA1M_C;
a += make_u32x (SHA1M_A);
e += make_u32x (SHA1M_E);
d += make_u32x (SHA1M_D);
c += make_u32x (SHA1M_C);
e &= 0x00000000;
@ -383,10 +383,10 @@ KERNEL_FQ void m13300_s04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
e += SHA1M_E;
d += SHA1M_D;
c += SHA1M_C;
a += make_u32x (SHA1M_A);
e += make_u32x (SHA1M_E);
d += make_u32x (SHA1M_D);
c += make_u32x (SHA1M_C);
e &= 0x00000000;

@ -240,10 +240,10 @@ KERNEL_FQ void m13300_m04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
e += SHA1M_E;
d += SHA1M_D;
c += SHA1M_C;
a += make_u32x (SHA1M_A);
e += make_u32x (SHA1M_E);
d += make_u32x (SHA1M_D);
c += make_u32x (SHA1M_C);
e &= 0x00000000;
@ -497,10 +497,10 @@ KERNEL_FQ void m13300_s04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
e += SHA1M_E;
d += SHA1M_D;
c += SHA1M_C;
a += make_u32x (SHA1M_A);
e += make_u32x (SHA1M_E);
d += make_u32x (SHA1M_D);
c += make_u32x (SHA1M_C);
e &= 0x00000000;

@ -251,10 +251,10 @@ DECLSPEC void m13300m (PRIVATE_AS u32 *w, const u32 pw_len, KERN_ATTR_FUNC_VECTO
SHA1_STEP (SHA1_F1 , c, d, e, a, b, (c_78s ^ w0s07 ^ w0s08 ^ w0s15 ^ w0s18 ^ w0s20));
SHA1_STEP (SHA1_F1 , b, c, d, e, a, (c_79s ^ w0s08 ^ w0s22));
a += SHA1M_A;
e += SHA1M_E;
d += SHA1M_D;
c += SHA1M_C;
a += make_u32x (SHA1M_A);
e += make_u32x (SHA1M_E);
d += make_u32x (SHA1M_D);
c += make_u32x (SHA1M_C);
e &= 0x00000000;
@ -511,10 +511,10 @@ DECLSPEC void m13300s (PRIVATE_AS u32 *w, const u32 pw_len, KERN_ATTR_FUNC_VECTO
SHA1_STEP (SHA1_F1 , c, d, e, a, b, (c_78s ^ w0s07 ^ w0s08 ^ w0s15 ^ w0s18 ^ w0s20));
SHA1_STEP (SHA1_F1 , b, c, d, e, a, (c_79s ^ w0s08 ^ w0s22));
a += SHA1M_A;
e += SHA1M_E;
d += SHA1M_D;
c += SHA1M_C;
a += make_u32x (SHA1M_A);
e += make_u32x (SHA1M_E);
d += make_u32x (SHA1M_D);
c += make_u32x (SHA1M_C);
e &= 0x00000000;

@ -248,11 +248,11 @@ KERNEL_FQ void m15500_m04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
a &= 0xff000000;
b &= 0x0000ffff;
@ -516,11 +516,11 @@ KERNEL_FQ void m15500_s04 (KERN_ATTR_RULES ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
a &= 0xff000000;
b &= 0x0000ffff;

@ -306,11 +306,11 @@ KERNEL_FQ void m15500_m04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
a &= 0xff000000;
b &= 0x0000ffff;
@ -634,11 +634,11 @@ KERNEL_FQ void m15500_s04 (KERN_ATTR_BASIC ())
we_t = hc_rotl32 ((wb_t ^ w6_t ^ w0_t ^ we_t), 1u); SHA1_STEP (SHA1_F1, c, d, e, a, b, we_t);
wf_t = hc_rotl32 ((wc_t ^ w7_t ^ w1_t ^ wf_t), 1u); SHA1_STEP (SHA1_F1, b, c, d, e, a, wf_t);
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
a &= 0xff000000;
b &= 0x0000ffff;

@ -299,11 +299,11 @@ DECLSPEC void m15500m (PRIVATE_AS u32 *w, const u32 pw_len, KERN_ATTR_FUNC_VECTO
SHA1_STEP (SHA1_F1 , c, d, e, a, b, (c_78s ^ w0s07 ^ w0s08 ^ w0s15 ^ w0s18 ^ w0s20));
SHA1_STEP (SHA1_F1 , b, c, d, e, a, (c_79s ^ w0s08 ^ w0s22));
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
a &= 0xff000000;
b &= 0x0000ffff;
@ -564,11 +564,11 @@ DECLSPEC void m15500s (PRIVATE_AS u32 *w, const u32 pw_len, KERN_ATTR_FUNC_VECTO
SHA1_STEP (SHA1_F1 , c, d, e, a, b, (c_78s ^ w0s07 ^ w0s08 ^ w0s15 ^ w0s18 ^ w0s20));
SHA1_STEP (SHA1_F1 , b, c, d, e, a, (c_79s ^ w0s08 ^ w0s22));
a += SHA1M_A;
b += SHA1M_B;
c += SHA1M_C;
d += SHA1M_D;
e += SHA1M_E;
a += make_u32x (SHA1M_A);
b += make_u32x (SHA1M_B);
c += make_u32x (SHA1M_C);
d += make_u32x (SHA1M_D);
e += make_u32x (SHA1M_E);
a &= 0xff000000;
b &= 0x0000ffff;

@ -230,14 +230,14 @@ KERNEL_FQ void m20710_m04 (KERN_ATTR_RULES ())
we_t = SHA256_EXPAND (wc_t, w7_t, wf_t, we_t); SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, we_t, SHA256C3e);
wf_t = SHA256_EXPAND (wd_t, w8_t, w0_t, wf_t); SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, wf_t, SHA256C3f);
a += SHA256M_A;
b += SHA256M_B;
c += SHA256M_C;
d += SHA256M_D;
e += SHA256M_E;
f += SHA256M_F;
g += SHA256M_G;
h += SHA256M_H;
a += make_u32x (SHA256M_A);
b += make_u32x (SHA256M_B);
c += make_u32x (SHA256M_C);
d += make_u32x (SHA256M_D);
e += make_u32x (SHA256M_E);
f += make_u32x (SHA256M_F);
g += make_u32x (SHA256M_G);
h += make_u32x (SHA256M_H);
// final sha256
@ -609,12 +609,12 @@ KERNEL_FQ void m20710_m04 (KERN_ATTR_RULES ())
// a += digest[0] - SHA256M_A;
// b += digest[1] - SHA256M_B;
c += digest[2] - SHA256M_C;
d += digest[3] - SHA256M_D;
c += digest[2] - make_u32x (SHA256M_C);
d += digest[3] - make_u32x (SHA256M_D);
// e += digest[4] - SHA256M_E;
// f += digest[5] - SHA256M_F;
g += digest[6] - SHA256M_G;
h += digest[7] - SHA256M_H;
g += digest[6] - make_u32x (SHA256M_G);
h += digest[7] - make_u32x (SHA256M_H);
COMPARE_M_SIMD (d, h, c, g);
}
@ -828,14 +828,14 @@ KERNEL_FQ void m20710_s04 (KERN_ATTR_RULES ())
we_t = SHA256_EXPAND (wc_t, w7_t, wf_t, we_t); SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, we_t, SHA256C3e);
wf_t = SHA256_EXPAND (wd_t, w8_t, w0_t, wf_t); SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, wf_t, SHA256C3f);
a += SHA256M_A;
b += SHA256M_B;
c += SHA256M_C;
d += SHA256M_D;
e += SHA256M_E;
f += SHA256M_F;
g += SHA256M_G;
h += SHA256M_H;
a += make_u32x (SHA256M_A);
b += make_u32x (SHA256M_B);
c += make_u32x (SHA256M_C);
d += make_u32x (SHA256M_D);
e += make_u32x (SHA256M_E);
f += make_u32x (SHA256M_F);
g += make_u32x (SHA256M_G);
h += make_u32x (SHA256M_H);
// final sha256
@ -1213,12 +1213,12 @@ KERNEL_FQ void m20710_s04 (KERN_ATTR_RULES ())
// a += digest[0] - SHA256M_A;
// b += digest[1] - SHA256M_B;
c += digest[2] - SHA256M_C;
d += digest[3] - SHA256M_D;
c += digest[2] - make_u32x (SHA256M_C);
d += digest[3] - make_u32x (SHA256M_D);
// e += digest[4] - SHA256M_E;
// f += digest[5] - SHA256M_F;
g += digest[6] - SHA256M_G;
h += digest[7] - SHA256M_H;
g += digest[6] - make_u32x (SHA256M_G);
h += digest[7] - make_u32x (SHA256M_H);
COMPARE_S_SIMD (d, h, c, g);
}

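Two related details in the m20710 hunks: the eight-register feed-forward broadcasts SHA-256's initial hash value, and the final-block tail only adjusts the registers that COMPARE_M_SIMD / COMPARE_S_SIMD actually inspect (d, h, c, g), which is why the a/b/e/f adjustments stay commented out. The values behind SHA256M_A..SHA256M_H are from FIPS 180-4:

// SHA-256 initial hash value (FIPS 180-4, H0..H7).
#define SHA256M_A 0x6a09e667u
#define SHA256M_B 0xbb67ae85u
#define SHA256M_C 0x3c6ef372u
#define SHA256M_D 0xa54ff53au
#define SHA256M_E 0x510e527fu
#define SHA256M_F 0x9b05688cu
#define SHA256M_G 0x1f83d9abu
#define SHA256M_H 0x5be0cd19u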
@ -286,14 +286,14 @@ KERNEL_FQ void m20710_m04 (KERN_ATTR_BASIC ())
we_t = SHA256_EXPAND (wc_t, w7_t, wf_t, we_t); SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, we_t, SHA256C3e);
wf_t = SHA256_EXPAND (wd_t, w8_t, w0_t, wf_t); SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, wf_t, SHA256C3f);
a += SHA256M_A;
b += SHA256M_B;
c += SHA256M_C;
d += SHA256M_D;
e += SHA256M_E;
f += SHA256M_F;
g += SHA256M_G;
h += SHA256M_H;
a += make_u32x (SHA256M_A);
b += make_u32x (SHA256M_B);
c += make_u32x (SHA256M_C);
d += make_u32x (SHA256M_D);
e += make_u32x (SHA256M_E);
f += make_u32x (SHA256M_F);
g += make_u32x (SHA256M_G);
h += make_u32x (SHA256M_H);
// final sha256
@ -665,12 +665,12 @@ KERNEL_FQ void m20710_m04 (KERN_ATTR_BASIC ())
// a += digest[0] - SHA256M_A;
// b += digest[1] - SHA256M_B;
c += digest[2] - SHA256M_C;
d += digest[3] - SHA256M_D;
c += digest[2] - make_u32x (SHA256M_C);
d += digest[3] - make_u32x (SHA256M_D);
// e += digest[4] - SHA256M_E;
// f += digest[5] - SHA256M_F;
g += digest[6] - SHA256M_G;
h += digest[7] - SHA256M_H;
g += digest[6] - make_u32x (SHA256M_G);
h += digest[7] - make_u32x (SHA256M_H);
COMPARE_M_SIMD (d, h, c, g);
}
@ -942,14 +942,14 @@ KERNEL_FQ void m20710_s04 (KERN_ATTR_BASIC ())
we_t = SHA256_EXPAND (wc_t, w7_t, wf_t, we_t); SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, we_t, SHA256C3e);
wf_t = SHA256_EXPAND (wd_t, w8_t, w0_t, wf_t); SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, wf_t, SHA256C3f);
a += SHA256M_A;
b += SHA256M_B;
c += SHA256M_C;
d += SHA256M_D;
e += SHA256M_E;
f += SHA256M_F;
g += SHA256M_G;
h += SHA256M_H;
a += make_u32x (SHA256M_A);
b += make_u32x (SHA256M_B);
c += make_u32x (SHA256M_C);
d += make_u32x (SHA256M_D);
e += make_u32x (SHA256M_E);
f += make_u32x (SHA256M_F);
g += make_u32x (SHA256M_G);
h += make_u32x (SHA256M_H);
// final sha256
@ -1327,12 +1327,12 @@ KERNEL_FQ void m20710_s04 (KERN_ATTR_BASIC ())
// a += digest[0] - SHA256M_A;
// b += digest[1] - SHA256M_B;
c += digest[2] - SHA256M_C;
d += digest[3] - SHA256M_D;
c += digest[2] - make_u32x (SHA256M_C);
d += digest[3] - make_u32x (SHA256M_D);
// e += digest[4] - SHA256M_E;
// f += digest[5] - SHA256M_F;
g += digest[6] - SHA256M_G;
h += digest[7] - SHA256M_H;
g += digest[6] - make_u32x (SHA256M_G);
h += digest[7] - make_u32x (SHA256M_H);
COMPARE_S_SIMD (d, h, c, g);
}

@ -184,14 +184,14 @@ DECLSPEC void m20710m (PRIVATE_AS u32 *w, const u32 pw_len, KERN_ATTR_FUNC_VECTO
we_t = SHA256_EXPAND (wc_t, w7_t, wf_t, we_t); SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, we_t, SHA256C3e);
wf_t = SHA256_EXPAND (wd_t, w8_t, w0_t, wf_t); SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, wf_t, SHA256C3f);
a += SHA256M_A;
b += SHA256M_B;
c += SHA256M_C;
d += SHA256M_D;
e += SHA256M_E;
f += SHA256M_F;
g += SHA256M_G;
h += SHA256M_H;
a += make_u32x (SHA256M_A);
b += make_u32x (SHA256M_B);
c += make_u32x (SHA256M_C);
d += make_u32x (SHA256M_D);
e += make_u32x (SHA256M_E);
f += make_u32x (SHA256M_F);
g += make_u32x (SHA256M_G);
h += make_u32x (SHA256M_H);
// final sha256
@ -563,12 +563,12 @@ DECLSPEC void m20710m (PRIVATE_AS u32 *w, const u32 pw_len, KERN_ATTR_FUNC_VECTO
// a += digest[0] - SHA256M_A;
// b += digest[1] - SHA256M_B;
c += digest[2] - SHA256M_C;
d += digest[3] - SHA256M_D;
c += digest[2] - make_u32x (SHA256M_C);
d += digest[3] - make_u32x (SHA256M_D);
// e += digest[4] - SHA256M_E;
// f += digest[5] - SHA256M_F;
g += digest[6] - SHA256M_G;
h += digest[7] - SHA256M_H;
g += digest[6] - make_u32x (SHA256M_G);
h += digest[7] - make_u32x (SHA256M_H);
COMPARE_M_SIMD (d, h, c, g);
}
@ -730,14 +730,14 @@ DECLSPEC void m20710s (PRIVATE_AS u32 *w, const u32 pw_len, KERN_ATTR_FUNC_VECTO
we_t = SHA256_EXPAND (wc_t, w7_t, wf_t, we_t); SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, we_t, SHA256C3e);
wf_t = SHA256_EXPAND (wd_t, w8_t, w0_t, wf_t); SHA256_STEP (SHA256_F0o, SHA256_F1o, b, c, d, e, f, g, h, a, wf_t, SHA256C3f);
a += SHA256M_A;
b += SHA256M_B;
c += SHA256M_C;
d += SHA256M_D;
e += SHA256M_E;
f += SHA256M_F;
g += SHA256M_G;
h += SHA256M_H;
a += make_u32x (SHA256M_A);
b += make_u32x (SHA256M_B);
c += make_u32x (SHA256M_C);
d += make_u32x (SHA256M_D);
e += make_u32x (SHA256M_E);
f += make_u32x (SHA256M_F);
g += make_u32x (SHA256M_G);
h += make_u32x (SHA256M_H);
// final sha256
@ -1104,7 +1104,7 @@ DECLSPEC void m20710s (PRIVATE_AS u32 *w, const u32 pw_len, KERN_ATTR_FUNC_VECTO
wb_t = SHA256_EXPAND (w9_t, w4_t, wc_t, wb_t); SHA256_STEP (SHA256_F0o, SHA256_F1o, f, g, h, a, b, c, d, e, wb_t, SHA256C3b);
wc_t = SHA256_EXPAND (wa_t, w5_t, wd_t, wc_t); SHA256_STEP (SHA256_F0o, SHA256_F1o, e, f, g, h, a, b, c, d, wc_t, SHA256C3c);
if (MATCHES_NONE_VS (h+digest[7]-SHA256M_H, search[1])) continue;
if (MATCHES_NONE_VS ((h + digest[7] - make_u32x (SHA256M_H)), search[1])) continue;
wd_t = SHA256_EXPAND (wb_t, w6_t, we_t, wd_t); SHA256_STEP (SHA256_F0o, SHA256_F1o, d, e, f, g, h, a, b, c, wd_t, SHA256C3d);
we_t = SHA256_EXPAND (wc_t, w7_t, wf_t, we_t); SHA256_STEP (SHA256_F0o, SHA256_F1o, c, d, e, f, g, h, a, b, we_t, SHA256C3e);
@ -1112,12 +1112,12 @@ DECLSPEC void m20710s (PRIVATE_AS u32 *w, const u32 pw_len, KERN_ATTR_FUNC_VECTO
// a += digest[0] - SHA256M_A;
// b += digest[1] - SHA256M_B;
c += digest[2] - SHA256M_C;
d += digest[3] - SHA256M_D;
c += digest[2] - make_u32x (SHA256M_C);
d += digest[3] - make_u32x (SHA256M_D);
// e += digest[4] - SHA256M_E;
// f += digest[5] - SHA256M_F;
g += digest[6] - SHA256M_G;
h += digest[7] - SHA256M_H;
g += digest[6] - make_u32x (SHA256M_G);
h += digest[7] - make_u32x (SHA256M_H);
COMPARE_S_SIMD (d, h, c, g);
}

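The early-reject lines (if (MATCHES_NONE_VS (...)) continue;) are the other spot where the broadcast matters: a vector register with a folded-in constant is compared against a scalar search word, and the whole candidate batch is skipped only when no lane can still match. A hedged sketch of such a lane-wise test for a 4-lane vector (illustrative only; the real macro is generated per VECT_SIZE):

// Illustrative only, not the hashcat macro: true when no lane of v
// equals the target word s, letting the caller skip the remaining
// rounds for all four candidates at once.
typedef unsigned int u32x __attribute__((ext_vector_type(4)));

static inline int matches_none_vs (u32x v, unsigned int s)
{
  return (v.x != s) && (v.y != s) && (v.z != s) && (v.w != s);
}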
@ -191,10 +191,10 @@ KERNEL_FQ void m20800_m04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
/*
* sha256
@ -492,10 +492,10 @@ KERNEL_FQ void m20800_s04 (KERN_ATTR_RULES ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
/*
* sha256

@ -244,10 +244,10 @@ KERNEL_FQ void m20800_m04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
/*
* sha256
@ -600,10 +600,10 @@ KERNEL_FQ void m20800_s04 (KERN_ATTR_BASIC ())
MD5_STEP (MD5_I , c, d, a, b, w0[2], MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w2[1], MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
/*
* sha256

@ -159,10 +159,10 @@ DECLSPEC void m20800m (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w2_t, MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w9_t, MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
/*
* sha256
@ -422,10 +422,10 @@ DECLSPEC void m20800s (PRIVATE_AS u32 *w0, PRIVATE_AS u32 *w1, PRIVATE_AS u32 *w
MD5_STEP (MD5_I , c, d, a, b, w2_t, MD5C3e, MD5S32);
MD5_STEP (MD5_I , b, c, d, a, w9_t, MD5C3f, MD5S33);
a += MD5M_A;
b += MD5M_B;
c += MD5M_C;
d += MD5M_D;
a += make_u32x (MD5M_A);
b += make_u32x (MD5M_B);
c += make_u32x (MD5M_C);
d += make_u32x (MD5M_D);
/*
* sha256

Some files were not shown because too many files have changed in this diff.
