pull/2387/head
commit 0b0cbab24b by Jens Steube

@ -490,6 +490,194 @@ DECLSPEC u64 v64_from_v32ab_S (const u32 v32a, const u32 v32b)
// unpack functions are similar, but always return u32
DECLSPEC u32x unpack_v8a_from_v32 (const u32x v32)
{
u32x r = 0;
#if defined IS_NV && HAS_BFE == 1
#if VECT_SIZE == 1
asm volatile ("bfe.u32 %0, %1, 0, 8;" : "=r"(r) : "r"(v32));
#endif
#if VECT_SIZE >= 2
asm volatile ("bfe.u32 %0, %1, 0, 8;" : "=r"(r.s0) : "r"(v32.s0));
asm volatile ("bfe.u32 %0, %1, 0, 8;" : "=r"(r.s1) : "r"(v32.s1));
#endif
#if VECT_SIZE >= 4
asm volatile ("bfe.u32 %0, %1, 0, 8;" : "=r"(r.s2) : "r"(v32.s2));
asm volatile ("bfe.u32 %0, %1, 0, 8;" : "=r"(r.s3) : "r"(v32.s3));
#endif
#if VECT_SIZE >= 8
asm volatile ("bfe.u32 %0, %1, 0, 8;" : "=r"(r.s4) : "r"(v32.s4));
asm volatile ("bfe.u32 %0, %1, 0, 8;" : "=r"(r.s5) : "r"(v32.s5));
asm volatile ("bfe.u32 %0, %1, 0, 8;" : "=r"(r.s6) : "r"(v32.s6));
asm volatile ("bfe.u32 %0, %1, 0, 8;" : "=r"(r.s7) : "r"(v32.s7));
#endif
#if VECT_SIZE >= 16
asm volatile ("bfe.u32 %0, %1, 0, 8;" : "=r"(r.s8) : "r"(v32.s8));
asm volatile ("bfe.u32 %0, %1, 0, 8;" : "=r"(r.s9) : "r"(v32.s9));
asm volatile ("bfe.u32 %0, %1, 0, 8;" : "=r"(r.sa) : "r"(v32.sa));
asm volatile ("bfe.u32 %0, %1, 0, 8;" : "=r"(r.sb) : "r"(v32.sb));
asm volatile ("bfe.u32 %0, %1, 0, 8;" : "=r"(r.sc) : "r"(v32.sc));
asm volatile ("bfe.u32 %0, %1, 0, 8;" : "=r"(r.sd) : "r"(v32.sd));
asm volatile ("bfe.u32 %0, %1, 0, 8;" : "=r"(r.se) : "r"(v32.se));
asm volatile ("bfe.u32 %0, %1, 0, 8;" : "=r"(r.sf) : "r"(v32.sf));
#endif
//#elif defined IS_AMD && HAS_VBFE == 1
//__asm__ __volatile__ ("V_BFE_U32 %0, %1, 0, 8;" : "=v"(r) : "v"(v32));
#else
r = (v32 >> 0) & 0xff;
#endif
return r;
}
DECLSPEC u32x unpack_v8b_from_v32 (const u32x v32)
{
u32x r = 0;
#if defined IS_NV && HAS_BFE == 1
#if VECT_SIZE == 1
asm volatile ("bfe.u32 %0, %1, 8, 8;" : "=r"(r) : "r"(v32));
#endif
#if VECT_SIZE >= 2
asm volatile ("bfe.u32 %0, %1, 8, 8;" : "=r"(r.s0) : "r"(v32.s0));
asm volatile ("bfe.u32 %0, %1, 8, 8;" : "=r"(r.s1) : "r"(v32.s1));
#endif
#if VECT_SIZE >= 4
asm volatile ("bfe.u32 %0, %1, 8, 8;" : "=r"(r.s2) : "r"(v32.s2));
asm volatile ("bfe.u32 %0, %1, 8, 8;" : "=r"(r.s3) : "r"(v32.s3));
#endif
#if VECT_SIZE >= 8
asm volatile ("bfe.u32 %0, %1, 8, 8;" : "=r"(r.s4) : "r"(v32.s4));
asm volatile ("bfe.u32 %0, %1, 8, 8;" : "=r"(r.s5) : "r"(v32.s5));
asm volatile ("bfe.u32 %0, %1, 8, 8;" : "=r"(r.s6) : "r"(v32.s6));
asm volatile ("bfe.u32 %0, %1, 8, 8;" : "=r"(r.s7) : "r"(v32.s7));
#endif
#if VECT_SIZE >= 16
asm volatile ("bfe.u32 %0, %1, 8, 8;" : "=r"(r.s8) : "r"(v32.s8));
asm volatile ("bfe.u32 %0, %1, 8, 8;" : "=r"(r.s9) : "r"(v32.s9));
asm volatile ("bfe.u32 %0, %1, 8, 8;" : "=r"(r.sa) : "r"(v32.sa));
asm volatile ("bfe.u32 %0, %1, 8, 8;" : "=r"(r.sb) : "r"(v32.sb));
asm volatile ("bfe.u32 %0, %1, 8, 8;" : "=r"(r.sc) : "r"(v32.sc));
asm volatile ("bfe.u32 %0, %1, 8, 8;" : "=r"(r.sd) : "r"(v32.sd));
asm volatile ("bfe.u32 %0, %1, 8, 8;" : "=r"(r.se) : "r"(v32.se));
asm volatile ("bfe.u32 %0, %1, 8, 8;" : "=r"(r.sf) : "r"(v32.sf));
#endif
//#elif defined IS_AMD && HAS_VBFE == 1
//__asm__ __volatile__ ("V_BFE_U32 %0, %1, 8, 8;" : "=v"(r) : "v"(v32));
#else
r = (v32 >> 8) & 0xff;
#endif
return r;
}
DECLSPEC u32x unpack_v8c_from_v32 (const u32x v32)
{
u32x r = 0;
#if defined IS_NV && HAS_BFE == 1
#if VECT_SIZE == 1
asm volatile ("bfe.u32 %0, %1, 16, 8;" : "=r"(r) : "r"(v32));
#endif
#if VECT_SIZE >= 2
asm volatile ("bfe.u32 %0, %1, 16, 8;" : "=r"(r.s0) : "r"(v32.s0));
asm volatile ("bfe.u32 %0, %1, 16, 8;" : "=r"(r.s1) : "r"(v32.s1));
#endif
#if VECT_SIZE >= 4
asm volatile ("bfe.u32 %0, %1, 16, 8;" : "=r"(r.s2) : "r"(v32.s2));
asm volatile ("bfe.u32 %0, %1, 16, 8;" : "=r"(r.s3) : "r"(v32.s3));
#endif
#if VECT_SIZE >= 8
asm volatile ("bfe.u32 %0, %1, 16, 8;" : "=r"(r.s4) : "r"(v32.s4));
asm volatile ("bfe.u32 %0, %1, 16, 8;" : "=r"(r.s5) : "r"(v32.s5));
asm volatile ("bfe.u32 %0, %1, 16, 8;" : "=r"(r.s6) : "r"(v32.s6));
asm volatile ("bfe.u32 %0, %1, 16, 8;" : "=r"(r.s7) : "r"(v32.s7));
#endif
#if VECT_SIZE >= 16
asm volatile ("bfe.u32 %0, %1, 16, 8;" : "=r"(r.s8) : "r"(v32.s8));
asm volatile ("bfe.u32 %0, %1, 16, 8;" : "=r"(r.s9) : "r"(v32.s9));
asm volatile ("bfe.u32 %0, %1, 16, 8;" : "=r"(r.sa) : "r"(v32.sa));
asm volatile ("bfe.u32 %0, %1, 16, 8;" : "=r"(r.sb) : "r"(v32.sb));
asm volatile ("bfe.u32 %0, %1, 16, 8;" : "=r"(r.sc) : "r"(v32.sc));
asm volatile ("bfe.u32 %0, %1, 16, 8;" : "=r"(r.sd) : "r"(v32.sd));
asm volatile ("bfe.u32 %0, %1, 16, 8;" : "=r"(r.se) : "r"(v32.se));
asm volatile ("bfe.u32 %0, %1, 16, 8;" : "=r"(r.sf) : "r"(v32.sf));
#endif
//#elif defined IS_AMD && HAS_VBFE == 1
//__asm__ __volatile__ ("V_BFE_U32 %0, %1, 16, 8;" : "=v"(r) : "v"(v32));
#else
r = (v32 >> 16) & 0xff;
#endif
return r;
}
DECLSPEC u32x unpack_v8d_from_v32 (const u32x v32)
{
u32x r = 0;
#if defined IS_NV && HAS_BFE == 1
#if VECT_SIZE == 1
asm volatile ("bfe.u32 %0, %1, 24, 8;" : "=r"(r) : "r"(v32));
#endif
#if VECT_SIZE >= 2
asm volatile ("bfe.u32 %0, %1, 24, 8;" : "=r"(r.s0) : "r"(v32.s0));
asm volatile ("bfe.u32 %0, %1, 24, 8;" : "=r"(r.s1) : "r"(v32.s1));
#endif
#if VECT_SIZE >= 4
asm volatile ("bfe.u32 %0, %1, 24, 8;" : "=r"(r.s2) : "r"(v32.s2));
asm volatile ("bfe.u32 %0, %1, 24, 8;" : "=r"(r.s3) : "r"(v32.s3));
#endif
#if VECT_SIZE >= 8
asm volatile ("bfe.u32 %0, %1, 24, 8;" : "=r"(r.s4) : "r"(v32.s4));
asm volatile ("bfe.u32 %0, %1, 24, 8;" : "=r"(r.s5) : "r"(v32.s5));
asm volatile ("bfe.u32 %0, %1, 24, 8;" : "=r"(r.s6) : "r"(v32.s6));
asm volatile ("bfe.u32 %0, %1, 24, 8;" : "=r"(r.s7) : "r"(v32.s7));
#endif
#if VECT_SIZE >= 16
asm volatile ("bfe.u32 %0, %1, 24, 8;" : "=r"(r.s8) : "r"(v32.s8));
asm volatile ("bfe.u32 %0, %1, 24, 8;" : "=r"(r.s9) : "r"(v32.s9));
asm volatile ("bfe.u32 %0, %1, 24, 8;" : "=r"(r.sa) : "r"(v32.sa));
asm volatile ("bfe.u32 %0, %1, 24, 8;" : "=r"(r.sb) : "r"(v32.sb));
asm volatile ("bfe.u32 %0, %1, 24, 8;" : "=r"(r.sc) : "r"(v32.sc));
asm volatile ("bfe.u32 %0, %1, 24, 8;" : "=r"(r.sd) : "r"(v32.sd));
asm volatile ("bfe.u32 %0, %1, 24, 8;" : "=r"(r.se) : "r"(v32.se));
asm volatile ("bfe.u32 %0, %1, 24, 8;" : "=r"(r.sf) : "r"(v32.sf));
#endif
//#elif defined IS_AMD && HAS_VBFE == 1
//__asm__ __volatile__ ("V_BFE_U32 %0, %1, 24, 8;" : "=v"(r) : "v"(v32));
#else
r = (v32 >> 24) & 0xff;
#endif
return r;
}
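For reference, the four helpers above each select one byte of a 32-bit word: on NVIDIA they map to the PTX bit-field-extract instruction (bfe.u32 d, a, pos, len pulls len bits out of a starting at bit pos), and everywhere else to the portable shift-and-mask fallback. A minimal scalar sketch of the semantics in plain C (names hypothetical):

typedef unsigned int u32;

static u32 unpack_v8a (const u32 v) { return (v >>  0) & 0xff; } // bfe.u32 d, v,  0, 8
static u32 unpack_v8b (const u32 v) { return (v >>  8) & 0xff; } // bfe.u32 d, v,  8, 8
static u32 unpack_v8c (const u32 v) { return (v >> 16) & 0xff; } // bfe.u32 d, v, 16, 8
static u32 unpack_v8d (const u32 v) { return (v >> 24) & 0xff; } // bfe.u32 d, v, 24, 8

// e.g. v = 0x44332211 unpacks to a = 0x11, b = 0x22, c = 0x33, d = 0x44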
DECLSPEC u32 unpack_v8a_from_v32_S (const u32 v32)
{
u32 r = 0;

@ -171,6 +171,11 @@ DECLSPEC u64 v64_from_v32ab_S (const u32 v32a, const u32 v32b);
// inline asm packing
DECLSPEC u32x unpack_v8a_from_v32 (const u32x v32);
DECLSPEC u32x unpack_v8b_from_v32 (const u32x v32);
DECLSPEC u32x unpack_v8c_from_v32 (const u32x v32);
DECLSPEC u32x unpack_v8d_from_v32 (const u32x v32);
DECLSPEC u32 unpack_v8a_from_v32_S (const u32 v32);
DECLSPEC u32 unpack_v8b_from_v32_S (const u32 v32);
DECLSPEC u32 unpack_v8c_from_v32_S (const u32 v32);

@ -99,10 +99,10 @@ KERNEL_FQ void m00200_m04 (KERN_ATTR_RULES ())
{
const u32x wj = w_t[j];
ROUND ((wj >> 0) & 0xff);
ROUND ((wj >> 8) & 0xff);
ROUND ((wj >> 16) & 0xff);
ROUND ((wj >> 24) & 0xff);
ROUND (unpack_v8a_from_v32 (wj));
ROUND (unpack_v8b_from_v32 (wj));
ROUND (unpack_v8c_from_v32 (wj));
ROUND (unpack_v8d_from_v32 (wj));
}
const u32x wj = w_t[j];
@ -111,18 +111,18 @@ KERNEL_FQ void m00200_m04 (KERN_ATTR_RULES ())
if (left == 3)
{
ROUND ((wj >> 0) & 0xff);
ROUND ((wj >> 8) & 0xff);
ROUND ((wj >> 16) & 0xff);
ROUND (unpack_v8a_from_v32 (wj));
ROUND (unpack_v8b_from_v32 (wj));
ROUND (unpack_v8c_from_v32 (wj));
}
else if (left == 2)
{
ROUND ((wj >> 0) & 0xff);
ROUND ((wj >> 8) & 0xff);
ROUND (unpack_v8a_from_v32 (wj));
ROUND (unpack_v8b_from_v32 (wj));
}
else if (left == 1)
{
ROUND ((wj >> 0) & 0xff);
ROUND (unpack_v8a_from_v32 (wj));
}
a &= 0x7fffffff;
@ -237,10 +237,10 @@ KERNEL_FQ void m00200_s04 (KERN_ATTR_RULES ())
{
const u32x wj = w_t[j];
ROUND ((wj >> 0) & 0xff);
ROUND ((wj >> 8) & 0xff);
ROUND ((wj >> 16) & 0xff);
ROUND ((wj >> 24) & 0xff);
ROUND (unpack_v8a_from_v32 (wj));
ROUND (unpack_v8b_from_v32 (wj));
ROUND (unpack_v8c_from_v32 (wj));
ROUND (unpack_v8d_from_v32 (wj));
}
const u32x wj = w_t[j];
@ -249,18 +249,18 @@ KERNEL_FQ void m00200_s04 (KERN_ATTR_RULES ())
if (left == 3)
{
ROUND ((wj >> 0) & 0xff);
ROUND ((wj >> 8) & 0xff);
ROUND ((wj >> 16) & 0xff);
ROUND (unpack_v8a_from_v32 (wj));
ROUND (unpack_v8b_from_v32 (wj));
ROUND (unpack_v8c_from_v32 (wj));
}
else if (left == 2)
{
ROUND ((wj >> 0) & 0xff);
ROUND ((wj >> 8) & 0xff);
ROUND (unpack_v8a_from_v32 (wj));
ROUND (unpack_v8b_from_v32 (wj));
}
else if (left == 1)
{
ROUND ((wj >> 0) & 0xff);
ROUND (unpack_v8a_from_v32 (wj));
}
a &= 0x7fffffff;
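For context, ROUND in these kernels is the per-byte mixing step of the classic MySQL 3.23 password hash, which is why the code now unpacks every 32-bit word into single bytes before feeding it in. A plain-C sketch of the well-known scheme (macro shape and constants taken from the public algorithm, not from this diff):

typedef unsigned int u32;

#define MYSQL323_A 0x50305735u // initial value of a
#define MYSQL323_B 0x12345671u // initial value of b

// one password byte c updates both state words and the running counter
static void mysql323_round (u32 *a, u32 *b, u32 *add, const u32 c)
{
  *a   ^= (((*a & 63) + *add) * c) + (*a << 8);
  *b   += (*b << 8) ^ *a;
  *add += c;
}

// after the last byte the words are masked to 31 bits, as in the
// a &= 0x7fffffff above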

@ -142,6 +142,8 @@ KERNEL_FQ void m00200_m04 (KERN_ATTR_BASIC ())
u32x a = MYSQL323_A;
u32x b = MYSQL323_B;
u32x c = 0;
u32x d = 0;
u32x add = 7;
@ -159,10 +161,10 @@ KERNEL_FQ void m00200_m04 (KERN_ATTR_BASIC ())
{
const u32x wj = w_t[j];
ROUND ((wj >> 0) & 0xff);
ROUND ((wj >> 8) & 0xff);
ROUND ((wj >> 16) & 0xff);
ROUND ((wj >> 24) & 0xff);
ROUND (unpack_v8a_from_v32 (wj));
ROUND (unpack_v8b_from_v32 (wj));
ROUND (unpack_v8c_from_v32 (wj));
ROUND (unpack_v8d_from_v32 (wj));
}
const u32x wj = w_t[j];
@ -171,18 +173,18 @@ KERNEL_FQ void m00200_m04 (KERN_ATTR_BASIC ())
if (left == 3)
{
ROUND ((wj >> 0) & 0xff);
ROUND ((wj >> 8) & 0xff);
ROUND ((wj >> 16) & 0xff);
ROUND (unpack_v8a_from_v32 (wj));
ROUND (unpack_v8b_from_v32 (wj));
ROUND (unpack_v8c_from_v32 (wj));
}
else if (left == 2)
{
ROUND ((wj >> 0) & 0xff);
ROUND ((wj >> 8) & 0xff);
ROUND (unpack_v8a_from_v32 (wj));
ROUND (unpack_v8b_from_v32 (wj));
}
else if (left == 1)
{
ROUND ((wj >> 0) & 0xff);
ROUND (unpack_v8a_from_v32 (wj));
}
a &= 0x7fffffff;
@ -361,10 +363,10 @@ KERNEL_FQ void m00200_s04 (KERN_ATTR_BASIC ())
{
const u32x wj = w_t[j];
ROUND ((wj >> 0) & 0xff);
ROUND ((wj >> 8) & 0xff);
ROUND ((wj >> 16) & 0xff);
ROUND ((wj >> 24) & 0xff);
ROUND (unpack_v8a_from_v32 (wj));
ROUND (unpack_v8b_from_v32 (wj));
ROUND (unpack_v8c_from_v32 (wj));
ROUND (unpack_v8d_from_v32 (wj));
}
const u32x wj = w_t[j];
@ -373,18 +375,18 @@ KERNEL_FQ void m00200_s04 (KERN_ATTR_BASIC ())
if (left == 3)
{
ROUND ((wj >> 0) & 0xff);
ROUND ((wj >> 8) & 0xff);
ROUND ((wj >> 16) & 0xff);
ROUND (unpack_v8a_from_v32 (wj));
ROUND (unpack_v8b_from_v32 (wj));
ROUND (unpack_v8c_from_v32 (wj));
}
else if (left == 2)
{
ROUND ((wj >> 0) & 0xff);
ROUND ((wj >> 8) & 0xff);
ROUND (unpack_v8a_from_v32 (wj));
ROUND (unpack_v8b_from_v32 (wj));
}
else if (left == 1)
{
ROUND ((wj >> 0) & 0xff);
ROUND (unpack_v8a_from_v32 (wj));
}
a &= 0x7fffffff;

@ -42,10 +42,10 @@
{ \
const u32 wj = w[j]; \
\
ROUND ((wj >> 0) & 0xff); \
ROUND ((wj >> 8) & 0xff); \
ROUND ((wj >> 16) & 0xff); \
ROUND ((wj >> 24) & 0xff); \
ROUND (unpack_v8a_from_v32 (wj)); \
ROUND (unpack_v8b_from_v32 (wj)); \
ROUND (unpack_v8c_from_v32 (wj)); \
ROUND (unpack_v8d_from_v32 (wj)); \
} \
\
const u32 wj = w[j]; \
@ -54,18 +54,18 @@
\
if (left == 3) \
{ \
ROUND ((wj >> 0) & 0xff); \
ROUND ((wj >> 8) & 0xff); \
ROUND ((wj >> 16) & 0xff); \
ROUND (unpack_v8a_from_v32 (wj)); \
ROUND (unpack_v8b_from_v32 (wj)); \
ROUND (unpack_v8c_from_v32 (wj)); \
} \
else if (left == 2) \
{ \
ROUND ((wj >> 0) & 0xff); \
ROUND ((wj >> 8) & 0xff); \
ROUND (unpack_v8a_from_v32 (wj)); \
ROUND (unpack_v8b_from_v32 (wj)); \
} \
else if (left == 1) \
{ \
ROUND ((wj >> 0) & 0xff); \
ROUND (unpack_v8a_from_v32 (wj)); \
}
#define CODE_POST_M \
@ -99,141 +99,123 @@ DECLSPEC void m00200m (u32 *w, const u32 pw_len, KERN_ATTR_VECTOR ())
const u64 gid = get_global_id (0);
const u64 lid = get_local_id (0);
/**
* digest
*/
const u32 search[4] =
{
digests_buf[digests_offset].digest_buf[DGST_R0],
digests_buf[digests_offset].digest_buf[DGST_R1],
0,
0
};
/**
* loop
*/
u32 w0l = w[0];
CODE_PRE;
switch (pw_len)
{
case 1:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff);
CODE_POST_M;
ROUND (unpack_v8a_from_v32 ( w0));
break;
case 2:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff);
CODE_POST_M;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0));
break;
case 3:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff);
CODE_POST_M;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0));
break;
case 4:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
CODE_POST_M;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
break;
case 5:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff);
CODE_POST_M;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1]));
break;
case 6:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff);
CODE_POST_M;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1]));
break;
case 7:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff);
CODE_POST_M;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1]));
break;
case 8:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff); ROUND ((w[1] >> 24) & 0xff);
CODE_POST_M;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1])); ROUND (unpack_v8d_from_v32 (w[1]));
break;
case 9:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff); ROUND ((w[1] >> 24) & 0xff);
ROUND ((w[2] >> 0) & 0xff);
CODE_POST_M;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1])); ROUND (unpack_v8d_from_v32 (w[1]));
ROUND (unpack_v8a_from_v32 (w[2]));
break;
case 10:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff); ROUND ((w[1] >> 24) & 0xff);
ROUND ((w[2] >> 0) & 0xff); ROUND ((w[2] >> 8) & 0xff);
CODE_POST_M;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1])); ROUND (unpack_v8d_from_v32 (w[1]));
ROUND (unpack_v8a_from_v32 (w[2])); ROUND (unpack_v8b_from_v32 (w[2]));
break;
case 11:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff); ROUND ((w[1] >> 24) & 0xff);
ROUND ((w[2] >> 0) & 0xff); ROUND ((w[2] >> 8) & 0xff); ROUND ((w[2] >> 16) & 0xff);
CODE_POST_M;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1])); ROUND (unpack_v8d_from_v32 (w[1]));
ROUND (unpack_v8a_from_v32 (w[2])); ROUND (unpack_v8b_from_v32 (w[2])); ROUND (unpack_v8c_from_v32 (w[2]));
break;
case 12:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff); ROUND ((w[1] >> 24) & 0xff);
ROUND ((w[2] >> 0) & 0xff); ROUND ((w[2] >> 8) & 0xff); ROUND ((w[2] >> 16) & 0xff); ROUND ((w[2] >> 24) & 0xff);
CODE_POST_M;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1])); ROUND (unpack_v8d_from_v32 (w[1]));
ROUND (unpack_v8a_from_v32 (w[2])); ROUND (unpack_v8b_from_v32 (w[2])); ROUND (unpack_v8c_from_v32 (w[2])); ROUND (unpack_v8d_from_v32 (w[2]));
break;
case 13:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff); ROUND ((w[1] >> 24) & 0xff);
ROUND ((w[2] >> 0) & 0xff); ROUND ((w[2] >> 8) & 0xff); ROUND ((w[2] >> 16) & 0xff); ROUND ((w[2] >> 24) & 0xff);
ROUND ((w[3] >> 0) & 0xff);
CODE_POST_M;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1])); ROUND (unpack_v8d_from_v32 (w[1]));
ROUND (unpack_v8a_from_v32 (w[2])); ROUND (unpack_v8b_from_v32 (w[2])); ROUND (unpack_v8c_from_v32 (w[2])); ROUND (unpack_v8d_from_v32 (w[2]));
ROUND (unpack_v8a_from_v32 (w[3]));
break;
case 14:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff); ROUND ((w[1] >> 24) & 0xff);
ROUND ((w[2] >> 0) & 0xff); ROUND ((w[2] >> 8) & 0xff); ROUND ((w[2] >> 16) & 0xff); ROUND ((w[2] >> 24) & 0xff);
ROUND ((w[3] >> 0) & 0xff); ROUND ((w[3] >> 8) & 0xff);
CODE_POST_M;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1])); ROUND (unpack_v8d_from_v32 (w[1]));
ROUND (unpack_v8a_from_v32 (w[2])); ROUND (unpack_v8b_from_v32 (w[2])); ROUND (unpack_v8c_from_v32 (w[2])); ROUND (unpack_v8d_from_v32 (w[2]));
ROUND (unpack_v8a_from_v32 (w[3])); ROUND (unpack_v8b_from_v32 (w[3]));
break;
case 15:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff); ROUND ((w[1] >> 24) & 0xff);
ROUND ((w[2] >> 0) & 0xff); ROUND ((w[2] >> 8) & 0xff); ROUND ((w[2] >> 16) & 0xff); ROUND ((w[2] >> 24) & 0xff);
ROUND ((w[3] >> 0) & 0xff); ROUND ((w[3] >> 8) & 0xff); ROUND ((w[3] >> 16) & 0xff);
CODE_POST_M;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1])); ROUND (unpack_v8d_from_v32 (w[1]));
ROUND (unpack_v8a_from_v32 (w[2])); ROUND (unpack_v8b_from_v32 (w[2])); ROUND (unpack_v8c_from_v32 (w[2])); ROUND (unpack_v8d_from_v32 (w[2]));
ROUND (unpack_v8a_from_v32 (w[3])); ROUND (unpack_v8b_from_v32 (w[3])); ROUND (unpack_v8c_from_v32 (w[3]));
break;
case 16:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff); ROUND ((w[1] >> 24) & 0xff);
ROUND ((w[2] >> 0) & 0xff); ROUND ((w[2] >> 8) & 0xff); ROUND ((w[2] >> 16) & 0xff); ROUND ((w[2] >> 24) & 0xff);
ROUND ((w[3] >> 0) & 0xff); ROUND ((w[3] >> 8) & 0xff); ROUND ((w[3] >> 16) & 0xff); ROUND ((w[3] >> 24) & 0xff);
CODE_POST_M;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1])); ROUND (unpack_v8d_from_v32 (w[1]));
ROUND (unpack_v8a_from_v32 (w[2])); ROUND (unpack_v8b_from_v32 (w[2])); ROUND (unpack_v8c_from_v32 (w[2])); ROUND (unpack_v8d_from_v32 (w[2]));
ROUND (unpack_v8a_from_v32 (w[3])); ROUND (unpack_v8b_from_v32 (w[3])); ROUND (unpack_v8c_from_v32 (w[3])); ROUND (unpack_v8d_from_v32 (w[3]));
break;
default:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
CODE_LOOP (pw_len - 4);
CODE_POST_M;
break;
}
CODE_POST_M;
}
DECLSPEC void m00200s (u32 *w, const u32 pw_len, KERN_ATTR_VECTOR ())
@ -263,135 +245,105 @@ DECLSPEC void m00200s (u32 *w, const u32 pw_len, KERN_ATTR_VECTOR ())
u32 w0l = w[0];
CODE_PRE;
switch (pw_len)
{
case 1:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff);
CODE_POST_S;
ROUND (unpack_v8a_from_v32 ( w0));
break;
case 2:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff);
CODE_POST_S;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0));
break;
case 3:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff);
CODE_POST_S;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0));
break;
case 4:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
CODE_POST_S;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
break;
case 5:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff);
CODE_POST_S;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1]));
break;
case 6:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff);
CODE_POST_S;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1]));
break;
case 7:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff);
CODE_POST_S;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1]));
break;
case 8:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff); ROUND ((w[1] >> 24) & 0xff);
CODE_POST_S;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1])); ROUND (unpack_v8d_from_v32 (w[1]));
break;
case 9:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff); ROUND ((w[1] >> 24) & 0xff);
ROUND ((w[2] >> 0) & 0xff);
CODE_POST_S;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1])); ROUND (unpack_v8d_from_v32 (w[1]));
ROUND (unpack_v8a_from_v32 (w[2]));
break;
case 10:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff); ROUND ((w[1] >> 24) & 0xff);
ROUND ((w[2] >> 0) & 0xff); ROUND ((w[2] >> 8) & 0xff);
CODE_POST_S;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1])); ROUND (unpack_v8d_from_v32 (w[1]));
ROUND (unpack_v8a_from_v32 (w[2])); ROUND (unpack_v8b_from_v32 (w[2]));
break;
case 11:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff); ROUND ((w[1] >> 24) & 0xff);
ROUND ((w[2] >> 0) & 0xff); ROUND ((w[2] >> 8) & 0xff); ROUND ((w[2] >> 16) & 0xff);
CODE_POST_S;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1])); ROUND (unpack_v8d_from_v32 (w[1]));
ROUND (unpack_v8a_from_v32 (w[2])); ROUND (unpack_v8b_from_v32 (w[2])); ROUND (unpack_v8c_from_v32 (w[2]));
break;
case 12:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff); ROUND ((w[1] >> 24) & 0xff);
ROUND ((w[2] >> 0) & 0xff); ROUND ((w[2] >> 8) & 0xff); ROUND ((w[2] >> 16) & 0xff); ROUND ((w[2] >> 24) & 0xff);
CODE_POST_S;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1])); ROUND (unpack_v8d_from_v32 (w[1]));
ROUND (unpack_v8a_from_v32 (w[2])); ROUND (unpack_v8b_from_v32 (w[2])); ROUND (unpack_v8c_from_v32 (w[2])); ROUND (unpack_v8d_from_v32 (w[2]));
break;
case 13:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff); ROUND ((w[1] >> 24) & 0xff);
ROUND ((w[2] >> 0) & 0xff); ROUND ((w[2] >> 8) & 0xff); ROUND ((w[2] >> 16) & 0xff); ROUND ((w[2] >> 24) & 0xff);
ROUND ((w[3] >> 0) & 0xff);
CODE_POST_S;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1])); ROUND (unpack_v8d_from_v32 (w[1]));
ROUND (unpack_v8a_from_v32 (w[2])); ROUND (unpack_v8b_from_v32 (w[2])); ROUND (unpack_v8c_from_v32 (w[2])); ROUND (unpack_v8d_from_v32 (w[2]));
ROUND (unpack_v8a_from_v32 (w[3]));
break;
case 14:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff); ROUND ((w[1] >> 24) & 0xff);
ROUND ((w[2] >> 0) & 0xff); ROUND ((w[2] >> 8) & 0xff); ROUND ((w[2] >> 16) & 0xff); ROUND ((w[2] >> 24) & 0xff);
ROUND ((w[3] >> 0) & 0xff); ROUND ((w[3] >> 8) & 0xff);
CODE_POST_S;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1])); ROUND (unpack_v8d_from_v32 (w[1]));
ROUND (unpack_v8a_from_v32 (w[2])); ROUND (unpack_v8b_from_v32 (w[2])); ROUND (unpack_v8c_from_v32 (w[2])); ROUND (unpack_v8d_from_v32 (w[2]));
ROUND (unpack_v8a_from_v32 (w[3])); ROUND (unpack_v8b_from_v32 (w[3]));
break;
case 15:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff); ROUND ((w[1] >> 24) & 0xff);
ROUND ((w[2] >> 0) & 0xff); ROUND ((w[2] >> 8) & 0xff); ROUND ((w[2] >> 16) & 0xff); ROUND ((w[2] >> 24) & 0xff);
ROUND ((w[3] >> 0) & 0xff); ROUND ((w[3] >> 8) & 0xff); ROUND ((w[3] >> 16) & 0xff);
CODE_POST_S;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1])); ROUND (unpack_v8d_from_v32 (w[1]));
ROUND (unpack_v8a_from_v32 (w[2])); ROUND (unpack_v8b_from_v32 (w[2])); ROUND (unpack_v8c_from_v32 (w[2])); ROUND (unpack_v8d_from_v32 (w[2]));
ROUND (unpack_v8a_from_v32 (w[3])); ROUND (unpack_v8b_from_v32 (w[3])); ROUND (unpack_v8c_from_v32 (w[3]));
break;
case 16:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND ((w[1] >> 0) & 0xff); ROUND ((w[1] >> 8) & 0xff); ROUND ((w[1] >> 16) & 0xff); ROUND ((w[1] >> 24) & 0xff);
ROUND ((w[2] >> 0) & 0xff); ROUND ((w[2] >> 8) & 0xff); ROUND ((w[2] >> 16) & 0xff); ROUND ((w[2] >> 24) & 0xff);
ROUND ((w[3] >> 0) & 0xff); ROUND ((w[3] >> 8) & 0xff); ROUND ((w[3] >> 16) & 0xff); ROUND ((w[3] >> 24) & 0xff);
CODE_POST_S;
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
ROUND (unpack_v8a_from_v32 (w[1])); ROUND (unpack_v8b_from_v32 (w[1])); ROUND (unpack_v8c_from_v32 (w[1])); ROUND (unpack_v8d_from_v32 (w[1]));
ROUND (unpack_v8a_from_v32 (w[2])); ROUND (unpack_v8b_from_v32 (w[2])); ROUND (unpack_v8c_from_v32 (w[2])); ROUND (unpack_v8d_from_v32 (w[2]));
ROUND (unpack_v8a_from_v32 (w[3])); ROUND (unpack_v8b_from_v32 (w[3])); ROUND (unpack_v8c_from_v32 (w[3])); ROUND (unpack_v8d_from_v32 (w[3]));
break;
default:
CODE_PRE;
ROUND ((w0 >> 0) & 0xff); ROUND ((w0 >> 8) & 0xff); ROUND ((w0 >> 16) & 0xff); ROUND ((w0 >> 24) & 0xff);
ROUND (unpack_v8a_from_v32 ( w0)); ROUND (unpack_v8b_from_v32 ( w0)); ROUND (unpack_v8c_from_v32 ( w0)); ROUND (unpack_v8d_from_v32 ( w0));
CODE_LOOP (pw_len - 4);
CODE_POST_S;
break;
}
CODE_POST_S;
}
KERNEL_FQ void m00200_m04 (KERN_ATTR_VECTOR ())

@ -16,7 +16,6 @@
#define COMPARE_M "inc_comp_multi_bs.cl"
#ifdef IS_NV
#undef _unroll
#define KXX_DECL
#endif

@ -16,7 +16,6 @@
#define COMPARE_M "inc_comp_multi_bs.cl"
#ifdef IS_NV
#undef _unroll
#define KXX_DECL
#endif

@ -356,7 +356,7 @@ CONSTANT_VK u32a c_skb[8][64] =
#define BOX(i,n,S) make_u32x ((S)[(n)][(i).s0], (S)[(n)][(i).s1], (S)[(n)][(i).s2], (S)[(n)][(i).s3], (S)[(n)][(i).s4], (S)[(n)][(i).s5], (S)[(n)][(i).s6], (S)[(n)][(i).s7], (S)[(n)][(i).s8], (S)[(n)][(i).s9], (S)[(n)][(i).sa], (S)[(n)][(i).sb], (S)[(n)][(i).sc], (S)[(n)][(i).sd], (S)[(n)][(i).se], (S)[(n)][(i).sf])
#endif
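The BOX macro above is a per-lane table gather: each component of the 16-wide index vector i selects its own entry from the 64-entry row S[n], and make_u32x reassembles the results into one vector. An equivalent scalar loop as a sketch (lane count handling assumed):

typedef unsigned int u32;

// r[k] = S[n][i[k]] for every vector lane k
static void box_gather (u32 *r, const u32 *i, const u32 (*S)[64],
                        const int n, const int lanes)
{
  for (int k = 0; k < lanes; k++) r[k] = S[n][i[k]];
}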
DECLSPEC void _des_crypt_encrypt (u32x *iv, u32x *data, u32x *Kc, u32x *Kd, LOCAL_AS u32 (*s_SPtrans)[64])
DECLSPEC void _des_crypt_encrypt (u32x *iv, u32x *data, u32x *Kc, u32x *Kd, SHM_TYPE u32 (*s_SPtrans)[64])
{
u32x r = data[0];
u32x l = data[1];
@ -398,7 +398,7 @@ DECLSPEC void _des_crypt_encrypt (u32x *iv, u32x *data, u32x *Kc, u32x *Kd, LOCA
iv[1] = r;
}
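The switch from LOCAL_AS to SHM_TYPE in these signatures lets the same DES helpers accept the S-box tables from either address space. SHM_TYPE is expected to resolve along these lines, mirroring how REAL_SHM already selects the table storage inside the kernels (a sketch, not the literal definition):

#ifdef REAL_SHM
#define SHM_TYPE LOCAL_AS    // tables staged into fast local (shared) memory
#else
#define SHM_TYPE CONSTANT_AS // tables read directly from constant memory
#endif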
DECLSPEC void _des_crypt_keysetup (u32x c, u32x d, u32x *Kc, u32x *Kd, LOCAL_AS u32 (*s_skb)[64])
DECLSPEC void _des_crypt_keysetup (u32x c, u32x d, u32x *Kc, u32x *Kd, SHM_TYPE u32 (*s_skb)[64])
{
u32x tt;
@ -516,6 +516,8 @@ KERNEL_FQ void m05500_m04 (KERN_ATTR_RULES ())
* sbox, kbox
*/
#ifdef REAL_SHM
LOCAL_VK u32 s_SPtrans[8][64];
LOCAL_VK u32 s_skb[8][64];
@ -542,6 +544,13 @@ KERNEL_FQ void m05500_m04 (KERN_ATTR_RULES ())
SYNC_THREADS ();
#else
CONSTANT_AS u32a (*s_SPtrans)[64] = c_SPtrans;
CONSTANT_AS u32a (*s_skb)[64] = c_skb;
#endif
if (gid >= gid_max) return;
/**
@ -729,6 +738,8 @@ KERNEL_FQ void m05500_s04 (KERN_ATTR_RULES ())
* sbox, kbox
*/
#ifdef REAL_SHM
LOCAL_VK u32 s_SPtrans[8][64];
LOCAL_VK u32 s_skb[8][64];
@ -755,6 +766,13 @@ KERNEL_FQ void m05500_s04 (KERN_ATTR_RULES ())
SYNC_THREADS ();
#else
CONSTANT_AS u32a (*s_SPtrans)[64] = c_SPtrans;
CONSTANT_AS u32a (*s_skb)[64] = c_skb;
#endif
if (gid >= gid_max) return;
/**

@ -356,7 +356,7 @@ CONSTANT_VK u32a c_skb[8][64] =
#define BOX(i,n,S) make_u32x ((S)[(n)][(i).s0], (S)[(n)][(i).s1], (S)[(n)][(i).s2], (S)[(n)][(i).s3], (S)[(n)][(i).s4], (S)[(n)][(i).s5], (S)[(n)][(i).s6], (S)[(n)][(i).s7], (S)[(n)][(i).s8], (S)[(n)][(i).s9], (S)[(n)][(i).sa], (S)[(n)][(i).sb], (S)[(n)][(i).sc], (S)[(n)][(i).sd], (S)[(n)][(i).se], (S)[(n)][(i).sf])
#endif
DECLSPEC void _des_crypt_encrypt (u32 *iv, u32 *data, u32 *Kc, u32 *Kd, LOCAL_AS u32 (*s_SPtrans)[64])
DECLSPEC void _des_crypt_encrypt (u32 *iv, u32 *data, u32 *Kc, u32 *Kd, SHM_TYPE u32 (*s_SPtrans)[64])
{
u32 r = data[0];
u32 l = data[1];
@ -398,7 +398,7 @@ DECLSPEC void _des_crypt_encrypt (u32 *iv, u32 *data, u32 *Kc, u32 *Kd, LOCAL_AS
iv[1] = r;
}
DECLSPEC void _des_crypt_keysetup (u32 c, u32 d, u32 *Kc, u32 *Kd, LOCAL_AS u32 (*s_skb)[64])
DECLSPEC void _des_crypt_keysetup (u32 c, u32 d, u32 *Kc, u32 *Kd, SHM_TYPE u32 (*s_skb)[64])
{
u32 tt;
@ -516,6 +516,8 @@ KERNEL_FQ void m05500_mxx (KERN_ATTR_RULES ())
* sbox, kbox
*/
#ifdef REAL_SHM
LOCAL_VK u32 s_SPtrans[8][64];
LOCAL_VK u32 s_skb[8][64];
@ -542,6 +544,13 @@ KERNEL_FQ void m05500_mxx (KERN_ATTR_RULES ())
SYNC_THREADS ();
#else
CONSTANT_AS u32a (*s_SPtrans)[64] = c_SPtrans;
CONSTANT_AS u32a (*s_skb)[64] = c_skb;
#endif
if (gid >= gid_max) return;
/**
@ -640,6 +649,8 @@ KERNEL_FQ void m05500_sxx (KERN_ATTR_RULES ())
* sbox, kbox
*/
#ifdef REAL_SHM
LOCAL_VK u32 s_SPtrans[8][64];
LOCAL_VK u32 s_skb[8][64];
@ -666,6 +677,13 @@ KERNEL_FQ void m05500_sxx (KERN_ATTR_RULES ())
SYNC_THREADS ();
#else
CONSTANT_AS u32a (*s_SPtrans)[64] = c_SPtrans;
CONSTANT_AS u32a (*s_skb)[64] = c_skb;
#endif
if (gid >= gid_max) return;
/**

@ -354,7 +354,7 @@ CONSTANT_VK u32a c_skb[8][64] =
#define BOX(i,n,S) make_u32x ((S)[(n)][(i).s0], (S)[(n)][(i).s1], (S)[(n)][(i).s2], (S)[(n)][(i).s3], (S)[(n)][(i).s4], (S)[(n)][(i).s5], (S)[(n)][(i).s6], (S)[(n)][(i).s7], (S)[(n)][(i).s8], (S)[(n)][(i).s9], (S)[(n)][(i).sa], (S)[(n)][(i).sb], (S)[(n)][(i).sc], (S)[(n)][(i).sd], (S)[(n)][(i).se], (S)[(n)][(i).sf])
#endif
DECLSPEC void _des_crypt_encrypt (u32x *iv, u32x *data, u32x *Kc, u32x *Kd, LOCAL_AS u32 (*s_SPtrans)[64])
DECLSPEC void _des_crypt_encrypt (u32x *iv, u32x *data, u32x *Kc, u32x *Kd, SHM_TYPE u32 (*s_SPtrans)[64])
{
u32x r = data[0];
u32x l = data[1];
@ -396,7 +396,7 @@ DECLSPEC void _des_crypt_encrypt (u32x *iv, u32x *data, u32x *Kc, u32x *Kd, LOCA
iv[1] = r;
}
DECLSPEC void _des_crypt_keysetup (u32x c, u32x d, u32x *Kc, u32x *Kd, LOCAL_AS u32 (*s_skb)[64])
DECLSPEC void _des_crypt_keysetup (u32x c, u32x d, u32x *Kc, u32x *Kd, SHM_TYPE u32 (*s_skb)[64])
{
u32x tt;
@ -514,6 +514,8 @@ KERNEL_FQ void m05500_m04 (KERN_ATTR_BASIC ())
* sbox, kbox
*/
#ifdef REAL_SHM
LOCAL_VK u32 s_SPtrans[8][64];
LOCAL_VK u32 s_skb[8][64];
@ -540,6 +542,13 @@ KERNEL_FQ void m05500_m04 (KERN_ATTR_BASIC ())
SYNC_THREADS ();
#else
CONSTANT_AS u32a (*s_SPtrans)[64] = c_SPtrans;
CONSTANT_AS u32a (*s_skb)[64] = c_skb;
#endif
if (gid >= gid_max) return;
/**
@ -780,6 +789,8 @@ KERNEL_FQ void m05500_s04 (KERN_ATTR_BASIC ())
* sbox, kbox
*/
#ifdef REAL_SHM
LOCAL_VK u32 s_SPtrans[8][64];
LOCAL_VK u32 s_skb[8][64];
@ -806,6 +817,13 @@ KERNEL_FQ void m05500_s04 (KERN_ATTR_BASIC ())
SYNC_THREADS ();
#else
CONSTANT_AS u32a (*s_SPtrans)[64] = c_SPtrans;
CONSTANT_AS u32a (*s_skb)[64] = c_skb;
#endif
if (gid >= gid_max) return;
/**

@ -354,7 +354,7 @@ CONSTANT_VK u32a c_skb[8][64] =
#define BOX(i,n,S) make_u32x ((S)[(n)][(i).s0], (S)[(n)][(i).s1], (S)[(n)][(i).s2], (S)[(n)][(i).s3], (S)[(n)][(i).s4], (S)[(n)][(i).s5], (S)[(n)][(i).s6], (S)[(n)][(i).s7], (S)[(n)][(i).s8], (S)[(n)][(i).s9], (S)[(n)][(i).sa], (S)[(n)][(i).sb], (S)[(n)][(i).sc], (S)[(n)][(i).sd], (S)[(n)][(i).se], (S)[(n)][(i).sf])
#endif
DECLSPEC void _des_crypt_encrypt (u32 *iv, u32 *data, u32 *Kc, u32 *Kd, LOCAL_AS u32 (*s_SPtrans)[64])
DECLSPEC void _des_crypt_encrypt (u32 *iv, u32 *data, u32 *Kc, u32 *Kd, SHM_TYPE u32 (*s_SPtrans)[64])
{
u32 r = data[0];
u32 l = data[1];
@ -396,7 +396,7 @@ DECLSPEC void _des_crypt_encrypt (u32 *iv, u32 *data, u32 *Kc, u32 *Kd, LOCAL_AS
iv[1] = r;
}
DECLSPEC void _des_crypt_keysetup (u32 c, u32 d, u32 *Kc, u32 *Kd, LOCAL_AS u32 (*s_skb)[64])
DECLSPEC void _des_crypt_keysetup (u32 c, u32 d, u32 *Kc, u32 *Kd, SHM_TYPE u32 (*s_skb)[64])
{
u32 tt;
@ -514,6 +514,8 @@ KERNEL_FQ void m05500_mxx (KERN_ATTR_BASIC ())
* sbox, kbox
*/
#ifdef REAL_SHM
LOCAL_VK u32 s_SPtrans[8][64];
LOCAL_VK u32 s_skb[8][64];
@ -540,6 +542,13 @@ KERNEL_FQ void m05500_mxx (KERN_ATTR_BASIC ())
SYNC_THREADS ();
#else
CONSTANT_AS u32a (*s_SPtrans)[64] = c_SPtrans;
CONSTANT_AS u32a (*s_skb)[64] = c_skb;
#endif
if (gid >= gid_max) return;
/**
@ -636,6 +645,8 @@ KERNEL_FQ void m05500_sxx (KERN_ATTR_BASIC ())
* sbox, kbox
*/
#ifdef REAL_SHM
LOCAL_VK u32 s_SPtrans[8][64];
LOCAL_VK u32 s_skb[8][64];
@ -662,6 +673,13 @@ KERNEL_FQ void m05500_sxx (KERN_ATTR_BASIC ())
SYNC_THREADS ();
#else
CONSTANT_AS u32a (*s_SPtrans)[64] = c_SPtrans;
CONSTANT_AS u32a (*s_skb)[64] = c_skb;
#endif
if (gid >= gid_max) return;
/**

@ -354,7 +354,7 @@ CONSTANT_VK u32a c_skb[8][64] =
#define BOX(i,n,S) make_u32x ((S)[(n)][(i).s0], (S)[(n)][(i).s1], (S)[(n)][(i).s2], (S)[(n)][(i).s3], (S)[(n)][(i).s4], (S)[(n)][(i).s5], (S)[(n)][(i).s6], (S)[(n)][(i).s7], (S)[(n)][(i).s8], (S)[(n)][(i).s9], (S)[(n)][(i).sa], (S)[(n)][(i).sb], (S)[(n)][(i).sc], (S)[(n)][(i).sd], (S)[(n)][(i).se], (S)[(n)][(i).sf])
#endif
DECLSPEC void _des_crypt_encrypt (u32x *iv, u32x *data, u32x *Kc, u32x *Kd, LOCAL_AS u32 (*s_SPtrans)[64])
DECLSPEC void _des_crypt_encrypt (u32x *iv, u32x *data, u32x *Kc, u32x *Kd, SHM_TYPE u32 (*s_SPtrans)[64])
{
u32x r = data[0];
u32x l = data[1];
@ -396,7 +396,7 @@ DECLSPEC void _des_crypt_encrypt (u32x *iv, u32x *data, u32x *Kc, u32x *Kd, LOCA
iv[1] = r;
}
DECLSPEC void _des_crypt_keysetup (u32x c, u32x d, u32x *Kc, u32x *Kd, LOCAL_AS u32 (*s_skb)[64])
DECLSPEC void _des_crypt_keysetup (u32x c, u32x d, u32x *Kc, u32x *Kd, SHM_TYPE u32 (*s_skb)[64])
{
u32x tt;
@ -500,7 +500,7 @@ DECLSPEC void transform_netntlmv1_key (const u32x w0, const u32x w1, u32x *out)
| ((k[7] & 0xff) << 24);
}
DECLSPEC void m05500m (LOCAL_AS u32 (*s_SPtrans)[64], LOCAL_AS u32 (*s_skb)[64], u32 *w, const u32 pw_len, KERN_ATTR_VECTOR ())
DECLSPEC void m05500m (SHM_TYPE u32 (*s_SPtrans)[64], SHM_TYPE u32 (*s_skb)[64], u32 *w, const u32 pw_len, KERN_ATTR_VECTOR ())
{
/**
* modifier
@ -657,7 +657,7 @@ DECLSPEC void m05500m (LOCAL_AS u32 (*s_SPtrans)[64], LOCAL_AS u32 (*s_skb)[64],
}
}
DECLSPEC void m05500s (LOCAL_AS u32 (*s_SPtrans)[64], LOCAL_AS u32 (*s_skb)[64], u32 *w, const u32 pw_len, KERN_ATTR_VECTOR ())
DECLSPEC void m05500s (SHM_TYPE u32 (*s_SPtrans)[64], SHM_TYPE u32 (*s_skb)[64], u32 *w, const u32 pw_len, KERN_ATTR_VECTOR ())
{
/**
* modifier
@ -847,6 +847,8 @@ KERNEL_FQ void m05500_m04 (KERN_ATTR_VECTOR ())
* sbox, kbox
*/
#ifdef REAL_SHM
LOCAL_VK u32 s_SPtrans[8][64];
LOCAL_VK u32 s_skb[8][64];
@ -873,6 +875,13 @@ KERNEL_FQ void m05500_m04 (KERN_ATTR_VECTOR ())
SYNC_THREADS ();
#else
CONSTANT_AS u32a (*s_SPtrans)[64] = c_SPtrans;
CONSTANT_AS u32a (*s_skb)[64] = c_skb;
#endif
if (gid >= gid_max) return;
/**
@ -921,6 +930,8 @@ KERNEL_FQ void m05500_m08 (KERN_ATTR_VECTOR ())
* sbox, kbox
*/
#ifdef REAL_SHM
LOCAL_VK u32 s_SPtrans[8][64];
LOCAL_VK u32 s_skb[8][64];
@ -947,6 +958,13 @@ KERNEL_FQ void m05500_m08 (KERN_ATTR_VECTOR ())
SYNC_THREADS ();
#else
CONSTANT_AS u32a (*s_SPtrans)[64] = c_SPtrans;
CONSTANT_AS u32a (*s_skb)[64] = c_skb;
#endif
if (gid >= gid_max) return;
/**
@ -995,6 +1013,8 @@ KERNEL_FQ void m05500_m16 (KERN_ATTR_VECTOR ())
* sbox, kbox
*/
#ifdef REAL_SHM
LOCAL_VK u32 s_SPtrans[8][64];
LOCAL_VK u32 s_skb[8][64];
@ -1021,6 +1041,13 @@ KERNEL_FQ void m05500_m16 (KERN_ATTR_VECTOR ())
SYNC_THREADS ();
#else
CONSTANT_AS u32a (*s_SPtrans)[64] = c_SPtrans;
CONSTANT_AS u32a (*s_skb)[64] = c_skb;
#endif
if (gid >= gid_max) return;
/**
@ -1069,6 +1096,8 @@ KERNEL_FQ void m05500_s04 (KERN_ATTR_VECTOR ())
* sbox, kbox
*/
#ifdef REAL_SHM
LOCAL_VK u32 s_SPtrans[8][64];
LOCAL_VK u32 s_skb[8][64];
@ -1095,6 +1124,13 @@ KERNEL_FQ void m05500_s04 (KERN_ATTR_VECTOR ())
SYNC_THREADS ();
#else
CONSTANT_AS u32a (*s_SPtrans)[64] = c_SPtrans;
CONSTANT_AS u32a (*s_skb)[64] = c_skb;
#endif
if (gid >= gid_max) return;
/**
@ -1143,6 +1179,8 @@ KERNEL_FQ void m05500_s08 (KERN_ATTR_VECTOR ())
* sbox, kbox
*/
#ifdef REAL_SHM
LOCAL_VK u32 s_SPtrans[8][64];
LOCAL_VK u32 s_skb[8][64];
@ -1169,6 +1207,13 @@ KERNEL_FQ void m05500_s08 (KERN_ATTR_VECTOR ())
SYNC_THREADS ();
#else
CONSTANT_AS u32a (*s_SPtrans)[64] = c_SPtrans;
CONSTANT_AS u32a (*s_skb)[64] = c_skb;
#endif
if (gid >= gid_max) return;
/**
@ -1217,6 +1262,8 @@ KERNEL_FQ void m05500_s16 (KERN_ATTR_VECTOR ())
* sbox, kbox
*/
#ifdef REAL_SHM
LOCAL_VK u32 s_SPtrans[8][64];
LOCAL_VK u32 s_skb[8][64];
@ -1243,6 +1290,13 @@ KERNEL_FQ void m05500_s16 (KERN_ATTR_VECTOR ())
SYNC_THREADS ();
#else
CONSTANT_AS u32a (*s_SPtrans)[64] = c_SPtrans;
CONSTANT_AS u32a (*s_skb)[64] = c_skb;
#endif
if (gid >= gid_max) return;
/**

@ -354,7 +354,7 @@ CONSTANT_VK u32a c_skb[8][64] =
#define BOX(i,n,S) make_u32x ((S)[(n)][(i).s0], (S)[(n)][(i).s1], (S)[(n)][(i).s2], (S)[(n)][(i).s3], (S)[(n)][(i).s4], (S)[(n)][(i).s5], (S)[(n)][(i).s6], (S)[(n)][(i).s7], (S)[(n)][(i).s8], (S)[(n)][(i).s9], (S)[(n)][(i).sa], (S)[(n)][(i).sb], (S)[(n)][(i).sc], (S)[(n)][(i).sd], (S)[(n)][(i).se], (S)[(n)][(i).sf])
#endif
DECLSPEC void _des_crypt_encrypt (u32x *iv, u32x *data, u32x *Kc, u32x *Kd, LOCAL_AS u32 (*s_SPtrans)[64])
DECLSPEC void _des_crypt_encrypt (u32x *iv, u32x *data, u32x *Kc, u32x *Kd, SHM_TYPE u32 (*s_SPtrans)[64])
{
u32x r = data[0];
u32x l = data[1];
@ -396,7 +396,7 @@ DECLSPEC void _des_crypt_encrypt (u32x *iv, u32x *data, u32x *Kc, u32x *Kd, LOCA
iv[1] = r;
}
DECLSPEC void _des_crypt_keysetup (u32x c, u32x d, u32x *Kc, u32x *Kd, LOCAL_AS u32 (*s_skb)[64])
DECLSPEC void _des_crypt_keysetup (u32x c, u32x d, u32x *Kc, u32x *Kd, SHM_TYPE u32 (*s_skb)[64])
{
u32x tt;
@ -514,6 +514,8 @@ KERNEL_FQ void m05500_mxx (KERN_ATTR_VECTOR ())
* sbox, kbox
*/
#ifdef REAL_SHM
LOCAL_VK u32 s_SPtrans[8][64];
LOCAL_VK u32 s_skb[8][64];
@ -540,6 +542,13 @@ KERNEL_FQ void m05500_mxx (KERN_ATTR_VECTOR ())
SYNC_THREADS ();
#else
CONSTANT_AS u32a (*s_SPtrans)[64] = c_SPtrans;
CONSTANT_AS u32a (*s_skb)[64] = c_skb;
#endif
if (gid >= gid_max) return;
/**
@ -649,6 +658,8 @@ KERNEL_FQ void m05500_sxx (KERN_ATTR_VECTOR ())
* sbox, kbox
*/
#ifdef REAL_SHM
LOCAL_VK u32 s_SPtrans[8][64];
LOCAL_VK u32 s_skb[8][64];
@ -675,6 +686,13 @@ KERNEL_FQ void m05500_sxx (KERN_ATTR_VECTOR ())
SYNC_THREADS ();
#else
CONSTANT_AS u32a (*s_SPtrans)[64] = c_SPtrans;
CONSTANT_AS u32a (*s_skb)[64] = c_skb;
#endif
if (gid >= gid_max) return;
/**

@ -152,7 +152,7 @@ KERNEL_FQ void m07800_m04 (KERN_ATTR_RULES ())
#else
CONSTANT_AS u32a *s_theMagicArray = theMagicArray;
CONSTANT_AS u32a (*s_theMagicArray)[16] = theMagicArray;
#endif
@ -457,7 +457,7 @@ KERNEL_FQ void m07800_s04 (KERN_ATTR_RULES ())
#else
CONSTANT_AS u32a *s_theMagicArray = theMagicArray;
CONSTANT_AS u32a (*s_theMagicArray)[16] = theMagicArray;
#endif
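The fix above matters because an element pointer and a pointer to a 16-word row index differently: with the old declaration, s_theMagicArray[i] stepped one u32 at a time instead of one row at a time. A minimal illustration (array shape assumed):

typedef unsigned int u32;

u32 magic[4][16];                 // stand-in for theMagicArray; shape assumed

u32  *wrong      = (u32 *) magic; // wrong[1] is the second element
u32 (*right)[16] = magic;         // right[1] is the second 16-word row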

@ -150,7 +150,7 @@ KERNEL_FQ void m07800_m04 (KERN_ATTR_BASIC ())
#else
CONSTANT_AS u32a *s_theMagicArray = theMagicArray;
CONSTANT_AS u32a (*s_theMagicArray)[16] = theMagicArray;
#endif
@ -519,7 +519,7 @@ KERNEL_FQ void m07800_s04 (KERN_ATTR_BASIC ())
#else
CONSTANT_AS u32a *s_theMagicArray = theMagicArray;
CONSTANT_AS u32a (*s_theMagicArray)[16] = theMagicArray;
#endif

@ -626,7 +626,7 @@ KERNEL_FQ void m07800_m04 (KERN_ATTR_BASIC ())
#else
CONSTANT_AS u32a *s_theMagicArray = theMagicArray;
CONSTANT_AS u32a (*s_theMagicArray)[16] = theMagicArray;
#endif
@ -711,7 +711,7 @@ KERNEL_FQ void m07800_m08 (KERN_ATTR_BASIC ())
#else
CONSTANT_AS u32a *s_theMagicArray = theMagicArray;
CONSTANT_AS u32a (*s_theMagicArray)[16] = theMagicArray;
#endif
@ -800,7 +800,7 @@ KERNEL_FQ void m07800_s04 (KERN_ATTR_BASIC ())
#else
CONSTANT_AS u32a *s_theMagicArray = theMagicArray;
CONSTANT_AS u32a (*s_theMagicArray)[16] = theMagicArray;
#endif
@ -885,7 +885,7 @@ KERNEL_FQ void m07800_s08 (KERN_ATTR_BASIC ())
#else
CONSTANT_AS u32a *s_theMagicArray = theMagicArray;
CONSTANT_AS u32a (*s_theMagicArray)[16] = theMagicArray;
#endif

@ -152,7 +152,7 @@ KERNEL_FQ void m07801_m04 (KERN_ATTR_RULES ())
#else
CONSTANT_AS u32a *s_theMagicArray = theMagicArray;
CONSTANT_AS u32a (*s_theMagicArray)[16] = theMagicArray;
#endif
@ -457,7 +457,7 @@ KERNEL_FQ void m07801_s04 (KERN_ATTR_RULES ())
#else
CONSTANT_AS u32a *s_theMagicArray = theMagicArray;
CONSTANT_AS u32a (*s_theMagicArray)[16] = theMagicArray;
#endif

@ -150,7 +150,7 @@ KERNEL_FQ void m07801_m04 (KERN_ATTR_BASIC ())
#else
CONSTANT_AS u32a *s_theMagicArray = theMagicArray;
CONSTANT_AS u32a (*s_theMagicArray)[16] = theMagicArray;
#endif
@ -519,7 +519,7 @@ KERNEL_FQ void m07801_s04 (KERN_ATTR_BASIC ())
#else
CONSTANT_AS u32a *s_theMagicArray = theMagicArray;
CONSTANT_AS u32a (*s_theMagicArray)[16] = theMagicArray;
#endif

@ -626,7 +626,7 @@ KERNEL_FQ void m07801_m04 (KERN_ATTR_BASIC ())
#else
CONSTANT_AS u32a *s_theMagicArray = theMagicArray;
CONSTANT_AS u32a (*s_theMagicArray)[16] = theMagicArray;
#endif
@ -711,7 +711,7 @@ KERNEL_FQ void m07801_m08 (KERN_ATTR_BASIC ())
#else
CONSTANT_AS u32a *s_theMagicArray = theMagicArray;
CONSTANT_AS u32a (*s_theMagicArray)[16] = theMagicArray;
#endif
@ -800,7 +800,7 @@ KERNEL_FQ void m07801_s04 (KERN_ATTR_BASIC ())
#else
CONSTANT_AS u32a *s_theMagicArray = theMagicArray;
CONSTANT_AS u32a (*s_theMagicArray)[16] = theMagicArray;
#endif
@ -885,7 +885,7 @@ KERNEL_FQ void m07801_s08 (KERN_ATTR_BASIC ())
#else
CONSTANT_AS u32a *s_theMagicArray = theMagicArray;
CONSTANT_AS u32a (*s_theMagicArray)[16] = theMagicArray;
#endif

@ -16,7 +16,6 @@
#define COMPARE_M "inc_comp_multi_bs.cl"
#ifdef IS_NV
#undef _unroll
#define KXX_DECL
#endif

@ -81,6 +81,7 @@
- Fixed cracking of Cisco-PIX and Cisco-ASA MD5 passwords in mask-attack mode if mask > length 16
- Fixed cracking of Electrum Wallet Salt-Type 2 hashes
- Fixed cracking of NetNTLMv1 passwords in mask-attack mode if mask > length 16 (optimized kernels only)
- Fixed cracking of VeraCrypt Streebog-512 hashes (CPU only)
- Fixed cracking of raw Streebog-HMAC 256 and 512 hashes with password of length >= 64
- Fixed cracking of raw Whirlpool hashes with password of length >= 32
- Fixed incorrect progress-only result in a special race condition

@ -407,7 +407,6 @@ typedef enum opts_type
OPTS_TYPE_ST_ADDBITS15 = (1ULL << 24),
OPTS_TYPE_ST_HEX = (1ULL << 25),
OPTS_TYPE_ST_BASE64 = (1ULL << 26),
OPTS_TYPE_ST_HASH_MD5 = (1ULL << 27),
OPTS_TYPE_HASH_COPY = (1ULL << 28),
OPTS_TYPE_HASH_SPLIT = (1ULL << 29),
OPTS_TYPE_LOOP_EXTENDED = (1ULL << 30), // a kernel which is called each time the normal _loop kernel finishes
@ -421,7 +420,6 @@ typedef enum opts_type
OPTS_TYPE_AUX3 = (1ULL << 37),
OPTS_TYPE_AUX4 = (1ULL << 38),
OPTS_TYPE_BINARY_HASHFILE = (1ULL << 39),
OPTS_TYPE_PREFERED_THREAD = (1ULL << 40), // some algorithms (complicated ones with many branches) benefit from this
OPTS_TYPE_PT_ADD06 = (1ULL << 41),
OPTS_TYPE_KEYBOARD_MAPPING = (1ULL << 42),
OPTS_TYPE_DEEP_COMP_KERNEL = (1ULL << 43), // if we have to iterate through each hash inside the comp kernel, for example if each hash has to be decrypted separately

@ -25,7 +25,7 @@ static const u64 OPTS_TYPE = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_TM_KERNEL;
static const u32 SALT_TYPE = SALT_TYPE_EMBEDDED;
static const char *ST_PASS = NULL; // the self-test can't work because the salt is not part of the code at compile-time
static const char *ST_HASH = "8133vc.5rieNk";
static const char *ST_HASH = "24leDr0hHfb3A";
u32 module_attack_exec (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return ATTACK_EXEC; }
u32 module_dgst_pos0 (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS0; }

@ -79,6 +79,25 @@ static void transform_netntlmv1_key (const u8 *nthash, u8 *key)
key[7] |= 0x01;
}
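For context, transform_netntlmv1_key spreads 7 bytes of the NT hash across 8 DES key bytes of 7 bits each, with the low bit of every byte reserved for DES parity; the key[7] |= 0x01 line visible above forces that bit on. A plain-C sketch of the usual expansion (helper name hypothetical):

typedef unsigned char u8;

// stretch a 56-bit block into a 64-bit DES key, 7 bits per byte
static void stretch_56_to_64 (const u8 *in7, u8 *key8)
{
  key8[0] =   in7[0] >> 0;
  key8[1] = ((in7[0] << 7) | (in7[1] >> 1));
  key8[2] = ((in7[1] << 6) | (in7[2] >> 2));
  key8[3] = ((in7[2] << 5) | (in7[3] >> 3));
  key8[4] = ((in7[3] << 4) | (in7[4] >> 4));
  key8[5] = ((in7[4] << 3) | (in7[5] >> 5));
  key8[6] = ((in7[5] << 2) | (in7[6] >> 6));
  key8[7] =   in7[6] << 1;

  for (int i = 0; i < 8; i++) key8[i] |= 0x01; // force the parity bit
}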
char *module_jit_build_options (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra, MAYBE_UNUSED const hashes_t *hashes, MAYBE_UNUSED const hc_device_param_t *device_param)
{
char *jit_build_options = NULL;
// Extra treatment for Apple systems
if (device_param->opencl_platform_vendor_id == VENDOR_ID_APPLE)
{
return jit_build_options;
}
// Intel CPU
if ((device_param->opencl_device_vendor_id == VENDOR_ID_INTEL_SDK) && (device_param->opencl_device_type & CL_DEVICE_TYPE_CPU))
{
hc_asprintf (&jit_build_options, "-D _unroll");
}
return jit_build_options;
}
u64 module_esalt_size (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra)
{
const u64 esalt_size = (const u64) sizeof (netntlm_t);
@ -424,7 +443,7 @@ void module_init (module_ctx_t *module_ctx)
module_ctx->module_hook23 = MODULE_DEFAULT;
module_ctx->module_hook_salt_size = MODULE_DEFAULT;
module_ctx->module_hook_size = MODULE_DEFAULT;
module_ctx->module_jit_build_options = MODULE_DEFAULT;
module_ctx->module_jit_build_options = module_jit_build_options;
module_ctx->module_jit_cache_disable = MODULE_DEFAULT;
module_ctx->module_kernel_accel_max = MODULE_DEFAULT;
module_ctx->module_kernel_accel_min = MODULE_DEFAULT;

@ -27,7 +27,7 @@ static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE
static const u64 OPTS_TYPE = OPTS_TYPE_PT_GENERATE_BE;
static const u32 SALT_TYPE = SALT_TYPE_GENERIC;
static const char *ST_PASS = "hashcat";
static const char *ST_HASH = "$telegram$1*518c001aeb3b4ae96c6173be4cebe60a85f67b1e087b045935849e2f815b5e41*25184098058621950709328221838128";
static const char *ST_HASH = "$telegram$0*518c001aeb3b4ae96c6173be4cebe60a85f67b1e087b045935849e2f815b5e41*25184098058621950709328221838128";
u32 module_attack_exec (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return ATTACK_EXEC; }
u32 module_dgst_pos0 (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS0; }
@ -84,7 +84,7 @@ int module_hash_decode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSE
const u8 *version_pos = token.buf[1];
if (version_pos[0] != '1') return (PARSER_SALT_VALUE);
if (version_pos[0] != '0') return (PARSER_SALT_VALUE);
const u8 *hash_pos = token.buf[2];
@ -162,7 +162,7 @@ int module_hash_encode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSE
const int line_len = snprintf (line_buf, line_size, "%s%i*%08x%08x%08x%08x%08x%08x%08x%08x*%08x%08x%08x%08x",
SIGNATURE_TELEGRAM,
1,
0,
tmp[0],
tmp[1],
tmp[2],

@ -21,7 +21,7 @@ sub module_generate_hash
my $digest = sha256_hex ($salt_bin . $word . $salt_bin);
my $hash = sprintf ("\$telegram\$1*%s*%s", $digest, $salt);
my $hash = sprintf ("\$telegram\$0*%s*%s", $digest, $salt);
return $hash;
}
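With the version digit corrected from 1 to 0 on both the C and Perl sides, a valid hash line has the shape (derived from the ST_HASH and the sprintf/snprintf formats above):

$telegram$0*<SHA-256 digest, 64 hex chars>*<salt, 32 hex chars>

where the digest is sha256 (salt . password . salt), as in the Perl generator.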
@ -40,7 +40,7 @@ sub module_verify_hash
my $version = substr ($data[0], 10);
return unless ($version eq "1");
return unless ($version eq "0");
my $digest = $data[1];
my $salt = $data[2];
