
Improve some NVidia specific inline assembly

jsteube 2017-08-28 14:15:47 +02:00
parent 9de1e557bb
commit 918578bee1
8 changed files with 537 additions and 329 deletions
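In short, the diff replaces the portable byte-swap expressions in the NVIDIA (IS_NV) code path with PTX prmt.b32 byte-permute and mov.b64 pack/unpack inline assembly, adds the previously missing volatile qualifier to the existing prmt/bfe/shf asm statements, renames the t temporaries to r in the AMD ROCm V_PERM_B32 helpers, and has the m05300/m05400 kernels stage nr_buf into __local memory as s_nr_buf. A minimal standalone sketch of the new byte-swap idiom, outside hashcat's own headers (it assumes the OpenCL source is compiled by NVIDIA's driver, which accepts PTX inline asm; bswap32/bswap64 are illustrative names, not identifiers from this commit):

  static uint bswap32 (const uint v)
  {
    uint r;

    // prmt.b32 with selector 0x0123 picks bytes 3,2,1,0 of v: a one-instruction byte reverse
    asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r) : "r"(v));

    return r;
  }

  static ulong bswap64 (const ulong v)
  {
    uint lo, hi;

    // unpack the 64-bit value into its low and high 32-bit halves
    asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(lo), "=r"(hi) : "l"(v));

    uint lo_sw, hi_sw;

    // byte-reverse each half independently
    asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(lo_sw) : "r"(lo));
    asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(hi_sw) : "r"(hi));

    ulong r;

    // repack with the halves exchanged to complete the 64-bit swap
    asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r) : "r"(hi_sw), "r"(lo_sw));

    return r;
  }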

View File

@@ -178,11 +178,11 @@ static u32 swap32_S (const u32 v)
  {
  #ifdef IS_AMD_ROCM
- u32 t;
+ u32 r;
- __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t) : "v"(v), "v"(0x00010203));
+ __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r) : "v"(v), "v"(0x00010203));
- return t;
+ return r;
  #else
@@ -247,41 +247,41 @@ static u32x swap32 (const u32x v)
  {
  #ifdef IS_AMD_ROCM
- u32x t;
+ u32x r;
  #if VECT_SIZE == 1
  __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t) : "v"(v), "v"(0x00010203));
  #endif
  #if VECT_SIZE >= 2
- __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t.s0) : "v"(v.s0), "v"(0x00010203));
+ __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r.s0) : "v"(v.s0), "v"(0x00010203));
- __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t.s1) : "v"(v.s1), "v"(0x00010203));
+ __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r.s1) : "v"(v.s1), "v"(0x00010203));
  #endif
  #if VECT_SIZE >= 4
- __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t.s2) : "v"(v.s2), "v"(0x00010203));
+ __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r.s2) : "v"(v.s2), "v"(0x00010203));
- __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t.s3) : "v"(v.s3), "v"(0x00010203));
+ __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r.s3) : "v"(v.s3), "v"(0x00010203));
  #endif
  #if VECT_SIZE >= 8
- __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t.s4) : "v"(v.s4), "v"(0x00010203));
+ __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r.s4) : "v"(v.s4), "v"(0x00010203));
- __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t.s5) : "v"(v.s5), "v"(0x00010203));
+ __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r.s5) : "v"(v.s5), "v"(0x00010203));
- __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t.s6) : "v"(v.s6), "v"(0x00010203));
+ __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r.s6) : "v"(v.s6), "v"(0x00010203));
- __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t.s7) : "v"(v.s7), "v"(0x00010203));
+ __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r.s7) : "v"(v.s7), "v"(0x00010203));
  #endif
  #if VECT_SIZE >= 16
- __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t.s8) : "v"(v.s8), "v"(0x00010203));
+ __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r.s8) : "v"(v.s8), "v"(0x00010203));
- __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t.s9) : "v"(v.s9), "v"(0x00010203));
+ __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r.s9) : "v"(v.s9), "v"(0x00010203));
- __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t.sa) : "v"(v.sa), "v"(0x00010203));
+ __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r.sa) : "v"(v.sa), "v"(0x00010203));
- __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t.sb) : "v"(v.sb), "v"(0x00010203));
+ __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r.sb) : "v"(v.sb), "v"(0x00010203));
- __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t.sc) : "v"(v.sc), "v"(0x00010203));
+ __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r.sc) : "v"(v.sc), "v"(0x00010203));
- __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t.sd) : "v"(v.sd), "v"(0x00010203));
+ __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r.sd) : "v"(v.sd), "v"(0x00010203));
- __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t.se) : "v"(v.se), "v"(0x00010203));
+ __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r.se) : "v"(v.se), "v"(0x00010203));
- __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t.sf) : "v"(v.sf), "v"(0x00010203));
+ __asm__ volatile ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r.sf) : "v"(v.sf), "v"(0x00010203));
  #endif
- return t;
+ return r;
  #else
@@ -553,12 +553,31 @@ static u32 __add3_S (const u32 a, const u32 b, const u32 c)
  #ifdef IS_NV
  static u32 swap32_S (const u32 v)
  {
- return (as_uint (as_uchar4 (v).s3210));
+ u32 r;
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r) : "r"(v));
+ return r;
  }
  static u64 swap64_S (const u64 v)
  {
- return (as_ulong (as_uchar8 (v).s76543210));
+ u32 il;
+ u32 ir;
+ asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il), "=r"(ir) : "l"(v));
+ u32 tl;
+ u32 tr;
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl) : "r"(il));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr) : "r"(ir));
+ u64 r;
+ asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r) : "r"(tr), "r"(tl));
+ return r;
  }
  static u32 rotr32_S (const u32 a, const u32 n)
@@ -583,22 +602,167 @@ static u64 rotl64_S (const u64 a, const u32 n)
  static u32x swap32 (const u32x v)
  {
- return ((v >> 24) & 0x000000ff)
-      | ((v >> 8) & 0x0000ff00)
-      | ((v << 8) & 0x00ff0000)
-      | ((v << 24) & 0xff000000);
+ u32x r;
+ #if VECT_SIZE == 1
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r) : "r"(v));
+ #endif
+ #if VECT_SIZE >= 2
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s0) : "r"(v.s0));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s1) : "r"(v.s1));
+ #endif
+ #if VECT_SIZE >= 4
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s2) : "r"(v.s2));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s3) : "r"(v.s3));
+ #endif
+ #if VECT_SIZE >= 8
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s4) : "r"(v.s4));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s5) : "r"(v.s5));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s6) : "r"(v.s6));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s7) : "r"(v.s7));
+ #endif
+ #if VECT_SIZE >= 16
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s8) : "r"(v.s8));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s9) : "r"(v.s9));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.sa) : "r"(v.sa));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.sb) : "r"(v.sb));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.sc) : "r"(v.sc));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.sd) : "r"(v.sd));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.se) : "r"(v.se));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.sf) : "r"(v.sf));
+ #endif
+ return r;
  }
  static u64x swap64 (const u64x v)
  {
- return ((v >> 56) & 0x00000000000000ff)
-      | ((v >> 40) & 0x000000000000ff00)
-      | ((v >> 24) & 0x0000000000ff0000)
-      | ((v >> 8) & 0x00000000ff000000)
-      | ((v << 8) & 0x000000ff00000000)
-      | ((v << 24) & 0x0000ff0000000000)
-      | ((v << 40) & 0x00ff000000000000)
-      | ((v << 56) & 0xff00000000000000);
+ u32x il;
+ u32x ir;
+ #if VECT_SIZE == 1
+ asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il), "=r"(ir) : "l"(v));
+ #endif
+ #if VECT_SIZE >= 2
+ asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s0), "=r"(ir.s0) : "l"(v.s0));
+ asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s1), "=r"(ir.s1) : "l"(v.s1));
+ #endif
+ #if VECT_SIZE >= 4
+ asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s2), "=r"(ir.s2) : "l"(v.s2));
+ asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s3), "=r"(ir.s3) : "l"(v.s3));
+ #endif
+ #if VECT_SIZE >= 8
+ asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s4), "=r"(ir.s4) : "l"(v.s4));
+ asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s5), "=r"(ir.s5) : "l"(v.s5));
+ asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s6), "=r"(ir.s6) : "l"(v.s6));
+ asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s7), "=r"(ir.s7) : "l"(v.s7));
+ #endif
+ #if VECT_SIZE >= 16
+ asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s8), "=r"(ir.s8) : "l"(v.s8));
+ asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s9), "=r"(ir.s9) : "l"(v.s9));
+ asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.sa), "=r"(ir.sa) : "l"(v.sa));
+ asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.sb), "=r"(ir.sb) : "l"(v.sb));
+ asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.sc), "=r"(ir.sc) : "l"(v.sc));
+ asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.sd), "=r"(ir.sd) : "l"(v.sd));
+ asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.se), "=r"(ir.se) : "l"(v.se));
+ asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.sf), "=r"(ir.sf) : "l"(v.sf));
+ #endif
+ u32x tl;
+ u32x tr;
+ #if VECT_SIZE == 1
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl) : "r"(il));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr) : "r"(ir));
+ #endif
+ #if VECT_SIZE >= 2
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s0) : "r"(il.s0));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s0) : "r"(ir.s0));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s1) : "r"(il.s1));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s1) : "r"(ir.s1));
+ #endif
+ #if VECT_SIZE >= 4
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s2) : "r"(il.s2));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s2) : "r"(ir.s2));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s3) : "r"(il.s3));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s3) : "r"(ir.s3));
+ #endif
+ #if VECT_SIZE >= 8
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s4) : "r"(il.s4));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s4) : "r"(ir.s4));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s5) : "r"(il.s5));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s5) : "r"(ir.s5));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s6) : "r"(il.s6));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s6) : "r"(ir.s6));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s7) : "r"(il.s7));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s7) : "r"(ir.s7));
+ #endif
+ #if VECT_SIZE >= 16
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s8) : "r"(il.s8));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s8) : "r"(ir.s8));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s9) : "r"(il.s9));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s9) : "r"(ir.s9));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.sa) : "r"(il.sa));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.sa) : "r"(ir.sa));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.sb) : "r"(il.sb));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.sb) : "r"(ir.sb));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.sc) : "r"(il.sc));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.sc) : "r"(ir.sc));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.sd) : "r"(il.sd));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.sd) : "r"(ir.sd));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.se) : "r"(il.se));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.se) : "r"(ir.se));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.sf) : "r"(il.sf));
+ asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.sf) : "r"(ir.sf));
+ #endif
+ u64x r;
+ #if VECT_SIZE == 1
+ asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r) : "r"(tr), "r"(tl));
+ #endif
+ #if VECT_SIZE >= 2
+ asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s0) : "r"(tr.s0), "r"(tl.s0));
+ asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s1) : "r"(tr.s1), "r"(tl.s1));
+ #endif
+ #if VECT_SIZE >= 4
+ asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s2) : "r"(tr.s2), "r"(tl.s2));
+ asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s3) : "r"(tr.s3), "r"(tl.s3));
+ #endif
+ #if VECT_SIZE >= 8
+ asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s4) : "r"(tr.s4), "r"(tl.s4));
+ asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s5) : "r"(tr.s5), "r"(tl.s5));
+ asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s6) : "r"(tr.s6), "r"(tl.s6));
+ asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s7) : "r"(tr.s7), "r"(tl.s7));
+ #endif
+ #if VECT_SIZE >= 16
+ asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s8) : "r"(tr.s8), "r"(tl.s8));
+ asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s9) : "r"(tr.s9), "r"(tl.s9));
+ asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.sa) : "r"(tr.sa), "r"(tl.sa));
+ asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.sb) : "r"(tr.sb), "r"(tl.sb));
+ asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.sc) : "r"(tr.sc), "r"(tl.sc));
+ asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.sd) : "r"(tr.sd), "r"(tl.sd));
+ asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.se) : "r"(tr.se), "r"(tl.se));
+ asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.sf) : "r"(tr.sf), "r"(tl.sf));
+ #endif
+ return r;
  }
  static u32x rotr32 (const u32x a, const u32 n)
@@ -626,35 +790,35 @@ static u32x __byte_perm (const u32x a, const u32x b, const u32x c)
  u32x r;
  #if VECT_SIZE == 1
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c) );
+ asm volatile ("prmt.b32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c) );
  #endif
  #if VECT_SIZE >= 2
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s0) : "r"(a.s0), "r"(b.s0), "r"(c.s0));
+ asm volatile ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s0) : "r"(a.s0), "r"(b.s0), "r"(c.s0));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s1) : "r"(a.s1), "r"(b.s1), "r"(c.s1));
+ asm volatile ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s1) : "r"(a.s1), "r"(b.s1), "r"(c.s1));
  #endif
  #if VECT_SIZE >= 4
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s2) : "r"(a.s2), "r"(b.s2), "r"(c.s2));
+ asm volatile ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s2) : "r"(a.s2), "r"(b.s2), "r"(c.s2));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s3) : "r"(a.s3), "r"(b.s3), "r"(c.s3));
+ asm volatile ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s3) : "r"(a.s3), "r"(b.s3), "r"(c.s3));
  #endif
  #if VECT_SIZE >= 8
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s4) : "r"(a.s4), "r"(b.s4), "r"(c.s4));
+ asm volatile ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s4) : "r"(a.s4), "r"(b.s4), "r"(c.s4));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s5) : "r"(a.s5), "r"(b.s5), "r"(c.s5));
+ asm volatile ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s5) : "r"(a.s5), "r"(b.s5), "r"(c.s5));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s6) : "r"(a.s6), "r"(b.s6), "r"(c.s6));
+ asm volatile ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s6) : "r"(a.s6), "r"(b.s6), "r"(c.s6));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s7) : "r"(a.s7), "r"(b.s7), "r"(c.s7));
+ asm volatile ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s7) : "r"(a.s7), "r"(b.s7), "r"(c.s7));
  #endif
  #if VECT_SIZE >= 16
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s8) : "r"(a.s8), "r"(b.s8), "r"(c.s8));
+ asm volatile ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s8) : "r"(a.s8), "r"(b.s8), "r"(c.s8));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s9) : "r"(a.s9), "r"(b.s9), "r"(c.s9));
+ asm volatile ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.s9) : "r"(a.s9), "r"(b.s9), "r"(c.s9));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sa) : "r"(a.sa), "r"(b.sa), "r"(c.sa));
+ asm volatile ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sa) : "r"(a.sa), "r"(b.sa), "r"(c.sa));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sb) : "r"(a.sb), "r"(b.sb), "r"(c.sb));
+ asm volatile ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sb) : "r"(a.sb), "r"(b.sb), "r"(c.sb));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sc) : "r"(a.sc), "r"(b.sc), "r"(c.sc));
+ asm volatile ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sc) : "r"(a.sc), "r"(b.sc), "r"(c.sc));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sd) : "r"(a.sd), "r"(b.sd), "r"(c.sd));
+ asm volatile ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sd) : "r"(a.sd), "r"(b.sd), "r"(c.sd));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.se) : "r"(a.se), "r"(b.se), "r"(c.se));
+ asm volatile ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.se) : "r"(a.se), "r"(b.se), "r"(c.se));
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sf) : "r"(a.sf), "r"(b.sf), "r"(c.sf));
+ asm volatile ("prmt.b32 %0, %1, %2, %3;" : "=r"(r.sf) : "r"(a.sf), "r"(b.sf), "r"(c.sf));
  #endif
  return r;
@@ -664,7 +828,7 @@ static u32 __byte_perm_S (const u32 a, const u32 b, const u32 c)
  {
  u32 r;
- asm ("prmt.b32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));
+ asm volatile ("prmt.b32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));
  return r;
  }
@@ -674,35 +838,35 @@ static u32x __bfe (const u32x a, const u32x b, const u32x c)
  u32x r;
  #if VECT_SIZE == 1
- asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));
+ asm volatile ("bfe.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));
  #endif
  #if VECT_SIZE >= 2
- asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s0) : "r"(a.s0), "r"(b.s0), "r"(c.s0));
+ asm volatile ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s0) : "r"(a.s0), "r"(b.s0), "r"(c.s0));
- asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s1) : "r"(a.s1), "r"(b.s1), "r"(c.s1));
+ asm volatile ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s1) : "r"(a.s1), "r"(b.s1), "r"(c.s1));
  #endif
  #if VECT_SIZE >= 4
- asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s2) : "r"(a.s2), "r"(b.s2), "r"(c.s2));
+ asm volatile ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s2) : "r"(a.s2), "r"(b.s2), "r"(c.s2));
- asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s3) : "r"(a.s3), "r"(b.s3), "r"(c.s3));
+ asm volatile ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s3) : "r"(a.s3), "r"(b.s3), "r"(c.s3));
  #endif
  #if VECT_SIZE >= 8
- asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s4) : "r"(a.s4), "r"(b.s4), "r"(c.s4));
+ asm volatile ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s4) : "r"(a.s4), "r"(b.s4), "r"(c.s4));
- asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s5) : "r"(a.s5), "r"(b.s5), "r"(c.s5));
+ asm volatile ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s5) : "r"(a.s5), "r"(b.s5), "r"(c.s5));
- asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s6) : "r"(a.s6), "r"(b.s6), "r"(c.s6));
+ asm volatile ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s6) : "r"(a.s6), "r"(b.s6), "r"(c.s6));
- asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s7) : "r"(a.s7), "r"(b.s7), "r"(c.s7));
+ asm volatile ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s7) : "r"(a.s7), "r"(b.s7), "r"(c.s7));
  #endif
  #if VECT_SIZE >= 16
- asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s8) : "r"(a.s8), "r"(b.s8), "r"(c.s8));
+ asm volatile ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s8) : "r"(a.s8), "r"(b.s8), "r"(c.s8));
- asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s9) : "r"(a.s9), "r"(b.s9), "r"(c.s9));
+ asm volatile ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.s9) : "r"(a.s9), "r"(b.s9), "r"(c.s9));
- asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.sa) : "r"(a.sa), "r"(b.sa), "r"(c.sa));
+ asm volatile ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.sa) : "r"(a.sa), "r"(b.sa), "r"(c.sa));
- asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.sb) : "r"(a.sb), "r"(b.sb), "r"(c.sb));
+ asm volatile ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.sb) : "r"(a.sb), "r"(b.sb), "r"(c.sb));
- asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.sc) : "r"(a.sc), "r"(b.sc), "r"(c.sc));
+ asm volatile ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.sc) : "r"(a.sc), "r"(b.sc), "r"(c.sc));
- asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.sd) : "r"(a.sd), "r"(b.sd), "r"(c.sd));
+ asm volatile ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.sd) : "r"(a.sd), "r"(b.sd), "r"(c.sd));
- asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.se) : "r"(a.se), "r"(b.se), "r"(c.se));
+ asm volatile ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.se) : "r"(a.se), "r"(b.se), "r"(c.se));
- asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.sf) : "r"(a.sf), "r"(b.sf), "r"(c.sf));
+ asm volatile ("bfe.u32 %0, %1, %2, %3;" : "=r"(r.sf) : "r"(a.sf), "r"(b.sf), "r"(c.sf));
  #endif
  return r;
@@ -712,7 +876,7 @@ static u32 __bfe_S (const u32 a, const u32 b, const u32 c)
  {
  u32 r;
- asm ("bfe.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));
+ asm volatile ("bfe.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));
  return r;
  }
@@ -724,35 +888,35 @@ static u32x amd_bytealign (const u32x a, const u32x b, const u32x c)
  #if CUDA_ARCH >= 350
  #if VECT_SIZE == 1
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r) : "r"(b), "r"(a), "r"((c & 3) * 8));
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r) : "r"(b), "r"(a), "r"((c & 3) * 8));
  #endif
  #if VECT_SIZE >= 2
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s0) : "r"(b.s0), "r"(a.s0), "r"((c.s0 & 3) * 8));
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s0) : "r"(b.s0), "r"(a.s0), "r"((c.s0 & 3) * 8));
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s1) : "r"(b.s1), "r"(a.s1), "r"((c.s1 & 3) * 8));
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s1) : "r"(b.s1), "r"(a.s1), "r"((c.s1 & 3) * 8));
  #endif
  #if VECT_SIZE >= 4
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s2) : "r"(b.s2), "r"(a.s2), "r"((c.s2 & 3) * 8));
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s2) : "r"(b.s2), "r"(a.s2), "r"((c.s2 & 3) * 8));
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s3) : "r"(b.s3), "r"(a.s3), "r"((c.s3 & 3) * 8));
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s3) : "r"(b.s3), "r"(a.s3), "r"((c.s3 & 3) * 8));
  #endif
  #if VECT_SIZE >= 8
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s4) : "r"(b.s4), "r"(a.s4), "r"((c.s4 & 3) * 8));
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s4) : "r"(b.s4), "r"(a.s4), "r"((c.s4 & 3) * 8));
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s5) : "r"(b.s5), "r"(a.s5), "r"((c.s5 & 3) * 8));
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s5) : "r"(b.s5), "r"(a.s5), "r"((c.s5 & 3) * 8));
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s6) : "r"(b.s6), "r"(a.s6), "r"((c.s6 & 3) * 8));
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s6) : "r"(b.s6), "r"(a.s6), "r"((c.s6 & 3) * 8));
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s7) : "r"(b.s7), "r"(a.s7), "r"((c.s7 & 3) * 8));
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s7) : "r"(b.s7), "r"(a.s7), "r"((c.s7 & 3) * 8));
  #endif
  #if VECT_SIZE >= 16
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s8) : "r"(b.s8), "r"(a.s8), "r"((c.s8 & 3) * 8));
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s8) : "r"(b.s8), "r"(a.s8), "r"((c.s8 & 3) * 8));
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s9) : "r"(b.s9), "r"(a.s9), "r"((c.s9 & 3) * 8));
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.s9) : "r"(b.s9), "r"(a.s9), "r"((c.s9 & 3) * 8));
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.sa) : "r"(b.sa), "r"(a.sa), "r"((c.sa & 3) * 8));
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.sa) : "r"(b.sa), "r"(a.sa), "r"((c.sa & 3) * 8));
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.sb) : "r"(b.sb), "r"(a.sb), "r"((c.sb & 3) * 8));
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.sb) : "r"(b.sb), "r"(a.sb), "r"((c.sb & 3) * 8));
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.sc) : "r"(b.sc), "r"(a.sc), "r"((c.sc & 3) * 8));
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.sc) : "r"(b.sc), "r"(a.sc), "r"((c.sc & 3) * 8));
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.sd) : "r"(b.sd), "r"(a.sd), "r"((c.sd & 3) * 8));
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.sd) : "r"(b.sd), "r"(a.sd), "r"((c.sd & 3) * 8));
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.se) : "r"(b.se), "r"(a.se), "r"((c.se & 3) * 8));
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.se) : "r"(b.se), "r"(a.se), "r"((c.se & 3) * 8));
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.sf) : "r"(b.sf), "r"(a.sf), "r"((c.sf & 3) * 8));
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r.sf) : "r"(b.sf), "r"(a.sf), "r"((c.sf & 3) * 8));
  #endif
  #else
@@ -770,7 +934,7 @@ static u32 amd_bytealign_S (const u32 a, const u32 b, const u32 c)
  #if CUDA_ARCH >= 350
- asm ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r) : "r"(b), "r"(a), "r"((c & 3) * 8));
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(r) : "r"(b), "r"(a), "r"((c & 3) * 8));
  #else
View File

@@ -114,15 +114,13 @@ __kernel void m05300_m04 (__global pw_t *pws, __constant const kernel_rule_t *ru
   * s_msg
   */
- __local u32 w_s[16];
+ __local u32 s_nr_buf[16];
  for (u32 i = lid; i < 16; i += lsz)
  {
- w_s[i] = ikepsk_bufs[digests_offset].nr_buf[i];
+ s_nr_buf[i] = ikepsk_bufs[digests_offset].nr_buf[i];
  }
- barrier (CLK_LOCAL_MEM_FENCE);
  __local u32 s_msg_buf[128];
  for (u32 i = lid; i < 128; i += lsz)
@@ -181,20 +179,20 @@ __kernel void m05300_m04 (__global pw_t *pws, __constant const kernel_rule_t *ru
  hmac_md5_pad (w0, w1, w2, w3, ipad, opad);
- w0[0] = w_s[ 0];
+ w0[0] = s_nr_buf[ 0];
- w0[1] = w_s[ 1];
+ w0[1] = s_nr_buf[ 1];
- w0[2] = w_s[ 2];
+ w0[2] = s_nr_buf[ 2];
- w0[3] = w_s[ 3];
+ w0[3] = s_nr_buf[ 3];
- w1[0] = w_s[ 4];
+ w1[0] = s_nr_buf[ 4];
- w1[1] = w_s[ 5];
+ w1[1] = s_nr_buf[ 5];
- w1[2] = w_s[ 6];
+ w1[2] = s_nr_buf[ 6];
- w1[3] = w_s[ 7];
+ w1[3] = s_nr_buf[ 7];
- w2[0] = w_s[ 8];
+ w2[0] = s_nr_buf[ 8];
- w2[1] = w_s[ 9];
+ w2[1] = s_nr_buf[ 9];
- w2[2] = w_s[10];
+ w2[2] = s_nr_buf[10];
- w2[3] = w_s[11];
+ w2[3] = s_nr_buf[11];
- w3[0] = w_s[12];
+ w3[0] = s_nr_buf[12];
- w3[1] = w_s[13];
+ w3[1] = s_nr_buf[13];
  w3[2] = (64 + nr_len) * 8;
  w3[3] = 0;
@@ -291,15 +289,13 @@ __kernel void m05300_s04 (__global pw_t *pws, __constant const kernel_rule_t *ru
   * s_msg
   */
- __local u32 w_s[16];
+ __local u32 s_nr_buf[16];
  for (u32 i = lid; i < 16; i += lsz)
  {
- w_s[i] = ikepsk_bufs[digests_offset].nr_buf[i];
+ s_nr_buf[i] = ikepsk_bufs[digests_offset].nr_buf[i];
  }
- barrier (CLK_LOCAL_MEM_FENCE);
  __local u32 s_msg_buf[128];
  for (u32 i = lid; i < 128; i += lsz)
@@ -370,20 +366,20 @@ __kernel void m05300_s04 (__global pw_t *pws, __constant const kernel_rule_t *ru
  hmac_md5_pad (w0, w1, w2, w3, ipad, opad);
- w0[0] = w_s[ 0];
+ w0[0] = s_nr_buf[ 0];
- w0[1] = w_s[ 1];
+ w0[1] = s_nr_buf[ 1];
- w0[2] = w_s[ 2];
+ w0[2] = s_nr_buf[ 2];
- w0[3] = w_s[ 3];
+ w0[3] = s_nr_buf[ 3];
- w1[0] = w_s[ 4];
+ w1[0] = s_nr_buf[ 4];
- w1[1] = w_s[ 5];
+ w1[1] = s_nr_buf[ 5];
- w1[2] = w_s[ 6];
+ w1[2] = s_nr_buf[ 6];
- w1[3] = w_s[ 7];
+ w1[3] = s_nr_buf[ 7];
- w2[0] = w_s[ 8];
+ w2[0] = s_nr_buf[ 8];
- w2[1] = w_s[ 9];
+ w2[1] = s_nr_buf[ 9];
- w2[2] = w_s[10];
+ w2[2] = s_nr_buf[10];
- w2[3] = w_s[11];
+ w2[3] = s_nr_buf[11];
- w3[0] = w_s[12];
+ w3[0] = s_nr_buf[12];
- w3[1] = w_s[13];
+ w3[1] = s_nr_buf[13];
  w3[2] = (64 + nr_len) * 8;
  w3[3] = 0;
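The kernel-side changes in this and the following files all follow one pattern: the per-salt nr_buf words are staged once per work-group into __local memory (now named s_nr_buf instead of w_s) and read from there, and the first barrier is dropped, presumably because the barrier that follows the s_msg_buf load already synchronizes both copies. A generic sketch of the staging idiom (s_buf/g_buf are placeholder names, not hashcat identifiers):

  // cooperative copy: each work-item fills a strided subset of the local buffer
  __local uint s_buf[16];

  for (uint i = lid; i < 16; i += lsz)   // lid = get_local_id (0), lsz = get_local_size (0)
  {
    s_buf[i] = g_buf[i];                 // g_buf points at the __global per-salt data
  }

  barrier (CLK_LOCAL_MEM_FENCE);         // one barrier after the last staged buffer is enough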
View File

@@ -112,15 +112,13 @@ __kernel void m05300_m04 (__global pw_t *pws, __global const kernel_rule_t *rule
   * s_msg
   */
- __local u32 w_s[16];
+ __local u32 s_nr_buf[16];
  for (u32 i = lid; i < 16; i += lsz)
  {
- w_s[i] = ikepsk_bufs[digests_offset].nr_buf[i];
+ s_nr_buf[i] = ikepsk_bufs[digests_offset].nr_buf[i];
  }
- barrier (CLK_LOCAL_MEM_FENCE);
  __local u32 s_msg_buf[128];
  for (u32 i = lid; i < 128; i += lsz)
@@ -239,20 +237,20 @@ __kernel void m05300_m04 (__global pw_t *pws, __global const kernel_rule_t *rule
  hmac_md5_pad (w0, w1, w2, w3, ipad, opad);
- w0[0] = w_s[ 0];
+ w0[0] = s_nr_buf[ 0];
- w0[1] = w_s[ 1];
+ w0[1] = s_nr_buf[ 1];
- w0[2] = w_s[ 2];
+ w0[2] = s_nr_buf[ 2];
- w0[3] = w_s[ 3];
+ w0[3] = s_nr_buf[ 3];
- w1[0] = w_s[ 4];
+ w1[0] = s_nr_buf[ 4];
- w1[1] = w_s[ 5];
+ w1[1] = s_nr_buf[ 5];
- w1[2] = w_s[ 6];
+ w1[2] = s_nr_buf[ 6];
- w1[3] = w_s[ 7];
+ w1[3] = s_nr_buf[ 7];
- w2[0] = w_s[ 8];
+ w2[0] = s_nr_buf[ 8];
- w2[1] = w_s[ 9];
+ w2[1] = s_nr_buf[ 9];
- w2[2] = w_s[10];
+ w2[2] = s_nr_buf[10];
- w2[3] = w_s[11];
+ w2[3] = s_nr_buf[11];
- w3[0] = w_s[12];
+ w3[0] = s_nr_buf[12];
- w3[1] = w_s[13];
+ w3[1] = s_nr_buf[13];
  w3[2] = (64 + nr_len) * 8;
  w3[3] = 0;
@@ -349,15 +347,13 @@ __kernel void m05300_s04 (__global pw_t *pws, __global const kernel_rule_t *rule
   * s_msg
   */
- __local u32 w_s[16];
+ __local u32 s_nr_buf[16];
  for (u32 i = lid; i < 16; i += lsz)
  {
- w_s[i] = ikepsk_bufs[digests_offset].nr_buf[i];
+ s_nr_buf[i] = ikepsk_bufs[digests_offset].nr_buf[i];
  }
- barrier (CLK_LOCAL_MEM_FENCE);
  __local u32 s_msg_buf[128];
  for (u32 i = lid; i < 128; i += lsz)
@@ -488,20 +484,20 @@ __kernel void m05300_s04 (__global pw_t *pws, __global const kernel_rule_t *rule
  hmac_md5_pad (w0, w1, w2, w3, ipad, opad);
- w0[0] = w_s[ 0];
+ w0[0] = s_nr_buf[ 0];
- w0[1] = w_s[ 1];
+ w0[1] = s_nr_buf[ 1];
- w0[2] = w_s[ 2];
+ w0[2] = s_nr_buf[ 2];
- w0[3] = w_s[ 3];
+ w0[3] = s_nr_buf[ 3];
- w1[0] = w_s[ 4];
+ w1[0] = s_nr_buf[ 4];
- w1[1] = w_s[ 5];
+ w1[1] = s_nr_buf[ 5];
- w1[2] = w_s[ 6];
+ w1[2] = s_nr_buf[ 6];
- w1[3] = w_s[ 7];
+ w1[3] = s_nr_buf[ 7];
- w2[0] = w_s[ 8];
+ w2[0] = s_nr_buf[ 8];
- w2[1] = w_s[ 9];
+ w2[1] = s_nr_buf[ 9];
- w2[2] = w_s[10];
+ w2[2] = s_nr_buf[10];
- w2[3] = w_s[11];
+ w2[3] = s_nr_buf[11];
- w3[0] = w_s[12];
+ w3[0] = s_nr_buf[12];
- w3[1] = w_s[13];
+ w3[1] = s_nr_buf[13];
  w3[2] = (64 + nr_len) * 8;
  w3[3] = 0;
View File

@@ -98,7 +98,7 @@ void hmac_md5_run (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], u32x ipad[4],
md5_transform_vector (w0, w1, w2, w3, digest); md5_transform_vector (w0, w1, w2, w3, digest);
} }
void m05300m (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, __local u32 *s_msg_buf) void m05300m (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, __local u32 *s_msg_buf, __local u32 *s_nr_buf)
{ {
/** /**
* modifier * modifier
@@ -157,20 +157,20 @@ void m05300m (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __gl
  hmac_md5_pad (w0_t, w1_t, w2_t, w3_t, ipad, opad);
- w0_t[0] = ikepsk_bufs[digests_offset].nr_buf[ 0];
+ w0_t[0] = s_nr_buf[ 0];
- w0_t[1] = ikepsk_bufs[digests_offset].nr_buf[ 1];
+ w0_t[1] = s_nr_buf[ 1];
- w0_t[2] = ikepsk_bufs[digests_offset].nr_buf[ 2];
+ w0_t[2] = s_nr_buf[ 2];
- w0_t[3] = ikepsk_bufs[digests_offset].nr_buf[ 3];
+ w0_t[3] = s_nr_buf[ 3];
- w1_t[0] = ikepsk_bufs[digests_offset].nr_buf[ 4];
+ w1_t[0] = s_nr_buf[ 4];
- w1_t[1] = ikepsk_bufs[digests_offset].nr_buf[ 5];
+ w1_t[1] = s_nr_buf[ 5];
- w1_t[2] = ikepsk_bufs[digests_offset].nr_buf[ 6];
+ w1_t[2] = s_nr_buf[ 6];
- w1_t[3] = ikepsk_bufs[digests_offset].nr_buf[ 7];
+ w1_t[3] = s_nr_buf[ 7];
- w2_t[0] = ikepsk_bufs[digests_offset].nr_buf[ 8];
+ w2_t[0] = s_nr_buf[ 8];
- w2_t[1] = ikepsk_bufs[digests_offset].nr_buf[ 9];
+ w2_t[1] = s_nr_buf[ 9];
- w2_t[2] = ikepsk_bufs[digests_offset].nr_buf[10];
+ w2_t[2] = s_nr_buf[10];
- w2_t[3] = ikepsk_bufs[digests_offset].nr_buf[11];
+ w2_t[3] = s_nr_buf[11];
- w3_t[0] = ikepsk_bufs[digests_offset].nr_buf[12];
+ w3_t[0] = s_nr_buf[12];
- w3_t[1] = ikepsk_bufs[digests_offset].nr_buf[13];
+ w3_t[1] = s_nr_buf[13];
  w3_t[2] = (64 + nr_len) * 8;
  w3_t[3] = 0;
@@ -245,7 +245,7 @@ void m05300m (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __gl
} }
} }
void m05300s (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, __local u32 *s_msg_buf) void m05300s (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, __local u32 *s_msg_buf, __local u32 *s_nr_buf)
{ {
/** /**
* modifier * modifier
@@ -316,20 +316,20 @@ void m05300s (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __gl
  hmac_md5_pad (w0_t, w1_t, w2_t, w3_t, ipad, opad);
- w0_t[0] = ikepsk_bufs[digests_offset].nr_buf[ 0];
+ w0_t[0] = s_nr_buf[ 0];
- w0_t[1] = ikepsk_bufs[digests_offset].nr_buf[ 1];
+ w0_t[1] = s_nr_buf[ 1];
- w0_t[2] = ikepsk_bufs[digests_offset].nr_buf[ 2];
+ w0_t[2] = s_nr_buf[ 2];
- w0_t[3] = ikepsk_bufs[digests_offset].nr_buf[ 3];
+ w0_t[3] = s_nr_buf[ 3];
- w1_t[0] = ikepsk_bufs[digests_offset].nr_buf[ 4];
+ w1_t[0] = s_nr_buf[ 4];
- w1_t[1] = ikepsk_bufs[digests_offset].nr_buf[ 5];
+ w1_t[1] = s_nr_buf[ 5];
- w1_t[2] = ikepsk_bufs[digests_offset].nr_buf[ 6];
+ w1_t[2] = s_nr_buf[ 6];
- w1_t[3] = ikepsk_bufs[digests_offset].nr_buf[ 7];
+ w1_t[3] = s_nr_buf[ 7];
- w2_t[0] = ikepsk_bufs[digests_offset].nr_buf[ 8];
+ w2_t[0] = s_nr_buf[ 8];
- w2_t[1] = ikepsk_bufs[digests_offset].nr_buf[ 9];
+ w2_t[1] = s_nr_buf[ 9];
- w2_t[2] = ikepsk_bufs[digests_offset].nr_buf[10];
+ w2_t[2] = s_nr_buf[10];
- w2_t[3] = ikepsk_bufs[digests_offset].nr_buf[11];
+ w2_t[3] = s_nr_buf[11];
- w3_t[0] = ikepsk_bufs[digests_offset].nr_buf[12];
+ w3_t[0] = s_nr_buf[12];
- w3_t[1] = ikepsk_bufs[digests_offset].nr_buf[13];
+ w3_t[1] = s_nr_buf[13];
  w3_t[2] = (64 + nr_len) * 8;
  w3_t[3] = 0;
@@ -418,6 +418,13 @@ __kernel void m05300_m04 (__global pw_t *pws, __global const kernel_rule_t *rule
   * s_msg
   */
+ __local u32 s_nr_buf[16];
+ for (u32 i = lid; i < 16; i += lsz)
+ {
+ s_nr_buf[i] = ikepsk_bufs[digests_offset].nr_buf[i];
+ }
  __local u32 s_msg_buf[128];
  for (u32 i = lid; i < 128; i += lsz)
@@ -467,7 +474,7 @@ __kernel void m05300_m04 (__global pw_t *pws, __global const kernel_rule_t *rule
* main * main
*/ */
m05300m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf); m05300m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf, s_nr_buf);
} }
__kernel void m05300_m08 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max) __kernel void m05300_m08 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max)
@@ -484,6 +491,13 @@ __kernel void m05300_m08 (__global pw_t *pws, __global const kernel_rule_t *rule
   * s_msg
   */
+ __local u32 s_nr_buf[16];
+ for (u32 i = lid; i < 16; i += lsz)
+ {
+ s_nr_buf[i] = ikepsk_bufs[digests_offset].nr_buf[i];
+ }
  __local u32 s_msg_buf[128];
  for (u32 i = lid; i < 128; i += lsz)
@@ -533,7 +547,7 @@ __kernel void m05300_m08 (__global pw_t *pws, __global const kernel_rule_t *rule
* main * main
*/ */
m05300m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf); m05300m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf, s_nr_buf);
} }
__kernel void m05300_m16 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max) __kernel void m05300_m16 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max)
@@ -550,6 +564,13 @@ __kernel void m05300_m16 (__global pw_t *pws, __global const kernel_rule_t *rule
   * s_msg
   */
+ __local u32 s_nr_buf[16];
+ for (u32 i = lid; i < 16; i += lsz)
+ {
+ s_nr_buf[i] = ikepsk_bufs[digests_offset].nr_buf[i];
+ }
  __local u32 s_msg_buf[128];
  for (u32 i = lid; i < 128; i += lsz)
@@ -599,7 +620,7 @@ __kernel void m05300_m16 (__global pw_t *pws, __global const kernel_rule_t *rule
* main * main
*/ */
m05300m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf); m05300m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf, s_nr_buf);
} }
__kernel void m05300_s04 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max) __kernel void m05300_s04 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max)
@@ -616,6 +637,13 @@ __kernel void m05300_s04 (__global pw_t *pws, __global const kernel_rule_t *rule
   * s_msg
   */
+ __local u32 s_nr_buf[16];
+ for (u32 i = lid; i < 16; i += lsz)
+ {
+ s_nr_buf[i] = ikepsk_bufs[digests_offset].nr_buf[i];
+ }
  __local u32 s_msg_buf[128];
  for (u32 i = lid; i < 128; i += lsz)
@@ -665,7 +693,7 @@ __kernel void m05300_s04 (__global pw_t *pws, __global const kernel_rule_t *rule
* main * main
*/ */
m05300s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf); m05300s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf, s_nr_buf);
} }
__kernel void m05300_s08 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max) __kernel void m05300_s08 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max)
@@ -682,6 +710,13 @@ __kernel void m05300_s08 (__global pw_t *pws, __global const kernel_rule_t *rule
   * s_msg
   */
+ __local u32 s_nr_buf[16];
+ for (u32 i = lid; i < 16; i += lsz)
+ {
+ s_nr_buf[i] = ikepsk_bufs[digests_offset].nr_buf[i];
+ }
  __local u32 s_msg_buf[128];
  for (u32 i = lid; i < 128; i += lsz)
@@ -731,7 +766,7 @@ __kernel void m05300_s08 (__global pw_t *pws, __global const kernel_rule_t *rule
* main * main
*/ */
m05300s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf); m05300s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf, s_nr_buf);
} }
__kernel void m05300_s16 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max) __kernel void m05300_s16 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max)
@@ -748,6 +783,13 @@ __kernel void m05300_s16 (__global pw_t *pws, __global const kernel_rule_t *rule
   * s_msg
   */
+ __local u32 s_nr_buf[16];
+ for (u32 i = lid; i < 16; i += lsz)
+ {
+ s_nr_buf[i] = ikepsk_bufs[digests_offset].nr_buf[i];
+ }
  __local u32 s_msg_buf[128];
  for (u32 i = lid; i < 128; i += lsz)
@@ -797,5 +839,5 @@ __kernel void m05300_s16 (__global pw_t *pws, __global const kernel_rule_t *rule
* main * main
*/ */
m05300s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf); m05300s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf, s_nr_buf);
} }
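In this _a3 variant the staging cannot happen inside m05300m () / m05300s () themselves, so each __kernel entry point now fills s_nr_buf and hands it down as an additional __local u32 * parameter, exactly like the existing s_msg_buf argument. A trimmed sketch of that calling shape (kernel and helper names are illustrative, not taken from the diff):

  // Hypothetical helper: consumes the work-group-local copy instead of __global memory.
  static void work (__local const uint *s_buf, __global uint *out, const uint gid)
  {
    out[gid] = s_buf[gid & 15];
  }

  __kernel void entry (__global const uint *g_buf, __global uint *out)
  {
    const uint gid = get_global_id (0);
    const uint lid = get_local_id (0);
    const uint lsz = get_local_size (0);

    __local uint s_buf[16];

    for (uint i = lid; i < 16; i += lsz) s_buf[i] = g_buf[i];  // stage once per work-group

    barrier (CLK_LOCAL_MEM_FENCE);

    work (s_buf, out, gid);  // pass the __local pointer down, as with s_msg_buf / s_nr_buf
  }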
View File

@@ -118,15 +118,13 @@ __kernel void m05400_m04 (__global pw_t *pws, __constant const kernel_rule_t *ru
   * s_msg
   */
- __local u32 w_s[16];
+ __local u32 s_nr_buf[16];
  for (u32 i = lid; i < 16; i += lsz)
  {
- w_s[i] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[i]);
+ s_nr_buf[i] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[i]);
  }
- barrier (CLK_LOCAL_MEM_FENCE);
  __local u32 s_msg_buf[128];
  for (u32 i = lid; i < 128; i += lsz)
@@ -194,20 +192,20 @@ __kernel void m05400_m04 (__global pw_t *pws, __constant const kernel_rule_t *ru
  hmac_sha1_pad (w0, w1, w2, w3, ipad, opad);
- w0[0] = w_s[ 0];
+ w0[0] = s_nr_buf[ 0];
- w0[1] = w_s[ 1];
+ w0[1] = s_nr_buf[ 1];
- w0[2] = w_s[ 2];
+ w0[2] = s_nr_buf[ 2];
- w0[3] = w_s[ 3];
+ w0[3] = s_nr_buf[ 3];
- w1[0] = w_s[ 4];
+ w1[0] = s_nr_buf[ 4];
- w1[1] = w_s[ 5];
+ w1[1] = s_nr_buf[ 5];
- w1[2] = w_s[ 6];
+ w1[2] = s_nr_buf[ 6];
- w1[3] = w_s[ 7];
+ w1[3] = s_nr_buf[ 7];
- w2[0] = w_s[ 8];
+ w2[0] = s_nr_buf[ 8];
- w2[1] = w_s[ 9];
+ w2[1] = s_nr_buf[ 9];
- w2[2] = w_s[10];
+ w2[2] = s_nr_buf[10];
- w2[3] = w_s[11];
+ w2[3] = s_nr_buf[11];
- w3[0] = w_s[12];
+ w3[0] = s_nr_buf[12];
- w3[1] = w_s[13];
+ w3[1] = s_nr_buf[13];
  w3[2] = 0;
  w3[3] = (64 + nr_len) * 8;
@ -304,15 +302,13 @@ __kernel void m05400_s04 (__global pw_t *pws, __constant const kernel_rule_t *ru
* s_msg * s_msg
*/ */
__local u32 w_s[16]; __local u32 s_nr_buf[16];
for (u32 i = lid; i < 16; i += lsz) for (u32 i = lid; i < 16; i += lsz)
{ {
w_s[i] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[i]); s_nr_buf[i] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[i]);
} }
barrier (CLK_LOCAL_MEM_FENCE);
__local u32 s_msg_buf[128]; __local u32 s_msg_buf[128];
for (u32 i = lid; i < 128; i += lsz) for (u32 i = lid; i < 128; i += lsz)
@ -392,20 +388,20 @@ __kernel void m05400_s04 (__global pw_t *pws, __constant const kernel_rule_t *ru
hmac_sha1_pad (w0, w1, w2, w3, ipad, opad); hmac_sha1_pad (w0, w1, w2, w3, ipad, opad);
w0[0] = w_s[ 0]; w0[0] = s_nr_buf[ 0];
w0[1] = w_s[ 1]; w0[1] = s_nr_buf[ 1];
w0[2] = w_s[ 2]; w0[2] = s_nr_buf[ 2];
w0[3] = w_s[ 3]; w0[3] = s_nr_buf[ 3];
w1[0] = w_s[ 4]; w1[0] = s_nr_buf[ 4];
w1[1] = w_s[ 5]; w1[1] = s_nr_buf[ 5];
w1[2] = w_s[ 6]; w1[2] = s_nr_buf[ 6];
w1[3] = w_s[ 7]; w1[3] = s_nr_buf[ 7];
w2[0] = w_s[ 8]; w2[0] = s_nr_buf[ 8];
w2[1] = w_s[ 9]; w2[1] = s_nr_buf[ 9];
w2[2] = w_s[10]; w2[2] = s_nr_buf[10];
w2[3] = w_s[11]; w2[3] = s_nr_buf[11];
w3[0] = w_s[12]; w3[0] = s_nr_buf[12];
w3[1] = w_s[13]; w3[1] = s_nr_buf[13];
w3[2] = 0; w3[2] = 0;
w3[3] = (64 + nr_len) * 8; w3[3] = (64 + nr_len) * 8;
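
In the m05400 kernels the nonce words pass through swap32_S once, while s_nr_buf is being filled, because SHA-1 consumes its message words big-endian; swapping at preload time lets the code above read already-swapped words straight from local memory. For reference only, a portable form of a 32-bit byte swap — this sketch is not hashcat's swap32_S implementation:

static uint swap32_ref (const uint v)
{
  return ((v >> 24) & 0x000000ff)
       | ((v >>  8) & 0x0000ff00)
       | ((v <<  8) & 0x00ff0000)
       | ((v << 24) & 0xff000000);
}
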

View File

@ -116,15 +116,13 @@ __kernel void m05400_m04 (__global pw_t *pws, __global const kernel_rule_t *rule
* s_msg * s_msg
*/ */
__local u32 w_s[16]; __local u32 s_nr_buf[16];
for (u32 i = lid; i < 16; i += lsz) for (u32 i = lid; i < 16; i += lsz)
{ {
w_s[i] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[i]); s_nr_buf[i] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[i]);
} }
barrier (CLK_LOCAL_MEM_FENCE);
__local u32 s_msg_buf[128]; __local u32 s_msg_buf[128];
for (u32 i = lid; i < 128; i += lsz) for (u32 i = lid; i < 128; i += lsz)
@ -260,20 +258,20 @@ __kernel void m05400_m04 (__global pw_t *pws, __global const kernel_rule_t *rule
hmac_sha1_pad (w0, w1, w2, w3, ipad, opad); hmac_sha1_pad (w0, w1, w2, w3, ipad, opad);
w0[0] = w_s[ 0]; w0[0] = s_nr_buf[ 0];
w0[1] = w_s[ 1]; w0[1] = s_nr_buf[ 1];
w0[2] = w_s[ 2]; w0[2] = s_nr_buf[ 2];
w0[3] = w_s[ 3]; w0[3] = s_nr_buf[ 3];
w1[0] = w_s[ 4]; w1[0] = s_nr_buf[ 4];
w1[1] = w_s[ 5]; w1[1] = s_nr_buf[ 5];
w1[2] = w_s[ 6]; w1[2] = s_nr_buf[ 6];
w1[3] = w_s[ 7]; w1[3] = s_nr_buf[ 7];
w2[0] = w_s[ 8]; w2[0] = s_nr_buf[ 8];
w2[1] = w_s[ 9]; w2[1] = s_nr_buf[ 9];
w2[2] = w_s[10]; w2[2] = s_nr_buf[10];
w2[3] = w_s[11]; w2[3] = s_nr_buf[11];
w3[0] = w_s[12]; w3[0] = s_nr_buf[12];
w3[1] = w_s[13]; w3[1] = s_nr_buf[13];
w3[2] = 0; w3[2] = 0;
w3[3] = (64 + nr_len) * 8; w3[3] = (64 + nr_len) * 8;
@ -370,15 +368,13 @@ __kernel void m05400_s04 (__global pw_t *pws, __global const kernel_rule_t *rule
* s_msg * s_msg
*/ */
__local u32 w_s[16]; __local u32 s_nr_buf[16];
for (u32 i = lid; i < 16; i += lsz) for (u32 i = lid; i < 16; i += lsz)
{ {
w_s[i] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[i]); s_nr_buf[i] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[i]);
} }
barrier (CLK_LOCAL_MEM_FENCE);
__local u32 s_msg_buf[128]; __local u32 s_msg_buf[128];
for (u32 i = lid; i < 128; i += lsz) for (u32 i = lid; i < 128; i += lsz)
@ -526,20 +522,20 @@ __kernel void m05400_s04 (__global pw_t *pws, __global const kernel_rule_t *rule
hmac_sha1_pad (w0, w1, w2, w3, ipad, opad); hmac_sha1_pad (w0, w1, w2, w3, ipad, opad);
w0[0] = w_s[ 0]; w0[0] = s_nr_buf[ 0];
w0[1] = w_s[ 1]; w0[1] = s_nr_buf[ 1];
w0[2] = w_s[ 2]; w0[2] = s_nr_buf[ 2];
w0[3] = w_s[ 3]; w0[3] = s_nr_buf[ 3];
w1[0] = w_s[ 4]; w1[0] = s_nr_buf[ 4];
w1[1] = w_s[ 5]; w1[1] = s_nr_buf[ 5];
w1[2] = w_s[ 6]; w1[2] = s_nr_buf[ 6];
w1[3] = w_s[ 7]; w1[3] = s_nr_buf[ 7];
w2[0] = w_s[ 8]; w2[0] = s_nr_buf[ 8];
w2[1] = w_s[ 9]; w2[1] = s_nr_buf[ 9];
w2[2] = w_s[10]; w2[2] = s_nr_buf[10];
w2[3] = w_s[11]; w2[3] = s_nr_buf[11];
w3[0] = w_s[12]; w3[0] = s_nr_buf[12];
w3[1] = w_s[13]; w3[1] = s_nr_buf[13];
w3[2] = 0; w3[2] = 0;
w3[3] = (64 + nr_len) * 8; w3[3] = (64 + nr_len) * 8;
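
The trailing w3[3] word is the SHA-1 message length in bits: the HMAC inner hash has already absorbed the 64-byte ipad block, so the total message covered by the length field is 64 bytes plus the nr payload. A small sketch with a hypothetical helper name and an example payload length:

// returns the SHA-1 bit length for the inner HMAC message: 64-byte ipad block + payload
static uint hmac_sha1_msg_bits (const uint nr_len)
{
  return (64 + nr_len) * 8;   // e.g. nr_len = 40 bytes -> (64 + 40) * 8 = 832 bits
}
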

View File

@ -102,7 +102,7 @@ void hmac_sha1_run (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], u32x ipad[5]
sha1_transform_vector (w0, w1, w2, w3, digest); sha1_transform_vector (w0, w1, w2, w3, digest);
} }
void m05400m (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, __local u32 *s_msg_buf) void m05400m (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, __local u32 *s_msg_buf, __local u32 *s_nr_buf)
{ {
/** /**
* modifier * modifier
@ -161,20 +161,20 @@ void m05400m (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __gl
hmac_sha1_pad (w0_t, w1_t, w2_t, w3_t, ipad, opad); hmac_sha1_pad (w0_t, w1_t, w2_t, w3_t, ipad, opad);
w0_t[0] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 0]); w0_t[0] = s_nr_buf[ 0];
w0_t[1] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 1]); w0_t[1] = s_nr_buf[ 1];
w0_t[2] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 2]); w0_t[2] = s_nr_buf[ 2];
w0_t[3] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 3]); w0_t[3] = s_nr_buf[ 3];
w1_t[0] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 4]); w1_t[0] = s_nr_buf[ 4];
w1_t[1] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 5]); w1_t[1] = s_nr_buf[ 5];
w1_t[2] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 6]); w1_t[2] = s_nr_buf[ 6];
w1_t[3] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 7]); w1_t[3] = s_nr_buf[ 7];
w2_t[0] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 8]); w2_t[0] = s_nr_buf[ 8];
w2_t[1] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 9]); w2_t[1] = s_nr_buf[ 9];
w2_t[2] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[10]); w2_t[2] = s_nr_buf[10];
w2_t[3] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[11]); w2_t[3] = s_nr_buf[11];
w3_t[0] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[12]); w3_t[0] = s_nr_buf[12];
w3_t[1] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[13]); w3_t[1] = s_nr_buf[13];
w3_t[2] = 0; w3_t[2] = 0;
w3_t[3] = (64 + nr_len) * 8; w3_t[3] = (64 + nr_len) * 8;
@ -249,7 +249,7 @@ void m05400m (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __gl
} }
} }
void m05400s (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, __local u32 *s_msg_buf) void m05400s (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, __local u32 *s_msg_buf, __local u32 *s_nr_buf)
{ {
/** /**
* modifier * modifier
@ -320,20 +320,20 @@ void m05400s (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 pw_len, __gl
hmac_sha1_pad (w0_t, w1_t, w2_t, w3_t, ipad, opad); hmac_sha1_pad (w0_t, w1_t, w2_t, w3_t, ipad, opad);
w0_t[0] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 0]); w0_t[0] = s_nr_buf[ 0];
w0_t[1] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 1]); w0_t[1] = s_nr_buf[ 1];
w0_t[2] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 2]); w0_t[2] = s_nr_buf[ 2];
w0_t[3] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 3]); w0_t[3] = s_nr_buf[ 3];
w1_t[0] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 4]); w1_t[0] = s_nr_buf[ 4];
w1_t[1] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 5]); w1_t[1] = s_nr_buf[ 5];
w1_t[2] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 6]); w1_t[2] = s_nr_buf[ 6];
w1_t[3] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 7]); w1_t[3] = s_nr_buf[ 7];
w2_t[0] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 8]); w2_t[0] = s_nr_buf[ 8];
w2_t[1] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[ 9]); w2_t[1] = s_nr_buf[ 9];
w2_t[2] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[10]); w2_t[2] = s_nr_buf[10];
w2_t[3] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[11]); w2_t[3] = s_nr_buf[11];
w3_t[0] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[12]); w3_t[0] = s_nr_buf[12];
w3_t[1] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[13]); w3_t[1] = s_nr_buf[13];
w3_t[2] = 0; w3_t[2] = 0;
w3_t[3] = (64 + nr_len) * 8; w3_t[3] = (64 + nr_len) * 8;
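
The widened m05400m/m05400s signatures rely on OpenCL allowing a pointer to __local memory to be passed from the kernel into an ordinary function: the kernel entries fill s_msg_buf and s_nr_buf once per work-group, and the per-candidate code above then reads the pre-swapped words through the pointer instead of re-fetching and re-swapping them from __global memory on every iteration. A minimal sketch of that calling pattern, with placeholder names (sum_shared, caller) and nothing hashcat-specific:

static uint sum_shared (__local const uint *s_buf, const uint n)
{
  uint acc = 0;

  for (uint i = 0; i < n; i++) acc += s_buf[i];   // reads hit local memory only

  return acc;
}

__kernel void caller (__global const uint *src, __global uint *out)
{
  const uint lid = get_local_id (0);
  const uint lsz = get_local_size (0);

  __local uint s_buf[16];

  for (uint i = lid; i < 16; i += lsz) s_buf[i] = src[i];   // cooperative preload

  barrier (CLK_LOCAL_MEM_FENCE);

  out[get_global_id (0)] = sum_shared (s_buf, 16);          // __local pointer handed to helper
}
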
@ -422,6 +422,13 @@ __kernel void m05400_m04 (__global pw_t *pws, __global const kernel_rule_t *rule
* s_msg * s_msg
*/ */
__local u32 s_nr_buf[16];
for (u32 i = lid; i < 16; i += lsz)
{
s_nr_buf[i] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[i]);
}
__local u32 s_msg_buf[128]; __local u32 s_msg_buf[128];
for (u32 i = lid; i < 128; i += lsz) for (u32 i = lid; i < 128; i += lsz)
@ -471,7 +478,7 @@ __kernel void m05400_m04 (__global pw_t *pws, __global const kernel_rule_t *rule
* main * main
*/ */
m05400m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf); m05400m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf, s_nr_buf);
} }
__kernel void m05400_m08 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max) __kernel void m05400_m08 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max)
@ -488,6 +495,13 @@ __kernel void m05400_m08 (__global pw_t *pws, __global const kernel_rule_t *rule
* s_msg * s_msg
*/ */
__local u32 s_nr_buf[16];
for (u32 i = lid; i < 16; i += lsz)
{
s_nr_buf[i] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[i]);
}
__local u32 s_msg_buf[128]; __local u32 s_msg_buf[128];
for (u32 i = lid; i < 128; i += lsz) for (u32 i = lid; i < 128; i += lsz)
@ -537,7 +551,7 @@ __kernel void m05400_m08 (__global pw_t *pws, __global const kernel_rule_t *rule
* main * main
*/ */
m05400m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf); m05400m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf, s_nr_buf);
} }
__kernel void m05400_m16 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max) __kernel void m05400_m16 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max)
@ -554,6 +568,13 @@ __kernel void m05400_m16 (__global pw_t *pws, __global const kernel_rule_t *rule
* s_msg * s_msg
*/ */
__local u32 s_nr_buf[16];
for (u32 i = lid; i < 16; i += lsz)
{
s_nr_buf[i] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[i]);
}
__local u32 s_msg_buf[128]; __local u32 s_msg_buf[128];
for (u32 i = lid; i < 128; i += lsz) for (u32 i = lid; i < 128; i += lsz)
@ -603,7 +624,7 @@ __kernel void m05400_m16 (__global pw_t *pws, __global const kernel_rule_t *rule
* main * main
*/ */
m05400m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf); m05400m (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf, s_nr_buf);
} }
__kernel void m05400_s04 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max) __kernel void m05400_s04 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max)
@ -620,6 +641,13 @@ __kernel void m05400_s04 (__global pw_t *pws, __global const kernel_rule_t *rule
* s_msg * s_msg
*/ */
__local u32 s_nr_buf[16];
for (u32 i = lid; i < 16; i += lsz)
{
s_nr_buf[i] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[i]);
}
__local u32 s_msg_buf[128]; __local u32 s_msg_buf[128];
for (u32 i = lid; i < 128; i += lsz) for (u32 i = lid; i < 128; i += lsz)
@ -669,7 +697,7 @@ __kernel void m05400_s04 (__global pw_t *pws, __global const kernel_rule_t *rule
* main * main
*/ */
m05400s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf); m05400s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf, s_nr_buf);
} }
__kernel void m05400_s08 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max) __kernel void m05400_s08 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max)
@ -686,6 +714,13 @@ __kernel void m05400_s08 (__global pw_t *pws, __global const kernel_rule_t *rule
* s_msg * s_msg
*/ */
__local u32 s_nr_buf[16];
for (u32 i = lid; i < 16; i += lsz)
{
s_nr_buf[i] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[i]);
}
__local u32 s_msg_buf[128]; __local u32 s_msg_buf[128];
for (u32 i = lid; i < 128; i += lsz) for (u32 i = lid; i < 128; i += lsz)
@ -735,7 +770,7 @@ __kernel void m05400_s08 (__global pw_t *pws, __global const kernel_rule_t *rule
* main * main
*/ */
m05400s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf); m05400s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf, s_nr_buf);
} }
__kernel void m05400_s16 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max) __kernel void m05400_s16 (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global ikepsk_t *ikepsk_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u64 gid_max)
@ -752,6 +787,13 @@ __kernel void m05400_s16 (__global pw_t *pws, __global const kernel_rule_t *rule
* s_msg * s_msg
*/ */
__local u32 s_nr_buf[16];
for (u32 i = lid; i < 16; i += lsz)
{
s_nr_buf[i] = swap32_S (ikepsk_bufs[digests_offset].nr_buf[i]);
}
__local u32 s_msg_buf[128]; __local u32 s_msg_buf[128];
for (u32 i = lid; i < 128; i += lsz) for (u32 i = lid; i < 128; i += lsz)
@ -801,5 +843,5 @@ __kernel void m05400_s16 (__global pw_t *pws, __global const kernel_rule_t *rule
* main * main
*/ */
m05400s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf); m05400s (w0, w1, w2, w3, pw_len, pws, rules_buf, combs_buf, bfs_buf, tmps, hooks, bitmaps_buf_s1_a, bitmaps_buf_s1_b, bitmaps_buf_s1_c, bitmaps_buf_s1_d, bitmaps_buf_s2_a, bitmaps_buf_s2_b, bitmaps_buf_s2_c, bitmaps_buf_s2_d, plains_buf, digests_buf, hashes_shown, salt_bufs, ikepsk_bufs, d_return_buf, d_scryptV0_buf, d_scryptV1_buf, d_scryptV2_buf, d_scryptV3_buf, bitmap_mask, bitmap_shift1, bitmap_shift2, salt_pos, loop_pos, loop_cnt, il_cnt, digests_cnt, digests_offset, s_msg_buf, s_nr_buf);
} }

View File

@ -555,12 +555,6 @@ __kernel void m05600_m04 (__global pw_t *pws, __global const kernel_rule_t *rule
if (gid >= gid_max) return; if (gid >= gid_max) return;
const u32 userdomain_len = netntlm_bufs[digests_offset].user_len
+ netntlm_bufs[digests_offset].domain_len;
const u32 chall_len = netntlm_bufs[digests_offset].srvchall_len
+ netntlm_bufs[digests_offset].clichall_len;
/** /**
* base * base
*/ */
@ -634,12 +628,6 @@ __kernel void m05600_m08 (__global pw_t *pws, __global const kernel_rule_t *rule
if (gid >= gid_max) return; if (gid >= gid_max) return;
const u32 userdomain_len = netntlm_bufs[digests_offset].user_len
+ netntlm_bufs[digests_offset].domain_len;
const u32 chall_len = netntlm_bufs[digests_offset].srvchall_len
+ netntlm_bufs[digests_offset].clichall_len;
/** /**
* base * base
*/ */
@ -717,12 +705,6 @@ __kernel void m05600_s04 (__global pw_t *pws, __global const kernel_rule_t *rule
if (gid >= gid_max) return; if (gid >= gid_max) return;
const u32 userdomain_len = netntlm_bufs[digests_offset].user_len
+ netntlm_bufs[digests_offset].domain_len;
const u32 chall_len = netntlm_bufs[digests_offset].srvchall_len
+ netntlm_bufs[digests_offset].clichall_len;
/** /**
* base * base
*/ */
@ -796,12 +778,6 @@ __kernel void m05600_s08 (__global pw_t *pws, __global const kernel_rule_t *rule
if (gid >= gid_max) return; if (gid >= gid_max) return;
const u32 userdomain_len = netntlm_bufs[digests_offset].user_len
+ netntlm_bufs[digests_offset].domain_len;
const u32 chall_len = netntlm_bufs[digests_offset].srvchall_len
+ netntlm_bufs[digests_offset].clichall_len;
/** /**
* base * base
*/ */
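
The lines removed from the four m05600 entry points only derived two combined lengths from the netntlm salt record (user plus domain, server challenge plus client challenge). For illustration, a sketch of that computation over a hypothetical struct — the real field layout is defined in hashcat's interface headers, not here:

typedef struct
{
  uint user_len;
  uint domain_len;
  uint srvchall_len;
  uint clichall_len;
} netntlm_lens_t;                          // hypothetical, illustration only

static uint2 netntlm_combined_lens (const netntlm_lens_t s)
{
  const uint userdomain_len = s.user_len     + s.domain_len;
  const uint chall_len      = s.srvchall_len + s.clichall_len;

  return (uint2) (userdomain_len, chall_len);
}
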