Some unrolling for SHA2-based algorithms

pull/2022/head
Jens Steube 5 years ago
parent fa9d073f9a
commit e2da5c8d57

@@ -104,11 +104,6 @@ DECLSPEC void sha224_transform (const u32 *w0, const u32 *w1, const u32 *w2, con
ROUND_STEP_S (0);
#ifdef IS_CUDA
ROUND_EXPAND_S (); ROUND_STEP_S (16);
ROUND_EXPAND_S (); ROUND_STEP_S (32);
ROUND_EXPAND_S (); ROUND_STEP_S (48);
#else
#ifdef _unroll
#pragma unroll
#endif
@@ -116,7 +111,6 @@ DECLSPEC void sha224_transform (const u32 *w0, const u32 *w1, const u32 *w2, con
{
ROUND_EXPAND_S (); ROUND_STEP_S (i);
}
#endif
#undef ROUND_EXPAND_S
#undef ROUND_STEP_S

@@ -104,11 +104,6 @@ DECLSPEC void sha256_transform (const u32 *w0, const u32 *w1, const u32 *w2, con
ROUND_STEP_S (0);
#ifdef IS_CUDA
ROUND_EXPAND_S (); ROUND_STEP_S (16);
ROUND_EXPAND_S (); ROUND_STEP_S (32);
ROUND_EXPAND_S (); ROUND_STEP_S (48);
#else
#ifdef _unroll
#pragma unroll
#endif
@@ -116,7 +111,6 @@ DECLSPEC void sha256_transform (const u32 *w0, const u32 *w1, const u32 *w2, con
{
ROUND_EXPAND_S (); ROUND_STEP_S (i);
}
#endif
#undef ROUND_EXPAND_S
#undef ROUND_STEP_S

@@ -108,12 +108,6 @@ DECLSPEC void sha384_transform (const u32 *w0, const u32 *w1, const u32 *w2, con
ROUND_STEP_S (0);
#ifdef IS_CUDA
ROUND_EXPAND_S (); ROUND_STEP_S (16);
ROUND_EXPAND_S (); ROUND_STEP_S (32);
ROUND_EXPAND_S (); ROUND_STEP_S (48);
ROUND_EXPAND_S (); ROUND_STEP_S (64);
#else
#ifdef _unroll
#pragma unroll
#endif
@@ -121,7 +115,6 @@ DECLSPEC void sha384_transform (const u32 *w0, const u32 *w1, const u32 *w2, con
{
ROUND_EXPAND_S (); ROUND_STEP_S (i);
}
#endif
#undef ROUND_EXPAND_S
#undef ROUND_STEP_S

@@ -108,12 +108,6 @@ DECLSPEC void sha512_transform (const u32 *w0, const u32 *w1, const u32 *w2, con
ROUND_STEP_S (0);
#ifdef IS_CUDA
ROUND_EXPAND_S (); ROUND_STEP_S (16);
ROUND_EXPAND_S (); ROUND_STEP_S (32);
ROUND_EXPAND_S (); ROUND_STEP_S (48);
ROUND_EXPAND_S (); ROUND_STEP_S (64);
#else
#ifdef _unroll
#pragma unroll
#endif
@@ -121,7 +115,6 @@ DECLSPEC void sha512_transform (const u32 *w0, const u32 *w1, const u32 *w2, con
{
ROUND_EXPAND_S (); ROUND_STEP_S (i);
}
#endif
#undef ROUND_EXPAND_S
#undef ROUND_STEP_S

@@ -86,12 +86,6 @@ DECLSPEC void sha512_transform_intern (const u32x *w0, const u32x *w1, const u32
ROUND_STEP (0);
#ifdef IS_CUDA
ROUND_EXPAND (); ROUND_STEP (16);
ROUND_EXPAND (); ROUND_STEP (32);
ROUND_EXPAND (); ROUND_STEP (48);
ROUND_EXPAND (); ROUND_STEP (64);
#else
#ifdef _unroll
#pragma unroll
#endif
@@ -99,7 +93,6 @@ DECLSPEC void sha512_transform_intern (const u32x *w0, const u32x *w1, const u32
{
ROUND_EXPAND (); ROUND_STEP (i);
}
#endif
/* rev
digest[0] += a;

@@ -84,12 +84,6 @@ DECLSPEC void sha512_transform_intern (const u32x *w0, const u32x *w1, const u32
ROUND_STEP (0);
#ifdef IS_CUDA
ROUND_EXPAND (); ROUND_STEP (16);
ROUND_EXPAND (); ROUND_STEP (32);
ROUND_EXPAND (); ROUND_STEP (48);
ROUND_EXPAND (); ROUND_STEP (64);
#else
#ifdef _unroll
#pragma unroll
#endif
@@ -97,7 +91,6 @@ DECLSPEC void sha512_transform_intern (const u32x *w0, const u32x *w1, const u32
{
ROUND_EXPAND (); ROUND_STEP (i);
}
#endif
/* rev
digest[0] += a;

@@ -84,12 +84,6 @@ DECLSPEC void sha512_transform_intern (const u32x *w0, const u32x *w1, const u32
ROUND_STEP (0);
#ifdef IS_CUDA
ROUND_EXPAND (); ROUND_STEP (16);
ROUND_EXPAND (); ROUND_STEP (32);
ROUND_EXPAND (); ROUND_STEP (48);
ROUND_EXPAND (); ROUND_STEP (64);
#else
#ifdef _unroll
#pragma unroll
#endif
@@ -97,7 +91,6 @@ DECLSPEC void sha512_transform_intern (const u32x *w0, const u32x *w1, const u32
{
ROUND_EXPAND (); ROUND_STEP (i);
}
#endif
/* rev
digest[0] += a;

Loading…
Cancel
Save