Commit a44bed7549 (parent adeeaee84a), branch pull/1971/head, author jsteube:
"More preparation for OpenCL emulation mode"
@ -6,16 +6,16 @@
#ifndef _BITOPS_H
#define _BITOPS_H

/*
 * bitops.h - portable bit-manipulation primitives (three-operand add,
 * rotates, byte swaps). The _S suffix marks the scalar variant; the
 * unsuffixed name is the (possibly vectorized) variant.
 * Rotate count is a plain int; callers are expected to pass 0 < n < width.
 */

u32 add3   (const u32 a, const u32 b, const u32 c);
u32 add3_S (const u32 a, const u32 b, const u32 c);

u32 rotl32 (const u32 a, const int n);
u32 rotr32 (const u32 a, const int n);
u64 rotl64 (const u64 a, const int n);
u64 rotr64 (const u64 a, const int n);

u16 byte_swap_16 (const u16 n);
u32 byte_swap_32 (const u32 n);
u64 byte_swap_64 (const u64 n);

#endif // _BITOPS_H

@ -7,17 +7,17 @@
#include "types.h"
#include "bitops.h"
/**
 * Scalar three-operand add: returns a + b + c (mod 2^32, unsigned wrap).
 */
u32 add3_S (const u32 a, const u32 b, const u32 c)
{
  return a + b + c;
}
/**
 * Three-operand add, vector-capable variant. In this scalar build it
 * simply delegates to add3_S so both names share one implementation.
 */
u32 add3 (const u32 a, const u32 b, const u32 c)
{
  return add3_S (a, b, c);
}
/**
 * Rotate the 32-bit value a left by n bits.
 * NOTE(review): callers must pass 0 < n < 32 — n == 0 or n >= 32 would
 * shift by the type width in the generic branch (UB); confirm call sites.
 */
u32 rotl32 (const u32 a, const int n)
{
#if defined (_MSC_VER)
  return _rotl (a, n);
#else
  /* NOTE(review): generic branch elided by the diff view; reconstructed
     as the canonical shift-or rotate — confirm against upstream. */
  return ((a << n) | (a >> (32 - n)));
#endif
}
/**
 * Rotate the 32-bit value a right by n bits.
 * NOTE(review): callers must pass 0 < n < 32 — n == 0 or n >= 32 would
 * shift by the type width in the generic branch (UB); confirm call sites.
 */
u32 rotr32 (const u32 a, const int n)
{
#if defined (_MSC_VER)
  return _rotr (a, n);
#else
  /* NOTE(review): generic branch elided by the diff view; reconstructed
     as the canonical shift-or rotate — confirm against upstream. */
  return ((a >> n) | (a << (32 - n)));
#endif
}
/**
 * Rotate the 64-bit value a left by n bits.
 * NOTE(review): callers must pass 0 < n < 64 — n == 0 or n >= 64 would
 * shift by the type width in the generic branch (UB); confirm call sites.
 */
u64 rotl64 (const u64 a, const int n)
{
#if defined (_MSC_VER)
  return _rotl64 (a, n);
#else
  /* NOTE(review): generic branch elided by the diff view; reconstructed
     as the canonical shift-or rotate — confirm against upstream. */
  return ((a << n) | (a >> (64 - n)));
#endif
}
u64 rotr64 (const u64 a, const u64 n)
u64 rotr64 (const u64 a, const int n)
{
#if defined (_MSC_VER)
return _rotr64 (a, n);

@ -16,20 +16,20 @@
/* One MD4 round step, scalar variant:
 *   a = rotl32(a + K + x + f(b,c,d), s)
 * f is the round's boolean function, x the message word, K the round
 * constant, s the rotate amount. */
#define MD4_STEP_S(f,a,b,c,d,x,K,s)  \
{                                    \
  a += K;                            \
  a = add3_S (a, x, f (b, c, d));    \
  a = rotl32_S (a, s);               \
}

/* One MD4 round step, vector-capable variant. */
#define MD4_STEP(f,a,b,c,d,x,K,s)    \
{                                    \
  a += K;                            \
  a = add3 (a, x, f (b, c, d));      \
  a = rotl32 (a, s);                 \
}

/* MD4 step with no message word (x contribution folded into K). */
#define MD4_STEP0(f,a,b,c,d,K,s)     \
{                                    \
  a = add3 (a, K, f (b, c, d));      \
  a = rotl32 (a, s);                 \
}

@ -24,7 +24,7 @@
/* One MD5 round step, scalar variant:
 *   a = b + rotl32(a + K + x + f(b,c,d), s)
 * (identical to the MD4 step plus the trailing "a += b"). */
#define MD5_STEP_S(f,a,b,c,d,x,K,s)  \
{                                    \
  a += K;                            \
  a = add3_S (a, x, f (b, c, d));    \
  a = rotl32_S (a, s);               \
  a += b;                            \
}
@ -32,14 +32,14 @@
/* One MD5 round step, vector-capable variant:
 *   a = b + rotl32(a + K + x + f(b,c,d), s) */
#define MD5_STEP(f,a,b,c,d,x,K,s)    \
{                                    \
  a += K;                            \
  a = add3 (a, x, f (b, c, d));      \
  a = rotl32 (a, s);                 \
  a += b;                            \
}

/* MD5 step with no message word (x contribution folded into K). */
#define MD5_STEP0(f,a,b,c,d,K,s)     \
{                                    \
  a = add3 (a, K, f (b, c, d));      \
  a = rotl32 (a, s);                 \
  a += b;                            \
}

@ -16,7 +16,7 @@
/* One SHA-1 round step, scalar variant:
 *   e = e + K + x + f(b,c,d) + rotl(a,5);  b = rotl(b,30)
 * K is the round constant taken from the enclosing scope. */
#define SHA1_STEP_S(f,a,b,c,d,e,x)   \
{                                    \
  e += K;                            \
  e = add3_S (e, x, f (b, c, d));    \
  e += rotl32_S (a, 5u);             \
  b = rotl32_S (b, 30u);             \
}
@ -24,14 +24,14 @@
/* One SHA-1 round step, vector-capable variant:
 *   e = e + K + x + f(b,c,d) + rotl(a,5);  b = rotl(b,30)
 * K is the round constant taken from the enclosing scope. */
#define SHA1_STEP(f,a,b,c,d,e,x)     \
{                                    \
  e += K;                            \
  e = add3 (e, x, f (b, c, d));      \
  e += rotl32 (a, 5u);               \
  b = rotl32 (b, 30u);               \
}

/* SHA-1 step without the round constant (K already folded into x). */
#define SHA1_STEPX(f,a,b,c,d,e,x)    \
{                                    \
  e = add3 (e, x, f (b, c, d));      \
  e += rotl32 (a, 5u);               \
  b = rotl32 (b, 30u);               \
}

@ -26,20 +26,20 @@
/* One SHA-256 compression step, scalar variant:
 *   h += K + x + S3(e) + F1(e,f,g);  d += h;  h += S2(a) + F0(a,b,c) */
#define SHA256_STEP_S(F0,F1,a,b,c,d,e,f,g,h,x,K)     \
{                                                    \
  h = add3_S (h, K, x);                              \
  h = add3_S (h, SHA256_S3_S (e), F1 (e,f,g));       \
  d += h;                                            \
  h = add3_S (h, SHA256_S2_S (a), F0 (a,b,c));       \
}

/* SHA-256 message-schedule expansion, scalar variant. */
#define SHA256_EXPAND_S(x,y,z,w) (SHA256_S1_S (x) + y + SHA256_S0_S (z) + w)
/* One SHA-256 compression step, vector-capable variant:
 *   h += K + x + S3(e) + F1(e,f,g);  d += h;  h += S2(a) + F0(a,b,c) */
#define SHA256_STEP(F0,F1,a,b,c,d,e,f,g,h,x,K)       \
{                                                    \
  h = add3 (h, K, x);                                \
  h = add3 (h, SHA256_S3 (e), F1 (e,f,g));           \
  d += h;                                            \
  h = add3 (h, SHA256_S2 (a), F0 (a,b,c));           \
}

/* SHA-256 message-schedule expansion, vector-capable variant. */
#define SHA256_EXPAND(x,y,z,w) (SHA256_S1 (x) + y + SHA256_S0 (z) + w)

Loading…
Cancel
Save