Get rid of some old volatiles

pull/1518/head
jsteube 6 years ago
parent fa379074b6
commit 5951207365

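This commit removes a batch of legacy volatile workarounds from the OpenCL kernels. Every hunk below has the same shape: on the AMD path (or, further down, the Apple-GPU path) a local was declared volatile to keep older compilers from miscompiling the surrounding code, and the qualifier is now dropped, leaving both preprocessor branches identical (the conditionals themselves are kept in this commit). The recurring pattern, quoted from the hunks below:

    // before: AMD builds got a volatile copy as a compiler workaround
    #ifdef IS_AMD
    volatile const int offset_switch = offset / 4;
    #else
    const int offset_switch = offset / 4;
    #endif

    // after: both branches declare the same plain local
    const int offset_switch = offset / 4;

Dropping volatile on these locals cannot change any computed value; it only allows the compiler to keep them in registers and optimize around them again.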
@@ -1287,7 +1287,7 @@ DECLSPEC void switch_buffer_by_offset_le (u32x w0[4], u32x w1[4], u32x w2[4], u3
 const int offset_minus_4 = 4 - offset_mod_4;
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -1994,7 +1994,7 @@ DECLSPEC void switch_buffer_by_offset_carry_le (u32x w0[4], u32x w1[4], u32x w2[
 const int offset_minus_4 = 4 - offset_mod_4;
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -3312,7 +3312,7 @@ DECLSPEC void switch_buffer_by_offset_carry_le (u32x w0[4], u32x w1[4], u32x w2[
 DECLSPEC void switch_buffer_by_offset_be (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const u32 offset)
 {
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -3983,7 +3983,7 @@ DECLSPEC void switch_buffer_by_offset_be (u32x w0[4], u32x w1[4], u32x w2[4], u3
 DECLSPEC void switch_buffer_by_offset_carry_be (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], u32x c0[4], u32x c1[4], u32x c2[4], u32x c3[4], const u32 offset)
 {
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -4927,7 +4927,7 @@ DECLSPEC void switch_buffer_by_offset_8x4_le (u32x w0[4], u32x w1[4], u32x w2[4]
 const int offset_minus_4 = 4 - offset_mod_4;
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -6733,7 +6733,7 @@ DECLSPEC void switch_buffer_by_offset_8x4_le (u32x w0[4], u32x w1[4], u32x w2[4]
 DECLSPEC void switch_buffer_by_offset_8x4_be (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], u32x w4[4], u32x w5[4], u32x w6[4], u32x w7[4], const u32 offset)
 {
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -9065,7 +9065,7 @@ DECLSPEC void switch_buffer_by_offset_8x4_be (u32x w0[4], u32x w1[4], u32x w2[4]
 DECLSPEC void switch_buffer_by_offset_8x4_carry_be (u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], u32x w4[4], u32x w5[4], u32x w6[4], u32x w7[4], u32x c0[4], u32x c1[4], u32x c2[4], u32x c3[4], u32x c4[4], u32x c5[4], u32x c6[4], u32x c7[4], const u32 offset)
 {
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -12457,7 +12457,7 @@ DECLSPEC void switch_buffer_by_offset_1x64_le (u32x w[64], const u32 offset)
 const int offset_minus_4 = 4 - offset_mod_4;
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -21197,7 +21197,7 @@ DECLSPEC void switch_buffer_by_offset_1x64_le (u32x w[64], const u32 offset)
 DECLSPEC void switch_buffer_by_offset_1x64_be (u32x w[64], const u32 offset)
 {
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -32491,7 +32491,7 @@ DECLSPEC void switch_buffer_by_offset_le_S (u32 w0[4], u32 w1[4], u32 w2[4], u32
 const int offset_minus_4 = 4 - offset_mod_4;
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -33197,7 +33197,7 @@ DECLSPEC void switch_buffer_by_offset_carry_le_S (u32 w0[4], u32 w1[4], u32 w2[4
 const int offset_minus_4 = 4 - offset_mod_4;
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -34515,7 +34515,7 @@ DECLSPEC void switch_buffer_by_offset_carry_le_S (u32 w0[4], u32 w1[4], u32 w2[4
 DECLSPEC void switch_buffer_by_offset_be_S (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 offset)
 {
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -35183,7 +35183,7 @@ DECLSPEC void switch_buffer_by_offset_be_S (u32 w0[4], u32 w1[4], u32 w2[4], u32
 DECLSPEC void switch_buffer_by_offset_carry_be_S (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 c0[4], u32 c1[4], u32 c2[4], u32 c3[4], const u32 offset)
 {
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -36127,7 +36127,7 @@ DECLSPEC void switch_buffer_by_offset_8x4_le_S (u32 w0[4], u32 w1[4], u32 w2[4],
 const int offset_minus_4 = 4 - offset_mod_4;
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -37933,7 +37933,7 @@ DECLSPEC void switch_buffer_by_offset_8x4_le_S (u32 w0[4], u32 w1[4], u32 w2[4],
 DECLSPEC void switch_buffer_by_offset_8x4_be_S (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 w4[4], u32 w5[4], u32 w6[4], u32 w7[4], const u32 offset)
 {
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -40265,7 +40265,7 @@ DECLSPEC void switch_buffer_by_offset_8x4_be_S (u32 w0[4], u32 w1[4], u32 w2[4],
 DECLSPEC void switch_buffer_by_offset_8x4_carry_be_S (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 w4[4], u32 w5[4], u32 w6[4], u32 w7[4], u32 c0[4], u32 c1[4], u32 c2[4], u32 c3[4], u32 c4[4], u32 c5[4], u32 c6[4], u32 c7[4], const u32 offset)
 {
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -43657,7 +43657,7 @@ DECLSPEC void switch_buffer_by_offset_1x64_le_S (u32 w[64], const u32 offset)
 const int offset_minus_4 = 4 - offset_mod_4;
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -52397,7 +52397,7 @@ DECLSPEC void switch_buffer_by_offset_1x64_le_S (u32 w[64], const u32 offset)
 DECLSPEC void switch_buffer_by_offset_1x64_be_S (u32 w[64], const u32 offset)
 {
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
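
In the functions touched above (their bodies are outside the hunk context), offset_switch is the whole-word part of a byte offset and selects how far the buffer words move, while offset_mod_4 carries the sub-word remainder. A minimal stand-alone sketch of that decomposition; the memcpy-based approach and the function name are illustrative, not hashcat's register-based implementation:

    #include <stdint.h>
    #include <string.h>

    // Shift a 16-byte little-endian buffer toward higher offsets by
    // offset bytes, using the same whole-word / sub-word split the
    // kernel functions perform.
    static void shift_buffer_by_offset (uint32_t w[4], const uint32_t offset)
    {
      const int offset_switch = offset / 4; // whole 32-bit words
      const int offset_mod_4  = offset & 3; // remaining bytes

      uint8_t src[16];
      uint8_t dst[16] = { 0 };

      memcpy (src, w, 16);

      for (int i = 0; i < 16; i++)
      {
        const int j = i + (offset_switch * 4) + offset_mod_4;

        if (j < 16) dst[j] = src[i];
      }

      memcpy (w, dst, 16);
    }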

@@ -111,7 +111,7 @@ DECLSPEC void md4_init (md4_ctx_t *ctx)
 DECLSPEC void md4_update_64 (md4_ctx_t *ctx, u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 63;
+const int pos = ctx->len & 63;
 #else
 const int pos = ctx->len & 63;
 #endif
@@ -1235,7 +1235,7 @@ DECLSPEC void md4_init_vector_from_scalar (md4_ctx_vector_t *ctx, md4_ctx_t *ctx
 DECLSPEC void md4_update_vector_64 (md4_ctx_vector_t *ctx, u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 63;
+const int pos = ctx->len & 63;
 #else
 const int pos = ctx->len & 63;
 #endif

@@ -147,7 +147,7 @@ DECLSPEC void md5_init (md5_ctx_t *ctx)
 DECLSPEC void md5_update_64 (md5_ctx_t *ctx, u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 63;
+const int pos = ctx->len & 63;
 #else
 const int pos = ctx->len & 63;
 #endif
@@ -1307,7 +1307,7 @@ DECLSPEC void md5_init_vector_from_scalar (md5_ctx_vector_t *ctx, md5_ctx_t *ctx
 DECLSPEC void md5_update_vector_64 (md5_ctx_vector_t *ctx, u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 63;
+const int pos = ctx->len & 63;
 #else
 const int pos = ctx->len & 63;
 #endif

@@ -245,7 +245,7 @@ DECLSPEC void ripemd160_init (ripemd160_ctx_t *ctx)
 DECLSPEC void ripemd160_update_64 (ripemd160_ctx_t *ctx, u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 63;
+const int pos = ctx->len & 63;
 #else
 const int pos = ctx->len & 63;
 #endif
@@ -1504,7 +1504,7 @@ DECLSPEC void ripemd160_init_vector_from_scalar (ripemd160_ctx_vector_t *ctx, ri
 DECLSPEC void ripemd160_update_vector_64 (ripemd160_ctx_vector_t *ctx, u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 63;
+const int pos = ctx->len & 63;
 #else
 const int pos = ctx->len & 63;
 #endif

@@ -177,7 +177,7 @@ DECLSPEC void sha1_init (sha1_ctx_t *ctx)
 DECLSPEC void sha1_update_64 (sha1_ctx_t *ctx, u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 63;
+const int pos = ctx->len & 63;
 #else
 const int pos = ctx->len & 63;
 #endif
@@ -1604,7 +1604,7 @@ DECLSPEC void sha1_init_vector_from_scalar (sha1_ctx_vector_t *ctx, sha1_ctx_t *
 DECLSPEC void sha1_update_vector_64 (sha1_ctx_vector_t *ctx, u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 63;
+const int pos = ctx->len & 63;
 #else
 const int pos = ctx->len & 63;
 #endif

@@ -162,7 +162,7 @@ DECLSPEC void sha224_init (sha224_ctx_t *ctx)
 DECLSPEC void sha224_update_64 (sha224_ctx_t *ctx, u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 63;
+const int pos = ctx->len & 63;
 #else
 const int pos = ctx->len & 63;
 #endif
@@ -1321,7 +1321,7 @@ DECLSPEC void sha224_init_vector_from_scalar (sha224_ctx_vector_t *ctx, sha224_c
 DECLSPEC void sha224_update_vector_64 (sha224_ctx_vector_t *ctx, u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 63;
+const int pos = ctx->len & 63;
 #else
 const int pos = ctx->len & 63;
 #endif

@@ -162,7 +162,7 @@ DECLSPEC void sha256_init (sha256_ctx_t *ctx)
 DECLSPEC void sha256_update_64 (sha256_ctx_t *ctx, u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 63;
+const int pos = ctx->len & 63;
 #else
 const int pos = ctx->len & 63;
 #endif
@@ -1321,7 +1321,7 @@ DECLSPEC void sha256_init_vector_from_scalar (sha256_ctx_vector_t *ctx, sha256_c
 DECLSPEC void sha256_update_vector_64 (sha256_ctx_vector_t *ctx, u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 63;
+const int pos = ctx->len & 63;
 #else
 const int pos = ctx->len & 63;
 #endif

@@ -186,7 +186,7 @@ DECLSPEC void sha384_init (sha384_ctx_t *ctx)
 DECLSPEC void sha384_update_128 (sha384_ctx_t *ctx, u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 w4[4], u32 w5[4], u32 w6[4], u32 w7[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 127;
+const int pos = ctx->len & 127;
 #else
 const int pos = ctx->len & 127;
 #endif
@@ -2017,7 +2017,7 @@ DECLSPEC void sha384_init_vector_from_scalar (sha384_ctx_vector_t *ctx, sha384_c
 DECLSPEC void sha384_update_vector_128 (sha384_ctx_vector_t *ctx, u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], u32x w4[4], u32x w5[4], u32x w6[4], u32x w7[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 127;
+const int pos = ctx->len & 127;
 #else
 const int pos = ctx->len & 127;
 #endif

@@ -186,7 +186,7 @@ DECLSPEC void sha512_init (sha512_ctx_t *ctx)
 DECLSPEC void sha512_update_128 (sha512_ctx_t *ctx, u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], u32 w4[4], u32 w5[4], u32 w6[4], u32 w7[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 127;
+const int pos = ctx->len & 127;
 #else
 const int pos = ctx->len & 127;
 #endif
@@ -2017,7 +2017,7 @@ DECLSPEC void sha512_init_vector_from_scalar (sha512_ctx_vector_t *ctx, sha512_c
 DECLSPEC void sha512_update_vector_128 (sha512_ctx_vector_t *ctx, u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], u32x w4[4], u32x w5[4], u32x w6[4], u32x w7[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 127;
+const int pos = ctx->len & 127;
 #else
 const int pos = ctx->len & 127;
 #endif

@@ -1345,7 +1345,7 @@ DECLSPEC void whirlpool_init (whirlpool_ctx_t *ctx, SHM_TYPE u32 (*s_Ch)[256], S
 DECLSPEC void whirlpool_update_64 (whirlpool_ctx_t *ctx, u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 63;
+const int pos = ctx->len & 63;
 #else
 const int pos = ctx->len & 63;
 #endif
@@ -2608,7 +2608,7 @@ DECLSPEC void whirlpool_init_vector_from_scalar (whirlpool_ctx_vector_t *ctx, wh
 DECLSPEC void whirlpool_update_vector_64 (whirlpool_ctx_vector_t *ctx, u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 63;
+const int pos = ctx->len & 63;
 #else
 const int pos = ctx->len & 63;
 #endif
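
Every pos local above is the write position inside the hash's pending block (len & 63 for the 64-byte-block hashes, len & 127 for SHA-384/512); the dropped qualifier never affected the value. A toy streaming update showing the role of pos; the context struct and transform are stand-ins, not hashcat's types:

    #include <string.h>

    static void toy_transform (const unsigned char block[64]) { (void) block; } // stand-in

    typedef struct { unsigned char buf[64]; int len; } toy_ctx_t;

    static void toy_update (toy_ctx_t *ctx, const unsigned char *data, int len)
    {
      const int pos = ctx->len & 63; // write position in the pending 64-byte block

      ctx->len += len;

      if (pos + len < 64) // block still not full: just buffer the input
      {
        memcpy (ctx->buf + pos, data, len);

        return;
      }

      const int take = 64 - pos; // bytes needed to complete the block

      memcpy (ctx->buf + pos, data, take);

      toy_transform (ctx->buf);

      data += take;
      len  -= take;

      while (len >= 64) // full blocks straight from the input
      {
        toy_transform (data);

        data += 64;
        len  -= 64;
      }

      memcpy (ctx->buf, data, len); // tail stays buffered for the next call
    }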

@@ -2758,7 +2758,7 @@ DECLSPEC void luks_af_sha1_then_aes_decrypt (__global const luks_t *luks_bufs, _
 if (key_size == HC_LUKS_KEY_SIZE_128)
 {
 #if defined (IS_APPLE) && defined (IS_GPU)
-volatile u32 ukey[4];
+u32 ukey[4];
 #else
 u32 ukey[4];
 #endif
@@ -2783,7 +2783,7 @@ DECLSPEC void luks_af_sha1_then_aes_decrypt (__global const luks_t *luks_bufs, _
 else if (key_size == HC_LUKS_KEY_SIZE_256)
 {
 #if defined (IS_APPLE) && defined (IS_GPU)
-volatile u32 ukey[8];
+u32 ukey[8];
 #else
 u32 ukey[8];
 #endif
@@ -2815,7 +2815,7 @@ DECLSPEC void luks_af_sha1_then_aes_decrypt (__global const luks_t *luks_bufs, _
 if (key_size == HC_LUKS_KEY_SIZE_128)
 {
 #if defined (IS_APPLE) && defined (IS_GPU)
-volatile u32 ukey[4];
+u32 ukey[4];
 #else
 u32 ukey[4];
 #endif
@@ -2834,7 +2834,7 @@ DECLSPEC void luks_af_sha1_then_aes_decrypt (__global const luks_t *luks_bufs, _
 else if (key_size == HC_LUKS_KEY_SIZE_256)
 {
 #if defined (IS_APPLE) && defined (IS_GPU)
-volatile u32 ukey[8];
+u32 ukey[8];
 #else
 u32 ukey[8];
 #endif
@@ -2860,7 +2860,7 @@ DECLSPEC void luks_af_sha1_then_aes_decrypt (__global const luks_t *luks_bufs, _
 if (key_size == HC_LUKS_KEY_SIZE_256)
 {
 #if defined (IS_APPLE) && defined (IS_GPU)
-volatile u32 ukey1[4];
+u32 ukey1[4];
 #else
 u32 ukey1[4];
 #endif
@@ -2871,7 +2871,7 @@ DECLSPEC void luks_af_sha1_then_aes_decrypt (__global const luks_t *luks_bufs, _
 ukey1[3] = mk[3];
 #if defined (IS_APPLE) && defined (IS_GPU)
-volatile u32 ukey2[4];
+u32 ukey2[4];
 #else
 u32 ukey2[4];
 #endif
@@ -2892,7 +2892,7 @@ DECLSPEC void luks_af_sha1_then_aes_decrypt (__global const luks_t *luks_bufs, _
 else if (key_size == HC_LUKS_KEY_SIZE_512)
 {
 #if defined (IS_APPLE) && defined (IS_GPU)
-volatile u32 ukey1[8];
+u32 ukey1[8];
 #else
 u32 ukey1[8];
 #endif
@@ -2907,7 +2907,7 @@ DECLSPEC void luks_af_sha1_then_aes_decrypt (__global const luks_t *luks_bufs, _
 ukey1[7] = mk[ 7];
 #if defined (IS_APPLE) && defined (IS_GPU)
-volatile u32 ukey2[8];
+u32 ukey2[8];
 #else
 u32 ukey2[8];
 #endif
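
The ukey locals above only ever hold a copy of the master key on its way into the AES key schedule; volatile was an Apple-GPU workaround, not a semantic requirement. For the two-key (XTS) cases the context lines show the split directly: ukey1 takes the low half of mk and ukey2 the high half. A stand-alone sketch of that split for the 256-bit case, with illustrative names:

    #include <stdint.h>

    // Split a 256-bit LUKS master key into the two 128-bit XTS halves,
    // mirroring the ukey1/ukey2 assignments visible in the hunks above.
    static void split_mk_256 (const uint32_t mk[8], uint32_t ukey1[4], uint32_t ukey2[4])
    {
      for (int i = 0; i < 4; i++)
      {
        ukey1[i] = mk[i];     // data-key half
        ukey2[i] = mk[i + 4]; // tweak-key half
      }
    }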

@@ -31,7 +31,7 @@ DECLSPEC void serpent256_decrypt_cbc (const u32 *ks1, const u32 *in, u32 *out, u
 essiv[3] = in[3];
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128 (__global const u32 *in, u32 *out, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128 (__global const u32 *in, u32 *out, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -65,7 +65,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128 (__global const u32 *in,
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_sha1 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_sha1 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -102,7 +102,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_sha1 (__global const u
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_sha1_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_sha1_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -162,7 +162,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_sha1_final (__global c
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_sha256 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_sha256 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -199,7 +199,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_sha256 (__global const
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_sha256_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_sha256_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -259,7 +259,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_sha256_final (__global
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_sha512 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_sha512 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -296,7 +296,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_sha512 (__global const
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_sha512_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_sha512_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -356,7 +356,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_sha512_final (__global
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_ripemd160 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_ripemd160 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -393,7 +393,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_ripemd160 (__global co
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_ripemd160_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_ripemd160_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -453,7 +453,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv128_mk_ripemd160_final (__glo
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256 (__global const u32 *in, u32 *out, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256 (__global const u32 *in, u32 *out, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -487,7 +487,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256 (__global const u32 *in,
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_sha1 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_sha1 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -524,7 +524,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_sha1 (__global const u
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_sha1_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_sha1_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -584,7 +584,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_sha1_final (__global c
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_sha256 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_sha256 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -621,7 +621,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_sha256 (__global const
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_sha256_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_sha256_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -681,7 +681,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_sha256_final (__global
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_sha512 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_sha512 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -718,7 +718,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_sha512 (__global const
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_sha512_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_sha512_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -778,7 +778,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_sha512_final (__global
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_ripemd160 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_ripemd160 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -815,7 +815,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_ripemd160 (__global co
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_ripemd160_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_ripemd160_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -877,7 +877,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_essiv256_mk_ripemd160_final (__glo
 // cbc-plain
-DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128 (__global const u32 *in, u32 *out, const u32 *ks1, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128 (__global const u32 *in, u32 *out, const u32 *ks1, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -907,7 +907,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128 (__global const u32 *in,
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_sha1 (__global const u32 *in, u32 *mk, const u32 *ks1, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_sha1 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -940,7 +940,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_sha1 (__global const u
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_sha1_final (__global const u32 *in, u32 *mk, const u32 *ks1, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_sha1_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -996,7 +996,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_sha1_final (__global c
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_sha256 (__global const u32 *in, u32 *mk, const u32 *ks1, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_sha256 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1029,7 +1029,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_sha256 (__global const
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_sha256_final (__global const u32 *in, u32 *mk, const u32 *ks1, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_sha256_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1085,7 +1085,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_sha256_final (__global
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_sha512 (__global const u32 *in, u32 *mk, const u32 *ks1, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_sha512 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1118,7 +1118,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_sha512 (__global const
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_sha512_final (__global const u32 *in, u32 *mk, const u32 *ks1, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_sha512_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1174,7 +1174,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_sha512_final (__global
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_ripemd160 (__global const u32 *in, u32 *mk, const u32 *ks1, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_ripemd160 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1207,7 +1207,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_ripemd160 (__global co
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_ripemd160_final (__global const u32 *in, u32 *mk, const u32 *ks1, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_ripemd160_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1263,7 +1263,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_plain128_mk_ripemd160_final (__glo
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256 (__global const u32 *in, u32 *out, const u32 *ks1, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256 (__global const u32 *in, u32 *out, const u32 *ks1, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1293,7 +1293,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256 (__global const u32 *in,
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_sha1 (__global const u32 *in, u32 *mk, const u32 *ks1, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_sha1 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1326,7 +1326,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_sha1 (__global const u
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_sha1_final (__global const u32 *in, u32 *mk, const u32 *ks1, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_sha1_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1382,7 +1382,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_sha1_final (__global c
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_sha256 (__global const u32 *in, u32 *mk, const u32 *ks1, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_sha256 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1415,7 +1415,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_sha256 (__global const
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_sha256_final (__global const u32 *in, u32 *mk, const u32 *ks1, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_sha256_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1471,7 +1471,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_sha256_final (__global
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_sha512 (__global const u32 *in, u32 *mk, const u32 *ks1, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_sha512 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1504,7 +1504,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_sha512 (__global const
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_sha512_final (__global const u32 *in, u32 *mk, const u32 *ks1, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_sha512_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1560,7 +1560,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_sha512_final (__global
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_ripemd160 (__global const u32 *in, u32 *mk, const u32 *ks1, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_ripemd160 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1593,7 +1593,7 @@ DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_ripemd160 (__global co
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_ripemd160_final (__global const u32 *in, u32 *mk, const u32 *ks1, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_cbc_plain256_mk_ripemd160_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1695,7 +1695,7 @@ DECLSPEC void serpent256_decrypt_xts (const u32 *ks1, const u32 *in, u32 *out, u
 xts_mul2 (T, T);
 }
-DECLSPEC void luks_decrypt_sector_serpent_xts_plain256 (__global const u32 *in, u32 *out, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_xts_plain256 (__global const u32 *in, u32 *out, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1729,7 +1729,7 @@ DECLSPEC void luks_decrypt_sector_serpent_xts_plain256 (__global const u32 *in,
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_sha1 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_sha1 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1766,7 +1766,7 @@ DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_sha1 (__global const u
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_sha1_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_sha1_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1826,7 +1826,7 @@ DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_sha1_final (__global c
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_sha256 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_sha256 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1863,7 +1863,7 @@ DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_sha256 (__global const
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_sha256_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_sha256_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1923,7 +1923,7 @@ DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_sha256_final (__global
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_sha512 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_sha512 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -1960,7 +1960,7 @@ DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_sha512 (__global const
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_sha512_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_sha512_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -2020,7 +2020,7 @@ DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_sha512_final (__global
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_ripemd160 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_ripemd160 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -2057,7 +2057,7 @@ DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_ripemd160 (__global co
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_ripemd160_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_ripemd160_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -2117,7 +2117,7 @@ DECLSPEC void luks_decrypt_sector_serpent_xts_plain256_mk_ripemd160_final (__glo
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_xts_plain512 (__global const u32 *in, u32 *out, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_xts_plain512 (__global const u32 *in, u32 *out, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -2151,7 +2151,7 @@ DECLSPEC void luks_decrypt_sector_serpent_xts_plain512 (__global const u32 *in,
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_sha1 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_sha1 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -2188,7 +2188,7 @@ DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_sha1 (__global const u
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_sha1_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_sha1_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -2248,7 +2248,7 @@ DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_sha1_final (__global c
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_sha256 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_sha256 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -2285,7 +2285,7 @@ DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_sha256 (__global const
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_sha256_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_sha256_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -2345,7 +2345,7 @@ DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_sha256_final (__global
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_sha512 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_sha512 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -2382,7 +2382,7 @@ DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_sha512 (__global const
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_sha512_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_sha512_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -2442,7 +2442,7 @@ DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_sha512_final (__global
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_ripemd160 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_ripemd160 (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
@@ -2479,7 +2479,7 @@ DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_ripemd160 (__global co
 }
 }
-DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_ripemd160_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, volatile const u32 sector)
+DECLSPEC void luks_decrypt_sector_serpent_xts_plain512_mk_ripemd160_final (__global const u32 *in, u32 *mk, const u32 *ks1, const u32 *ks2, const u32 sector)
 {
 u32 S[4] = { sector, 0, 0, 0 };
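
In this file the dropped qualifier sits on the sector parameter itself. OpenCL passes it by value, so volatile only forced the callee to treat its private copy pessimistically; the signature stays call-compatible and the derived IV is unchanged. Each function seeds its IV from the sector number exactly as the context lines show. A minimal sketch of that seeding, assuming the cbc-plain convention of a 32-bit sector IV:

    typedef unsigned int u32;

    // IV seeding as in the context lines above:
    // u32 S[4] = { sector, 0, 0, 0 };
    static void sector_to_iv (const u32 sector, u32 S[4])
    {
      S[0] = sector; // 32-bit sector number in word 0
      S[1] = 0;
      S[2] = 0;
      S[3] = 0;
    }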

@@ -21,7 +21,7 @@ void truncate_right (u32 buf0[4], u32 buf1[4], const u32 offset)
 const u32 tmp = (1u << ((offset & 3u) * 8u)) - 1u;
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -80,7 +80,7 @@ void truncate_left (u32 buf0[4], u32 buf1[4], const u32 offset)
 const u32 tmp = ~((1u << ((offset & 3u) * 8u)) - 1u);
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -780,7 +780,7 @@ void append_block8 (const u32 offset, u32 buf0[4], u32 buf1[4], const u32 src_l0
 u32 s7 = 0;
 #ifdef IS_AMD
-volatile const int offset_switch = offset / 4;
+const int offset_switch = offset / 4;
 #else
 const int offset_switch = offset / 4;
 #endif
@@ -1361,7 +1361,7 @@ u32 rule_op_mangle_delete_at (MAYBE_UNUSED const u32 p0, MAYBE_UNUSED const u32
 const u32 mr = ~ml;
 #ifdef IS_AMD
-volatile const int p0_switch = p0 / 4;
+const int p0_switch = p0 / 4;
 #else
 const int p0_switch = p0 / 4;
 #endif
@@ -1468,7 +1468,7 @@ u32 rule_op_mangle_omit (MAYBE_UNUSED const u32 p0, MAYBE_UNUSED const u32 p1, M
 const u32 mr = ~ml;
 #ifdef IS_AMD
-volatile const int p0_switch = p0 / 4;
+const int p0_switch = p0 / 4;
 #else
 const int p0_switch = p0 / 4;
 #endif
@@ -1554,7 +1554,7 @@ u32 rule_op_mangle_insert (MAYBE_UNUSED const u32 p0, MAYBE_UNUSED const u32 p1,
 const u32 mr = 0xffffff00 << ((p0 & 3) * 8);
 #ifdef IS_AMD
-volatile const int p0_switch = p0 / 4;
+const int p0_switch = p0 / 4;
 #else
 const int p0_switch = p0 / 4;
 #endif
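
The rule-engine hunks above hinge on the same word/byte split, and truncate_right's context line shows the partial-word mask (1u << ((offset & 3u) * 8u)) - 1u, which keeps the low offset & 3 bytes of the boundary word. A self-contained toy version over an 8-word candidate buffer; a sketch of the idea, not the register-based original:

    #include <stdint.h>

    // Keep the low `offset` bytes of a 32-byte little-endian buffer,
    // zeroing everything past the cut.
    static void toy_truncate_right (uint32_t buf[8], const uint32_t offset)
    {
      const uint32_t tmp = (1u << ((offset & 3u) * 8u)) - 1u; // partial-word mask

      const int offset_switch = offset / 4; // first word that gets clipped

      for (int i = 0; i < 8; i++)
      {
        if      (i == offset_switch) buf[i] &= tmp; // boundary word
        else if (i >  offset_switch) buf[i]  = 0;   // fully beyond the cut
      }
    }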

@@ -335,7 +335,7 @@ __constant u32a c_sbox3[256] =
 // temporary hack for Apple Iris GPUs (with as little performance drop as possible)
 #if defined (IS_APPLE) && defined (IS_GPU)
-#define TMP_TYPE volatile u32
+#define TMP_TYPE u32
 #else
 #define TMP_TYPE u32
 #endif
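
After this change both arms expand TMP_TYPE to plain u32, so the macro survives purely as the hook point the comment describes, ready if the Apple Iris workaround has to come back. Hypothetical usage; the function and index expression are illustrative, not taken from the kernel:

    typedef unsigned int u32;

    #define TMP_TYPE u32 // both branches of the #if now expand to this

    // A scratch temporary of the hook type, e.g. an S-box index.
    static u32 sbox_index (const u32 L)
    {
      TMP_TYPE tmp = (L >> 16) & 0xff;

      return tmp;
    }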

@@ -250,7 +250,7 @@ __kernel void m12500_loop (__global pw_t *pws, __global const kernel_rule_t *rul
 iter++;
 }
-for (volatile u32 j = 0; j < p3; j++)
+for (u32 j = 0; j < p3; j++)
 {
 const u32 j16 = j * 16;

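A volatile induction variable is the costliest of these workarounds: the compiler must spill and reload j on every iteration and cannot unroll or strength-reduce the loop. With the qualifier gone this is an ordinary counted loop again. The effect in a toy reduction; names are illustrative:

    typedef unsigned int u32;

    // With a plain counter the compiler may unroll this loop and fold
    // j * 16 into the induction; volatile u32 j would forbid both.
    static u32 sum_of_j16 (const u32 p3)
    {
      u32 acc = 0;

      for (u32 j = 0; j < p3; j++) // was: for (volatile u32 j = 0; ...)
      {
        const u32 j16 = j * 16;

        acc += j16;
      }

      return acc;
    }
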
@@ -118,7 +118,7 @@ DECLSPEC void cram_md5_transform (const u32 w0[4], const u32 w1[4], const u32 w2
 DECLSPEC void cram_md5_update_64 (md5_ctx_t *ctx, u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 63;
+const int pos = ctx->len & 63;
 #else
 const int pos = ctx->len & 63;
 #endif

@@ -116,7 +116,7 @@ DECLSPEC void cram_md5_transform (const u32 w0[4], const u32 w1[4], const u32 w2
 DECLSPEC void cram_md5_update_64 (md5_ctx_t *ctx, u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 63;
+const int pos = ctx->len & 63;
 #else
 const int pos = ctx->len & 63;
 #endif

@@ -116,7 +116,7 @@ DECLSPEC void cram_md5_transform_vector (const u32x w0[4], const u32x w1[4], con
 DECLSPEC void cram_md5_update_vector_64 (md5_ctx_vector_t *ctx, u32x w0[4], u32x w1[4], u32x w2[4], u32x w3[4], const int len)
 {
 #ifdef IS_AMD
-volatile const int pos = ctx->len & 63;
+const int pos = ctx->len & 63;
 #else
 const int pos = ctx->len & 63;
 #endif
