From 70fc36bf01c65368796d96719123fc9282b04795 Mon Sep 17 00:00:00 2001
From: jsteube
Date: Sat, 20 Apr 2019 11:25:34 +0200
Subject: [PATCH] Reorganize inc_common.cl and make better use of HAS_* macros

---
 OpenCL/inc_common.cl | 1058 +++++++++++++++++++-----------------------
 OpenCL/inc_common.h  |    8 -
 2 files changed, 475 insertions(+), 591 deletions(-)

diff --git a/OpenCL/inc_common.cl b/OpenCL/inc_common.cl
index 5f5c3eb45..d497d349e 100644
--- a/OpenCL/inc_common.cl
+++ b/OpenCL/inc_common.cl
@@ -109,14 +109,10 @@ DECLSPEC u32 unpack_v8a_from_v32_S (const u32 v32)
 {
   u32 r = 0;
 
-  #if defined IS_NV
+  #if defined IS_NV && HAS_BFE == 1
   asm volatile ("bfe.u32 %0, %1, 0, 8;" : "=r"(r) : "r"(v32));
-  #elif defined IS_AMD
-  #if HAS_VBFE
-  __asm__ __volatile__ ("V_BFE_U32 %0, %1, 0, 8;" : "=v"(r) : "v"(v32));
-  #else
-  r = (v32 >> 0) & 0xff;
-  #endif
+  #elif defined IS_AMD && HAS_VBFE == 1
+  __asm__ __volatile__ ("V_BFE_U32 %0, %1, 0, 8;" : "=v"(r) : "v"(v32));
   #else
   r = (v32 >> 0) & 0xff;
   #endif
@@ -128,14 +124,10 @@ DECLSPEC u32 unpack_v8b_from_v32_S (const u32 v32)
 {
   u32 r = 0;
 
-  #if defined IS_NV
+  #if defined IS_NV && HAS_BFE == 1
   asm volatile ("bfe.u32 %0, %1, 8, 8;" : "=r"(r) : "r"(v32));
-  #elif defined IS_AMD
-  #if HAS_VBFE
-  __asm__ __volatile__ ("V_BFE_U32 %0, %1, 8, 8;" : "=v"(r) : "v"(v32));
-  #else
-  r = (v32 >> 8) & 0xff;
-  #endif
+  #elif defined IS_AMD && HAS_VBFE == 1
+  __asm__ __volatile__ ("V_BFE_U32 %0, %1, 8, 8;" : "=v"(r) : "v"(v32));
   #else
   r = (v32 >> 8) & 0xff;
   #endif
@@ -147,14 +139,10 @@ DECLSPEC u32 unpack_v8c_from_v32_S (const u32 v32)
 {
   u32 r = 0;
 
-  #if defined IS_NV
+  #if defined IS_NV && HAS_BFE == 1
   asm volatile ("bfe.u32 %0, %1, 16, 8;" : "=r"(r) : "r"(v32));
-  #elif defined IS_AMD
-  #if HAS_VBFE
-  __asm__ __volatile__ ("V_BFE_U32 %0, %1, 16, 8;" : "=v"(r) : "v"(v32));
-  #else
-  r = (v32 >> 16) & 0xff;
-  #endif
+  #elif defined IS_AMD && HAS_VBFE == 1
+  __asm__ __volatile__ ("V_BFE_U32 %0, %1, 16, 8;" : "=v"(r) : "v"(v32));
   #else
   r = (v32 >> 16) & 0xff;
   #endif
@@ -166,14 +154,10 @@ DECLSPEC u32 unpack_v8d_from_v32_S (const u32 v32)
 {
   u32 r = 0;
 
-  #if defined IS_NV
+  #if defined IS_NV && HAS_BFE == 1
   asm volatile ("bfe.u32 %0, %1, 24, 8;" : "=r"(r) : "r"(v32));
-  #elif defined IS_AMD
-  #if HAS_VBFE
-  __asm__ __volatile__ ("V_BFE_U32 %0, %1, 24, 8;" : "=v"(r) : "v"(v32));
-  #else
-  r = (v32 >> 24) & 0xff;
-  #endif
+  #elif defined IS_AMD && HAS_VBFE == 1
+  __asm__ __volatile__ ("V_BFE_U32 %0, %1, 24, 8;" : "=v"(r) : "v"(v32));
   #else
   r = (v32 >> 24) & 0xff;
   #endif
@@ -313,432 +297,415 @@ DECLSPEC u64x hl32_to_64 (const u32x a, const u32x b)
   return r;
 }
 
-#ifdef IS_AMD
+// bit rotates
+//
+// For _CPU_OPENCL_EMU_H we don't need to care about vector functions
+// The VECT_SIZE is guaranteed to be set to 1 from cpu_opencl_emu.h
 
-#if HAS_VPERM
-DECLSPEC u32 hc_swap32_S (const u32 v)
+DECLSPEC u32x hc_rotl32 (const u32x a, const int n)
 {
-  u32 r = 0;
-
-  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r) : "v"(v), "v"(0x00010203));
-
-  return r;
+  #ifdef _CPU_OPENCL_EMU_H
+  return rotl32 (a, n);
+  #else
+  return rotate (a, (u32x) (n));
+  #endif
 }
 
-DECLSPEC u64 hc_swap64_S (const u64 v)
-{
-  const u32 v0 = h32_from_64_S (v);
-  const u32 v1 = l32_from_64_S (v);
-
-  u32 t0;
-  u32 t1;
-
-  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t0) : "v"(v0), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t1) : "v"(v1), "v"(0x00010203));
-
-  const u64 r = hl32_to_64_S (t1, t0);
-
-  return r;
-}
-#else
-DECLSPEC u32 hc_swap32_S (const u32 v)
+DECLSPEC u32x hc_rotr32 (const u32x a, const int n)
 {
-  return as_uint (as_uchar4 (v).s3210);
+  #ifdef _CPU_OPENCL_EMU_H
+  return rotr32 (a, n);
+  #else
+  return rotate (a, (u32x) (32 - n));
+  #endif
 }
 
-DECLSPEC u64 hc_swap64_S (const u64 v)
+DECLSPEC u32 hc_rotl32_S (const u32 a, const int n)
 {
-  return (as_ulong (as_uchar8 (v).s76543210));
+  #ifdef _CPU_OPENCL_EMU_H
+  return rotl32 (a, n);
+  #else
+  return rotate (a, (u32) (n));
+  #endif
 }
-#endif
 
 DECLSPEC u32 hc_rotr32_S (const u32 a, const int n)
 {
-  return rotate (a, (u32) 32 - n);
+  #ifdef _CPU_OPENCL_EMU_H
+  return rotr32 (a, n);
+  #else
+  return rotate (a, (u32) (32 - n));
+  #endif
 }
 
-DECLSPEC u32 hc_rotl32_S (const u32 a, const int n)
+DECLSPEC u64x hc_rotl64 (const u64x a, const int n)
 {
-  return rotate (a, (u32) n);
+  #ifdef _CPU_OPENCL_EMU_H
+  return rotl64 (a, n);
+  #else
+  return rotate (a, (u64x) (n));
+  #endif
 }
 
-DECLSPEC u64 hc_rotr64_S (const u64 a, const int n)
+DECLSPEC u64x hc_rotr64 (const u64x a, const int n)
 {
-  const u32 a0 = h32_from_64_S (a);
-  const u32 a1 = l32_from_64_S (a);
-
-  const u32 t0 = (n >= 32) ? amd_bitalign (a0, a1, n - 32) : amd_bitalign (a1, a0, n);
-  const u32 t1 = (n >= 32) ? amd_bitalign (a1, a0, n - 32) : amd_bitalign (a0, a1, n);
-
-  const u64 r = hl32_to_64_S (t0, t1);
-
-  return r;
+  #ifdef _CPU_OPENCL_EMU_H
+  return rotr64 (a, n);
+  #else
+  return rotate (a, (u64x) (64 - n));
+  #endif
 }
 
 DECLSPEC u64 hc_rotl64_S (const u64 a, const int n)
 {
-  return hc_rotr64_S (a, 64 - n);
+  #ifdef _CPU_OPENCL_EMU_H
+  return rotl64 (a, n);
+  #else
+  return rotate (a, (u64) (n));
+  #endif
 }
 
-#if HAS_VPERM
-DECLSPEC u32x hc_swap32 (const u32x v)
+DECLSPEC u64 hc_rotr64_S (const u64 a, const int n)
 {
-  return bitselect (rotate (v, 24u), rotate (v, 8u), 0x00ff00ffu);
+  #ifdef _CPU_OPENCL_EMU_H
+  return rotr64 (a, n);
+  #else
+  return rotate (a, (u64) (64 - n));
+  #endif
 }
 
-DECLSPEC u64x hc_swap64 (const u64x v)
+// byte swap
+
+DECLSPEC u32x hc_swap32 (const u32x v)
 {
-  const u32x a0 = h32_from_64 (v);
-  const u32x a1 = l32_from_64 (v);
+  u32x r;
 
-  u32x t0;
-  u32x t1;
+  #ifdef _CPU_OPENCL_EMU_H
+  r = byte_swap_32 (v);
+  #else
+  #if defined IS_AMD && HAS_VPERM == 1
+
+  const u32 m = 0x00010203;
 
   #if VECT_SIZE == 1
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0) : "v"(0), "v"(a0), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1) : "v"(0), "v"(a1), "v"(0x00010203));
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r) : "v"(v), "v"(m));
   #endif
 
   #if VECT_SIZE >= 2
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s0) : "v"(0), "v"(a0.s0), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s0) : "v"(0), "v"(a1.s0), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s1) : "v"(0), "v"(a0.s1), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s1) : "v"(0), "v"(a1.s1), "v"(0x00010203));
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=r"(r.s0) : "r"(v.s0), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=r"(r.s1) : "r"(v.s1), "v"(m));
   #endif
 
   #if VECT_SIZE >= 4
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s2) : "v"(0), "v"(a0.s2), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s2) : "v"(0), "v"(a1.s2), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s3) : "v"(0), "v"(a0.s3), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s3) : "v"(0), "v"(a1.s3), "v"(0x00010203));
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=r"(r.s2) : "r"(v.s2), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=r"(r.s3) : "r"(v.s3), "v"(m));
   #endif
 
   #if VECT_SIZE >= 8
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s4) : "v"(0), "v"(a0.s4), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s4) : "v"(0), "v"(a1.s4), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s5) : "v"(0), "v"(a0.s5), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s5) : "v"(0), "v"(a1.s5), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s6) : "v"(0), "v"(a0.s6), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s6) : "v"(0), "v"(a1.s6), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s7) : "v"(0), "v"(a0.s7), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s7) : "v"(0), "v"(a1.s7), "v"(0x00010203));
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=r"(r.s4) : "r"(v.s4), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=r"(r.s5) : "r"(v.s5), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=r"(r.s6) : "r"(v.s6), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=r"(r.s7) : "r"(v.s7), "v"(m));
   #endif
 
   #if VECT_SIZE >= 16
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s8) : "v"(0), "v"(a0.s8), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s8) : "v"(0), "v"(a1.s8), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s9) : "v"(0), "v"(a0.s9), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s9) : "v"(0), "v"(a1.s9), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.sa) : "v"(0), "v"(a0.sa), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.sa) : "v"(0), "v"(a1.sa), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.sb) : "v"(0), "v"(a0.sb), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.sb) : "v"(0), "v"(a1.sb), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.sc) : "v"(0), "v"(a0.sc), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.sc) : "v"(0), "v"(a1.sc), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.sd) : "v"(0), "v"(a0.sd), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.sd) : "v"(0), "v"(a1.sd), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.se) : "v"(0), "v"(a0.se), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.se) : "v"(0), "v"(a1.se), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.sf) : "v"(0), "v"(a0.sf), "v"(0x00010203));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.sf) : "v"(0), "v"(a1.sf), "v"(0x00010203));
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=r"(r.s8) : "r"(v.s8), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=r"(r.s9) : "r"(v.s9), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=r"(r.sa) : "r"(v.sa), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=r"(r.sb) : "r"(v.sb), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=r"(r.sc) : "r"(v.sc), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=r"(r.sd) : "r"(v.sd), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=r"(r.se) : "r"(v.se), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=r"(r.sf) : "r"(v.sf), "v"(m));
   #endif
 
-  const u64x r = hl32_to_64 (t1, t0);
-
-  return r;
-}
-#else
-DECLSPEC u32x hc_swap32 (const u32x v)
-{
-  return bitselect (rotate (v, 24u), rotate (v, 8u), 0x00ff00ffu);
-}
+  #elif defined IS_NV && HAS_PRMT == 1
 
-DECLSPEC u64x hc_swap64 (const u64x v)
-{
-  return bitselect (bitselect (rotate (v, 24ul),
-                               rotate (v,  8ul), 0x000000ff000000fful),
-                    bitselect (rotate (v, 56ul),
-                               rotate (v, 40ul), 0x00ff000000ff0000ul),
-                    0xffff0000ffff0000ul);
-}
-#endif
+  #if VECT_SIZE == 1
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r) : "r"(v));
+  #endif
 
-DECLSPEC u32x hc_rotr32 (const u32x a, const int n)
-{
-  return rotate (a, (u32) 32 - n);
-}
+  #if VECT_SIZE >= 2
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s0) : "r"(v.s0));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s1) : "r"(v.s1));
+  #endif
 
-DECLSPEC u32x hc_rotl32 (const u32x a, const int n)
-{
-  return rotate (a, (u32) n);
-}
+  #if VECT_SIZE >= 4
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s2) : "r"(v.s2));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s3) : "r"(v.s3));
+  #endif
 
-DECLSPEC u64x hc_rotr64 (const u64x a, const int n)
-{
-  const u32x a0 = h32_from_64 (a);
-  const u32x a1 = l32_from_64 (a);
+  #if VECT_SIZE >= 8
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s4) : "r"(v.s4));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s5) : "r"(v.s5));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s6) : "r"(v.s6));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s7) : "r"(v.s7));
+  #endif
 
-  const u32x t0 = (n >= 32) ? amd_bitalign (a0, a1, n - 32) : amd_bitalign (a1, a0, n);
-  const u32x t1 = (n >= 32) ? amd_bitalign (a1, a0, n - 32) : amd_bitalign (a0, a1, n);
+  #if VECT_SIZE >= 16
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s8) : "r"(v.s8));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s9) : "r"(v.s9));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.sa) : "r"(v.sa));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.sb) : "r"(v.sb));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.sc) : "r"(v.sc));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.sd) : "r"(v.sd));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.se) : "r"(v.se));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.sf) : "r"(v.sf));
+  #endif
 
-  const u64x r = hl32_to_64 (t0, t1);
+  #else
+  r = bitselect (rotate (v, (u32x) (24)),
+                 rotate (v, (u32x) ( 8)),
+                         (u32x) (0x00ff00ff));
+  #endif
+  #endif
 
   return r;
 }
 
-DECLSPEC u64x hc_rotl64 (const u64x a, const int n)
-{
-  return hc_rotr64 (a, 64 - n);
-}
-
-DECLSPEC u32x hc_bfe (const u32x a, const u32x b, const u32x c)
-{
-  #define BIT(x) ((u32x) (1u) << (x))
-  #define BIT_MASK(x) (BIT (x) - 1)
-  #define BFE(x,y,z) (((x) >> (y)) & BIT_MASK (z))
-
-  return BFE (a, b, c);
-
-  #undef BIT
-  #undef BIT_MASK
-  #undef BFE
-}
-
-DECLSPEC u32 hc_bfe_S (const u32 a, const u32 b, const u32 c)
+DECLSPEC u32 hc_swap32_S (const u32 v)
 {
-  #define BIT(x) (1u << (x))
-  #define BIT_MASK(x) (BIT (x) - 1)
-  #define BFE(x,y,z) (((x) >> (y)) & BIT_MASK (z))
+  u32 r;
 
-  return BFE (a, b, c);
+  #ifdef _CPU_OPENCL_EMU_H
+  r = byte_swap_32 (v);
+  #else
+  #if defined IS_AMD && HAS_VPERM == 1
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(r) : "v"(v), "v"(0x00010203));
+  #elif defined IS_NV && HAS_PRMT == 1
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r) : "r"(v));
+  #else
+  r = as_uint (as_uchar4 (v).s3210);
+  #endif
+  #endif
 
-  #undef BIT
-  #undef BIT_MASK
-  #undef BFE
+  return r;
 }
 
-DECLSPEC u32x hc_bytealign_be (const u32x a, const u32x b, const int c)
+DECLSPEC u64x hc_swap64 (const u64x v)
 {
-  u32x r = 0;
+  u64x r;
 
-  switch (c & 3)
-  {
-    case 0: r = b; break;
-    case 1: r = (a << 24) | (b >> 8); break;
-    case 2: r = (a << 16) | (b >> 16); break;
-    case 3: r = (a << 8) | (b >> 24); break;
-  }
+  #ifdef _CPU_OPENCL_EMU_H
+  r = byte_swap_64 (v);
+  #else
+  #if defined IS_AMD && HAS_VPERM == 1
 
-  return r;
-}
+  const u32 m = 0x00010203;
 
-DECLSPEC u32 hc_bytealign_be_S (const u32 a, const u32 b, const int c)
-{
-  u32 r = 0;
+  const u32x a0 = h32_from_64 (v);
+  const u32x a1 = l32_from_64 (v);
 
-  switch (c & 3)
-  {
-    case 0: r = b; break;
-    case 1: r = (a << 24) | (b >> 8); break;
-    case 2: r = (a << 16) | (b >> 16); break;
-    case 3: r = (a << 8) | (b >> 24); break;
-  }
+  u32x t0;
+  u32x t1;
 
-  return r;
-}
+  #if VECT_SIZE == 1
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0) : "v"(0), "v"(a0), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1) : "v"(0), "v"(a1), "v"(m));
+  #endif
 
-DECLSPEC u32x hc_bytealign (const u32x a, const u32x b, const int c)
-{
-  u32x r = 0;
+  #if VECT_SIZE >= 2
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s0) : "v"(0), "v"(a0.s0), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s0) : "v"(0), "v"(a1.s0), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s1) : "v"(0), "v"(a0.s1), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s1) : "v"(0), "v"(a1.s1), "v"(m));
+  #endif
 
-  switch (c & 3)
-  {
-    case 0: r = b; break;
-    case 1: r = (a >> 24) | (b << 8); break;
-    case 2: r = (a >> 16) | (b << 16); break;
-    case 3: r = (a >> 8) | (b << 24); break;
-  }
+  #if VECT_SIZE >= 4
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s2) : "v"(0), "v"(a0.s2), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s2) : "v"(0), "v"(a1.s2), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s3) : "v"(0), "v"(a0.s3), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s3) : "v"(0), "v"(a1.s3), "v"(m));
+  #endif
 
-  return r;
-}
+  #if VECT_SIZE >= 8
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s4) : "v"(0), "v"(a0.s4), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s4) : "v"(0), "v"(a1.s4), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s5) : "v"(0), "v"(a0.s5), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s5) : "v"(0), "v"(a1.s5), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s6) : "v"(0), "v"(a0.s6), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s6) : "v"(0), "v"(a1.s6), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s7) : "v"(0), "v"(a0.s7), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s7) : "v"(0), "v"(a1.s7), "v"(m));
+  #endif
 
-DECLSPEC u32 hc_bytealign_S (const u32 a, const u32 b, const int c)
-{
-  u32 r = 0;
+  #if VECT_SIZE >= 16
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s8) : "v"(0), "v"(a0.s8), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s8) : "v"(0), "v"(a1.s8), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.s9) : "v"(0), "v"(a0.s9), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.s9) : "v"(0), "v"(a1.s9), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.sa) : "v"(0), "v"(a0.sa), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.sa) : "v"(0), "v"(a1.sa), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.sb) : "v"(0), "v"(a0.sb), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.sb) : "v"(0), "v"(a1.sb), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.sc) : "v"(0), "v"(a0.sc), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.sc) : "v"(0), "v"(a1.sc), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.sd) : "v"(0), "v"(a0.sd), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.sd) : "v"(0), "v"(a1.sd), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.se) : "v"(0), "v"(a0.se), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.se) : "v"(0), "v"(a1.se), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t0.sf) : "v"(0), "v"(a0.sf), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(t1.sf) : "v"(0), "v"(a1.sf), "v"(m));
+  #endif
 
-  switch (c & 3)
-  {
-    case 0: r = b; break;
-    case 1: r = (a >> 24) | (b << 8); break;
-    case 2: r = (a >> 16) | (b << 16); break;
-    case 3: r = (a >> 8) | (b << 24); break;
-  }
+  r = hl32_to_64 (t1, t0);
 
-  return r;
-}
+  #elif defined IS_NV && HAS_MOV64 == 1 && HAS_PRMT == 1
 
-#if HAS_VPERM
-DECLSPEC u32x hc_byte_perm (const u32x a, const u32x b, const int c)
-{
-  u32x r = 0;
+  u32x il;
+  u32x ir;
 
   #if VECT_SIZE == 1
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r) : "v"(b), "v"(a), "v"(c));
+  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il), "=r"(ir) : "l"(v));
   #endif
 
   #if VECT_SIZE >= 2
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s0) : "v"(b.s0), "v"(a.s0), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s1) : "v"(b.s1), "v"(a.s1), "v"(c));
+  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s0), "=r"(ir.s0) : "l"(v.s0));
+  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s1), "=r"(ir.s1) : "l"(v.s1));
   #endif
 
   #if VECT_SIZE >= 4
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s0) : "v"(b.s0), "v"(a.s0), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s1) : "v"(b.s1), "v"(a.s1), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s2) : "v"(b.s2), "v"(a.s2), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s3) : "v"(b.s3), "v"(a.s3), "v"(c));
+  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s2), "=r"(ir.s2) : "l"(v.s2));
+  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s3), "=r"(ir.s3) : "l"(v.s3));
   #endif
 
   #if VECT_SIZE >= 8
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s0) : "v"(b.s0), "v"(a.s0), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s1) : "v"(b.s1), "v"(a.s1), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s2) : "v"(b.s2), "v"(a.s2), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s3) : "v"(b.s3), "v"(a.s3), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s4) : "v"(b.s4), "v"(a.s4), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s5) : "v"(b.s5), "v"(a.s5), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s6) : "v"(b.s6), "v"(a.s6), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s7) : "v"(b.s7), "v"(a.s7), "v"(c));
+  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s4), "=r"(ir.s4) : "l"(v.s4));
+  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s5), "=r"(ir.s5) : "l"(v.s5));
+  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s6), "=r"(ir.s6) : "l"(v.s6));
+  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s7), "=r"(ir.s7) : "l"(v.s7));
   #endif
 
   #if VECT_SIZE >= 16
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s0) : "v"(b.s0), "v"(a.s0), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s1) : "v"(b.s1), "v"(a.s1), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s2) : "v"(b.s2), "v"(a.s2), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s3) : "v"(b.s3), "v"(a.s3), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s4) : "v"(b.s4), "v"(a.s4), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s5) : "v"(b.s5), "v"(a.s5), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s6) : "v"(b.s6), "v"(a.s6), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s7) : "v"(b.s7), "v"(a.s7), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s8) : "v"(b.s8), "v"(a.s8), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s9) : "v"(b.s9), "v"(a.s9), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.sa) : "v"(b.sa), "v"(a.sa), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.sb) : "v"(b.sb), "v"(a.sb), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.sc) : "v"(b.sc), "v"(a.sc), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.sd) : "v"(b.sd), "v"(a.sd), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.se) : "v"(b.se), "v"(a.se), "v"(c));
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.sf) : "v"(b.sf), "v"(a.sf), "v"(c));
+  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s8), "=r"(ir.s8) : "l"(v.s8));
+  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s9), "=r"(ir.s9) : "l"(v.s9));
+  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.sa), "=r"(ir.sa) : "l"(v.sa));
+  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.sb), "=r"(ir.sb) : "l"(v.sb));
+  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.sc), "=r"(ir.sc) : "l"(v.sc));
+  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.sd), "=r"(ir.sd) : "l"(v.sd));
+  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.se), "=r"(ir.se) : "l"(v.se));
+  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.sf), "=r"(ir.sf) : "l"(v.sf));
   #endif
 
-  return r;
-}
-
-DECLSPEC u32 hc_byte_perm_S (const u32 a, const u32 b, const int c)
-{
-  u32 r = 0;
-
-  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r) : "v"(b), "v"(a), "v"(c));
-
-  return r;
-}
-#endif
-
-#if HAS_VADD3
-DECLSPEC u32x hc_add3 (const u32x a, const u32x b, const u32x c)
-{
-  u32x r = 0;
+  u32x tl;
+  u32x tr;
 
   #if VECT_SIZE == 1
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r) : "v"(b), "v"(a), "v"(c));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl) : "r"(il));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr) : "r"(ir));
   #endif
 
   #if VECT_SIZE >= 2
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s0) : "v"(b.s0), "v"(a.s0), "v"(c.s0));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s1) : "v"(b.s1), "v"(a.s1), "v"(c.s1));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s0) : "r"(il.s0));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s0) : "r"(ir.s0));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s1) : "r"(il.s1));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s1) : "r"(ir.s1));
   #endif
 
   #if VECT_SIZE >= 4
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s0) : "v"(b.s0), "v"(a.s0), "v"(c.s0));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s1) : "v"(b.s1), "v"(a.s1), "v"(c.s1));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s2) : "v"(b.s2), "v"(a.s2), "v"(c.s2));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s3) : "v"(b.s3), "v"(a.s3), "v"(c.s3));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s2) : "r"(il.s2));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s2) : "r"(ir.s2));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s3) : "r"(il.s3));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s3) : "r"(ir.s3));
   #endif
 
   #if VECT_SIZE >= 8
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s0) : "v"(b.s0), "v"(a.s0), "v"(c.s0));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s1) : "v"(b.s1), "v"(a.s1), "v"(c.s1));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s2) : "v"(b.s2), "v"(a.s2), "v"(c.s2));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s3) : "v"(b.s3), "v"(a.s3), "v"(c.s3));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s4) : "v"(b.s4), "v"(a.s4), "v"(c.s4));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s5) : "v"(b.s5), "v"(a.s5), "v"(c.s5));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s6) : "v"(b.s6), "v"(a.s6), "v"(c.s6));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s7) : "v"(b.s7), "v"(a.s7), "v"(c.s7));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s4) : "r"(il.s4));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s4) : "r"(ir.s4));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s5) : "r"(il.s5));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s5) : "r"(ir.s5));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s6) : "r"(il.s6));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s6) : "r"(ir.s6));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s7) : "r"(il.s7));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s7) : "r"(ir.s7));
   #endif
 
   #if VECT_SIZE >= 16
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s0) : "v"(b.s0), "v"(a.s0), "v"(c.s0));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s1) : "v"(b.s1), "v"(a.s1), "v"(c.s1));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s2) : "v"(b.s2), "v"(a.s2), "v"(c.s2));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s3) : "v"(b.s3), "v"(a.s3), "v"(c.s3));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s4) : "v"(b.s4), "v"(a.s4), "v"(c.s4));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s5) : "v"(b.s5), "v"(a.s5), "v"(c.s5));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s6) : "v"(b.s6), "v"(a.s6), "v"(c.s6));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s7) : "v"(b.s7), "v"(a.s7), "v"(c.s7));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s8) : "v"(b.s8), "v"(a.s8), "v"(c.s8));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s9) : "v"(b.s9), "v"(a.s9), "v"(c.s9));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.sa) : "v"(b.sa), "v"(a.sa), "v"(c.sa));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.sb) : "v"(b.sb), "v"(a.sb), "v"(c.sb));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.sc) : "v"(b.sc), "v"(a.sc), "v"(c.sc));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.sd) : "v"(b.sd), "v"(a.sd), "v"(c.sd));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.se) : "v"(b.se), "v"(a.se), "v"(c.se));
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.sf) : "v"(b.sf), "v"(a.sf), "v"(c.sf));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s8) : "r"(il.s8));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s8) : "r"(ir.s8));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s9) : "r"(il.s9));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s9) : "r"(ir.s9));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.sa) : "r"(il.sa));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.sa) : "r"(ir.sa));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.sb) : "r"(il.sb));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.sb) : "r"(ir.sb));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.sc) : "r"(il.sc));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.sc) : "r"(ir.sc));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.sd) : "r"(il.sd));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.sd) : "r"(ir.sd));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.se) : "r"(il.se));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.se) : "r"(ir.se));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.sf) : "r"(il.sf));
+  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.sf) : "r"(ir.sf));
   #endif
 
-  return r;
-}
-
-DECLSPEC u32 hc_add3_S (const u32 a, const u32 b, const u32 c)
-{
-  u32 r = 0;
+  #if VECT_SIZE == 1
+  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r) : "r"(tr), "r"(tl));
+  #endif
 
-  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r) : "v"(b), "v"(a), "v"(c));
+  #if VECT_SIZE >= 2
+  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s0) : "r"(tr.s0), "r"(tl.s0));
+  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s1) : "r"(tr.s1), "r"(tl.s1));
+  #endif
 
-  return r;
-}
-#else
-DECLSPEC u32x hc_add3 (const u32x a, const u32x b, const u32x c)
-{
-  return a + b + c;
-}
+  #if VECT_SIZE >= 4
+  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s2) : "r"(tr.s2), "r"(tl.s2));
+  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s3) : "r"(tr.s3), "r"(tl.s3));
+  #endif
 
-DECLSPEC u32 hc_add3_S (const u32 a, const u32 b, const u32 c)
-{
-  return a + b + c;
-}
-#endif
+  #if VECT_SIZE >= 8
+  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s4) : "r"(tr.s4), "r"(tl.s4));
+  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s5) : "r"(tr.s5), "r"(tl.s5));
+  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s6) : "r"(tr.s6), "r"(tl.s6));
+  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s7) : "r"(tr.s7), "r"(tl.s7));
+  #endif
 
-DECLSPEC u32x hc_lop_0x96 (const u32x a, const u32x b, const u32x c)
-{
-  return a ^ b ^ c;
-}
+  #if VECT_SIZE >= 16
+  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s8) : "r"(tr.s8), "r"(tl.s8));
+  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s9) : "r"(tr.s9), "r"(tl.s9));
+  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.sa) : "r"(tr.sa), "r"(tl.sa));
+  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.sb) : "r"(tr.sb), "r"(tl.sb));
+  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.sc) : "r"(tr.sc), "r"(tl.sc));
+  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.sd) : "r"(tr.sd), "r"(tl.sd));
+  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.se) : "r"(tr.se), "r"(tl.se));
+  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.sf) : "r"(tr.sf), "r"(tl.sf));
+  #endif
 
-DECLSPEC u32 hc_lop_0x96_S (const u32 a, const u32 b, const u32 c)
-{
-  return a ^ b ^ c;
-}
+  #else
+  r = bitselect (bitselect (rotate (v, (u64x) (24)),
+                            rotate (v, (u64x) ( 8)),
+                            (u64x) (0x000000ff000000ff)),
+                 bitselect (rotate (v, (u64x) (56)),
+                            rotate (v, (u64x) (40)),
+                            (u64x) (0x00ff000000ff0000)),
+                 (u64x) (0xffff0000ffff0000));
+  #endif
+  #endif
 
-#endif
+  return r;
+}
 
-#ifdef IS_NV
-DECLSPEC u32 hc_swap32_S (const u32 v)
+DECLSPEC u64 hc_swap64_S (const u64 v)
 {
-  u32 r = 0;
+  u64 r;
 
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r) : "r"(v));
+  #ifdef _CPU_OPENCL_EMU_H
+  r = byte_swap_64 (v);
+  #else
+  #if defined IS_AMD && HAS_VPERM == 1
 
-  return r;
-}
+  const u32 m = 0x00010203;
 
-DECLSPEC u64 hc_swap64_S (const u64 v)
-{
+  const u32 v0 = h32_from_64_S (v);
+  const u32 v1 = l32_from_64_S (v);
+
+  u32 t0;
+  u32 t1;
+
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t0) : "v"(v0), "v"(m));
+  __asm__ __volatile__ ("V_PERM_B32 %0, 0, %1, %2;" : "=v"(t1) : "v"(v1), "v"(m));
+
+  r = hl32_to_64_S (t1, t0);
+
+  #elif defined IS_NV && HAS_PRMT == 1
+
   u32 il;
   u32 ir;
@@ -750,218 +717,255 @@ DECLSPEC u64 hc_swap64_S (const u64 v)
   asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl) : "r"(il));
   asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr) : "r"(ir));
 
-  u64 r;
-
   asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r) : "r"(tr), "r"(tl));
 
+  #else
+  r = as_ulong (as_uchar8 (v).s76543210);
+  #endif
+  #endif
+
   return r;
 }
 
-DECLSPEC u32 hc_rotr32_S (const u32 a, const int n)
-{
-  return rotate (a, (u32) 32 - n);
-}
+#ifdef IS_AMD
 
-DECLSPEC u32 hc_rotl32_S (const u32 a, const int n)
+DECLSPEC u32x hc_bfe (const u32x a, const u32x b, const u32x c)
 {
-  return rotate (a, (u32) n);
-}
+  #define BIT(x) ((u32x) (1u) << (x))
+  #define BIT_MASK(x) (BIT (x) - 1)
+  #define BFE(x,y,z) (((x) >> (y)) & BIT_MASK (z))
 
-DECLSPEC u64 hc_rotr64_S (const u64 a, const int n)
-{
-  return rotate (a, (u64) 64 - n);
+  return BFE (a, b, c);
+
+  #undef BIT
+  #undef BIT_MASK
+  #undef BFE
 }
 
-DECLSPEC u64 hc_rotl64_S (const u64 a, const int n)
+DECLSPEC u32 hc_bfe_S (const u32 a, const u32 b, const u32 c)
 {
-  return rotate (a, (u64) n);
+  #define BIT(x) (1u << (x))
+  #define BIT_MASK(x) (BIT (x) - 1)
+  #define BFE(x,y,z) (((x) >> (y)) & BIT_MASK (z))
+
+  return BFE (a, b, c);
+
+  #undef BIT
+  #undef BIT_MASK
+  #undef BFE
 }
 
-DECLSPEC u32x hc_swap32 (const u32x v)
+DECLSPEC u32x hc_bytealign_be (const u32x a, const u32x b, const int c)
 {
   u32x r = 0;
 
-  #if VECT_SIZE == 1
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r) : "r"(v));
-  #endif
-
-  #if VECT_SIZE >= 2
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s0) : "r"(v.s0));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s1) : "r"(v.s1));
-  #endif
+  switch (c & 3)
+  {
+    case 0: r = b; break;
+    case 1: r = (a << 24) | (b >> 8); break;
+    case 2: r = (a << 16) | (b >> 16); break;
+    case 3: r = (a << 8) | (b >> 24); break;
+  }
 
-  #if VECT_SIZE >= 4
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s2) : "r"(v.s2));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s3) : "r"(v.s3));
-  #endif
+  return r;
+}
 
-  #if VECT_SIZE >= 8
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s4) : "r"(v.s4));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s5) : "r"(v.s5));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s6) : "r"(v.s6));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s7) : "r"(v.s7));
-  #endif
+DECLSPEC u32 hc_bytealign_be_S (const u32 a, const u32 b, const int c)
+{
+  u32 r = 0;
 
-  #if VECT_SIZE >= 16
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s8) : "r"(v.s8));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.s9) : "r"(v.s9));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.sa) : "r"(v.sa));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.sb) : "r"(v.sb));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.sc) : "r"(v.sc));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.sd) : "r"(v.sd));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.se) : "r"(v.se));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(r.sf) : "r"(v.sf));
-  #endif
+  switch (c & 3)
+  {
+    case 0: r = b; break;
+    case 1: r = (a << 24) | (b >> 8); break;
+    case 2: r = (a << 16) | (b >> 16); break;
+    case 3: r = (a << 8) | (b >> 24); break;
+  }
 
   return r;
 }
 
-DECLSPEC u64x hc_swap64 (const u64x v)
+DECLSPEC u32x hc_bytealign (const u32x a, const u32x b, const int c)
 {
-  u32x il;
-  u32x ir;
+  u32x r = 0;
 
-  #if VECT_SIZE == 1
-  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il), "=r"(ir) : "l"(v));
-  #endif
+  switch (c & 3)
+  {
+    case 0: r = b; break;
+    case 1: r = (a >> 24) | (b << 8); break;
+    case 2: r = (a >> 16) | (b << 16); break;
+    case 3: r = (a >> 8) | (b << 24); break;
+  }
 
-  #if VECT_SIZE >= 2
-  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s0), "=r"(ir.s0) : "l"(v.s0));
-  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s1), "=r"(ir.s1) : "l"(v.s1));
-  #endif
+  return r;
+}
 
-  #if VECT_SIZE >= 4
-  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s2), "=r"(ir.s2) : "l"(v.s2));
-  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s3), "=r"(ir.s3) : "l"(v.s3));
-  #endif
+DECLSPEC u32 hc_bytealign_S (const u32 a, const u32 b, const int c)
+{
+  u32 r = 0;
 
-  #if VECT_SIZE >= 8
-  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s4), "=r"(ir.s4) : "l"(v.s4));
-  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s5), "=r"(ir.s5) : "l"(v.s5));
-  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s6), "=r"(ir.s6) : "l"(v.s6));
-  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s7), "=r"(ir.s7) : "l"(v.s7));
-  #endif
+  switch (c & 3)
+  {
+    case 0: r = b; break;
+    case 1: r = (a >> 24) | (b << 8); break;
+    case 2: r = (a >> 16) | (b << 16); break;
+    case 3: r = (a >> 8) | (b << 24); break;
+  }
 
-  #if VECT_SIZE >= 16
-  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s8), "=r"(ir.s8) : "l"(v.s8));
-  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.s9), "=r"(ir.s9) : "l"(v.s9));
-  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.sa), "=r"(ir.sa) : "l"(v.sa));
-  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.sb), "=r"(ir.sb) : "l"(v.sb));
-  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.sc), "=r"(ir.sc) : "l"(v.sc));
-  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.sd), "=r"(ir.sd) : "l"(v.sd));
-  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.se), "=r"(ir.se) : "l"(v.se));
-  asm volatile ("mov.b64 {%0, %1}, %2;" : "=r"(il.sf), "=r"(ir.sf) : "l"(v.sf));
-  #endif
+  return r;
+}
 
-  u32x tl;
-  u32x tr;
+#if HAS_VPERM
+DECLSPEC u32x hc_byte_perm (const u32x a, const u32x b, const int c)
+{
+  u32x r = 0;
 
   #if VECT_SIZE == 1
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl) : "r"(il));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr) : "r"(ir));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r) : "v"(b), "v"(a), "v"(c));
   #endif
 
   #if VECT_SIZE >= 2
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s0) : "r"(il.s0));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s0) : "r"(ir.s0));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s1) : "r"(il.s1));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s1) : "r"(ir.s1));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s0) : "v"(b.s0), "v"(a.s0), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s1) : "v"(b.s1), "v"(a.s1), "v"(c));
   #endif
 
   #if VECT_SIZE >= 4
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s2) : "r"(il.s2));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s2) : "r"(ir.s2));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s3) : "r"(il.s3));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s3) : "r"(ir.s3));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s0) : "v"(b.s0), "v"(a.s0), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s1) : "v"(b.s1), "v"(a.s1), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s2) : "v"(b.s2), "v"(a.s2), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s3) : "v"(b.s3), "v"(a.s3), "v"(c));
   #endif
 
   #if VECT_SIZE >= 8
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s4) : "r"(il.s4));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s4) : "r"(ir.s4));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s5) : "r"(il.s5));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s5) : "r"(ir.s5));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s6) : "r"(il.s6));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s6) : "r"(ir.s6));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s7) : "r"(il.s7));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s7) : "r"(ir.s7));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s0) : "v"(b.s0), "v"(a.s0), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s1) : "v"(b.s1), "v"(a.s1), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s2) : "v"(b.s2), "v"(a.s2), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s3) : "v"(b.s3), "v"(a.s3), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s4) : "v"(b.s4), "v"(a.s4), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s5) : "v"(b.s5), "v"(a.s5), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s6) : "v"(b.s6), "v"(a.s6), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s7) : "v"(b.s7), "v"(a.s7), "v"(c));
   #endif
 
   #if VECT_SIZE >= 16
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s8) : "r"(il.s8));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s8) : "r"(ir.s8));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.s9) : "r"(il.s9));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.s9) : "r"(ir.s9));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.sa) : "r"(il.sa));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.sa) : "r"(ir.sa));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.sb) : "r"(il.sb));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.sb) : "r"(ir.sb));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.sc) : "r"(il.sc));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.sc) : "r"(ir.sc));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.sd) : "r"(il.sd));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.sd) : "r"(ir.sd));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.se) : "r"(il.se));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.se) : "r"(ir.se));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tl.sf) : "r"(il.sf));
-  asm volatile ("prmt.b32 %0, %1, 0, 0x0123;" : "=r"(tr.sf) : "r"(ir.sf));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s0) : "v"(b.s0), "v"(a.s0), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s1) : "v"(b.s1), "v"(a.s1), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s2) : "v"(b.s2), "v"(a.s2), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s3) : "v"(b.s3), "v"(a.s3), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s4) : "v"(b.s4), "v"(a.s4), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s5) : "v"(b.s5), "v"(a.s5), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s6) : "v"(b.s6), "v"(a.s6), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s7) : "v"(b.s7), "v"(a.s7), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s8) : "v"(b.s8), "v"(a.s8), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.s9) : "v"(b.s9), "v"(a.s9), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.sa) : "v"(b.sa), "v"(a.sa), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.sb) : "v"(b.sb), "v"(a.sb), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.sc) : "v"(b.sc), "v"(a.sc), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.sd) : "v"(b.sd), "v"(a.sd), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.se) : "v"(b.se), "v"(a.se), "v"(c));
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r.sf) : "v"(b.sf), "v"(a.sf), "v"(c));
   #endif
 
-  u64x r;
+  return r;
+}
+
+DECLSPEC u32 hc_byte_perm_S (const u32 a, const u32 b, const int c)
+{
+  u32 r = 0;
+
+  __asm__ __volatile__ ("V_PERM_B32 %0, %1, %2, %3;" : "=v"(r) : "v"(b), "v"(a), "v"(c));
+
+  return r;
+}
+#endif
+
+#if HAS_VADD3
+DECLSPEC u32x hc_add3 (const u32x a, const u32x b, const u32x c)
+{
+  u32x r = 0;
 
   #if VECT_SIZE == 1
-  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r) : "r"(tr), "r"(tl));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r) : "v"(b), "v"(a), "v"(c));
   #endif
 
   #if VECT_SIZE >= 2
-  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s0) : "r"(tr.s0), "r"(tl.s0));
-  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s1) : "r"(tr.s1), "r"(tl.s1));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s0) : "v"(b.s0), "v"(a.s0), "v"(c.s0));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s1) : "v"(b.s1), "v"(a.s1), "v"(c.s1));
   #endif
 
   #if VECT_SIZE >= 4
-  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s2) : "r"(tr.s2), "r"(tl.s2));
-  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s3) : "r"(tr.s3), "r"(tl.s3));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s0) : "v"(b.s0), "v"(a.s0), "v"(c.s0));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s1) : "v"(b.s1), "v"(a.s1), "v"(c.s1));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s2) : "v"(b.s2), "v"(a.s2), "v"(c.s2));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s3) : "v"(b.s3), "v"(a.s3), "v"(c.s3));
   #endif
 
   #if VECT_SIZE >= 8
-  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s4) : "r"(tr.s4), "r"(tl.s4));
-  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s5) : "r"(tr.s5), "r"(tl.s5));
-  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s6) : "r"(tr.s6), "r"(tl.s6));
-  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s7) : "r"(tr.s7), "r"(tl.s7));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s0) : "v"(b.s0), "v"(a.s0), "v"(c.s0));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s1) : "v"(b.s1), "v"(a.s1), "v"(c.s1));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s2) : "v"(b.s2), "v"(a.s2), "v"(c.s2));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s3) : "v"(b.s3), "v"(a.s3), "v"(c.s3));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s4) : "v"(b.s4), "v"(a.s4), "v"(c.s4));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s5) : "v"(b.s5), "v"(a.s5), "v"(c.s5));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s6) : "v"(b.s6), "v"(a.s6), "v"(c.s6));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s7) : "v"(b.s7), "v"(a.s7), "v"(c.s7));
   #endif
 
   #if VECT_SIZE >= 16
-  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s8) : "r"(tr.s8), "r"(tl.s8));
-  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.s9) : "r"(tr.s9), "r"(tl.s9));
-  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.sa) : "r"(tr.sa), "r"(tl.sa));
-  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.sb) : "r"(tr.sb), "r"(tl.sb));
-  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.sc) : "r"(tr.sc), "r"(tl.sc));
-  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.sd) : "r"(tr.sd), "r"(tl.sd));
-  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.se) : "r"(tr.se), "r"(tl.se));
-  asm volatile ("mov.b64 %0, {%1, %2};" : "=l"(r.sf) : "r"(tr.sf), "r"(tl.sf));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s0) : "v"(b.s0), "v"(a.s0), "v"(c.s0));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s1) : "v"(b.s1), "v"(a.s1), "v"(c.s1));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s2) : "v"(b.s2), "v"(a.s2), "v"(c.s2));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s3) : "v"(b.s3), "v"(a.s3), "v"(c.s3));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s4) : "v"(b.s4), "v"(a.s4), "v"(c.s4));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s5) : "v"(b.s5), "v"(a.s5), "v"(c.s5));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s6) : "v"(b.s6), "v"(a.s6), "v"(c.s6));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s7) : "v"(b.s7), "v"(a.s7), "v"(c.s7));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s8) : "v"(b.s8), "v"(a.s8), "v"(c.s8));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.s9) : "v"(b.s9), "v"(a.s9), "v"(c.s9));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.sa) : "v"(b.sa), "v"(a.sa), "v"(c.sa));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.sb) : "v"(b.sb), "v"(a.sb), "v"(c.sb));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.sc) : "v"(b.sc), "v"(a.sc), "v"(c.sc));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.sd) : "v"(b.sd), "v"(a.sd), "v"(c.sd));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.se) : "v"(b.se), "v"(a.se), "v"(c.se));
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r.sf) : "v"(b.sf), "v"(a.sf), "v"(c.sf));
   #endif
 
   return r;
 }
 
-DECLSPEC u32x hc_rotr32 (const u32x a, const int n)
+DECLSPEC u32 hc_add3_S (const u32 a, const u32 b, const u32 c)
 {
-  return rotate (a, (u32x) 32 - n);
+  u32 r = 0;
+
+  __asm__ __volatile__ ("V_ADD3_U32 %0, %1, %2, %3;" : "=v"(r) : "v"(b), "v"(a), "v"(c));
+
+  return r;
+}
+#else
+DECLSPEC u32x hc_add3 (const u32x a, const u32x b, const u32x c)
+{
+  return a + b + c;
 }
 
-DECLSPEC u32x hc_rotl32 (const u32x a, const int n)
+DECLSPEC u32 hc_add3_S (const u32 a, const u32 b, const u32 c)
 {
-  return rotate (a, (u32x) n);
+  return a + b + c;
 }
+#endif
 
-DECLSPEC u64x hc_rotr64 (const u64x a, const int n)
+DECLSPEC u32x hc_lop_0x96 (const u32x a, const u32x b, const u32x c)
 {
-  return rotate (a, (u64x) 64 - n);
+  return a ^ b ^ c;
 }
 
-DECLSPEC u64x hc_rotl64 (const u64x a, const int n)
+DECLSPEC u32 hc_lop_0x96_S (const u32 a, const u32 b, const u32 c)
 {
-  return rotate (a, (u64x) n);
+  return a ^ b ^ c;
 }
+#endif
+
+#ifdef IS_NV
 
 DECLSPEC u32x hc_byte_perm (const u32x a, const u32x b, const int c)
 {
   u32x r = 0;
@@ -1157,118 +1161,6 @@ DECLSPEC u32 hc_lop_0x96_S (const u32 a, const u32 b, const u32 c)
 #endif
 
 #ifdef IS_GENERIC
-DECLSPEC u32 hc_swap32_S (const u32 v)
-{
-  #ifdef _CPU_OPENCL_EMU_H
-  return byte_swap_32 (v);
-  #else
-  return (as_uint (as_uchar4 (v).s3210));
-  #endif
-}
-
-DECLSPEC u64 hc_swap64_S (const u64 v)
-{
-  #ifdef _CPU_OPENCL_EMU_H
-  return byte_swap_64 (v);
-  #else
-  return (as_ulong (as_uchar8 (v).s76543210));
-  #endif
-}
-
-DECLSPEC u32 hc_rotr32_S (const u32 a, const int n)
-{
-  #ifdef _CPU_OPENCL_EMU_H
-  return rotr32 (a, n);
-  #else
-  return rotate (a, (u32) 32 - n);
-  #endif
-}
-
-DECLSPEC u32 hc_rotl32_S (const u32 a, const int n)
-{
-  #ifdef _CPU_OPENCL_EMU_H
-  return rotl32 (a, n);
-  #else
-  return rotate (a, (u32) n);
-  #endif
-}
-
-DECLSPEC u64 hc_rotr64_S (const u64 a, const int n)
-{
-  #ifdef _CPU_OPENCL_EMU_H
-  return rotr64 (a, n);
-  #else
-  return rotate (a, (u64) 64 - n);
-  #endif
-}
-
-DECLSPEC u64 hc_rotl64_S (const u64 a, const int n)
-{
-  #ifdef _CPU_OPENCL_EMU_H
-  return rotl64 (a, n);
-  #else
-  return rotate (a, (u64) n);
-  #endif
-}
-
-DECLSPEC u32x hc_swap32 (const u32x v)
-{
-  return ((v >> 24) & 0x000000ff)
-       | ((v >>  8) & 0x0000ff00)
-       | ((v <<  8) & 0x00ff0000)
-       | ((v << 24) & 0xff000000);
-}
-
-DECLSPEC u64x hc_swap64 (const u64x v)
-{
-  return ((v >> 56) & 0x00000000000000ff)
-       | ((v >> 40) & 0x000000000000ff00)
-       | ((v >> 24) & 0x0000000000ff0000)
-       | ((v >>  8) & 0x00000000ff000000)
-       | ((v <<  8) & 0x000000ff00000000)
-       | ((v << 24) & 0x0000ff0000000000)
-       | ((v << 40) & 0x00ff000000000000)
-       | ((v << 56) & 0xff00000000000000);
-}
-
-// For _CPU_OPENCL_EMU_H we dont need to care about vector functions
-// The VECT_SIZE is guaranteed to be set to 1 from cpu_opencl_emu.h
-
-DECLSPEC u32x hc_rotr32 (const u32x a, const int n)
-{
-  #ifdef _CPU_OPENCL_EMU_H
-  return rotr32 (a, n);
-  #else
-  return rotate (a, (u32x) 32 - n);
-  #endif
-}
-
-DECLSPEC u32x hc_rotl32 (const u32x a, const int n)
-{
-  #ifdef _CPU_OPENCL_EMU_H
-  return rotl32 (a, n);
-  #else
-  return rotate (a, (u32x) n);
-  #endif
-}
-
-DECLSPEC u64x hc_rotr64 (const u64x a, const int n)
-{
-  #ifdef _CPU_OPENCL_EMU_H
-  return rotr64 (a, n);
-  #else
-  return rotate (a, (u64x) 64 - n);
-  #endif
-}
-
-DECLSPEC u64x hc_rotl64 (const u64x a, const int n)
-{
-  #ifdef _CPU_OPENCL_EMU_H
-  return rotl64 (a, n);
-  #else
-  return rotate (a, (u64x) n);
-  #endif
-}
 
 DECLSPEC u32x hc_bfe (const u32x a, const u32x b, const u32x c)
 {
diff --git a/OpenCL/inc_common.h b/OpenCL/inc_common.h
index 92161834e..bdcb16d38 100644
--- a/OpenCL/inc_common.h
+++ b/OpenCL/inc_common.h
@@ -118,20 +118,12 @@ DECLSPEC u64 hl32_to_64_S (const u32 a, const u32 b);
 // bit operations
 
 DECLSPEC u32x hc_rotl32 (const u32x a, const int n);
-DECLSPEC u32x hc_rotl32 (const u32x a, const int n);
-DECLSPEC u32x hc_rotr32 (const u32x a, const int n);
 DECLSPEC u32x hc_rotr32 (const u32x a, const int n);
 DECLSPEC u32 hc_rotl32_S (const u32 a, const int n);
-DECLSPEC u32 hc_rotl32_S (const u32 a, const int n);
-DECLSPEC u32 hc_rotr32_S (const u32 a, const int n);
 DECLSPEC u32 hc_rotr32_S (const u32 a, const int n);
 DECLSPEC u64x hc_rotl64 (const u64x a, const int n);
-DECLSPEC u64x hc_rotl64 (const u64x a, const int n);
-DECLSPEC u64x hc_rotr64 (const u64x a, const int n);
 DECLSPEC u64x hc_rotr64 (const u64x a, const int n);
 DECLSPEC u64 hc_rotl64_S (const u64 a, const int n);
-DECLSPEC u64 hc_rotl64_S (const u64 a, const int n);
-DECLSPEC u64 hc_rotr64_S (const u64 a, const int n);
 DECLSPEC u64 hc_rotr64_S (const u64 a, const int n);
 
 DECLSPEC u32x hc_swap32 (const u32x v);
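The inline-asm fast paths in this patch must stay bit-identical to the plain-C
#else fallbacks, which are ordinary byte reversals: the 0x00010203 selector
given to V_PERM_B32 and the 0x0123 selector given to prmt.b32 both encode
"result byte i takes source byte (3 - i)". A minimal host-side C sketch of
that fallback semantics (not part of the patch; test values are made up for
illustration):

  #include <assert.h>
  #include <stdint.h>

  // Reverse the byte order of a 32-bit word, matching the generic
  // hc_swap32_S fallback above.
  static uint32_t swap32 (const uint32_t v)
  {
    return  (v >> 24)
         | ((v >>  8) & 0x0000ff00)
         | ((v <<  8) & 0x00ff0000)
         |  (v << 24);
  }

  // A 64-bit byte swap is two 32-bit swaps with the halves exchanged,
  // which is the role of the hl32_to_64_S (t1, t0) combine step above.
  static uint64_t swap64 (const uint64_t v)
  {
    return ((uint64_t) swap32 ((uint32_t) v) << 32)
         |  (uint64_t) swap32 ((uint32_t) (v >> 32));
  }

  int main (void)
  {
    assert (swap32 (0x11223344)            == 0x44332211);
    assert (swap64 (0x1122334455667788ULL) == 0x8877665544332211ULL);

    return 0;
  }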