/**
 * Author......: See docs/credits.txt
 * License.....: MIT
 */

//#define NEW_SIMD_CODE

#ifdef KERNEL_STATIC
#include M2S(INCLUDE_PATH/inc_vendor.h)
#include M2S(INCLUDE_PATH/inc_types.h)
#include M2S(INCLUDE_PATH/inc_platform.cl)
#include M2S(INCLUDE_PATH/inc_common.cl)
#include M2S(INCLUDE_PATH/inc_rp_optimized.h)
#include M2S(INCLUDE_PATH/inc_rp_optimized.cl)
#include M2S(INCLUDE_PATH/inc_simd.cl)
#endif

DECLSPEC u32 MurmurHash64A_truncated (PRIVATE_AS const u32 *data, const u32 len)
{
  #define M 0xc6a4a7935bd1e995
  #define R 47

  // Initialize hash
  u64 hash = len * M;

  // Twice the number of u64 blocks
  const u32 num_u32_blocks = (len / 8) * 2;

  // Loop over one u64 at a time
  u32 i = 0;

  while (i < num_u32_blocks)
  {
    // Reconstruct u64 from two u32s
    u64 k = hl32_to_64 (data[i + 1], data[i]);

    k *= M;
    k ^= k >> R;
    k *= M;

    hash ^= k;
    hash *= M;

    i += 2;
  }

  // Up to 7 overflow bytes
  const u32 overflow = len & 7;

  if (overflow > 4)
  {
    hash ^= hl32_to_64 (data[i + 1], data[i]);
    hash *= M;
  }
  else if (overflow > 0)
  {
    hash ^= hl32_to_64 (0, data[i]);
    hash *= M;
  }

  hash ^= hash >> R;
  hash *= M;
  hash ^= hash >> R;

  #undef M
  #undef R

  // Truncate to high 4 bytes
  return (u32) (hash >> 32);
}

KERNEL_FQ KERNEL_FA void m34211_m04 (KERN_ATTR_RULES ())
{
  /**
   * modifier
   */

  const u64 lid = get_local_id (0);

  /**
   * base
   */

  const u64 gid = get_global_id (0);

  if (gid >= GID_CNT) return;

  u32 pw_buf0[4];
  u32 pw_buf1[4];

  pw_buf0[0] = pws[gid].i[0];
  pw_buf0[1] = pws[gid].i[1];
  pw_buf0[2] = pws[gid].i[2];
  pw_buf0[3] = pws[gid].i[3];
  pw_buf1[0] = pws[gid].i[4];
  pw_buf1[1] = pws[gid].i[5];
  pw_buf1[2] = pws[gid].i[6];
  pw_buf1[3] = pws[gid].i[7];

  const u32 pw_len = pws[gid].pw_len;

  /**
   * loop
   */

  for (u32 il_pos = 0; il_pos < IL_CNT; il_pos += VECT_SIZE)
  {
    u32x w[16] = { 0 };

    const u32x out_len = apply_rules_vect_optimized (pw_buf0, pw_buf1, pw_len, rules_buf, il_pos, w + 0, w + 4);

    u32x hash = MurmurHash64A_truncated (w, out_len);

    const u32x z = 0;

    COMPARE_M_SIMD (hash, z, z, z);
  }
}

KERNEL_FQ KERNEL_FA void m34211_m08 (KERN_ATTR_RULES ())
{
}

KERNEL_FQ KERNEL_FA void m34211_m16 (KERN_ATTR_RULES ())
{
}

KERNEL_FQ KERNEL_FA void m34211_s04 (KERN_ATTR_RULES ())
{
  /**
   * modifier
   */

  const u64 lid = get_local_id (0);

  /**
   * base
   */

  const u64 gid = get_global_id (0);

  if (gid >= GID_CNT) return;

  u32 pw_buf0[4];
  u32 pw_buf1[4];

  pw_buf0[0] = pws[gid].i[0];
  pw_buf0[1] = pws[gid].i[1];
  pw_buf0[2] = pws[gid].i[2];
  pw_buf0[3] = pws[gid].i[3];
  pw_buf1[0] = pws[gid].i[4];
  pw_buf1[1] = pws[gid].i[5];
  pw_buf1[2] = pws[gid].i[6];
  pw_buf1[3] = pws[gid].i[7];

  const u32 pw_len = pws[gid].pw_len;

  /**
   * digest
   */

  const u32 search[4] =
  {
    digests_buf[DIGESTS_OFFSET_HOST].digest_buf[DGST_R0],
    0,
    0,
    0
  };

  /**
   * loop
   */

  for (u32 il_pos = 0; il_pos < IL_CNT; il_pos += VECT_SIZE)
  {
    u32x w[16] = { 0 };

    const u32x out_len = apply_rules_vect_optimized (pw_buf0, pw_buf1, pw_len, rules_buf, il_pos, w + 0, w + 4);

    u32x hash = MurmurHash64A_truncated (w, out_len);

    const u32x z = 0;

    COMPARE_S_SIMD (hash, z, z, z);
  }
}

KERNEL_FQ KERNEL_FA void m34211_s08 (KERN_ATTR_RULES ())
{
}

KERNEL_FQ KERNEL_FA void m34211_s16 (KERN_ATTR_RULES ())
{
}