Add pure kernels for OpenCart
parent b6cf3144de
commit 29e13d6b77
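For reference, the hash chain these kernels implement (ctx1, then ctx2, then ctx) is sha1($salt . sha1($salt . sha1($pass))), with each inner digest fed into the next stage as 40 bytes of lowercase hex. Below is a minimal host-side sketch of that construction; it uses OpenSSL's SHA1() purely for illustration, and the password, salt, and hash:salt output line are made-up examples, not part of this commit.

/* opencart_ref.c - illustrative reference for the -m 13900 scheme,
 * sha1 ($salt . sha1 ($salt . sha1 ($pass))), inner digests as lowercase hex.
 * Assumes OpenSSL; not part of the kernels in this commit.
 */
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

// hash buf[0..len) with SHA-1 and write 40 lowercase hex chars + NUL into out
static void sha1_hex (const unsigned char *buf, size_t len, char out[41])
{
  unsigned char dgst[SHA_DIGEST_LENGTH];

  SHA1 (buf, len, dgst);

  for (int i = 0; i < SHA_DIGEST_LENGTH; i++) sprintf (out + i * 2, "%02x", dgst[i]);
}

int main (void)
{
  const char *pass = "hashcat";   // illustrative password
  const char *salt = "123456789"; // illustrative salt

  char h1[41], h2[41], h3[41];
  unsigned char buf[256];
  size_t len;

  // inner: sha1 ($pass)
  sha1_hex ((const unsigned char *) pass, strlen (pass), h1);

  // middle: sha1 ($salt . hex (sha1 ($pass)))
  len = 0;
  memcpy (buf + len, salt, strlen (salt)); len += strlen (salt);
  memcpy (buf + len, h1, 40);              len += 40;
  sha1_hex (buf, len, h2);

  // outer: sha1 ($salt . hex (middle)) -- this is the stored digest
  len = 0;
  memcpy (buf + len, salt, strlen (salt)); len += strlen (salt);
  memcpy (buf + len, h2, 40);              len += 40;
  sha1_hex (buf, len, h3);

  printf ("%s:%s\n", h3, salt); // hash:salt line, illustrative format only

  return 0;
}

Compile against libcrypto (for example: gcc opencart_ref.c -lcrypto) to produce a test vector end to end.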
356 OpenCL/m13900_a0.cl Normal file
@@ -0,0 +1,356 @@
/**
 * Author......: See docs/credits.txt
 * License.....: MIT
 */

//#define NEW_SIMD_CODE

#include "inc_vendor.cl"
#include "inc_hash_constants.h"
#include "inc_hash_functions.cl"
#include "inc_types.cl"
#include "inc_common.cl"
#include "inc_rp.h"
#include "inc_rp.cl"
#include "inc_scalar.cl"
#include "inc_hash_sha1.cl"

#if VECT_SIZE == 1
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i)])
#elif VECT_SIZE == 2
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i).s0], l_bin2asc[(i).s1])
#elif VECT_SIZE == 4
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3])
#elif VECT_SIZE == 8
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3], l_bin2asc[(i).s4], l_bin2asc[(i).s5], l_bin2asc[(i).s6], l_bin2asc[(i).s7])
#elif VECT_SIZE == 16
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3], l_bin2asc[(i).s4], l_bin2asc[(i).s5], l_bin2asc[(i).s6], l_bin2asc[(i).s7], l_bin2asc[(i).s8], l_bin2asc[(i).s9], l_bin2asc[(i).sa], l_bin2asc[(i).sb], l_bin2asc[(i).sc], l_bin2asc[(i).sd], l_bin2asc[(i).se], l_bin2asc[(i).sf])
#endif

__kernel void m13900_mxx (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global const void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);
  const u32 lsz = get_local_size (0);

  /**
   * shared
   */

  __local u32 l_bin2asc[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    const u32 i0 = (i >> 0) & 15;
    const u32 i1 = (i >> 4) & 15;

    l_bin2asc[i] = ((i0 < 10) ? '0' + i0 : 'a' - 10 + i0) << 0
                 | ((i1 < 10) ? '0' + i1 : 'a' - 10 + i1) << 8;
  }

  barrier (CLK_LOCAL_MEM_FENCE);

  if (gid >= gid_max) return;

  /**
   * base
   */

  const u32 pw_len = pws[gid].pw_len;

  const u32 pw_lenv = ceil ((float) pw_len / 4);

  u32 w[64] = { 0 };

  for (int idx = 0; idx < pw_lenv; idx++)
  {
    w[idx] = pws[gid].i[idx];
  }

  sha1_ctx_t ctx0;

  sha1_init (&ctx0);

  sha1_update_global_swap (&ctx0, salt_bufs[salt_pos].salt_buf, salt_bufs[salt_pos].salt_len);

  /**
   * loop
   */

  for (u32 il_pos = 0; il_pos < il_cnt; il_pos++)
  {
    // todo: add rules engine

    sha1_ctx_t ctx1;

    sha1_init (&ctx1);

    sha1_update_swap (&ctx1, w, pw_len);

    sha1_final (&ctx1);

    u32 a = ctx1.h[0];
    u32 b = ctx1.h[1];
    u32 c = ctx1.h[2];
    u32 d = ctx1.h[3];
    u32 e = ctx1.h[4];

    sha1_ctx_t ctx2 = ctx0;

    u32 w0[4];
    u32 w1[4];
    u32 w2[4];
    u32 w3[4];

    w0[0] = uint_to_hex_lower8_le ((a >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w0[1] = uint_to_hex_lower8_le ((a >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 8) & 255) << 16;
    w0[2] = uint_to_hex_lower8_le ((b >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w0[3] = uint_to_hex_lower8_le ((b >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 8) & 255) << 16;
    w1[0] = uint_to_hex_lower8_le ((c >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w1[1] = uint_to_hex_lower8_le ((c >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 8) & 255) << 16;
    w1[2] = uint_to_hex_lower8_le ((d >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w1[3] = uint_to_hex_lower8_le ((d >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 8) & 255) << 16;
    w2[0] = uint_to_hex_lower8_le ((e >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w2[1] = uint_to_hex_lower8_le ((e >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 8) & 255) << 16;
    w2[2] = 0;
    w2[3] = 0;
    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
    w3[3] = 0;

    sha1_update_64 (&ctx2, w0, w1, w2, w3, 40);

    sha1_final (&ctx2);

    a = ctx2.h[0];
    b = ctx2.h[1];
    c = ctx2.h[2];
    d = ctx2.h[3];
    e = ctx2.h[4];

    sha1_ctx_t ctx = ctx0;

    w0[0] = uint_to_hex_lower8_le ((a >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w0[1] = uint_to_hex_lower8_le ((a >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 8) & 255) << 16;
    w0[2] = uint_to_hex_lower8_le ((b >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w0[3] = uint_to_hex_lower8_le ((b >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 8) & 255) << 16;
    w1[0] = uint_to_hex_lower8_le ((c >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w1[1] = uint_to_hex_lower8_le ((c >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 8) & 255) << 16;
    w1[2] = uint_to_hex_lower8_le ((d >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w1[3] = uint_to_hex_lower8_le ((d >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 8) & 255) << 16;
    w2[0] = uint_to_hex_lower8_le ((e >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w2[1] = uint_to_hex_lower8_le ((e >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 8) & 255) << 16;
    w2[2] = 0;
    w2[3] = 0;
    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
    w3[3] = 0;

    sha1_update_64 (&ctx, w0, w1, w2, w3, 40);

    sha1_final (&ctx);

    const u32 r0 = ctx.h[DGST_R0];
    const u32 r1 = ctx.h[DGST_R1];
    const u32 r2 = ctx.h[DGST_R2];
    const u32 r3 = ctx.h[DGST_R3];

    COMPARE_M_SCALAR (r0, r1, r2, r3);
  }
}

__kernel void m13900_sxx (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global const void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);
  const u32 lsz = get_local_size (0);

  /**
   * shared
   */

  __local u32 l_bin2asc[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    const u32 i0 = (i >> 0) & 15;
    const u32 i1 = (i >> 4) & 15;

    l_bin2asc[i] = ((i0 < 10) ? '0' + i0 : 'a' - 10 + i0) << 0
                 | ((i1 < 10) ? '0' + i1 : 'a' - 10 + i1) << 8;
  }

  barrier (CLK_LOCAL_MEM_FENCE);

  if (gid >= gid_max) return;

  /**
   * digest
   */

  const u32 search[4] =
  {
    digests_buf[digests_offset].digest_buf[DGST_R0],
    digests_buf[digests_offset].digest_buf[DGST_R1],
    digests_buf[digests_offset].digest_buf[DGST_R2],
    digests_buf[digests_offset].digest_buf[DGST_R3]
  };

  /**
   * base
   */

  const u32 pw_len = pws[gid].pw_len;

  const u32 pw_lenv = ceil ((float) pw_len / 4);

  u32 w[64] = { 0 };

  for (int idx = 0; idx < pw_lenv; idx++)
  {
    w[idx] = pws[gid].i[idx];
  }

  sha1_ctx_t ctx0;

  sha1_init (&ctx0);

  sha1_update_global_swap (&ctx0, salt_bufs[salt_pos].salt_buf, salt_bufs[salt_pos].salt_len);

  /**
   * loop
   */

  for (u32 il_pos = 0; il_pos < il_cnt; il_pos++)
  {
    // todo: add rules engine

    sha1_ctx_t ctx1;

    sha1_init (&ctx1);

    sha1_update_swap (&ctx1, w, pw_len);

    sha1_final (&ctx1);

    u32 a = ctx1.h[0];
    u32 b = ctx1.h[1];
    u32 c = ctx1.h[2];
    u32 d = ctx1.h[3];
    u32 e = ctx1.h[4];

    sha1_ctx_t ctx2 = ctx0;

    u32 w0[4];
    u32 w1[4];
    u32 w2[4];
    u32 w3[4];

    w0[0] = uint_to_hex_lower8_le ((a >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w0[1] = uint_to_hex_lower8_le ((a >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 8) & 255) << 16;
    w0[2] = uint_to_hex_lower8_le ((b >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w0[3] = uint_to_hex_lower8_le ((b >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 8) & 255) << 16;
    w1[0] = uint_to_hex_lower8_le ((c >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w1[1] = uint_to_hex_lower8_le ((c >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 8) & 255) << 16;
    w1[2] = uint_to_hex_lower8_le ((d >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w1[3] = uint_to_hex_lower8_le ((d >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 8) & 255) << 16;
    w2[0] = uint_to_hex_lower8_le ((e >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w2[1] = uint_to_hex_lower8_le ((e >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 8) & 255) << 16;
    w2[2] = 0;
    w2[3] = 0;
    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
    w3[3] = 0;

    sha1_update_64 (&ctx2, w0, w1, w2, w3, 40);

    sha1_final (&ctx2);

    a = ctx2.h[0];
    b = ctx2.h[1];
    c = ctx2.h[2];
    d = ctx2.h[3];
    e = ctx2.h[4];

    sha1_ctx_t ctx = ctx0;

    w0[0] = uint_to_hex_lower8_le ((a >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w0[1] = uint_to_hex_lower8_le ((a >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 8) & 255) << 16;
    w0[2] = uint_to_hex_lower8_le ((b >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w0[3] = uint_to_hex_lower8_le ((b >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 8) & 255) << 16;
    w1[0] = uint_to_hex_lower8_le ((c >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w1[1] = uint_to_hex_lower8_le ((c >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 8) & 255) << 16;
    w1[2] = uint_to_hex_lower8_le ((d >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w1[3] = uint_to_hex_lower8_le ((d >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 8) & 255) << 16;
    w2[0] = uint_to_hex_lower8_le ((e >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w2[1] = uint_to_hex_lower8_le ((e >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 8) & 255) << 16;
    w2[2] = 0;
    w2[3] = 0;
    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
    w3[3] = 0;

    sha1_update_64 (&ctx, w0, w1, w2, w3, 40);

    sha1_final (&ctx);

    const u32 r0 = ctx.h[DGST_R0];
    const u32 r1 = ctx.h[DGST_R1];
    const u32 r2 = ctx.h[DGST_R2];
    const u32 r3 = ctx.h[DGST_R3];

    COMPARE_S_SCALAR (r0, r1, r2, r3);
  }
}
336 OpenCL/m13900_a1.cl Normal file
@@ -0,0 +1,336 @@
/**
 * Author......: See docs/credits.txt
 * License.....: MIT
 */

//#define NEW_SIMD_CODE

#include "inc_vendor.cl"
#include "inc_hash_constants.h"
#include "inc_hash_functions.cl"
#include "inc_types.cl"
#include "inc_common.cl"
#include "inc_scalar.cl"
#include "inc_hash_sha1.cl"

#if VECT_SIZE == 1
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i)])
#elif VECT_SIZE == 2
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i).s0], l_bin2asc[(i).s1])
#elif VECT_SIZE == 4
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3])
#elif VECT_SIZE == 8
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3], l_bin2asc[(i).s4], l_bin2asc[(i).s5], l_bin2asc[(i).s6], l_bin2asc[(i).s7])
#elif VECT_SIZE == 16
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3], l_bin2asc[(i).s4], l_bin2asc[(i).s5], l_bin2asc[(i).s6], l_bin2asc[(i).s7], l_bin2asc[(i).s8], l_bin2asc[(i).s9], l_bin2asc[(i).sa], l_bin2asc[(i).sb], l_bin2asc[(i).sc], l_bin2asc[(i).sd], l_bin2asc[(i).se], l_bin2asc[(i).sf])
#endif

__kernel void m13900_mxx (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global const void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);
  const u32 lsz = get_local_size (0);

  /**
   * shared
   */

  __local u32 l_bin2asc[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    const u32 i0 = (i >> 0) & 15;
    const u32 i1 = (i >> 4) & 15;

    l_bin2asc[i] = ((i0 < 10) ? '0' + i0 : 'a' - 10 + i0) << 0
                 | ((i1 < 10) ? '0' + i1 : 'a' - 10 + i1) << 8;
  }

  barrier (CLK_LOCAL_MEM_FENCE);

  if (gid >= gid_max) return;

  /**
   * base
   */

  sha1_ctx_t ctx0;

  sha1_init (&ctx0);

  sha1_update_global_swap (&ctx0, salt_bufs[salt_pos].salt_buf, salt_bufs[salt_pos].salt_len);

  sha1_ctx_t ctx1l;

  sha1_init (&ctx1l);

  sha1_update_global_swap (&ctx1l, pws[gid].i, pws[gid].pw_len);

  /**
   * loop
   */

  for (u32 il_pos = 0; il_pos < il_cnt; il_pos++)
  {
    sha1_ctx_t ctx1 = ctx1l;

    sha1_update_global_swap (&ctx1, combs_buf[il_pos].i, combs_buf[il_pos].pw_len);

    sha1_final (&ctx1);

    u32 a = ctx1.h[0];
    u32 b = ctx1.h[1];
    u32 c = ctx1.h[2];
    u32 d = ctx1.h[3];
    u32 e = ctx1.h[4];

    sha1_ctx_t ctx2 = ctx0;

    u32 w0[4];
    u32 w1[4];
    u32 w2[4];
    u32 w3[4];

    w0[0] = uint_to_hex_lower8_le ((a >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w0[1] = uint_to_hex_lower8_le ((a >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 8) & 255) << 16;
    w0[2] = uint_to_hex_lower8_le ((b >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w0[3] = uint_to_hex_lower8_le ((b >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 8) & 255) << 16;
    w1[0] = uint_to_hex_lower8_le ((c >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w1[1] = uint_to_hex_lower8_le ((c >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 8) & 255) << 16;
    w1[2] = uint_to_hex_lower8_le ((d >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w1[3] = uint_to_hex_lower8_le ((d >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 8) & 255) << 16;
    w2[0] = uint_to_hex_lower8_le ((e >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w2[1] = uint_to_hex_lower8_le ((e >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 8) & 255) << 16;
    w2[2] = 0;
    w2[3] = 0;
    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
    w3[3] = 0;

    sha1_update_64 (&ctx2, w0, w1, w2, w3, 40);

    sha1_final (&ctx2);

    a = ctx2.h[0];
    b = ctx2.h[1];
    c = ctx2.h[2];
    d = ctx2.h[3];
    e = ctx2.h[4];

    sha1_ctx_t ctx = ctx0;

    w0[0] = uint_to_hex_lower8_le ((a >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w0[1] = uint_to_hex_lower8_le ((a >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 8) & 255) << 16;
    w0[2] = uint_to_hex_lower8_le ((b >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w0[3] = uint_to_hex_lower8_le ((b >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 8) & 255) << 16;
    w1[0] = uint_to_hex_lower8_le ((c >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w1[1] = uint_to_hex_lower8_le ((c >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 8) & 255) << 16;
    w1[2] = uint_to_hex_lower8_le ((d >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w1[3] = uint_to_hex_lower8_le ((d >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 8) & 255) << 16;
    w2[0] = uint_to_hex_lower8_le ((e >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w2[1] = uint_to_hex_lower8_le ((e >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 8) & 255) << 16;
    w2[2] = 0;
    w2[3] = 0;
    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
    w3[3] = 0;

    sha1_update_64 (&ctx, w0, w1, w2, w3, 40);

    sha1_final (&ctx);

    const u32 r0 = ctx.h[DGST_R0];
    const u32 r1 = ctx.h[DGST_R1];
    const u32 r2 = ctx.h[DGST_R2];
    const u32 r3 = ctx.h[DGST_R3];

    COMPARE_M_SCALAR (r0, r1, r2, r3);
  }
}

__kernel void m13900_sxx (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __global const bf_t *bfs_buf, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global const void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);
  const u32 lsz = get_local_size (0);

  /**
   * shared
   */

  __local u32 l_bin2asc[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    const u32 i0 = (i >> 0) & 15;
    const u32 i1 = (i >> 4) & 15;

    l_bin2asc[i] = ((i0 < 10) ? '0' + i0 : 'a' - 10 + i0) << 0
                 | ((i1 < 10) ? '0' + i1 : 'a' - 10 + i1) << 8;
  }

  barrier (CLK_LOCAL_MEM_FENCE);

  if (gid >= gid_max) return;

  /**
   * digest
   */

  const u32 search[4] =
  {
    digests_buf[digests_offset].digest_buf[DGST_R0],
    digests_buf[digests_offset].digest_buf[DGST_R1],
    digests_buf[digests_offset].digest_buf[DGST_R2],
    digests_buf[digests_offset].digest_buf[DGST_R3]
  };

  /**
   * base
   */

  sha1_ctx_t ctx0;

  sha1_init (&ctx0);

  sha1_update_global_swap (&ctx0, salt_bufs[salt_pos].salt_buf, salt_bufs[salt_pos].salt_len);

  sha1_ctx_t ctx1l;

  sha1_init (&ctx1l);

  sha1_update_global_swap (&ctx1l, pws[gid].i, pws[gid].pw_len);

  /**
   * loop
   */

  for (u32 il_pos = 0; il_pos < il_cnt; il_pos++)
  {
    sha1_ctx_t ctx1 = ctx1l;

    sha1_update_global_swap (&ctx1, combs_buf[il_pos].i, combs_buf[il_pos].pw_len);

    sha1_final (&ctx1);

    u32 a = ctx1.h[0];
    u32 b = ctx1.h[1];
    u32 c = ctx1.h[2];
    u32 d = ctx1.h[3];
    u32 e = ctx1.h[4];

    sha1_ctx_t ctx2 = ctx0;

    u32 w0[4];
    u32 w1[4];
    u32 w2[4];
    u32 w3[4];

    w0[0] = uint_to_hex_lower8_le ((a >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w0[1] = uint_to_hex_lower8_le ((a >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 8) & 255) << 16;
    w0[2] = uint_to_hex_lower8_le ((b >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w0[3] = uint_to_hex_lower8_le ((b >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 8) & 255) << 16;
    w1[0] = uint_to_hex_lower8_le ((c >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w1[1] = uint_to_hex_lower8_le ((c >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 8) & 255) << 16;
    w1[2] = uint_to_hex_lower8_le ((d >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w1[3] = uint_to_hex_lower8_le ((d >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 8) & 255) << 16;
    w2[0] = uint_to_hex_lower8_le ((e >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w2[1] = uint_to_hex_lower8_le ((e >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 8) & 255) << 16;
    w2[2] = 0;
    w2[3] = 0;
    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
    w3[3] = 0;

    sha1_update_64 (&ctx2, w0, w1, w2, w3, 40);

    sha1_final (&ctx2);

    a = ctx2.h[0];
    b = ctx2.h[1];
    c = ctx2.h[2];
    d = ctx2.h[3];
    e = ctx2.h[4];

    sha1_ctx_t ctx = ctx0;

    w0[0] = uint_to_hex_lower8_le ((a >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w0[1] = uint_to_hex_lower8_le ((a >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 8) & 255) << 16;
    w0[2] = uint_to_hex_lower8_le ((b >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w0[3] = uint_to_hex_lower8_le ((b >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 8) & 255) << 16;
    w1[0] = uint_to_hex_lower8_le ((c >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w1[1] = uint_to_hex_lower8_le ((c >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 8) & 255) << 16;
    w1[2] = uint_to_hex_lower8_le ((d >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w1[3] = uint_to_hex_lower8_le ((d >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 8) & 255) << 16;
    w2[0] = uint_to_hex_lower8_le ((e >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w2[1] = uint_to_hex_lower8_le ((e >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 8) & 255) << 16;
    w2[2] = 0;
    w2[3] = 0;
    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
    w3[3] = 0;

    sha1_update_64 (&ctx, w0, w1, w2, w3, 40);

    sha1_final (&ctx);

    const u32 r0 = ctx.h[DGST_R0];
    const u32 r1 = ctx.h[DGST_R1];
    const u32 r2 = ctx.h[DGST_R2];
    const u32 r3 = ctx.h[DGST_R3];

    COMPARE_S_SCALAR (r0, r1, r2, r3);
  }
}
374 OpenCL/m13900_a3.cl Normal file
@@ -0,0 +1,374 @@
/**
 * Author......: See docs/credits.txt
 * License.....: MIT
 */

#define NEW_SIMD_CODE

#include "inc_vendor.cl"
#include "inc_hash_constants.h"
#include "inc_hash_functions.cl"
#include "inc_types.cl"
#include "inc_common.cl"
#include "inc_simd.cl"
#include "inc_hash_sha1.cl"

#if VECT_SIZE == 1
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i)])
#elif VECT_SIZE == 2
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i).s0], l_bin2asc[(i).s1])
#elif VECT_SIZE == 4
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3])
#elif VECT_SIZE == 8
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3], l_bin2asc[(i).s4], l_bin2asc[(i).s5], l_bin2asc[(i).s6], l_bin2asc[(i).s7])
#elif VECT_SIZE == 16
#define uint_to_hex_lower8_le(i) (u32x) (l_bin2asc[(i).s0], l_bin2asc[(i).s1], l_bin2asc[(i).s2], l_bin2asc[(i).s3], l_bin2asc[(i).s4], l_bin2asc[(i).s5], l_bin2asc[(i).s6], l_bin2asc[(i).s7], l_bin2asc[(i).s8], l_bin2asc[(i).s9], l_bin2asc[(i).sa], l_bin2asc[(i).sb], l_bin2asc[(i).sc], l_bin2asc[(i).sd], l_bin2asc[(i).se], l_bin2asc[(i).sf])
#endif

__kernel void m13900_mxx (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __constant const u32x *words_buf_r, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global const void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);
  const u32 lsz = get_local_size (0);

  /**
   * shared
   */

  __local u32 l_bin2asc[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    const u32 i0 = (i >> 0) & 15;
    const u32 i1 = (i >> 4) & 15;

    l_bin2asc[i] = ((i0 < 10) ? '0' + i0 : 'a' - 10 + i0) << 0
                 | ((i1 < 10) ? '0' + i1 : 'a' - 10 + i1) << 8;
  }

  barrier (CLK_LOCAL_MEM_FENCE);

  if (gid >= gid_max) return;

  /**
   * base
   */

  const u32 pw_len = pws[gid].pw_len;

  const u32 pw_lenv = ceil ((float) pw_len / 4);

  u32x w[64] = { 0 };

  for (int idx = 0; idx < pw_lenv; idx++)
  {
    w[idx] = pws[gid].i[idx];
  }

  sha1_ctx_t ctx0;

  sha1_init (&ctx0);

  sha1_update_global_swap (&ctx0, salt_bufs[salt_pos].salt_buf, salt_bufs[salt_pos].salt_len);

  /**
   * loop
   */

  u32x w0l = w[0];

  for (u32 il_pos = 0; il_pos < il_cnt; il_pos += VECT_SIZE)
  {
    const u32x w0r = words_buf_r[il_pos / VECT_SIZE];

    const u32x w0lr = w0l | w0r;

    w[0] = w0lr;

    sha1_ctx_vector_t ctx1;

    sha1_init_vector (&ctx1);

    sha1_update_vector (&ctx1, w, pw_len);

    sha1_final_vector (&ctx1);

    u32x a = ctx1.h[0];
    u32x b = ctx1.h[1];
    u32x c = ctx1.h[2];
    u32x d = ctx1.h[3];
    u32x e = ctx1.h[4];

    sha1_ctx_vector_t ctx2;

    sha1_init_vector_from_scalar (&ctx2, &ctx0);

    u32x w0[4];
    u32x w1[4];
    u32x w2[4];
    u32x w3[4];

    w0[0] = uint_to_hex_lower8_le ((a >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w0[1] = uint_to_hex_lower8_le ((a >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 8) & 255) << 16;
    w0[2] = uint_to_hex_lower8_le ((b >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w0[3] = uint_to_hex_lower8_le ((b >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 8) & 255) << 16;
    w1[0] = uint_to_hex_lower8_le ((c >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w1[1] = uint_to_hex_lower8_le ((c >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 8) & 255) << 16;
    w1[2] = uint_to_hex_lower8_le ((d >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w1[3] = uint_to_hex_lower8_le ((d >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 8) & 255) << 16;
    w2[0] = uint_to_hex_lower8_le ((e >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w2[1] = uint_to_hex_lower8_le ((e >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 8) & 255) << 16;
    w2[2] = 0;
    w2[3] = 0;
    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
    w3[3] = 0;

    sha1_update_vector_64 (&ctx2, w0, w1, w2, w3, 40);

    sha1_final_vector (&ctx2);

    a = ctx2.h[0];
    b = ctx2.h[1];
    c = ctx2.h[2];
    d = ctx2.h[3];
    e = ctx2.h[4];

    sha1_ctx_vector_t ctx;

    sha1_init_vector_from_scalar (&ctx, &ctx0);

    w0[0] = uint_to_hex_lower8_le ((a >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w0[1] = uint_to_hex_lower8_le ((a >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 8) & 255) << 16;
    w0[2] = uint_to_hex_lower8_le ((b >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w0[3] = uint_to_hex_lower8_le ((b >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 8) & 255) << 16;
    w1[0] = uint_to_hex_lower8_le ((c >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w1[1] = uint_to_hex_lower8_le ((c >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 8) & 255) << 16;
    w1[2] = uint_to_hex_lower8_le ((d >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w1[3] = uint_to_hex_lower8_le ((d >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 8) & 255) << 16;
    w2[0] = uint_to_hex_lower8_le ((e >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w2[1] = uint_to_hex_lower8_le ((e >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 8) & 255) << 16;
    w2[2] = 0;
    w2[3] = 0;
    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
    w3[3] = 0;

    sha1_update_vector_64 (&ctx, w0, w1, w2, w3, 40);

    sha1_final_vector (&ctx);

    const u32x r0 = ctx.h[DGST_R0];
    const u32x r1 = ctx.h[DGST_R1];
    const u32x r2 = ctx.h[DGST_R2];
    const u32x r3 = ctx.h[DGST_R3];

    COMPARE_M_SIMD (r0, r1, r2, r3);
  }
}

__kernel void m13900_sxx (__global pw_t *pws, __global const kernel_rule_t *rules_buf, __global const pw_t *combs_buf, __constant const u32x *words_buf_r, __global void *tmps, __global void *hooks, __global const u32 *bitmaps_buf_s1_a, __global const u32 *bitmaps_buf_s1_b, __global const u32 *bitmaps_buf_s1_c, __global const u32 *bitmaps_buf_s1_d, __global const u32 *bitmaps_buf_s2_a, __global const u32 *bitmaps_buf_s2_b, __global const u32 *bitmaps_buf_s2_c, __global const u32 *bitmaps_buf_s2_d, __global plain_t *plains_buf, __global const digest_t *digests_buf, __global u32 *hashes_shown, __global const salt_t *salt_bufs, __global const void *esalt_bufs, __global u32 *d_return_buf, __global u32 *d_scryptV0_buf, __global u32 *d_scryptV1_buf, __global u32 *d_scryptV2_buf, __global u32 *d_scryptV3_buf, const u32 bitmap_mask, const u32 bitmap_shift1, const u32 bitmap_shift2, const u32 salt_pos, const u32 loop_pos, const u32 loop_cnt, const u32 il_cnt, const u32 digests_cnt, const u32 digests_offset, const u32 combs_mode, const u32 gid_max)
{
  /**
   * modifier
   */

  const u32 gid = get_global_id (0);
  const u32 lid = get_local_id (0);
  const u32 lsz = get_local_size (0);

  /**
   * shared
   */

  __local u32 l_bin2asc[256];

  for (u32 i = lid; i < 256; i += lsz)
  {
    const u32 i0 = (i >> 0) & 15;
    const u32 i1 = (i >> 4) & 15;

    l_bin2asc[i] = ((i0 < 10) ? '0' + i0 : 'a' - 10 + i0) << 0
                 | ((i1 < 10) ? '0' + i1 : 'a' - 10 + i1) << 8;
  }

  barrier (CLK_LOCAL_MEM_FENCE);

  if (gid >= gid_max) return;

  /**
   * digest
   */

  const u32 search[4] =
  {
    digests_buf[digests_offset].digest_buf[DGST_R0],
    digests_buf[digests_offset].digest_buf[DGST_R1],
    digests_buf[digests_offset].digest_buf[DGST_R2],
    digests_buf[digests_offset].digest_buf[DGST_R3]
  };

  /**
   * base
   */

  const u32 pw_len = pws[gid].pw_len;

  const u32 pw_lenv = ceil ((float) pw_len / 4);

  u32x w[64] = { 0 };

  for (int idx = 0; idx < pw_lenv; idx++)
  {
    w[idx] = pws[gid].i[idx];
  }

  sha1_ctx_t ctx0;

  sha1_init (&ctx0);

  sha1_update_global_swap (&ctx0, salt_bufs[salt_pos].salt_buf, salt_bufs[salt_pos].salt_len);

  /**
   * loop
   */

  u32x w0l = w[0];

  for (u32 il_pos = 0; il_pos < il_cnt; il_pos += VECT_SIZE)
  {
    const u32x w0r = words_buf_r[il_pos / VECT_SIZE];

    const u32x w0lr = w0l | w0r;

    w[0] = w0lr;

    sha1_ctx_vector_t ctx1;

    sha1_init_vector (&ctx1);

    sha1_update_vector (&ctx1, w, pw_len);

    sha1_final_vector (&ctx1);

    u32x a = ctx1.h[0];
    u32x b = ctx1.h[1];
    u32x c = ctx1.h[2];
    u32x d = ctx1.h[3];
    u32x e = ctx1.h[4];

    sha1_ctx_vector_t ctx2;

    sha1_init_vector_from_scalar (&ctx2, &ctx0);

    u32x w0[4];
    u32x w1[4];
    u32x w2[4];
    u32x w3[4];

    w0[0] = uint_to_hex_lower8_le ((a >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w0[1] = uint_to_hex_lower8_le ((a >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 8) & 255) << 16;
    w0[2] = uint_to_hex_lower8_le ((b >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w0[3] = uint_to_hex_lower8_le ((b >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 8) & 255) << 16;
    w1[0] = uint_to_hex_lower8_le ((c >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w1[1] = uint_to_hex_lower8_le ((c >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 8) & 255) << 16;
    w1[2] = uint_to_hex_lower8_le ((d >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w1[3] = uint_to_hex_lower8_le ((d >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 8) & 255) << 16;
    w2[0] = uint_to_hex_lower8_le ((e >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w2[1] = uint_to_hex_lower8_le ((e >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 8) & 255) << 16;
    w2[2] = 0;
    w2[3] = 0;
    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
    w3[3] = 0;

    sha1_update_vector_64 (&ctx2, w0, w1, w2, w3, 40);

    sha1_final_vector (&ctx2);

    a = ctx2.h[0];
    b = ctx2.h[1];
    c = ctx2.h[2];
    d = ctx2.h[3];
    e = ctx2.h[4];

    sha1_ctx_vector_t ctx;

    sha1_init_vector_from_scalar (&ctx, &ctx0);

    w0[0] = uint_to_hex_lower8_le ((a >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 24) & 255) << 16;
    w0[1] = uint_to_hex_lower8_le ((a >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((a >> 8) & 255) << 16;
    w0[2] = uint_to_hex_lower8_le ((b >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 24) & 255) << 16;
    w0[3] = uint_to_hex_lower8_le ((b >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((b >> 8) & 255) << 16;
    w1[0] = uint_to_hex_lower8_le ((c >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 24) & 255) << 16;
    w1[1] = uint_to_hex_lower8_le ((c >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((c >> 8) & 255) << 16;
    w1[2] = uint_to_hex_lower8_le ((d >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 24) & 255) << 16;
    w1[3] = uint_to_hex_lower8_le ((d >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((d >> 8) & 255) << 16;
    w2[0] = uint_to_hex_lower8_le ((e >> 16) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 24) & 255) << 16;
    w2[1] = uint_to_hex_lower8_le ((e >> 0) & 255) << 0
          | uint_to_hex_lower8_le ((e >> 8) & 255) << 16;
    w2[2] = 0;
    w2[3] = 0;
    w3[0] = 0;
    w3[1] = 0;
    w3[2] = 0;
    w3[3] = 0;

    sha1_update_vector_64 (&ctx, w0, w1, w2, w3, 40);

    sha1_final_vector (&ctx);

    const u32x r0 = ctx.h[DGST_R0];
    const u32x r1 = ctx.h[DGST_R1];
    const u32x r2 = ctx.h[DGST_R2];
    const u32x r3 = ctx.h[DGST_R3];

    COMPARE_S_SIMD (r0, r1, r2, r3);
  }
}
@@ -9,7 +9,7 @@ TDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 
 # missing hash types: 5200,6251,6261,6271,6281
 
-HASH_TYPES="0 10 11 12 20 21 22 23 30 40 50 60 100 101 110 111 112 120 121 122 125 130 131 132 133 140 141 150 160 200 300 400 500 600 900 1000 1100 1300 1400 1410 1411 1420 1430 1440 1441 1450 1460 1500 1600 1700 1710 1711 1720 1722 1730 1731 1740 1750 1760 1800 2100 2400 2410 2500 2600 2611 2612 2711 2811 3000 3100 3200 3710 3711 3800 3910 4010 4110 4300 4400 4500 4520 4521 4522 4700 4800 4900 5000 5100 5300 5400 5500 5600 5700 5800 6000 6100 6211 6212 6213 6221 6222 6223 6231 6232 6233 6241 6242 6243 6300 6400 6500 6600 6700 6800 6900 7000 7100 7200 7300 7400 7500 7700 7800 7900 8000 8100 8200 8300 8400 8500 8600 8700 8900 9100 9200 9300 9400 9500 9600 9700 9800 9900 10000 10100 10200 10300 10400 10500 10600 10700 10800 10900 11000 11100 11200 11300 11400 11500 11600 11900 12000 12001 12100 12200 12300 12400 12600 12700 12800 12900 13000 13100 13200 13300 13400 13500 13600 13800 14000 14100 14400 14600 14700 14800 14900 15000 15100 15200 15300 15400 15500 15600 15700 15800 99999"
+HASH_TYPES="0 10 11 12 20 21 22 23 30 40 50 60 100 101 110 111 112 120 121 122 125 130 131 132 133 140 141 150 160 200 300 400 500 600 900 1000 1100 1300 1400 1410 1411 1420 1430 1440 1441 1450 1460 1500 1600 1700 1710 1711 1720 1722 1730 1731 1740 1750 1760 1800 2100 2400 2410 2500 2600 2611 2612 2711 2811 3000 3100 3200 3710 3711 3800 3910 4010 4110 4300 4400 4500 4520 4521 4522 4700 4800 4900 5000 5100 5300 5400 5500 5600 5700 5800 6000 6100 6211 6212 6213 6221 6222 6223 6231 6232 6233 6241 6242 6243 6300 6400 6500 6600 6700 6800 6900 7000 7100 7200 7300 7400 7500 7700 7800 7900 8000 8100 8200 8300 8400 8500 8600 8700 8900 9100 9200 9300 9400 9500 9600 9700 9800 9900 10000 10100 10200 10300 10400 10500 10600 10700 10800 10900 11000 11100 11200 11300 11400 11500 11600 11900 12000 12001 12100 12200 12300 12400 12600 12700 12800 12900 13000 13100 13200 13300 13400 13500 13600 13800 13900 14000 14100 14400 14600 14700 14800 14900 15000 15100 15200 15300 15400 15500 15600 15700 15800 99999"
 
 #ATTACK_MODES="0 1 3 6 7"
 ATTACK_MODES="0 1 3 7"