pull/574/head
jsteube 8 years ago
commit 720b307ed7

@@ -756,217 +756,260 @@ inline void append_block1 (const u32 offset, u32 buf0[4], u32 buf1[4], const u32
inline void append_block8 (const u32 offset, u32 buf0[4], u32 buf1[4], const u32 src_l0[4], const u32 src_l1[4], const u32 src_r0[4], const u32 src_r1[4])
{
switch (offset)
#if defined IS_AMD || defined IS_GENERIC
const int offset_mod_4 = offset & 3;
const int offset_minus_4 = 4 - offset;
u32 s0 = 0;
u32 s1 = 0;
u32 s2 = 0;
u32 s3 = 0;
u32 s4 = 0;
u32 s5 = 0;
u32 s6 = 0;
u32 s7 = 0;
u32 s8 = 0;
switch (offset / 4)
{
case 31:
buf1[3] = src_l1[3] | src_r0[0] << 24;
break;
case 30:
buf1[3] = src_l1[3] | src_r0[0] << 16;
break;
case 29:
buf1[3] = src_l1[3] | src_r0[0] << 8;
break;
case 28:
buf1[3] = src_r0[0];
break;
case 27:
buf1[3] = amd_bytealign_S (src_r0[1], src_r0[0], 1);
buf1[2] = src_l1[2] | src_r0[0] << 24;
break;
case 26:
buf1[3] = amd_bytealign_S (src_r0[1], src_r0[0], 2);
buf1[2] = src_l1[2] | src_r0[0] << 16;
break;
case 25:
buf1[3] = amd_bytealign_S (src_r0[1], src_r0[0], 3);
buf1[2] = src_l1[2] | src_r0[0] << 8;
break;
case 24:
buf1[3] = src_r0[1];
buf1[2] = src_r0[0];
break;
case 23:
buf1[3] = amd_bytealign_S (src_r0[2], src_r0[1], 1);
buf1[2] = amd_bytealign_S (src_r0[1], src_r0[0], 1);
buf1[1] = src_l1[1] | src_r0[0] << 24;
break;
case 22:
buf1[3] = amd_bytealign_S (src_r0[2], src_r0[1], 2);
buf1[2] = amd_bytealign_S (src_r0[1], src_r0[0], 2);
buf1[1] = src_l1[1] | src_r0[0] << 16;
break;
case 21:
buf1[3] = amd_bytealign_S (src_r0[2], src_r0[1], 3);
buf1[2] = amd_bytealign_S (src_r0[1], src_r0[0], 3);
buf1[1] = src_l1[1] | src_r0[0] << 8;
break;
case 20:
buf1[3] = src_r0[2];
buf1[2] = src_r0[1];
buf1[1] = src_r0[0];
break;
case 19:
buf1[3] = amd_bytealign_S (src_r0[3], src_r0[2], 1);
buf1[2] = amd_bytealign_S (src_r0[2], src_r0[1], 1);
buf1[1] = amd_bytealign_S (src_r0[1], src_r0[0], 1);
buf1[0] = src_l1[0] | src_r0[0] << 24;
break;
case 18:
buf1[3] = amd_bytealign_S (src_r0[3], src_r0[2], 2);
buf1[2] = amd_bytealign_S (src_r0[2], src_r0[1], 2);
buf1[1] = amd_bytealign_S (src_r0[1], src_r0[0], 2);
buf1[0] = src_l1[0] | src_r0[0] << 16;
break;
case 17:
buf1[3] = amd_bytealign_S (src_r0[3], src_r0[2], 3);
buf1[2] = amd_bytealign_S (src_r0[2], src_r0[1], 3);
buf1[1] = amd_bytealign_S (src_r0[1], src_r0[0], 3);
buf1[0] = src_l1[0] | src_r0[0] << 8;
break;
case 16:
buf1[3] = src_r0[3];
buf1[2] = src_r0[2];
buf1[1] = src_r0[1];
buf1[0] = src_r0[0];
break;
case 15:
buf1[3] = amd_bytealign_S (src_r1[0], src_r0[3], 1);
buf1[2] = amd_bytealign_S (src_r0[3], src_r0[2], 1);
buf1[1] = amd_bytealign_S (src_r0[2], src_r0[1], 1);
buf1[0] = amd_bytealign_S (src_r0[1], src_r0[0], 1);
buf0[3] = src_l0[3] | src_r0[0] << 24;
break;
case 14:
buf1[3] = amd_bytealign_S (src_r1[0], src_r0[3], 2);
buf1[2] = amd_bytealign_S (src_r0[3], src_r0[2], 2);
buf1[1] = amd_bytealign_S (src_r0[2], src_r0[1], 2);
buf1[0] = amd_bytealign_S (src_r0[1], src_r0[0], 2);
buf0[3] = src_l0[3] | src_r0[0] << 16;
case 0:
s8 = amd_bytealign ( 0, src_r1[3], offset_minus_4);
s7 = amd_bytealign (src_r1[3], src_r1[2], offset_minus_4);
s6 = amd_bytealign (src_r1[2], src_r1[1], offset_minus_4);
s5 = amd_bytealign (src_r1[1], src_r1[0], offset_minus_4);
s4 = amd_bytealign (src_r1[0], src_r0[3], offset_minus_4);
s3 = amd_bytealign (src_r0[3], src_r0[2], offset_minus_4);
s2 = amd_bytealign (src_r0[2], src_r0[1], offset_minus_4);
s1 = amd_bytealign (src_r0[1], src_r0[0], offset_minus_4);
s0 = amd_bytealign (src_r0[0], 0, offset_minus_4);
break;
case 13:
buf1[3] = amd_bytealign_S (src_r1[0], src_r0[3], 3);
buf1[2] = amd_bytealign_S (src_r0[3], src_r0[2], 3);
buf1[1] = amd_bytealign_S (src_r0[2], src_r0[1], 3);
buf1[0] = amd_bytealign_S (src_r0[1], src_r0[0], 3);
buf0[3] = src_l0[3] | src_r0[0] << 8;
case 1:
s8 = amd_bytealign ( 0, src_r1[2], offset_minus_4);
s7 = amd_bytealign (src_r1[2], src_r1[1], offset_minus_4);
s6 = amd_bytealign (src_r1[1], src_r1[0], offset_minus_4);
s5 = amd_bytealign (src_r1[0], src_r0[3], offset_minus_4);
s4 = amd_bytealign (src_r0[3], src_r0[2], offset_minus_4);
s3 = amd_bytealign (src_r0[2], src_r0[1], offset_minus_4);
s2 = amd_bytealign (src_r0[1], src_r0[0], offset_minus_4);
s1 = amd_bytealign (src_r0[0], 0, offset_minus_4);
s0 = 0;
break;
case 12:
buf1[3] = src_r1[0];
buf1[2] = src_r0[3];
buf1[1] = src_r0[2];
buf1[0] = src_r0[1];
buf0[3] = src_r0[0];
case 2:
s8 = amd_bytealign ( 0, src_r1[1], offset_minus_4);
s7 = amd_bytealign (src_r1[1], src_r1[0], offset_minus_4);
s6 = amd_bytealign (src_r1[0], src_r0[3], offset_minus_4);
s5 = amd_bytealign (src_r0[3], src_r0[2], offset_minus_4);
s4 = amd_bytealign (src_r0[2], src_r0[1], offset_minus_4);
s3 = amd_bytealign (src_r0[1], src_r0[0], offset_minus_4);
s2 = amd_bytealign (src_r0[0], 0, offset_minus_4);
s1 = 0;
s0 = 0;
break;
case 11:
buf1[3] = amd_bytealign_S (src_r1[1], src_r1[0], 1);
buf1[2] = amd_bytealign_S (src_r1[0], src_r0[3], 1);
buf1[1] = amd_bytealign_S (src_r0[3], src_r0[2], 1);
buf1[0] = amd_bytealign_S (src_r0[2], src_r0[1], 1);
buf0[3] = amd_bytealign_S (src_r0[1], src_r0[0], 1);
buf0[2] = src_l0[2] | src_r0[0] << 24;
case 3:
s8 = amd_bytealign ( 0, src_r1[0], offset_minus_4);
s7 = amd_bytealign (src_r1[0], src_r0[3], offset_minus_4);
s6 = amd_bytealign (src_r0[3], src_r0[2], offset_minus_4);
s5 = amd_bytealign (src_r0[2], src_r0[1], offset_minus_4);
s4 = amd_bytealign (src_r0[1], src_r0[0], offset_minus_4);
s3 = amd_bytealign (src_r0[0], 0, offset_minus_4);
s2 = 0;
s1 = 0;
s0 = 0;
break;
case 10:
buf1[3] = amd_bytealign_S (src_r1[1], src_r1[0], 2);
buf1[2] = amd_bytealign_S (src_r1[0], src_r0[3], 2);
buf1[1] = amd_bytealign_S (src_r0[3], src_r0[2], 2);
buf1[0] = amd_bytealign_S (src_r0[2], src_r0[1], 2);
buf0[3] = amd_bytealign_S (src_r0[1], src_r0[0], 2);
buf0[2] = src_l0[2] | src_r0[0] << 16;
case 4:
s8 = amd_bytealign ( 0, src_r0[3], offset_minus_4);
s7 = amd_bytealign (src_r0[3], src_r0[2], offset_minus_4);
s6 = amd_bytealign (src_r0[2], src_r0[1], offset_minus_4);
s5 = amd_bytealign (src_r0[1], src_r0[0], offset_minus_4);
s4 = amd_bytealign (src_r0[0], 0, offset_minus_4);
s3 = 0;
s2 = 0;
s1 = 0;
s0 = 0;
break;
case 9:
buf1[3] = amd_bytealign_S (src_r1[1], src_r1[0], 3);
buf1[2] = amd_bytealign_S (src_r1[0], src_r0[3], 3);
buf1[1] = amd_bytealign_S (src_r0[3], src_r0[2], 3);
buf1[0] = amd_bytealign_S (src_r0[2], src_r0[1], 3);
buf0[3] = amd_bytealign_S (src_r0[1], src_r0[0], 3);
buf0[2] = src_l0[2] | src_r0[0] << 8;
case 5:
s8 = amd_bytealign ( 0, src_r0[2], offset_minus_4);
s7 = amd_bytealign (src_r0[2], src_r0[1], offset_minus_4);
s6 = amd_bytealign (src_r0[1], src_r0[0], offset_minus_4);
s5 = amd_bytealign (src_r0[0], 0, offset_minus_4);
s4 = 0;
s3 = 0;
s2 = 0;
s1 = 0;
s0 = 0;
break;
case 8:
buf1[3] = src_r1[1];
buf1[2] = src_r1[0];
buf1[1] = src_r0[3];
buf1[0] = src_r0[2];
buf0[3] = src_r0[1];
buf0[2] = src_r0[0];
case 6:
s8 = amd_bytealign ( 0, src_r0[1], offset_minus_4);
s7 = amd_bytealign (src_r0[1], src_r0[0], offset_minus_4);
s6 = amd_bytealign (src_r0[0], 0, offset_minus_4);
s5 = 0;
s4 = 0;
s3 = 0;
s2 = 0;
s1 = 0;
s0 = 0;
break;
case 7:
buf1[3] = amd_bytealign_S (src_r1[2], src_r1[1], 1);
buf1[2] = amd_bytealign_S (src_r1[1], src_r1[0], 1);
buf1[1] = amd_bytealign_S (src_r1[0], src_r0[3], 1);
buf1[0] = amd_bytealign_S (src_r0[3], src_r0[2], 1);
buf0[3] = amd_bytealign_S (src_r0[2], src_r0[1], 1);
buf0[2] = amd_bytealign_S (src_r0[1], src_r0[0], 1);
buf0[1] = src_l0[1] | src_r0[0] << 24;
s8 = amd_bytealign ( 0, src_r0[0], offset_minus_4);
s7 = amd_bytealign (src_r0[0], 0, offset_minus_4);
s6 = 0;
s5 = 0;
s4 = 0;
s3 = 0;
s2 = 0;
s1 = 0;
s0 = 0;
break;
case 6:
buf1[3] = amd_bytealign_S (src_r1[2], src_r1[1], 2);
buf1[2] = amd_bytealign_S (src_r1[1], src_r1[0], 2);
buf1[1] = amd_bytealign_S (src_r1[0], src_r0[3], 2);
buf1[0] = amd_bytealign_S (src_r0[3], src_r0[2], 2);
buf0[3] = amd_bytealign_S (src_r0[2], src_r0[1], 2);
buf0[2] = amd_bytealign_S (src_r0[1], src_r0[0], 2);
buf0[1] = src_l0[1] | src_r0[0] << 16;
}
if (offset_mod_4 == 0)
{
buf0[0] = src_l0[0] | s1;
buf0[1] = src_l0[1] | s2;
buf0[2] = src_l0[2] | s3;
buf0[3] = src_l0[3] | s4;
buf1[0] = src_l1[0] | s5;
buf1[1] = src_l1[1] | s6;
buf1[2] = src_l1[2] | s7;
buf1[3] = src_l1[3] | s8;
}
else
{
buf0[0] = src_l0[0] | s0;
buf0[1] = src_l0[1] | s1;
buf0[2] = src_l0[2] | s2;
buf0[3] = src_l0[3] | s3;
buf1[0] = src_l1[0] | s4;
buf1[1] = src_l1[1] | s5;
buf1[2] = src_l1[2] | s6;
buf1[3] = src_l1[3] | s7;
}
#endif
#ifdef IS_NV
const int offset_minus_4 = 4 - (offset % 4);
const int selector = (0x76543210 >> (offset_minus_4 * 4)) & 0xffff;
u32 s0 = 0;
u32 s1 = 0;
u32 s2 = 0;
u32 s3 = 0;
u32 s4 = 0;
u32 s5 = 0;
u32 s6 = 0;
u32 s7 = 0;
switch (offset / 4)
{
case 0:
s7 = __byte_perm (src_r1[2], src_r1[3], selector);
s6 = __byte_perm (src_r1[1], src_r1[2], selector);
s5 = __byte_perm (src_r1[0], src_r1[1], selector);
s4 = __byte_perm (src_r0[3], src_r1[0], selector);
s3 = __byte_perm (src_r0[2], src_r0[3], selector);
s2 = __byte_perm (src_r0[1], src_r0[2], selector);
s1 = __byte_perm (src_r0[0], src_r0[1], selector);
s0 = __byte_perm ( 0, src_r0[0], selector);
break;
case 5:
buf1[3] = amd_bytealign_S (src_r1[2], src_r1[1], 3);
buf1[2] = amd_bytealign_S (src_r1[1], src_r1[0], 3);
buf1[1] = amd_bytealign_S (src_r1[0], src_r0[3], 3);
buf1[0] = amd_bytealign_S (src_r0[3], src_r0[2], 3);
buf0[3] = amd_bytealign_S (src_r0[2], src_r0[1], 3);
buf0[2] = amd_bytealign_S (src_r0[1], src_r0[0], 3);
buf0[1] = src_l0[1] | src_r0[0] << 8;
case 1:
s7 = __byte_perm (src_r1[1], src_r1[2], selector);
s6 = __byte_perm (src_r1[0], src_r1[1], selector);
s5 = __byte_perm (src_r0[3], src_r1[0], selector);
s4 = __byte_perm (src_r0[2], src_r0[3], selector);
s3 = __byte_perm (src_r0[1], src_r0[2], selector);
s2 = __byte_perm (src_r0[0], src_r0[1], selector);
s1 = __byte_perm ( 0, src_r0[0], selector);
s0 = 0;
break;
case 4:
buf1[3] = src_r1[2];
buf1[2] = src_r1[1];
buf1[1] = src_r1[0];
buf1[0] = src_r0[3];
buf0[3] = src_r0[2];
buf0[2] = src_r0[1];
buf0[1] = src_r0[0];
case 2:
s7 = __byte_perm (src_r1[0], src_r1[1], selector);
s6 = __byte_perm (src_r0[3], src_r1[0], selector);
s5 = __byte_perm (src_r0[2], src_r0[3], selector);
s4 = __byte_perm (src_r0[1], src_r0[2], selector);
s3 = __byte_perm (src_r0[0], src_r0[1], selector);
s2 = __byte_perm ( 0, src_r0[0], selector);
s1 = 0;
s0 = 0;
break;
case 3:
buf1[3] = amd_bytealign_S (src_r1[3], src_r1[2], 1);
buf1[2] = amd_bytealign_S (src_r1[2], src_r1[1], 1);
buf1[1] = amd_bytealign_S (src_r1[1], src_r1[0], 1);
buf1[0] = amd_bytealign_S (src_r1[0], src_r0[3], 1);
buf0[3] = amd_bytealign_S (src_r0[3], src_r0[2], 1);
buf0[2] = amd_bytealign_S (src_r0[2], src_r0[1], 1);
buf0[1] = amd_bytealign_S (src_r0[1], src_r0[0], 1);
buf0[0] = src_l0[0] | src_r0[0] << 24;
s7 = __byte_perm (src_r0[3], src_r1[0], selector);
s6 = __byte_perm (src_r0[2], src_r0[3], selector);
s5 = __byte_perm (src_r0[1], src_r0[2], selector);
s4 = __byte_perm (src_r0[0], src_r0[1], selector);
s3 = __byte_perm ( 0, src_r0[0], selector);
s2 = 0;
s1 = 0;
s0 = 0;
break;
case 2:
buf1[3] = amd_bytealign_S (src_r1[3], src_r1[2], 2);
buf1[2] = amd_bytealign_S (src_r1[2], src_r1[1], 2);
buf1[1] = amd_bytealign_S (src_r1[1], src_r1[0], 2);
buf1[0] = amd_bytealign_S (src_r1[0], src_r0[3], 2);
buf0[3] = amd_bytealign_S (src_r0[3], src_r0[2], 2);
buf0[2] = amd_bytealign_S (src_r0[2], src_r0[1], 2);
buf0[1] = amd_bytealign_S (src_r0[1], src_r0[0], 2);
buf0[0] = src_l0[0] | src_r0[0] << 16;
case 4:
s7 = __byte_perm (src_r0[2], src_r0[3], selector);
s6 = __byte_perm (src_r0[1], src_r0[2], selector);
s5 = __byte_perm (src_r0[0], src_r0[1], selector);
s4 = __byte_perm ( 0, src_r0[0], selector);
s3 = 0;
s2 = 0;
s1 = 0;
s0 = 0;
break;
case 1:
buf1[3] = amd_bytealign_S (src_r1[3], src_r1[2], 3);
buf1[2] = amd_bytealign_S (src_r1[2], src_r1[1], 3);
buf1[1] = amd_bytealign_S (src_r1[1], src_r1[0], 3);
buf1[0] = amd_bytealign_S (src_r1[0], src_r0[3], 3);
buf0[3] = amd_bytealign_S (src_r0[3], src_r0[2], 3);
buf0[2] = amd_bytealign_S (src_r0[2], src_r0[1], 3);
buf0[1] = amd_bytealign_S (src_r0[1], src_r0[0], 3);
buf0[0] = src_l0[0] | src_r0[0] << 8;
case 5:
s7 = __byte_perm (src_r0[1], src_r0[2], selector);
s6 = __byte_perm (src_r0[0], src_r0[1], selector);
s5 = __byte_perm ( 0, src_r0[0], selector);
s4 = 0;
s3 = 0;
s2 = 0;
s1 = 0;
s0 = 0;
break;
case 0:
buf1[3] = src_r1[3];
buf1[2] = src_r1[2];
buf1[1] = src_r1[1];
buf1[0] = src_r1[0];
buf0[3] = src_r0[3];
buf0[2] = src_r0[2];
buf0[1] = src_r0[1];
buf0[0] = src_r0[0];
case 6:
s7 = __byte_perm (src_r0[0], src_r0[1], selector);
s6 = __byte_perm ( 0, src_r0[0], selector);
s5 = 0;
s4 = 0;
s3 = 0;
s2 = 0;
s1 = 0;
s0 = 0;
break;
case 7:
s7 = __byte_perm ( 0, src_r0[0], selector);
s6 = 0;
s5 = 0;
s4 = 0;
s3 = 0;
s2 = 0;
s1 = 0;
s0 = 0;
break;
}
buf0[0] = src_l0[0] | s0;
buf0[1] = src_l0[1] | s1;
buf0[2] = src_l0[2] | s2;
buf0[3] = src_l0[3] | s3;
buf1[0] = src_l1[0] | s4;
buf1[1] = src_l1[1] | s5;
buf1[2] = src_l1[2] | s6;
buf1[3] = src_l1[3] | s7;
#endif
}
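For the IS_NV path above, __byte_perm builds each destination word by picking four bytes out of the eight supplied by its two source words, one selector nibble per output byte. A minimal host-side sketch of that selection (the helper name byte_perm_emu is illustrative, and the intrinsic's MSB-replication selector modes >= 8 are not handled here):

static u32 byte_perm_emu (const u32 a, const u32 b, const u32 s)
{
  // input bytes 0..3 come from a, bytes 4..7 from b, same indexing as __byte_perm
  const u64 pool = ((u64) b << 32) | (u64) a;

  u32 r = 0;

  for (int i = 0; i < 4; i++)
  {
    const u32 sel = (s >> (i * 4)) & 7; // selector nibble i

    r |= (u32) ((pool >> (sel * 8)) & 0xff) << (i * 8);
  }

  return r;
}

// with offset % 4 == 1 the kernel computes selector 0x6543, which slides the byte
// window down by one, the same result amd_bytealign (0x48474645, 0x44434241, 3)
// produces on the AMD side:
// byte_perm_emu (0x44434241, 0x48474645, 0x6543) == 0x47464544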
inline void reverse_block (u32 in0[4], u32 in1[4], u32 out0[4], u32 out1[4], const u32 len)

@@ -8,7 +8,7 @@
#include <ctype.h>
bool need_hexify (const u8 *buf, const int len);
bool need_hexify (const u8 *buf, const int len, bool always_ascii);
void exec_hexify (const u8 *buf, const int len, u8 *out);
bool is_valid_hex_char (const u8 c);

@@ -344,22 +344,23 @@ typedef enum opts_type
OPTS_TYPE_PT_GENERATE_BE = (1 << 9),
OPTS_TYPE_PT_NEVERCRACK = (1 << 10), // if we want all possible results
OPTS_TYPE_PT_BITSLICE = (1 << 11),
OPTS_TYPE_ST_UNICODE = (1 << 12),
OPTS_TYPE_ST_UPPER = (1 << 13),
OPTS_TYPE_ST_LOWER = (1 << 14),
OPTS_TYPE_ST_ADD01 = (1 << 15),
OPTS_TYPE_ST_ADD02 = (1 << 16),
OPTS_TYPE_ST_ADD80 = (1 << 17),
OPTS_TYPE_ST_ADDBITS14 = (1 << 18),
OPTS_TYPE_ST_ADDBITS15 = (1 << 19),
OPTS_TYPE_ST_GENERATE_LE = (1 << 20),
OPTS_TYPE_ST_GENERATE_BE = (1 << 21),
OPTS_TYPE_ST_HEX = (1 << 22),
OPTS_TYPE_ST_BASE64 = (1 << 23),
OPTS_TYPE_HASH_COPY = (1 << 24),
OPTS_TYPE_HOOK12 = (1 << 25),
OPTS_TYPE_HOOK23 = (1 << 26),
OPTS_TYPE_BINARY_HASHFILE = (1 << 27),
OPTS_TYPE_PT_ALWAYS_ASCII = (1 << 12),
OPTS_TYPE_ST_UNICODE = (1 << 13),
OPTS_TYPE_ST_UPPER = (1 << 14),
OPTS_TYPE_ST_LOWER = (1 << 15),
OPTS_TYPE_ST_ADD01 = (1 << 16),
OPTS_TYPE_ST_ADD02 = (1 << 17),
OPTS_TYPE_ST_ADD80 = (1 << 18),
OPTS_TYPE_ST_ADDBITS14 = (1 << 19),
OPTS_TYPE_ST_ADDBITS15 = (1 << 20),
OPTS_TYPE_ST_GENERATE_LE = (1 << 21),
OPTS_TYPE_ST_GENERATE_BE = (1 << 22),
OPTS_TYPE_ST_HEX = (1 << 23),
OPTS_TYPE_ST_BASE64 = (1 << 24),
OPTS_TYPE_HASH_COPY = (1 << 25),
OPTS_TYPE_HOOK12 = (1 << 26),
OPTS_TYPE_HOOK23 = (1 << 27),
OPTS_TYPE_BINARY_HASHFILE = (1 << 28),
} opts_type_t;
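Because the new OPTS_TYPE_PT_ALWAYS_ASCII flag is inserted at bit 12, every flag that used to sit at bits 12..27 moves up by one position. Hash modes opt in by ORing the flag into opts_type and consumers test it with a bitwise AND; a minimal fragment, paraphrasing what hashconfig_init and outfile_write do later in this patch:

// opt-in at hashconfig_init time, e.g. for LM whose plains are ASCII by definition
hashconfig->opts_type |= OPTS_TYPE_PT_ALWAYS_ASCII;

// later, when deciding whether a plain needs $HEX[...] framing
const bool always_ascii = (hashconfig->opts_type & OPTS_TYPE_PT_ALWAYS_ASCII) != 0;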
@@ -1178,7 +1179,7 @@ typedef struct outfile_ctx
FILE *fp;
u32 outfile_format;
u32 outfile_autohex;
bool outfile_autohex;
} outfile_ctx_t;

@@ -7,14 +7,97 @@
#include "types.h"
#include "convert.h"
bool need_hexify (const u8 *buf, const int len)
static bool printable_utf8 (const u8 *buf, const int len)
{
u8 a;
int length;
const u8 *buf_end = buf + len;
const u8 *srcptr;
const char trailingBytesUTF8[64] = {
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5
};
while (buf < buf_end) {
// This line rejects unprintables. The rest of the function
// reliably rejects invalid UTF-8 sequences.
if (*buf < 0x20 || *buf == 0x7f) return false;
if (*buf < 0x80) {
buf++;
continue;
}
length = trailingBytesUTF8[*buf & 0x3f] + 1;
srcptr = buf + length;
if (srcptr > buf_end) return false;
switch (length) {
default:
return false;
case 4:
if ((a = (*--srcptr)) < 0x80 || a > 0xbf) return false;
case 3:
if ((a = (*--srcptr)) < 0x80 || a > 0xbf) return false;
case 2:
if ((a = (*--srcptr)) < 0x80 || a > 0xbf) return false;
switch (*buf) {
case 0xE0: if (a < 0xa0) return false; break;
case 0xED: if (a > 0x9f) return false; break;
case 0xF0: if (a < 0x90) return false; break;
case 0xF4: if (a > 0x8f) return false;
}
case 1:
if (*buf >= 0x80 && *buf < 0xc2) return false;
}
if (*buf > 0xf4)
return false;
buf += length;
}
return true;
}
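A few example inputs, tracing the checks above (an illustrative self-test only; printable_utf8 is static, so this would have to live in the same translation unit, and an assert.h include is assumed):

static void printable_utf8_selftest (void)
{
  assert (printable_utf8 ((const u8 *) "caf\xc3\xa9", 5) == true);  // valid two-byte sequence (U+00E9)
  assert (printable_utf8 ((const u8 *) "\x80",        1) == false); // lone continuation byte
  assert (printable_utf8 ((const u8 *) "ab\x01",      3) == false); // control byte, needs $HEX[...] framing
}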
static bool printable_ascii (const u8 *buf, const int len)
{
for (int i = 0; i < len; i++)
{
const u8 c = buf[i];
if (c < 0x20) return true;
if (c > 0x7f) return true;
if (c < 0x20) return false;
if (c > 0x7e) return false;
}
return true;
}
bool need_hexify (const u8 *buf, const int len, bool always_ascii)
{
if (always_ascii == true)
{
if (printable_ascii (buf, len) == true)
{
return false;
}
else
{
return true;
}
}
else
{
if (printable_utf8 (buf, len) == true)
{
return false;
}
else
{
return true;
}
}
return false;
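Callers such as outfile_write and potfile_write_append combine the two entry points roughly as follows; the plain value, lengths and the hex buffer below are illustrative, and the $HEX[...] framing mirrors what those writers emit (assumes stdio.h plus convert.h):

static void hexify_demo (void)
{
  const u8 plain[] = { 'a', 'b', 0x01, 'c' }; // contains a control byte
        u8 hex[256] = { 0 };                  // assumed large enough for 2 * len digits

  if (need_hexify (plain, 4, false) == true)  // false: valid printable UTF-8 would be left alone
  {
    exec_hexify (plain, 4, hex);              // hex of the raw bytes

    printf ("$HEX[%s]\n", hex);               // e.g. $HEX[61620163]
  }
  else
  {
    printf ("%.*s\n", 4, (const char *) plain);
  }
}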

@@ -17430,7 +17430,8 @@ int hashconfig_init (hashcat_ctx_t *hashcat_ctx)
hashconfig->attack_exec = ATTACK_EXEC_INSIDE_KERNEL;
hashconfig->opts_type = OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_PT_UPPER
| OPTS_TYPE_PT_BITSLICE;
| OPTS_TYPE_PT_BITSLICE
| OPTS_TYPE_PT_ALWAYS_ASCII;
hashconfig->kern_type = KERN_TYPE_LM;
hashconfig->dgst_size = DGST_SIZE_4_4; // originally DGST_SIZE_4_2
hashconfig->parse_func = lm_parse_hash;

@@ -326,8 +326,9 @@ void outfile_write_close (hashcat_ctx_t *hashcat_ctx)
int outfile_write (hashcat_ctx_t *hashcat_ctx, const char *out_buf, const unsigned char *plain_ptr, const u32 plain_len, const u64 crackpos, const unsigned char *username, const u32 user_len, char tmp_buf[HCBUFSIZ_LARGE])
{
const hashconfig_t *hashconfig = hashcat_ctx->hashconfig;
const outfile_ctx_t *outfile_ctx = hashcat_ctx->outfile_ctx;
const hashconfig_t *hashconfig = hashcat_ctx->hashconfig;
const outfile_ctx_t *outfile_ctx = hashcat_ctx->outfile_ctx;
const user_options_t *user_options = hashcat_ctx->user_options;
int tmp_len = 0;
@@ -366,7 +367,9 @@ int outfile_write (hashcat_ctx_t *hashcat_ctx, const char *out_buf, const unsign
if (outfile_ctx->outfile_format & OUTFILE_FMT_PLAIN)
{
if (need_hexify (plain_ptr, plain_len) == true)
const bool always_ascii = (hashconfig->opts_type & OPTS_TYPE_PT_ALWAYS_ASCII);
if ((user_options->outfile_autohex == true) && (need_hexify (plain_ptr, plain_len, always_ascii) == true))
{
tmp_buf[tmp_len++] = '$';
tmp_buf[tmp_len++] = 'H';

@@ -267,7 +267,9 @@ void potfile_write_close (hashcat_ctx_t *hashcat_ctx)
void potfile_write_append (hashcat_ctx_t *hashcat_ctx, const char *out_buf, u8 *plain_ptr, unsigned int plain_len)
{
potfile_ctx_t *potfile_ctx = hashcat_ctx->potfile_ctx;
const hashconfig_t *hashconfig = hashcat_ctx->hashconfig;
const potfile_ctx_t *potfile_ctx = hashcat_ctx->potfile_ctx;
const user_options_t *user_options = hashcat_ctx->user_options;
if (potfile_ctx->enabled == false) return;
@@ -290,7 +292,9 @@ void potfile_write_append (hashcat_ctx_t *hashcat_ctx, const char *out_buf, u8 *
if (1)
{
if (need_hexify (plain_ptr, plain_len) == true)
const bool always_ascii = (hashconfig->opts_type & OPTS_TYPE_PT_ALWAYS_ASCII);
if ((user_options->outfile_autohex == true) && (need_hexify (plain_ptr, plain_len, always_ascii) == true))
{
tmp_buf[tmp_len++] = '$';
tmp_buf[tmp_len++] = 'H';

@@ -9,6 +9,13 @@
#include "rp.h"
#include "rp_kernel_on_cpu.h"
static u32 amd_bytealign (const u32 a, const u32 b, const u32 c)
{
const u64 tmp = ((((u64) (a)) << 32) | ((u64) (b))) >> ((c & 3) * 8);
return (u32) tmp;
}
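The emulation above treats its two arguments as one little-endian 8-byte window, b in the low half and a in the high half, and keeps the low 32 bits after shifting out c bytes; only the low two bits of c matter. A small self-check (illustrative only, assert.h assumed):

static void amd_bytealign_selftest (void)
{
  assert (amd_bytealign (0x44434241, 0x48474645, 0) == 0x48474645); // c == 0 returns b unchanged
  assert (amd_bytealign (0x44434241, 0x48474645, 1) == 0x41484746); // the lowest byte of a slides in on top
  assert (amd_bytealign (0x44434241, 0x48474645, 4) == 0x48474645); // only c & 3 is used
}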
static u32 swap32_S (const u32 value)
{
return byte_swap_32 (value);
@@ -768,248 +775,142 @@ static void append_block1 (const u32 offset, u32 buf0[4], u32 buf1[4], const u32
static void append_block8 (const u32 offset, u32 buf0[4], u32 buf1[4], const u32 src_l0[4], const u32 src_l1[4], const u32 src_r0[4], const u32 src_r1[4])
{
switch (offset)
const int offset_mod_4 = offset & 3;
const int offset_minus_4 = 4 - offset;
u32 s0 = 0;
u32 s1 = 0;
u32 s2 = 0;
u32 s3 = 0;
u32 s4 = 0;
u32 s5 = 0;
u32 s6 = 0;
u32 s7 = 0;
u32 s8 = 0;
switch (offset / 4)
{
case 0:
buf0[0] = src_r0[0];
buf0[1] = src_r0[1];
buf0[2] = src_r0[2];
buf0[3] = src_r0[3];
buf1[0] = src_r1[0];
buf1[1] = src_r1[1];
buf1[2] = src_r1[2];
buf1[3] = src_r1[3];
s8 = amd_bytealign ( 0, src_r1[3], offset_minus_4);
s7 = amd_bytealign (src_r1[3], src_r1[2], offset_minus_4);
s6 = amd_bytealign (src_r1[2], src_r1[1], offset_minus_4);
s5 = amd_bytealign (src_r1[1], src_r1[0], offset_minus_4);
s4 = amd_bytealign (src_r1[0], src_r0[3], offset_minus_4);
s3 = amd_bytealign (src_r0[3], src_r0[2], offset_minus_4);
s2 = amd_bytealign (src_r0[2], src_r0[1], offset_minus_4);
s1 = amd_bytealign (src_r0[1], src_r0[0], offset_minus_4);
s0 = amd_bytealign (src_r0[0], 0, offset_minus_4);
break;
case 1:
buf0[0] = src_l0[0] | src_r0[0] << 8;
buf0[1] = src_r0[0] >> 24 | src_r0[1] << 8;
buf0[2] = src_r0[1] >> 24 | src_r0[2] << 8;
buf0[3] = src_r0[2] >> 24 | src_r0[3] << 8;
buf1[0] = src_r0[3] >> 24 | src_r1[0] << 8;
buf1[1] = src_r1[0] >> 24 | src_r1[1] << 8;
buf1[2] = src_r1[1] >> 24 | src_r1[2] << 8;
buf1[3] = src_r1[2] >> 24 | src_r1[3] << 8;
s8 = amd_bytealign ( 0, src_r1[2], offset_minus_4);
s7 = amd_bytealign (src_r1[2], src_r1[1], offset_minus_4);
s6 = amd_bytealign (src_r1[1], src_r1[0], offset_minus_4);
s5 = amd_bytealign (src_r1[0], src_r0[3], offset_minus_4);
s4 = amd_bytealign (src_r0[3], src_r0[2], offset_minus_4);
s3 = amd_bytealign (src_r0[2], src_r0[1], offset_minus_4);
s2 = amd_bytealign (src_r0[1], src_r0[0], offset_minus_4);
s1 = amd_bytealign (src_r0[0], 0, offset_minus_4);
s0 = 0;
break;
case 2:
buf0[0] = src_l0[0] | src_r0[0] << 16;
buf0[1] = src_r0[0] >> 16 | src_r0[1] << 16;
buf0[2] = src_r0[1] >> 16 | src_r0[2] << 16;
buf0[3] = src_r0[2] >> 16 | src_r0[3] << 16;
buf1[0] = src_r0[3] >> 16 | src_r1[0] << 16;
buf1[1] = src_r1[0] >> 16 | src_r1[1] << 16;
buf1[2] = src_r1[1] >> 16 | src_r1[2] << 16;
buf1[3] = src_r1[2] >> 16 | src_r1[3] << 16;
s8 = amd_bytealign ( 0, src_r1[1], offset_minus_4);
s7 = amd_bytealign (src_r1[1], src_r1[0], offset_minus_4);
s6 = amd_bytealign (src_r1[0], src_r0[3], offset_minus_4);
s5 = amd_bytealign (src_r0[3], src_r0[2], offset_minus_4);
s4 = amd_bytealign (src_r0[2], src_r0[1], offset_minus_4);
s3 = amd_bytealign (src_r0[1], src_r0[0], offset_minus_4);
s2 = amd_bytealign (src_r0[0], 0, offset_minus_4);
s1 = 0;
s0 = 0;
break;
case 3:
buf0[0] = src_l0[0] | src_r0[0] << 24;
buf0[1] = src_r0[0] >> 8 | src_r0[1] << 24;
buf0[2] = src_r0[1] >> 8 | src_r0[2] << 24;
buf0[3] = src_r0[2] >> 8 | src_r0[3] << 24;
buf1[0] = src_r0[3] >> 8 | src_r1[0] << 24;
buf1[1] = src_r1[0] >> 8 | src_r1[1] << 24;
buf1[2] = src_r1[1] >> 8 | src_r1[2] << 24;
buf1[3] = src_r1[2] >> 8 | src_r1[3] << 24;
s8 = amd_bytealign ( 0, src_r1[0], offset_minus_4);
s7 = amd_bytealign (src_r1[0], src_r0[3], offset_minus_4);
s6 = amd_bytealign (src_r0[3], src_r0[2], offset_minus_4);
s5 = amd_bytealign (src_r0[2], src_r0[1], offset_minus_4);
s4 = amd_bytealign (src_r0[1], src_r0[0], offset_minus_4);
s3 = amd_bytealign (src_r0[0], 0, offset_minus_4);
s2 = 0;
s1 = 0;
s0 = 0;
break;
case 4:
buf0[1] = src_r0[0];
buf0[2] = src_r0[1];
buf0[3] = src_r0[2];
buf1[0] = src_r0[3];
buf1[1] = src_r1[0];
buf1[2] = src_r1[1];
buf1[3] = src_r1[2];
s8 = amd_bytealign ( 0, src_r0[3], offset_minus_4);
s7 = amd_bytealign (src_r0[3], src_r0[2], offset_minus_4);
s6 = amd_bytealign (src_r0[2], src_r0[1], offset_minus_4);
s5 = amd_bytealign (src_r0[1], src_r0[0], offset_minus_4);
s4 = amd_bytealign (src_r0[0], 0, offset_minus_4);
s3 = 0;
s2 = 0;
s1 = 0;
s0 = 0;
break;
case 5:
buf0[1] = src_l0[1] | src_r0[0] << 8;
buf0[2] = src_r0[0] >> 24 | src_r0[1] << 8;
buf0[3] = src_r0[1] >> 24 | src_r0[2] << 8;
buf1[0] = src_r0[2] >> 24 | src_r0[3] << 8;
buf1[1] = src_r0[3] >> 24 | src_r1[0] << 8;
buf1[2] = src_r1[0] >> 24 | src_r1[1] << 8;
buf1[3] = src_r1[1] >> 24 | src_r1[2] << 8;
s8 = amd_bytealign ( 0, src_r0[2], offset_minus_4);
s7 = amd_bytealign (src_r0[2], src_r0[1], offset_minus_4);
s6 = amd_bytealign (src_r0[1], src_r0[0], offset_minus_4);
s5 = amd_bytealign (src_r0[0], 0, offset_minus_4);
s4 = 0;
s3 = 0;
s2 = 0;
s1 = 0;
s0 = 0;
break;
case 6:
buf0[1] = src_l0[1] | src_r0[0] << 16;
buf0[2] = src_r0[0] >> 16 | src_r0[1] << 16;
buf0[3] = src_r0[1] >> 16 | src_r0[2] << 16;
buf1[0] = src_r0[2] >> 16 | src_r0[3] << 16;
buf1[1] = src_r0[3] >> 16 | src_r1[0] << 16;
buf1[2] = src_r1[0] >> 16 | src_r1[1] << 16;
buf1[3] = src_r1[1] >> 16 | src_r1[2] << 16;
s8 = amd_bytealign ( 0, src_r0[1], offset_minus_4);
s7 = amd_bytealign (src_r0[1], src_r0[0], offset_minus_4);
s6 = amd_bytealign (src_r0[0], 0, offset_minus_4);
s5 = 0;
s4 = 0;
s3 = 0;
s2 = 0;
s1 = 0;
s0 = 0;
break;
case 7:
buf0[1] = src_l0[1] | src_r0[0] << 24;
buf0[2] = src_r0[0] >> 8 | src_r0[1] << 24;
buf0[3] = src_r0[1] >> 8 | src_r0[2] << 24;
buf1[0] = src_r0[2] >> 8 | src_r0[3] << 24;
buf1[1] = src_r0[3] >> 8 | src_r1[0] << 24;
buf1[2] = src_r1[0] >> 8 | src_r1[1] << 24;
buf1[3] = src_r1[1] >> 8 | src_r1[2] << 24;
break;
case 8:
buf0[2] = src_r0[0];
buf0[3] = src_r0[1];
buf1[0] = src_r0[2];
buf1[1] = src_r0[3];
buf1[2] = src_r1[0];
buf1[3] = src_r1[1];
break;
case 9:
buf0[2] = src_l0[2] | src_r0[0] << 8;
buf0[3] = src_r0[0] >> 24 | src_r0[1] << 8;
buf1[0] = src_r0[1] >> 24 | src_r0[2] << 8;
buf1[1] = src_r0[2] >> 24 | src_r0[3] << 8;
buf1[2] = src_r0[3] >> 24 | src_r1[0] << 8;
buf1[3] = src_r1[0] >> 24 | src_r1[1] << 8;
break;
case 10:
buf0[2] = src_l0[2] | src_r0[0] << 16;
buf0[3] = src_r0[0] >> 16 | src_r0[1] << 16;
buf1[0] = src_r0[1] >> 16 | src_r0[2] << 16;
buf1[1] = src_r0[2] >> 16 | src_r0[3] << 16;
buf1[2] = src_r0[3] >> 16 | src_r1[0] << 16;
buf1[3] = src_r1[0] >> 16 | src_r1[1] << 16;
break;
case 11:
buf0[2] = src_l0[2] | src_r0[0] << 24;
buf0[3] = src_r0[0] >> 8 | src_r0[1] << 24;
buf1[0] = src_r0[1] >> 8 | src_r0[2] << 24;
buf1[1] = src_r0[2] >> 8 | src_r0[3] << 24;
buf1[2] = src_r0[3] >> 8 | src_r1[0] << 24;
buf1[3] = src_r1[0] >> 8 | src_r1[1] << 24;
break;
case 12:
buf0[3] = src_r0[0];
buf1[0] = src_r0[1];
buf1[1] = src_r0[2];
buf1[2] = src_r0[3];
buf1[3] = src_r1[0];
break;
case 13:
buf0[3] = src_l0[3] | src_r0[0] << 8;
buf1[0] = src_r0[0] >> 24 | src_r0[1] << 8;
buf1[1] = src_r0[1] >> 24 | src_r0[2] << 8;
buf1[2] = src_r0[2] >> 24 | src_r0[3] << 8;
buf1[3] = src_r0[3] >> 24 | src_r1[0] << 8;
break;
case 14:
buf0[3] = src_l0[3] | src_r0[0] << 16;
buf1[0] = src_r0[0] >> 16 | src_r0[1] << 16;
buf1[1] = src_r0[1] >> 16 | src_r0[2] << 16;
buf1[2] = src_r0[2] >> 16 | src_r0[3] << 16;
buf1[3] = src_r0[3] >> 16 | src_r1[0] << 16;
break;
case 15:
buf0[3] = src_l0[3] | src_r0[0] << 24;
buf1[0] = src_r0[0] >> 8 | src_r0[1] << 24;
buf1[1] = src_r0[1] >> 8 | src_r0[2] << 24;
buf1[2] = src_r0[2] >> 8 | src_r0[3] << 24;
buf1[3] = src_r0[3] >> 8 | src_r1[0] << 24;
break;
case 16:
buf1[0] = src_r0[0];
buf1[1] = src_r0[1];
buf1[2] = src_r0[2];
buf1[3] = src_r0[3];
break;
case 17:
buf1[0] = src_l1[0] | src_r0[0] << 8;
buf1[1] = src_r0[0] >> 24 | src_r0[1] << 8;
buf1[2] = src_r0[1] >> 24 | src_r0[2] << 8;
buf1[3] = src_r0[2] >> 24 | src_r0[3] << 8;
break;
case 18:
buf1[0] = src_l1[0] | src_r0[0] << 16;
buf1[1] = src_r0[0] >> 16 | src_r0[1] << 16;
buf1[2] = src_r0[1] >> 16 | src_r0[2] << 16;
buf1[3] = src_r0[2] >> 16 | src_r0[3] << 16;
break;
case 19:
buf1[0] = src_l1[0] | src_r0[0] << 24;
buf1[1] = src_r0[0] >> 8 | src_r0[1] << 24;
buf1[2] = src_r0[1] >> 8 | src_r0[2] << 24;
buf1[3] = src_r0[2] >> 8 | src_r0[3] << 24;
break;
case 20:
buf1[1] = src_r0[0];
buf1[2] = src_r0[1];
buf1[3] = src_r0[2];
break;
case 21:
buf1[1] = src_l1[1] | src_r0[0] << 8;
buf1[2] = src_r0[0] >> 24 | src_r0[1] << 8;
buf1[3] = src_r0[1] >> 24 | src_r0[2] << 8;
break;
case 22:
buf1[1] = src_l1[1] | src_r0[0] << 16;
buf1[2] = src_r0[0] >> 16 | src_r0[1] << 16;
buf1[3] = src_r0[1] >> 16 | src_r0[2] << 16;
break;
case 23:
buf1[1] = src_l1[1] | src_r0[0] << 24;
buf1[2] = src_r0[0] >> 8 | src_r0[1] << 24;
buf1[3] = src_r0[1] >> 8 | src_r0[2] << 24;
break;
case 24:
buf1[2] = src_r0[0];
buf1[3] = src_r0[1];
break;
case 25:
buf1[2] = src_l1[2] | src_r0[0] << 8;
buf1[3] = src_r0[0] >> 24 | src_r0[1] << 8;
break;
case 26:
buf1[2] = src_l1[2] | src_r0[0] << 16;
buf1[3] = src_r0[0] >> 16 | src_r0[1] << 16;
break;
case 27:
buf1[2] = src_l1[2] | src_r0[0] << 24;
buf1[3] = src_r0[0] >> 8 | src_r0[1] << 24;
break;
case 28:
buf1[3] = src_r0[0];
break;
case 29:
buf1[3] = src_l1[3] | src_r0[0] << 8;
break;
case 30:
buf1[3] = src_l1[3] | src_r0[0] << 16;
s8 = amd_bytealign ( 0, src_r0[0], offset_minus_4);
s7 = amd_bytealign (src_r0[0], 0, offset_minus_4);
s6 = 0;
s5 = 0;
s4 = 0;
s3 = 0;
s2 = 0;
s1 = 0;
s0 = 0;
break;
}
case 31:
buf1[3] = src_l1[3] | src_r0[0] << 24;
break;
if (offset_mod_4 == 0)
{
buf0[0] = src_l0[0] | s1;
buf0[1] = src_l0[1] | s2;
buf0[2] = src_l0[2] | s3;
buf0[3] = src_l0[3] | s4;
buf1[0] = src_l1[0] | s5;
buf1[1] = src_l1[1] | s6;
buf1[2] = src_l1[2] | s7;
buf1[3] = src_l1[3] | s8;
}
else
{
buf0[0] = src_l0[0] | s0;
buf0[1] = src_l0[1] | s1;
buf0[2] = src_l0[2] | s2;
buf0[3] = src_l0[3] | s3;
buf1[0] = src_l1[0] | s4;
buf1[1] = src_l1[1] | s5;
buf1[2] = src_l1[2] | s6;
buf1[3] = src_l1[3] | s7;
}
}
static void reverse_block (u32 in0[4], u32 in1[4], u32 out0[4], u32 out1[4], const u32 len)

@@ -538,6 +538,7 @@ int status_get_input_mask_length (const hashcat_ctx_t *hashcat_ctx)
char *status_get_input_candidates_dev (const hashcat_ctx_t *hashcat_ctx, const int device_id)
{
const hashconfig_t *hashconfig = hashcat_ctx->hashconfig;
const opencl_ctx_t *opencl_ctx = hashcat_ctx->opencl_ctx;
const status_ctx_t *status_ctx = hashcat_ctx->status_ctx;
const user_options_extra_t *user_options_extra = hashcat_ctx->user_options_extra;
@@ -582,8 +583,10 @@ char *status_get_input_candidates_dev (const hashcat_ctx_t *hashcat_ctx, const i
build_plain ((hashcat_ctx_t *) hashcat_ctx, device_param, &plain1, plain_buf1, &plain_len1);
build_plain ((hashcat_ctx_t *) hashcat_ctx, device_param, &plain2, plain_buf2, &plain_len2);
const bool need_hex1 = need_hexify (plain_ptr1, plain_len1);
const bool need_hex2 = need_hexify (plain_ptr2, plain_len2);
const bool always_ascii = (hashconfig->opts_type & OPTS_TYPE_PT_ALWAYS_ASCII);
const bool need_hex1 = need_hexify (plain_ptr1, plain_len1, always_ascii);
const bool need_hex2 = need_hexify (plain_ptr2, plain_len2, always_ascii);
if ((need_hex1 == true) || (need_hex2 == true))
{

@@ -285,7 +285,7 @@ int user_options_getopt (hashcat_ctx_t *hashcat_ctx, int argc, char **argv)
case IDX_OUTFILE: user_options->outfile = optarg; break;
case IDX_OUTFILE_FORMAT: user_options->outfile_format = atoi (optarg);
user_options->outfile_format_chgd = true; break;
case IDX_OUTFILE_AUTOHEX_DISABLE: user_options->outfile_autohex = 0; break;
case IDX_OUTFILE_AUTOHEX_DISABLE: user_options->outfile_autohex = false; break;
case IDX_OUTFILE_CHECK_TIMER: user_options->outfile_check_timer = atoi (optarg); break;
case IDX_HEX_CHARSET: user_options->hex_charset = true; break;
case IDX_HEX_SALT: user_options->hex_salt = true; break;
