mirror of https://github.com/hashcat/hashcat.git synced 2024-12-26 08:28:20 +00:00

Add tokenizer option TOKEN_ATTR_SEPARATOR_FARTHEST to deal with hash formats where the separator character can be part of the salt data itself and, at the same time, no escape-character logic can be applied. This can only work if the design of the hash format guarantees that the separator character never occurs in the data following the salt.
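The effect is easiest to see with libc's own first-match/last-match pair. A minimal sketch with a made-up input line (hashcat itself uses the length-bounded helpers hc_strchr_next/hc_strchr_last added below, not strchr/strrchr):

```c
#include <stdio.h>
#include <string.h>

int main (void)
{
  // Hypothetical line: the salt "ab#cd" contains the '#' separator,
  // while the hex data after the last '#' by design never can.
  const char *line = "ab#cd#ff00";

  const char *first = strchr  (line, '#'); // first occurrence: default tokenizer behavior
  const char *last  = strrchr (line, '#'); // farthest occurrence: TOKEN_ATTR_SEPARATOR_FARTHEST

  printf ("salt, split at first '#': %.*s\n", (int) (first - line), line); // "ab"
  printf ("salt, split at last  '#': %.*s\n", (int) (last  - line), line); // "ab#cd"

  return 0;
}
```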

Jens Steube 2020-02-19 10:35:44 +01:00
parent c4daa0c260
commit 9957422191
4 changed files with 46 additions and 12 deletions

View File

@@ -79,6 +79,9 @@ u64 round_up_multiple_64 (const u64 v, const u64 m);
 void hc_strncat (u8 *dst, const u8 *src, const size_t n);
+const u8 *hc_strchr_next (const u8 *input_buf, const int input_len, const u8 separator);
+const u8 *hc_strchr_last (const u8 *input_buf, const int input_len, const u8 separator);
 int count_char (const u8 *buf, const int len, const u8 c);
 float get_entropy (const u8 *buf, const int len);

View File

@@ -771,15 +771,16 @@ typedef enum user_options_map
 typedef enum token_attr
 {
   TOKEN_ATTR_FIXED_LENGTH       = 1 <<  0,
-  TOKEN_ATTR_OPTIONAL_ROUNDS    = 1 <<  1,
-  TOKEN_ATTR_VERIFY_SIGNATURE   = 1 <<  2,
-  TOKEN_ATTR_VERIFY_LENGTH      = 1 <<  3,
-  TOKEN_ATTR_VERIFY_DIGIT       = 1 <<  4,
-  TOKEN_ATTR_VERIFY_FLOAT       = 1 <<  5,
-  TOKEN_ATTR_VERIFY_HEX         = 1 <<  6,
-  TOKEN_ATTR_VERIFY_BASE64A     = 1 <<  7,
-  TOKEN_ATTR_VERIFY_BASE64B     = 1 <<  8,
-  TOKEN_ATTR_VERIFY_BASE64C     = 1 <<  9
+  TOKEN_ATTR_SEPARATOR_FARTHEST = 1 <<  1,
+  TOKEN_ATTR_OPTIONAL_ROUNDS    = 1 <<  2,
+  TOKEN_ATTR_VERIFY_SIGNATURE   = 1 <<  3,
+  TOKEN_ATTR_VERIFY_LENGTH      = 1 <<  4,
+  TOKEN_ATTR_VERIFY_DIGIT       = 1 <<  5,
+  TOKEN_ATTR_VERIFY_FLOAT       = 1 <<  6,
+  TOKEN_ATTR_VERIFY_HEX         = 1 <<  7,
+  TOKEN_ATTR_VERIFY_BASE64A     = 1 <<  8,
+  TOKEN_ATTR_VERIFY_BASE64B     = 1 <<  9,
+  TOKEN_ATTR_VERIFY_BASE64C     = 1 << 10,
 } token_attr_t;

View File

@@ -95,7 +95,8 @@ int module_hash_decode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSE
   token.len_min[2] = SALT_MIN;
   token.len_max[2] = SALT_MAX;
   token.sep[2] = '#';
-  token.attr[2] = TOKEN_ATTR_VERIFY_LENGTH;
+  token.attr[2] = TOKEN_ATTR_VERIFY_LENGTH
+                | TOKEN_ATTR_SEPARATOR_FARTHEST;
   token.len_min[3] = 32;
   token.len_max[3] = 32;
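To make the guarantee concrete for a layout like this one (a '#'-terminated salt token followed by a fixed 32-character token): take a hypothetical line whose tail is my#odd#salt#5f4dcc3b5aa765d61d8327deb882cf99. First-match splitting ends the salt at "my" and hands "odd#salt#5f4d..." to token 3, whose fixed-length check of 32 then fails; farthest-match splitting recovers the full salt "my#odd#salt" and leaves exactly 32 characters for token 3. The scheme is sound only because token 3 cannot contain '#' by construction.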

View File

@@ -1028,6 +1028,26 @@ static int rounds_count_length (const char *input_buf, const int input_len)
   return -1;
 }
 
+const u8 *hc_strchr_next (const u8 *input_buf, const int input_len, const u8 separator)
+{
+  for (int i = 0; i < input_len; i++)
+  {
+    if (input_buf[i] == separator) return &input_buf[i];
+  }
+
+  return NULL;
+}
+
+const u8 *hc_strchr_last (const u8 *input_buf, const int input_len, const u8 separator)
+{
+  for (int i = input_len - 1; i >= 0; i--)
+  {
+    if (input_buf[i] == separator) return &input_buf[i];
+  }
+
+  return NULL;
+}
+
 int input_tokenizer (const u8 *input_buf, const int input_len, token_t *token)
 {
   int len_left = input_len;
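A quick side-by-side check of the two helpers; a minimal sketch that assumes it is compiled together with the definitions above (the u8 typedef here stands in for hashcat's own):

```c
#include <stdio.h>

typedef unsigned char u8; // stand-in for hashcat's u8 typedef

const u8 *hc_strchr_next (const u8 *input_buf, const int input_len, const u8 separator);
const u8 *hc_strchr_last (const u8 *input_buf, const int input_len, const u8 separator);

int main (void)
{
  const u8 buf[] = "a#b#c";

  // Both separators are present, so neither call returns NULL here.
  const u8 *first = hc_strchr_next (buf, 5, '#');
  const u8 *last  = hc_strchr_last (buf, 5, '#');

  printf ("first '#' at offset %d\n", (int) (first - buf)); // 1
  printf ("last  '#' at offset %d\n", (int) (last  - buf)); // 3

  return 0;
}
```

Unlike strchr, both helpers take an explicit length, so they neither rely on NUL termination nor scan past the bytes that still belong to the current token (len_left in the tokenizer below).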
@@ -1066,7 +1086,16 @@ int input_tokenizer (const u8 *input_buf, const int input_len, token_t *token)
     }
   }
 
-  const u8 *next_pos = (const u8 *) strchr ((const char *) token->buf[token_idx], token->sep[token_idx]);
+  const u8 *next_pos = NULL;
+
+  if (token->attr[token_idx] & TOKEN_ATTR_SEPARATOR_FARTHEST)
+  {
+    next_pos = hc_strchr_last (token->buf[token_idx], len_left, token->sep[token_idx]);
+  }
+  else
+  {
+    next_pos = hc_strchr_next (token->buf[token_idx], len_left, token->sep[token_idx]);
+  }
 
   if (next_pos == NULL) return (PARSER_SEPARATOR_UNMATCHED);
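Condensed, the new dispatch amounts to choosing a search strategy per token from its attribute bits; a minimal sketch of that idea (find_separator is a hypothetical name, not hashcat code, and it assumes the types and helpers from this commit):

```c
// Hypothetical helper: the attribute bit selects between first-match
// and farthest-match separator search for a given token.
static const u8 *find_separator (const u8 *buf, const int len, const u8 sep, const int attr)
{
  if (attr & TOKEN_ATTR_SEPARATOR_FARTHEST)
  {
    return hc_strchr_last (buf, len, sep); // greedy: the data itself may contain sep
  }

  return hc_strchr_next (buf, len, sep);   // default: first occurrence wins
}
```

Either way, a NULL result still maps to PARSER_SEPARATOR_UNMATCHED, so formats that opt in fail just as cleanly when the separator is missing altogether.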