Merge pull request #3463 from Banaanhangwagen/master

Add new algorithm: BISQ .wallet (scrypt) (-m 29800)
pull/3471/head
Jens Steube 2 years ago committed by GitHub
commit 1e5e7735dc
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -0,0 +1,667 @@
/**
* Author......: See docs/credits.txt
* License.....: MIT
*/
#ifdef KERNEL_STATIC
#include M2S(INCLUDE_PATH/inc_vendor.h)
#include M2S(INCLUDE_PATH/inc_types.h)
#include M2S(INCLUDE_PATH/inc_platform.cl)
#include M2S(INCLUDE_PATH/inc_common.cl)
#include M2S(INCLUDE_PATH/inc_hash_sha256.cl)
#include M2S(INCLUDE_PATH/inc_cipher_aes.cl)
#endif
// Per-candidate scratch state shared by the init/loop/comp kernels: the
// scrypt working buffer B, stored as uint4 vectors.
// SCRYPT_TMP_ELEM is normally injected with -D by module_jit_build_options
// (tmp_size / 16); the fallback of 1 only exists so the file parses when
// compiled without the JIT options.
typedef struct
{
#ifndef SCRYPT_TMP_ELEM
#define SCRYPT_TMP_ELEM 1
#endif
uint4 P[SCRYPT_TMP_ELEM];
} scrypt_tmp_t;
// CUDA/HIP do not provide OpenCL's component-wise uint4 operators or
// rotate(); define the minimal set used by SALSA20_2R / hc_swap32_4 below.
#if defined IS_CUDA || defined IS_HIP
inline __device__ uint4 operator & (const uint4 a, const u32 b) { return make_uint4 ((a.x & b ), (a.y & b ), (a.z & b ), (a.w & b )); }
inline __device__ uint4 operator << (const uint4 a, const u32 b) { return make_uint4 ((a.x << b ), (a.y << b ), (a.z << b ), (a.w << b )); }
inline __device__ uint4 operator >> (const uint4 a, const u32 b) { return make_uint4 ((a.x >> b ), (a.y >> b ), (a.z >> b ), (a.w >> b )); }
inline __device__ uint4 operator + (const uint4 a, const uint4 b) { return make_uint4 ((a.x + b.x), (a.y + b.y), (a.z + b.z), (a.w + b.w)); }
inline __device__ uint4 operator ^ (const uint4 a, const uint4 b) { return make_uint4 ((a.x ^ b.x), (a.y ^ b.y), (a.z ^ b.z), (a.w ^ b.w)); }
inline __device__ uint4 operator | (const uint4 a, const uint4 b) { return make_uint4 ((a.x | b.x), (a.y | b.y), (a.z | b.z), (a.w | b.w)); }
inline __device__ void operator ^= ( uint4 &a, const uint4 b) { a.x ^= b.x; a.y ^= b.y; a.z ^= b.z; a.w ^= b.w; }
// Component-wise rotate-left by n bits (OpenCL rotate() equivalent).
inline __device__ uint4 rotate (const uint4 a, const int n)
{
return ((a << n) | ((a >> (32 - n))));
}
#endif
// Byte-swap each 32-bit lane of v (endianness conversion) using the
// classic two-mask/two-rotate trick applied component-wise.
DECLSPEC uint4 hc_swap32_4 (uint4 v)
{
return (rotate ((v & 0x00FF00FF), 24u) | rotate ((v & 0xFF00FF00), 8u));
}
// Element counts in u32 units: a scrypt block is 128*r bytes = 2*r*16 u32.
#define GET_SCRYPT_CNT(r,p) (2 * (r) * 16 * (p))
#define GET_SMIX_CNT(r,N) (2 * (r) * 16 * (N))
#define GET_STATE_CNT(r) (2 * (r) * 16)
// SCRYPT_R / SCRYPT_P are injected via -D by module_jit_build_options.
#define SCRYPT_CNT GET_SCRYPT_CNT (SCRYPT_R, SCRYPT_P)
#define SCRYPT_CNT4 (SCRYPT_CNT / 4)
#define STATE_CNT GET_STATE_CNT (SCRYPT_R)
#define STATE_CNT4 (STATE_CNT / 4)
// One Salsa20 quarter-round step: r ^= rotl(i1 + i2, s).
#define ADD_ROTATE_XOR(r,i1,i2,s) (r) ^= rotate ((i1) + (i2), (s));
// Two Salsa20 rounds (one column round + one row round) on a state held as
// four uint4 registers X0..X3, with the lane rotation between rounds done
// via component shuffles. Three syntactic variants of the same computation:
// CUDA/HIP (make_uint4), Metal (.wxyz swizzles), OpenCL (.s3012 swizzles).
#if defined IS_CUDA || defined IS_HIP
#define SALSA20_2R() \
{ \
ADD_ROTATE_XOR (X1, X0, X3, 7); \
ADD_ROTATE_XOR (X2, X1, X0, 9); \
ADD_ROTATE_XOR (X3, X2, X1, 13); \
ADD_ROTATE_XOR (X0, X3, X2, 18); \
\
X1 = make_uint4 (X1.w, X1.x, X1.y, X1.z); \
X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \
X3 = make_uint4 (X3.y, X3.z, X3.w, X3.x); \
\
ADD_ROTATE_XOR (X3, X0, X1, 7); \
ADD_ROTATE_XOR (X2, X3, X0, 9); \
ADD_ROTATE_XOR (X1, X2, X3, 13); \
ADD_ROTATE_XOR (X0, X1, X2, 18); \
\
X1 = make_uint4 (X1.y, X1.z, X1.w, X1.x); \
X2 = make_uint4 (X2.z, X2.w, X2.x, X2.y); \
X3 = make_uint4 (X3.w, X3.x, X3.y, X3.z); \
}
#elif defined IS_METAL
#define SALSA20_2R() \
{ \
ADD_ROTATE_XOR (X1, X0, X3, 7); \
ADD_ROTATE_XOR (X2, X1, X0, 9); \
ADD_ROTATE_XOR (X3, X2, X1, 13); \
ADD_ROTATE_XOR (X0, X3, X2, 18); \
\
X1 = X1.wxyz; \
X2 = X2.zwxy; \
X3 = X3.yzwx; \
\
ADD_ROTATE_XOR (X3, X0, X1, 7); \
ADD_ROTATE_XOR (X2, X3, X0, 9); \
ADD_ROTATE_XOR (X1, X2, X3, 13); \
ADD_ROTATE_XOR (X0, X1, X2, 18); \
\
X1 = X1.yzwx; \
X2 = X2.zwxy; \
X3 = X3.wxyz; \
}
#else
#define SALSA20_2R() \
{ \
ADD_ROTATE_XOR (X1, X0, X3, 7); \
ADD_ROTATE_XOR (X2, X1, X0, 9); \
ADD_ROTATE_XOR (X3, X2, X1, 13); \
ADD_ROTATE_XOR (X0, X3, X2, 18); \
\
X1 = X1.s3012; \
X2 = X2.s2301; \
X3 = X3.s1230; \
\
ADD_ROTATE_XOR (X3, X0, X1, 7); \
ADD_ROTATE_XOR (X2, X3, X0, 9); \
ADD_ROTATE_XOR (X1, X2, X3, 13); \
ADD_ROTATE_XOR (X0, X1, X2, 18); \
\
X1 = X1.s1230; \
X2 = X2.s2301; \
X3 = X3.s3012; \
}
#endif
// Linear index into a V buffer: thread-group xd4 selects a slab of ySIZE
// rows, each row holding zSIZE uint4. CO uses the enclosing scope's
// xd4/y/z/ySIZE/zSIZE variables by name.
#define Coord(xd4,y,z) (((xd4) * ySIZE * zSIZE) + ((y) * zSIZE) + (z))
#define CO Coord(xd4,y,z)
// scrypt BlockMix over TI (2*r 64-byte sub-blocks, 4 uint4 each) with
// Salsa20/8 as the hash: running state R = previous output (seeded with the
// last sub-block), R ^= B_i, R = Salsa20/8(R), B_i = R.
DECLSPEC void salsa_r (PRIVATE_AS uint4 *TI)
{
uint4 R0 = TI[STATE_CNT4 - 4];
uint4 R1 = TI[STATE_CNT4 - 3];
uint4 R2 = TI[STATE_CNT4 - 2];
uint4 R3 = TI[STATE_CNT4 - 1];
for (int i = 0; i < STATE_CNT4; i += 4)
{
uint4 Y0 = TI[i + 0];
uint4 Y1 = TI[i + 1];
uint4 Y2 = TI[i + 2];
uint4 Y3 = TI[i + 3];
R0 = R0 ^ Y0;
R1 = R1 ^ Y1;
R2 = R2 ^ Y2;
R3 = R3 ^ Y3;
// Salsa20/8 core: save input, run 8 rounds (4 double-rounds), add back.
uint4 X0 = R0;
uint4 X1 = R1;
uint4 X2 = R2;
uint4 X3 = R3;
SALSA20_2R ();
SALSA20_2R ();
SALSA20_2R ();
SALSA20_2R ();
R0 = R0 + X0;
R1 = R1 + X1;
R2 = R2 + X2;
R3 = R3 + X3;
TI[i + 0] = R0;
TI[i + 1] = R1;
TI[i + 2] = R2;
TI[i + 3] = R3;
}
#if SCRYPT_R > 1
// BlockMix output ordering: gather the even-indexed sub-blocks into the
// first half and the odd-indexed sub-blocks into the second half.
// TT temporarily holds the odd sub-blocks while evens are compacted.
uint4 TT[STATE_CNT4 / 2];
for (int dst_off = 0, src_off = 4; src_off < STATE_CNT4; dst_off += 4, src_off += 8)
{
TT[dst_off + 0] = TI[src_off + 0];
TT[dst_off + 1] = TI[src_off + 1];
TT[dst_off + 2] = TI[src_off + 2];
TT[dst_off + 3] = TI[src_off + 3];
}
for (int dst_off = 4, src_off = 8; src_off < STATE_CNT4; dst_off += 4, src_off += 8)
{
TI[dst_off + 0] = TI[src_off + 0];
TI[dst_off + 1] = TI[src_off + 1];
TI[dst_off + 2] = TI[src_off + 2];
TI[dst_off + 3] = TI[src_off + 3];
}
for (int dst_off = STATE_CNT4 / 2, src_off = 0; dst_off < STATE_CNT4; dst_off += 4, src_off += 4)
{
TI[dst_off + 0] = TT[src_off + 0];
TI[dst_off + 1] = TT[src_off + 1];
TI[dst_off + 2] = TT[src_off + 2];
TI[dst_off + 3] = TT[src_off + 3];
}
#endif
}
// First smix phase: build this thread's time-memory-tradeoff-compressed V
// table. Only every SCRYPT_TMTO-th scrypt row is stored; X is advanced by
// salsa_r between stores. Per-thread tables are striped over four backing
// buffers (selected by gid mod 4) so each allocation stays within the
// device's max-alloc limit (see module_extra_buffer_size).
DECLSPEC void scrypt_smix_init (PRIVATE_AS uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3, const u64 gid)
{
const u32 ySIZE = SCRYPT_N / SCRYPT_TMTO; // rows actually stored
const u32 zSIZE = STATE_CNT4;             // uint4 per row
const u32 x = (u32) gid;
const u32 xd4 = x / 4;
const u32 xm4 = x & 3;
GLOBAL_AS uint4 *V;
// xm4 is always in 0..3; making the last case 'default' lets the compiler
// prove V is assigned on every path (silences maybe-uninitialized
// diagnostics without changing behavior).
switch (xm4)
{
case 0: V = V0; break;
case 1: V = V1; break;
case 2: V = V2; break;
default: V = V3; break;
}
for (u32 y = 0; y < ySIZE; y++)
{
for (u32 z = 0; z < zSIZE; z++) V[CO] = X[z];
for (u32 i = 0; i < SCRYPT_TMTO; i++) salsa_r (X);
}
}
// Second smix phase (the random-access mix loop). Because V is
// TMTO-compressed, each lookup loads the nearest stored row and re-applies
// salsa_r km times to reconstruct the requested row before XOR-ing it in.
DECLSPEC void scrypt_smix_loop (PRIVATE_AS uint4 *X, GLOBAL_AS uint4 *V0, GLOBAL_AS uint4 *V1, GLOBAL_AS uint4 *V2, GLOBAL_AS uint4 *V3, const u64 gid)
{
const u32 ySIZE = SCRYPT_N / SCRYPT_TMTO;
const u32 zSIZE = STATE_CNT4;
const u32 x = (u32) gid;
const u32 xd4 = x / 4;
const u32 xm4 = x & 3;
GLOBAL_AS uint4 *V;
// xm4 is always in 0..3; 'default' for the last case guarantees V is set
// on every path (same fix as scrypt_smix_init).
switch (xm4)
{
case 0: V = V0; break;
case 1: V = V1; break;
case 2: V = V2; break;
default: V = V3; break;
}
// note: fixed 1024 iterations = forced -u 1024
// (module_kernel_loops_min/max pin the loop count; the host invokes this
// kernel SCRYPT_N / 1024 times to complete all N iterations)
for (u32 N_pos = 0; N_pos < 1024; N_pos++)
{
// Integerify: first u32 of the last 64-byte sub-block, mod N.
const u32 k = X[zSIZE - 4].x & (SCRYPT_N - 1);
const u32 y = k / SCRYPT_TMTO;        // nearest stored row index
const u32 km = k - (y * SCRYPT_TMTO); // salsa_r steps to re-apply
uint4 T[STATE_CNT4];
for (u32 z = 0; z < zSIZE; z++) T[z] = V[CO];
for (u32 i = 0; i < km; i++) salsa_r (T);
for (u32 z = 0; z < zSIZE; z++) X[z] ^= T[z];
salsa_r (X);
}
}
// Init kernel: first PBKDF2-HMAC-SHA256 pass. Derives SCRYPT_CNT u32s of
// scrypt input B from the UTF-16BE password and the 8-byte salt, then
// re-orders the lanes into the layout used by the vectorized salsa_r.
KERNEL_FQ void m29800_init (KERN_ATTR_TMPS (scrypt_tmp_t))
{
/**
* base
*/
const u64 gid = get_global_id (0);
if (gid >= GID_CNT) return;
// Decode the candidate into UTF-16 code units (up to 256 bytes).
u32 w[128] = { 0 };
hc_enc_t hc_enc;
hc_enc_init (&hc_enc);
const int w_len = hc_enc_next_global (&hc_enc, pws[gid].i, pws[gid].pw_len, 256, w, sizeof (w));
if (w_len == -1) return;
// utf16le to utf16be
for (int i = 0, j = 0; i < w_len; i += 4, j += 1)
{
w[j] = ((w[j] >> 8) & 0x00ff00ff)
| ((w[j] << 8) & 0xff00ff00);
}
// HMAC keyed with the password; salt fed once (PBKDF2 with c = 1).
sha256_hmac_ctx_t sha256_hmac_ctx;
sha256_hmac_init_swap (&sha256_hmac_ctx, w, w_len);
u32 s0[4] = { 0 };
u32 s1[4] = { 0 };
u32 s2[4] = { 0 };
u32 s3[4] = { 0 };
s0[0] = salt_bufs[SALT_POS_HOST].salt_buf[0];
s0[1] = salt_bufs[SALT_POS_HOST].salt_buf[1];
sha256_hmac_update_64 (&sha256_hmac_ctx, s0, s1, s2, s3, 8);
// PBKDF2 block loop: append the big-endian block index j, finalize, and
// take 32 bytes (two uint4) of output per block.
for (u32 i = 0, j = 1, k = 0; i < SCRYPT_CNT; i += 8, j += 1, k += 2)
{
sha256_hmac_ctx_t sha256_hmac_ctx2 = sha256_hmac_ctx;
u32 w0[4];
u32 w1[4];
u32 w2[4];
u32 w3[4];
w0[0] = j;
w0[1] = 0;
w0[2] = 0;
w0[3] = 0;
w1[0] = 0;
w1[1] = 0;
w1[2] = 0;
w1[3] = 0;
w2[0] = 0;
w2[1] = 0;
w2[2] = 0;
w2[3] = 0;
w3[0] = 0;
w3[1] = 0;
w3[2] = 0;
w3[3] = 0;
sha256_hmac_update_64 (&sha256_hmac_ctx2, w0, w1, w2, w3, 4);
sha256_hmac_final (&sha256_hmac_ctx2);
u32 digest[8];
digest[0] = sha256_hmac_ctx2.opad.h[0];
digest[1] = sha256_hmac_ctx2.opad.h[1];
digest[2] = sha256_hmac_ctx2.opad.h[2];
digest[3] = sha256_hmac_ctx2.opad.h[3];
digest[4] = sha256_hmac_ctx2.opad.h[4];
digest[5] = sha256_hmac_ctx2.opad.h[5];
digest[6] = sha256_hmac_ctx2.opad.h[6];
digest[7] = sha256_hmac_ctx2.opad.h[7];
#if defined IS_CUDA || defined IS_HIP
const uint4 tmp0 = make_uint4 (digest[0], digest[1], digest[2], digest[3]);
const uint4 tmp1 = make_uint4 (digest[4], digest[5], digest[6], digest[7]);
#else
const uint4 tmp0 = (uint4) (digest[0], digest[1], digest[2], digest[3]);
const uint4 tmp1 = (uint4) (digest[4], digest[5], digest[6], digest[7]);
#endif
tmps[gid].P[k + 0] = tmp0;
tmps[gid].P[k + 1] = tmp1;
}
// Byte-swap to little-endian and apply the diagonal lane gather used by
// the vectorized Salsa20 state; m29800_comp applies the exact inverse.
for (u32 l = 0; l < SCRYPT_CNT4; l += 4)
{
uint4 T[4];
T[0] = tmps[gid].P[l + 0];
T[1] = tmps[gid].P[l + 1];
T[2] = tmps[gid].P[l + 2];
T[3] = tmps[gid].P[l + 3];
T[0] = hc_swap32_4 (T[0]);
T[1] = hc_swap32_4 (T[1]);
T[2] = hc_swap32_4 (T[2]);
T[3] = hc_swap32_4 (T[3]);
uint4 X[4];
#if defined IS_CUDA || defined IS_HIP
X[0] = make_uint4 (T[0].x, T[1].y, T[2].z, T[3].w);
X[1] = make_uint4 (T[1].x, T[2].y, T[3].z, T[0].w);
X[2] = make_uint4 (T[2].x, T[3].y, T[0].z, T[1].w);
X[3] = make_uint4 (T[3].x, T[0].y, T[1].z, T[2].w);
#else
X[0] = (uint4) (T[0].x, T[1].y, T[2].z, T[3].w);
X[1] = (uint4) (T[1].x, T[2].y, T[3].z, T[0].w);
X[2] = (uint4) (T[2].x, T[3].y, T[0].z, T[1].w);
X[3] = (uint4) (T[3].x, T[0].y, T[1].z, T[2].w);
#endif
tmps[gid].P[l + 0] = X[0];
tmps[gid].P[l + 1] = X[1];
tmps[gid].P[l + 2] = X[2];
tmps[gid].P[l + 3] = X[3];
}
}
// Loop-prepare kernel: runs once per scrypt lane p (SALT_REPEAT selects the
// lane) and fills the V table for that lane via scrypt_smix_init.
KERNEL_FQ void m29800_loop_prepare (KERN_ATTR_TMPS (scrypt_tmp_t))
{
/**
* base
*/
const u64 gid = get_global_id (0);
const u64 lid = get_local_id (0);
if (gid >= GID_CNT) return;
// SCRYPT part, init V
// The four extra buffers back the striped per-thread V tables.
GLOBAL_AS uint4 *d_scrypt0_buf = (GLOBAL_AS uint4 *) d_extra0_buf;
GLOBAL_AS uint4 *d_scrypt1_buf = (GLOBAL_AS uint4 *) d_extra1_buf;
GLOBAL_AS uint4 *d_scrypt2_buf = (GLOBAL_AS uint4 *) d_extra2_buf;
GLOBAL_AS uint4 *d_scrypt3_buf = (GLOBAL_AS uint4 *) d_extra3_buf;
uint4 X[STATE_CNT4];
// Select this lane's STATE_CNT4-sized slice of B.
const u32 P_offset = SALT_REPEAT * STATE_CNT4;
GLOBAL_AS uint4 *P = tmps[gid].P + P_offset;
for (int z = 0; z < STATE_CNT4; z++) X[z] = P[z];
scrypt_smix_init (X, d_scrypt0_buf, d_scrypt1_buf, d_scrypt2_buf, d_scrypt3_buf, gid);
for (int z = 0; z < STATE_CNT4; z++) P[z] = X[z];
}
// Loop kernel: advances the scrypt mix for the current lane by 1024
// iterations per invocation (see scrypt_smix_loop); invoked repeatedly by
// the host until salt_iter iterations are done.
KERNEL_FQ void m29800_loop (KERN_ATTR_TMPS (scrypt_tmp_t))
{
const u64 gid = get_global_id (0);
const u64 lid = get_local_id (0);
if (gid >= GID_CNT) return;
GLOBAL_AS uint4 *d_scrypt0_buf = (GLOBAL_AS uint4 *) d_extra0_buf;
GLOBAL_AS uint4 *d_scrypt1_buf = (GLOBAL_AS uint4 *) d_extra1_buf;
GLOBAL_AS uint4 *d_scrypt2_buf = (GLOBAL_AS uint4 *) d_extra2_buf;
GLOBAL_AS uint4 *d_scrypt3_buf = (GLOBAL_AS uint4 *) d_extra3_buf;
uint4 X[STATE_CNT4];
// Same per-lane slice selection as m29800_loop_prepare.
const u32 P_offset = SALT_REPEAT * STATE_CNT4;
GLOBAL_AS uint4 *P = tmps[gid].P + P_offset;
for (int z = 0; z < STATE_CNT4; z++) X[z] = P[z];
scrypt_smix_loop (X, d_scrypt0_buf, d_scrypt1_buf, d_scrypt2_buf, d_scrypt3_buf, gid);
for (int z = 0; z < STATE_CNT4; z++) P[z] = X[z];
}
// Comp kernel: second PBKDF2-HMAC-SHA256 pass over the scrypt-mixed buffer
// B to derive the AES key, then AES-256-CBC-decrypt the stored ciphertext
// block and accept if it decrypts to a full PKCS#7 padding block (16 bytes
// of 0x10).
KERNEL_FQ void m29800_comp (KERN_ATTR_TMPS (scrypt_tmp_t))
{
const u64 gid = get_global_id (0);
const u64 lid = get_local_id (0);
const u64 lsz = get_local_size (0);
/**
* aes shared
*/
// Stage the AES encrypt/decrypt T-tables in local memory when REAL_SHM is
// available, cooperatively loaded by the whole workgroup; otherwise use
// constant-memory pointers directly.
#ifdef REAL_SHM
LOCAL_VK u32 s_td0[256];
LOCAL_VK u32 s_td1[256];
LOCAL_VK u32 s_td2[256];
LOCAL_VK u32 s_td3[256];
LOCAL_VK u32 s_td4[256];
LOCAL_VK u32 s_te0[256];
LOCAL_VK u32 s_te1[256];
LOCAL_VK u32 s_te2[256];
LOCAL_VK u32 s_te3[256];
LOCAL_VK u32 s_te4[256];
for (u32 i = lid; i < 256; i += lsz)
{
s_td0[i] = td0[i];
s_td1[i] = td1[i];
s_td2[i] = td2[i];
s_td3[i] = td3[i];
s_td4[i] = td4[i];
s_te0[i] = te0[i];
s_te1[i] = te1[i];
s_te2[i] = te2[i];
s_te3[i] = te3[i];
s_te4[i] = te4[i];
}
SYNC_THREADS ();
#else
CONSTANT_AS u32a *s_td0 = td0;
CONSTANT_AS u32a *s_td1 = td1;
CONSTANT_AS u32a *s_td2 = td2;
CONSTANT_AS u32a *s_td3 = td3;
CONSTANT_AS u32a *s_td4 = td4;
CONSTANT_AS u32a *s_te0 = te0;
CONSTANT_AS u32a *s_te1 = te1;
CONSTANT_AS u32a *s_te2 = te2;
CONSTANT_AS u32a *s_te3 = te3;
CONSTANT_AS u32a *s_te4 = te4;
#endif
// The early-out comes after table staging so every thread of the group
// participates in the loads and the barrier above.
if (gid >= GID_CNT) return;
/**
* 2nd pbkdf2, creates B
*/
// Re-derive the UTF-16BE password exactly as in m29800_init.
u32 w[128] = { 0 };
hc_enc_t hc_enc;
hc_enc_init (&hc_enc);
const int w_len = hc_enc_next_global (&hc_enc, pws[gid].i, pws[gid].pw_len, 256, w, sizeof (w));
if (w_len == -1) return;
// utf16le to utf16be
for (int i = 0, j = 0; i < w_len; i += 4, j += 1)
{
w[j] = ((w[j] >> 8) & 0x00ff00ff)
| ((w[j] << 8) & 0xff00ff00);
}
sha256_hmac_ctx_t ctx;
sha256_hmac_init_swap (&ctx, w, w_len);
u32 w0[4];
u32 w1[4];
u32 w2[4];
u32 w3[4];
// Feed B as the PBKDF2 salt: undo the diagonal lane gather from
// m29800_init (inverse shuffle) and byte-swap back to big-endian, 64
// bytes per HMAC update.
for (u32 l = 0; l < SCRYPT_CNT4; l += 4)
{
uint4 X[4];
X[0] = tmps[gid].P[l + 0];
X[1] = tmps[gid].P[l + 1];
X[2] = tmps[gid].P[l + 2];
X[3] = tmps[gid].P[l + 3];
uint4 T[4];
#if defined IS_CUDA || defined IS_HIP
T[0] = make_uint4 (X[0].x, X[3].y, X[2].z, X[1].w);
T[1] = make_uint4 (X[1].x, X[0].y, X[3].z, X[2].w);
T[2] = make_uint4 (X[2].x, X[1].y, X[0].z, X[3].w);
T[3] = make_uint4 (X[3].x, X[2].y, X[1].z, X[0].w);
#else
T[0] = (uint4) (X[0].x, X[3].y, X[2].z, X[1].w);
T[1] = (uint4) (X[1].x, X[0].y, X[3].z, X[2].w);
T[2] = (uint4) (X[2].x, X[1].y, X[0].z, X[3].w);
T[3] = (uint4) (X[3].x, X[2].y, X[1].z, X[0].w);
#endif
T[0] = hc_swap32_4 (T[0]);
T[1] = hc_swap32_4 (T[1]);
T[2] = hc_swap32_4 (T[2]);
T[3] = hc_swap32_4 (T[3]);
w0[0] = T[0].x;
w0[1] = T[0].y;
w0[2] = T[0].z;
w0[3] = T[0].w;
w1[0] = T[1].x;
w1[1] = T[1].y;
w1[2] = T[1].z;
w1[3] = T[1].w;
w2[0] = T[2].x;
w2[1] = T[2].y;
w2[2] = T[2].z;
w2[3] = T[2].w;
w3[0] = T[3].x;
w3[1] = T[3].y;
w3[2] = T[3].z;
w3[3] = T[3].w;
sha256_hmac_update_64 (&ctx, w0, w1, w2, w3, 64);
}
// PBKDF2 block index 1 (only 32 bytes of output are needed for the key).
w0[0] = 1;
w0[1] = 0;
w0[2] = 0;
w0[3] = 0;
w1[0] = 0;
w1[1] = 0;
w1[2] = 0;
w1[3] = 0;
w2[0] = 0;
w2[1] = 0;
w2[2] = 0;
w2[3] = 0;
w3[0] = 0;
w3[1] = 0;
w3[2] = 0;
w3[3] = 0;
sha256_hmac_update_64 (&ctx, w0, w1, w2, w3, 4);
sha256_hmac_final (&ctx);
// AES256-CBC decrypt
u32 key[8];
key[0] = ctx.opad.h[0];
key[1] = ctx.opad.h[1];
key[2] = ctx.opad.h[2];
key[3] = ctx.opad.h[3];
key[4] = ctx.opad.h[4];
key[5] = ctx.opad.h[5];
key[6] = ctx.opad.h[6];
key[7] = ctx.opad.h[7];
// 60 u32 = AES-256 expanded decrypt key schedule.
#define KEYLEN 60
u32 ks[KEYLEN];
AES256_set_decrypt_key (ks, key, s_te0, s_te1, s_te2, s_te3, s_td0, s_td1, s_td2, s_td3);
// IV and ciphertext block come from the decoded hash (see
// module_hash_decode: salt_buf[2..5] = IV, salt_buf[6..9] = data).
u32 iv[4];
iv[0] = salt_bufs[SALT_POS_HOST].salt_buf[2];
iv[1] = salt_bufs[SALT_POS_HOST].salt_buf[3];
iv[2] = salt_bufs[SALT_POS_HOST].salt_buf[4];
iv[3] = salt_bufs[SALT_POS_HOST].salt_buf[5];
u32 enc[4];
enc[0] = salt_bufs[SALT_POS_HOST].salt_buf[6];
enc[1] = salt_bufs[SALT_POS_HOST].salt_buf[7];
enc[2] = salt_bufs[SALT_POS_HOST].salt_buf[8];
enc[3] = salt_bufs[SALT_POS_HOST].salt_buf[9];
u32 dec[4];
aes256_decrypt (ks, enc, dec, s_td0, s_td1, s_td2, s_td3, s_td4);
// CBC: XOR the IV into the decrypted block.
dec[0] ^= iv[0];
dec[1] ^= iv[1];
dec[2] ^= iv[2];
dec[3] ^= iv[3];
// Accept when the plaintext is a full PKCS#7 padding block (16 x 0x10).
if ((dec[0] == 0x10101010) &&
(dec[1] == 0x10101010) &&
(dec[2] == 0x10101010) &&
(dec[3] == 0x10101010))
{
if (hc_atomic_inc (&hashes_shown[DIGESTS_OFFSET_HOST]) == 0)
{
mark_hash (plains_buf, d_return_buf, SALT_POS_HOST, DIGESTS_CNT, 0, DIGESTS_OFFSET_HOST + 0, gid, 0, 0, 0);
}
return;
}
}

@ -1,5 +1,11 @@
* changes v6.2.6 -> v6.2.x
##
## Algorithms
##
- Added hash-mode: Bisq .wallet (scrypt)
##
## Technical
##

@ -422,6 +422,7 @@ NVIDIA GPUs require "NVIDIA Driver" (440.64 or later) and "CUDA Toolkit" (9.0 or
- MultiBit HD (scrypt)
- Exodus Desktop Wallet (scrypt)
- Terra Station Wallet (AES256-CBC(PBKDF2($pass)))
- Bisq .wallet (scrypt)
##
## Attack-Modes

@ -0,0 +1,490 @@
/**
* Author......: See docs/credits.txt
* License.....: MIT
*/
#include <inttypes.h>
#include "common.h"
#include "types.h"
#include "modules.h"
#include "bitops.h"
#include "convert.h"
#include "shared.h"
// Plugin self-description constants for -m 29800 (Bisq .wallet, scrypt).
// ST_HASH encodes version 3 with N=32768, r=8, p=6 — matching the
// SCRYPT_N/R/P defaults declared further below.
static const u32 ATTACK_EXEC = ATTACK_EXEC_OUTSIDE_KERNEL;
static const u32 DGST_POS0 = 0;
static const u32 DGST_POS1 = 1;
static const u32 DGST_POS2 = 2;
static const u32 DGST_POS3 = 3;
static const u32 DGST_SIZE = DGST_SIZE_4_4;
static const u32 HASH_CATEGORY = HASH_CATEGORY_CRYPTOCURRENCY_WALLET;
static const char *HASH_NAME = "Bisq .wallet (scrypt)";
static const u64 KERN_TYPE = 29800;
static const u32 OPTI_TYPE = OPTI_TYPE_ZERO_BYTE;
static const u64 OPTS_TYPE = OPTS_TYPE_STOCK_MODULE
| OPTS_TYPE_PT_GENERATE_LE
| OPTS_TYPE_MP_MULTI_DISABLE
| OPTS_TYPE_NATIVE_THREADS
| OPTS_TYPE_LOOP_PREPARE
| OPTS_TYPE_SELF_TEST_DISABLE;
static const u32 SALT_TYPE = SALT_TYPE_EMBEDDED;
static const char *ST_PASS = "hashcat1";
static const char *ST_HASH = "$bisq$3*32768*8*6*31d838af87f99cb8*5cfb7bf3228d9e865881156e17b1866589ffa6b757011e25d1319083595236d2";
// Trivial accessors exposing the constants above through the module API.
u32 module_attack_exec (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return ATTACK_EXEC; }
u32 module_dgst_pos0 (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS0; }
u32 module_dgst_pos1 (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS1; }
u32 module_dgst_pos2 (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS2; }
u32 module_dgst_pos3 (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_POS3; }
u32 module_dgst_size (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return DGST_SIZE; }
u32 module_hash_category (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return HASH_CATEGORY; }
const char *module_hash_name (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return HASH_NAME; }
u64 module_kern_type (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return KERN_TYPE; }
u32 module_opti_type (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return OPTI_TYPE; }
u64 module_opts_type (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return OPTS_TYPE; }
u32 module_salt_type (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return SALT_TYPE; }
const char *module_st_hash (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return ST_HASH; }
const char *module_st_pass (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra) { return ST_PASS; }
// Hash-line signature and default scrypt cost parameters, used as fallbacks
// when salts have not been decoded yet (they match ST_HASH above).
static const char *SIGNATURE_BISQ = "$bisq$";
static const u64 SCRYPT_N = 32768;
static const u64 SCRYPT_R = 8;
static const u64 SCRYPT_P = 6;
// Flag device/runtime combinations known to miscompile or misbehave with
// this kernel so the core can warn the user.
bool module_unstable_warning (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra, MAYBE_UNUSED const hc_device_param_t *device_param)
{
  // Known-bad example:
  // AMD Radeon Pro W5700X Compute Engine; 1.2 (Apr 22 2021 21:54:44); 11.3.1; 20E241
  const bool is_apple_platform = (device_param->opencl_platform_vendor_id == VENDOR_ID_APPLE);
  const bool is_gpu_device     = (device_param->opencl_device_type & CL_DEVICE_TYPE_GPU) != 0;

  return (is_apple_platform && is_gpu_device);
}
// Lower bound for the -u (kernel loops) setting. Pinned to 1024 because the
// loop kernel executes a fixed 1024 scrypt iterations per invocation.
u32 module_kernel_loops_min (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra)
{
  return 1024;
}
// Upper bound for the -u (kernel loops) setting; equal to the minimum, so
// the loop count is effectively forced to 1024.
u32 module_kernel_loops_max (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra)
{
  return 1024;
}
// Maximum supported password length.
u32 module_pw_max (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra)
{
  // This overrides the reduction of PW_MAX applied when an optimized kernel
  // is selected; even in optimized kernel mode length 256 is supported.
  return PW_MAX;
}
// Compute the size of the extra device buffer holding all per-thread scrypt
// V tables, searching for the smallest time-memory tradeoff (TMTO) that
// fits in device memory. Returns (u64) -1 when no TMTO fits.
u64 module_extra_buffer_size (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra, MAYBE_UNUSED const hashes_t *hashes, MAYBE_UNUSED const hc_device_param_t *device_param)
{
// we need to set the self-test hash settings to pass the self-test
// the decoder for the self-test is called after this function
const u64 scrypt_N = (hashes->salts_buf[0].scrypt_N) ? hashes->salts_buf[0].scrypt_N : SCRYPT_N;
const u64 scrypt_r = (hashes->salts_buf[0].scrypt_r) ? hashes->salts_buf[0].scrypt_r : SCRYPT_R;
// Maximum number of concurrently running candidates (threads).
const u64 kernel_power_max = ((OPTS_TYPE & OPTS_TYPE_MP_MULTI_DISABLE) ? 1 : device_param->device_processors) * device_param->kernel_threads_max * device_param->kernel_accel_max;
// TMTO search range; a user-supplied --scrypt-tmto fixes it to one value.
u64 tmto_start = 0;
u64 tmto_stop = 4;
if (user_options->scrypt_tmto_chgd == true)
{
tmto_start = user_options->scrypt_tmto;
tmto_stop = user_options->scrypt_tmto;
}
// size_pws
const u64 size_pws = kernel_power_max * sizeof (pw_t);
const u64 size_pws_amp = size_pws;
// size_pws_comp
const u64 size_pws_comp = kernel_power_max * (sizeof (u32) * 64);
// size_pws_idx
const u64 size_pws_idx = (kernel_power_max + 1) * sizeof (pw_idx_t);
// size_tmps
const u64 size_tmps = kernel_power_max * hashconfig->tmp_size;
// size_hooks
const u64 size_hooks = kernel_power_max * hashconfig->hook_size;
u64 size_pws_pre = 4;
u64 size_pws_base = 4;
if (user_options->slow_candidates == true)
{
// size_pws_pre
size_pws_pre = kernel_power_max * sizeof (pw_pre_t);
// size_pws_base
size_pws_base = kernel_power_max * sizeof (pw_pre_t);
}
// sometimes device_available_mem and device_maxmem_alloc reported back from the opencl runtime are a bit inaccurate.
// let's add some extra space just to be sure.
// now depends on the kernel-accel value (where scrypt and similar benefits), but also hard minimum 64mb and maximum 1024mb limit
u64 EXTRA_SPACE = (1024ULL * 1024ULL) * device_param->kernel_accel_max;
EXTRA_SPACE = MAX (EXTRA_SPACE, ( 64ULL * 1024ULL * 1024ULL));
EXTRA_SPACE = MIN (EXTRA_SPACE, (1024ULL * 1024ULL * 1024ULL));
// Sum of all other device allocations that compete with the V tables.
const u64 scrypt_extra_space
= device_param->size_bfs
+ device_param->size_combs
+ device_param->size_digests
+ device_param->size_esalts
+ device_param->size_markov_css
+ device_param->size_plains
+ device_param->size_results
+ device_param->size_root_css
+ device_param->size_rules
+ device_param->size_rules_c
+ device_param->size_salts
+ device_param->size_shown
+ device_param->size_tm
+ device_param->size_st_digests
+ device_param->size_st_salts
+ device_param->size_st_esalts
+ size_pws
+ size_pws_amp
+ size_pws_comp
+ size_pws_idx
+ size_tmps
+ size_hooks
+ size_pws_pre
+ size_pws_base
+ EXTRA_SPACE;
// Try increasing TMTO values until the (compressed) V tables fit both the
// per-allocation limit (buffer is split in 4) and total available memory.
bool not_enough_memory = true;
u64 size_scrypt = 0;
u64 tmto;
for (tmto = tmto_start; tmto <= tmto_stop; tmto++)
{
// Full V is 128 * r * N bytes per thread; TMTO divides it by 2^tmto.
size_scrypt = (128ULL * scrypt_r) * scrypt_N;
size_scrypt /= 1ull << tmto;
size_scrypt *= kernel_power_max;
if ((size_scrypt / 4) > device_param->device_maxmem_alloc) continue;
if ((size_scrypt + scrypt_extra_space) > device_param->device_available_mem) continue;
not_enough_memory = false;
break;
}
if (not_enough_memory == true) return -1;
return size_scrypt;
}
// Static per-candidate tmps size. Reported as 0 here because the real size
// depends on the decoded scrypt parameters and is supplied dynamically by
// module_extra_tmp_size.
u64 module_tmp_size (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra)
{
  return 0;
}
// Dynamic per-candidate tmps size: the scrypt working buffer B is
// 128 * r * p bytes. Returns (u64) -1 when the loaded hashes disagree on
// their scrypt parameters (a single kernel build cannot serve mixed costs).
u64 module_extra_tmp_size (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra, MAYBE_UNUSED const hashes_t *hashes)
{
const u64 scrypt_N = (hashes->salts_buf[0].scrypt_N) ? hashes->salts_buf[0].scrypt_N : SCRYPT_N;
const u64 scrypt_r = (hashes->salts_buf[0].scrypt_r) ? hashes->salts_buf[0].scrypt_r : SCRYPT_R;
const u64 scrypt_p = (hashes->salts_buf[0].scrypt_p) ? hashes->salts_buf[0].scrypt_p : SCRYPT_P;
// we need to check that all hashes have the same scrypt settings
for (u32 i = 1; i < hashes->salts_cnt; i++)
{
if ((hashes->salts_buf[i].scrypt_N != scrypt_N)
|| (hashes->salts_buf[i].scrypt_r != scrypt_r)
|| (hashes->salts_buf[i].scrypt_p != scrypt_p))
{
return -1;
}
}
const u64 tmp_size = 128ULL * scrypt_r * scrypt_p;
return tmp_size;
}
// Disable the kernel warmup run for this mode.
bool module_warmup_disable (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra)
{
return true;
}
// Build the -D option string that injects the scrypt parameters, the
// effective TMTO divisor, and the tmps element count into the kernel build.
// Returns a heap-allocated string owned by the caller.
char *module_jit_build_options (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const user_options_t *user_options, MAYBE_UNUSED const user_options_extra_t *user_options_extra, MAYBE_UNUSED const hashes_t *hashes, MAYBE_UNUSED const hc_device_param_t *device_param)
{
  // Same fallback as module_extra_buffer_size / module_extra_tmp_size:
  // use the mode defaults when the decoder has not filled the salt yet.
  const u64 scrypt_N = (hashes->salts_buf[0].scrypt_N) ? hashes->salts_buf[0].scrypt_N : SCRYPT_N;
  const u64 scrypt_r = (hashes->salts_buf[0].scrypt_r) ? hashes->salts_buf[0].scrypt_r : SCRYPT_R;
  const u64 scrypt_p = (hashes->salts_buf[0].scrypt_p) ? hashes->salts_buf[0].scrypt_p : SCRYPT_P;

  const u64 extra_buffer_size = device_param->extra_buffer_size;

  const u64 kernel_power_max = ((OPTS_TYPE & OPTS_TYPE_MP_MULTI_DISABLE) ? 1 : device_param->device_processors) * device_param->kernel_threads_max * device_param->kernel_accel_max;

  const u64 size_scrypt = 128ULL * scrypt_r * scrypt_N;

  // Effective TMTO divisor granted by the allocated extra buffer: the ratio
  // of the uncompressed V requirement to what was actually allocated.
  const u64 scrypt_tmto_final = (kernel_power_max * size_scrypt) / extra_buffer_size;

  const u64 tmp_size = 128ULL * scrypt_r * scrypt_p;

  char *jit_build_options = NULL;

  // Fix: pass the fallback-applied values (not the raw salt fields, which
  // can still be zero here) so the defines always match the sizes computed
  // above. Casts keep the %u conversions well-defined.
  hc_asprintf (&jit_build_options, "-D SCRYPT_N=%u -D SCRYPT_R=%u -D SCRYPT_P=%u -D SCRYPT_TMTO=%" PRIu64 " -D SCRYPT_TMP_ELEM=%" PRIu64,
    (u32) scrypt_N,
    (u32) scrypt_r,
    (u32) scrypt_p,
    scrypt_tmto_final,
    tmp_size / 16);

  return jit_build_options;
}
// Parse one hash line of the form
//   $bisq$3*N*r*p*<16-hex salt>*<64-hex iv||data>
// into salt/digest storage. Token layout: [1]=version (must be '3'),
// [2]=scrypt N, [3]=r, [4]=p, [5]=8-byte salt, [6]=16-byte IV followed by
// a 16-byte ciphertext block.
int module_hash_decode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED void *digest_buf, MAYBE_UNUSED salt_t *salt, MAYBE_UNUSED void *esalt_buf, MAYBE_UNUSED void *hook_salt_buf, MAYBE_UNUSED hashinfo_t *hash_info, const char *line_buf, MAYBE_UNUSED const int line_len)
{
u32 *digest = (u32 *) digest_buf;
hc_token_t token;
token.token_cnt = 7;
token.signatures_cnt = 1;
token.signatures_buf[0] = SIGNATURE_BISQ;
token.len[0] = 6;
token.attr[0] = TOKEN_ATTR_FIXED_LENGTH
| TOKEN_ATTR_VERIFY_SIGNATURE;
token.len_min[1] = 1;
token.len_max[1] = 1;
token.sep[1] = '*';
token.attr[1] = TOKEN_ATTR_VERIFY_LENGTH
| TOKEN_ATTR_VERIFY_DIGIT;
token.len_min[2] = 1;
token.len_max[2] = 10;
token.sep[2] = '*';
token.attr[2] = TOKEN_ATTR_VERIFY_LENGTH
| TOKEN_ATTR_VERIFY_DIGIT;
token.len_min[3] = 1;
token.len_max[3] = 2;
token.sep[3] = '*';
token.attr[3] = TOKEN_ATTR_VERIFY_LENGTH
| TOKEN_ATTR_VERIFY_DIGIT;
token.len_min[4] = 1;
token.len_max[4] = 1;
token.sep[4] = '*';
token.attr[4] = TOKEN_ATTR_VERIFY_LENGTH
| TOKEN_ATTR_VERIFY_DIGIT;
token.len_min[5] = 16;
token.len_max[5] = 16;
token.sep[5] = '*';
token.attr[5] = TOKEN_ATTR_VERIFY_LENGTH
| TOKEN_ATTR_VERIFY_HEX;
token.len[6] = 64;
token.attr[6] = TOKEN_ATTR_FIXED_LENGTH
| TOKEN_ATTR_VERIFY_HEX;
const int rc_tokenizer = input_tokenizer ((const u8 *) line_buf, line_len, &token);
if (rc_tokenizer != PARSER_OK) return (rc_tokenizer);
// version
const u8 *version_pos = token.buf[1];
if (version_pos[0] != (u8) '3') return (PARSER_SIGNATURE_UNMATCHED);
// scrypt settings
const u8 *scrypt_n_pos = token.buf[2];
const u32 scrypt_n = hc_strtoul ((const char *) scrypt_n_pos, NULL, 10);
const u8 *scrypt_r_pos = token.buf[3];
const u32 scrypt_r = hc_strtoul ((const char *) scrypt_r_pos, NULL, 10);
const u8 *scrypt_p_pos = token.buf[4];
const u32 scrypt_p = hc_strtoul ((const char *) scrypt_p_pos, NULL, 10);
salt->scrypt_N = scrypt_n;
salt->scrypt_r = scrypt_r;
salt->scrypt_p = scrypt_p;
// N mixing iterations total; the loop kernel does 1024 per call. The p
// lanes are handled as (p - 1) extra salt repeats of the loop kernels.
salt->salt_iter = salt->scrypt_N;
salt->salt_repeats = salt->scrypt_p - 1;
// salt
const u8 *salt_pos = token.buf[5];
salt->salt_buf[0] = hex_to_u32 (salt_pos + 0);
salt->salt_buf[1] = hex_to_u32 (salt_pos + 8);
salt->salt_buf[0] = byte_swap_32 (salt->salt_buf[0]); // swap the salt (only the salt)
salt->salt_buf[1] = byte_swap_32 (salt->salt_buf[1]);
// IV
const u8 *blob_pos = token.buf[6];
salt->salt_buf[2] = hex_to_u32 (blob_pos + 0);
salt->salt_buf[3] = hex_to_u32 (blob_pos + 8);
salt->salt_buf[4] = hex_to_u32 (blob_pos + 16);
salt->salt_buf[5] = hex_to_u32 (blob_pos + 24);
// data
salt->salt_buf[6] = hex_to_u32 (blob_pos + 32);
salt->salt_buf[7] = hex_to_u32 (blob_pos + 40);
salt->salt_buf[8] = hex_to_u32 (blob_pos + 48);
salt->salt_buf[9] = hex_to_u32 (blob_pos + 56);
salt->salt_len = 40;
// fake digest:
// there is no stored digest to compare against (the kernel verifies via
// the padding check), so reuse salt words as a unique identifier.
digest[0] = salt->salt_buf[4];
digest[1] = salt->salt_buf[5];
digest[2] = salt->salt_buf[6];
digest[3] = salt->salt_buf[7];
return (PARSER_OK);
}
// Re-emit the hash line from the decoded salt fields, inverting the
// decoder's byte-swaps: salt words 0-1 were stored host-order (swapped at
// decode), while IV/data words 2-9 are swapped back here for printing.
int module_hash_encode (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE_UNUSED const void *digest_buf, MAYBE_UNUSED const salt_t *salt, MAYBE_UNUSED const void *esalt_buf, MAYBE_UNUSED const void *hook_salt_buf, MAYBE_UNUSED const hashinfo_t *hash_info, char *line_buf, MAYBE_UNUSED const int line_size)
{
const int line_len = snprintf (line_buf, line_size, "%s%u*%u*%u*%u*%08x%08x*%08x%08x%08x%08x%08x%08x%08x%08x",
SIGNATURE_BISQ,
3,
salt->scrypt_N,
salt->scrypt_r,
salt->scrypt_p,
salt->salt_buf[0],
salt->salt_buf[1],
byte_swap_32 (salt->salt_buf[2]),
byte_swap_32 (salt->salt_buf[3]),
byte_swap_32 (salt->salt_buf[4]),
byte_swap_32 (salt->salt_buf[5]),
byte_swap_32 (salt->salt_buf[6]),
byte_swap_32 (salt->salt_buf[7]),
byte_swap_32 (salt->salt_buf[8]),
byte_swap_32 (salt->salt_buf[9]));
return line_len;
}
// Registers every hashcat module callback for mode 29800 (BISQ .wallet,
// scrypt + AES).  Entries set to MODULE_DEFAULT fall back to the host's
// generic behavior; the remaining entries point at functions defined
// earlier in this file.  The field order mirrors module_ctx_t.
void module_init (module_ctx_t *module_ctx)
{
  // ABI handshake: the host rejects modules whose context size or
  // interface version does not match the loader it was built against.
  module_ctx->module_context_size = MODULE_CONTEXT_SIZE_CURRENT;
  module_ctx->module_interface_version = MODULE_INTERFACE_VERSION_CURRENT;

  module_ctx->module_attack_exec = module_attack_exec;
  module_ctx->module_benchmark_esalt = MODULE_DEFAULT;
  module_ctx->module_benchmark_hook_salt = MODULE_DEFAULT;
  module_ctx->module_benchmark_mask = MODULE_DEFAULT;
  module_ctx->module_benchmark_charset = MODULE_DEFAULT;
  module_ctx->module_benchmark_salt = MODULE_DEFAULT;
  module_ctx->module_build_plain_postprocess = MODULE_DEFAULT;
  module_ctx->module_deep_comp_kernel = MODULE_DEFAULT;
  module_ctx->module_deprecated_notice = MODULE_DEFAULT;
  module_ctx->module_dgst_pos0 = module_dgst_pos0;
  module_ctx->module_dgst_pos1 = module_dgst_pos1;
  module_ctx->module_dgst_pos2 = module_dgst_pos2;
  module_ctx->module_dgst_pos3 = module_dgst_pos3;
  module_ctx->module_dgst_size = module_dgst_size;
  module_ctx->module_dictstat_disable = MODULE_DEFAULT;
  module_ctx->module_esalt_size = MODULE_DEFAULT;
  // scrypt needs a large per-hash scratch buffer; size computed per device
  module_ctx->module_extra_buffer_size = module_extra_buffer_size;
  module_ctx->module_extra_tmp_size = module_extra_tmp_size;
  module_ctx->module_extra_tuningdb_block = MODULE_DEFAULT;
  module_ctx->module_forced_outfile_format = MODULE_DEFAULT;
  module_ctx->module_hash_binary_count = MODULE_DEFAULT;
  module_ctx->module_hash_binary_parse = MODULE_DEFAULT;
  module_ctx->module_hash_binary_save = MODULE_DEFAULT;
  module_ctx->module_hash_decode_postprocess = MODULE_DEFAULT;
  module_ctx->module_hash_decode_potfile = MODULE_DEFAULT;
  module_ctx->module_hash_decode_zero_hash = MODULE_DEFAULT;
  // parse/print of the $bisq$3*... hash line (defined above in this file)
  module_ctx->module_hash_decode = module_hash_decode;
  module_ctx->module_hash_encode_status = MODULE_DEFAULT;
  module_ctx->module_hash_encode_potfile = MODULE_DEFAULT;
  module_ctx->module_hash_encode = module_hash_encode;
  module_ctx->module_hash_init_selftest = MODULE_DEFAULT;
  module_ctx->module_hash_mode = MODULE_DEFAULT;
  module_ctx->module_hash_category = module_hash_category;
  module_ctx->module_hash_name = module_hash_name;
  module_ctx->module_hashes_count_min = MODULE_DEFAULT;
  module_ctx->module_hashes_count_max = MODULE_DEFAULT;
  module_ctx->module_hlfmt_disable = MODULE_DEFAULT;
  module_ctx->module_hook_extra_param_size = MODULE_DEFAULT;
  module_ctx->module_hook_extra_param_init = MODULE_DEFAULT;
  module_ctx->module_hook_extra_param_term = MODULE_DEFAULT;
  module_ctx->module_hook12 = MODULE_DEFAULT;
  module_ctx->module_hook23 = MODULE_DEFAULT;
  module_ctx->module_hook_salt_size = MODULE_DEFAULT;
  module_ctx->module_hook_size = MODULE_DEFAULT;
  // JIT options pass the scrypt N/r/p values into the kernel build
  module_ctx->module_jit_build_options = module_jit_build_options;
  module_ctx->module_jit_cache_disable = MODULE_DEFAULT;
  module_ctx->module_kernel_accel_max = MODULE_DEFAULT;
  module_ctx->module_kernel_accel_min = MODULE_DEFAULT;
  module_ctx->module_kernel_loops_max = module_kernel_loops_max;
  module_ctx->module_kernel_loops_min = module_kernel_loops_min;
  module_ctx->module_kernel_threads_max = MODULE_DEFAULT;
  module_ctx->module_kernel_threads_min = MODULE_DEFAULT;
  module_ctx->module_kern_type = module_kern_type;
  module_ctx->module_kern_type_dynamic = MODULE_DEFAULT;
  module_ctx->module_opti_type = module_opti_type;
  module_ctx->module_opts_type = module_opts_type;
  module_ctx->module_outfile_check_disable = MODULE_DEFAULT;
  module_ctx->module_outfile_check_nocomp = MODULE_DEFAULT;
  module_ctx->module_potfile_custom_check = MODULE_DEFAULT;
  module_ctx->module_potfile_disable = MODULE_DEFAULT;
  module_ctx->module_potfile_keep_all_hashes = MODULE_DEFAULT;
  module_ctx->module_pwdump_column = MODULE_DEFAULT;
  module_ctx->module_pw_max = module_pw_max;
  module_ctx->module_pw_min = MODULE_DEFAULT;
  module_ctx->module_salt_max = MODULE_DEFAULT;
  module_ctx->module_salt_min = MODULE_DEFAULT;
  module_ctx->module_salt_type = module_salt_type;
  module_ctx->module_separator = MODULE_DEFAULT;
  // self-test vector (hash + password) used at startup
  module_ctx->module_st_hash = module_st_hash;
  module_ctx->module_st_pass = module_st_pass;
  module_ctx->module_tmp_size = module_tmp_size;
  module_ctx->module_unstable_warning = module_unstable_warning;
  module_ctx->module_warmup_disable = module_warmup_disable;
}

@ -0,0 +1,127 @@
#!/usr/bin/env python
# This software is Copyright (c) 2017, Dhiru Kholia <dhiru.kholia at gmail.com>
# and it is hereby released under GPL v2 license.
#
# Major parts are borrowed from the "btcrecover" program which is,
# Copyright (C) 2014-2016 Christopher Gurnee and under GPL v2.
#
# See https://github.com/gurnec/btcrecover for details.
#
# References,
#
# https://github.com/gurnec/btcrecover/blob/master/btcrecover/btcrpass.py
#
# Updated script in order to work with Bisq wallet v1.9.5 (2022)
# Author:
# https://github.com/Banaanhangwagen
# License: MIT
import os
import sys
import base64
import binascii
# The protobuf parsing code is borrowed from btcrecover, and was
# initially automatically generated by Google's protoc utility.
# The precompiled file is shipped in protobuf/wallet_pb2.py with John.
from protobuf.wallet_pb2 import *
def process_file(filename):
bname = os.path.basename(filename)
try:
f = open(filename, "rb")
data = f.read()
except IOError:
e = sys.exc_info()[1]
sys.stderr.write("%s\n" % str(e))
return
if "wallet" in bname or b"org.bitcoin.production" in data:
# sys.stderr.write("[WARNING] Cracking .wallet files is a very slow process, try cracking the associated .key file instead!\n")
version = 3 # MultiBit Classic .wallet file
# def is_wallet_file(wallet_file) from btcrecover
wallet_file = open(filename, "rb")
wallet_file.seek(0)
is_valid_bitcoinj_wallet = False
if wallet_file.read(1) == b"\x0a": # protobuf field number 1 of type length-delimited
network_identifier_len = ord(wallet_file.read(1))
if 1 <= network_identifier_len < 128:
wallet_file.seek(2 + network_identifier_len)
c = wallet_file.read(1)
if c and c in b"\x12\x1a": # field number 2 or 3 of type length-delimited
is_valid_bitcoinj_wallet = True
if is_valid_bitcoinj_wallet:
pb_wallet = Wallet()
pb_wallet.ParseFromString(data)
if pb_wallet.encryption_type == Wallet.UNENCRYPTED:
raise ValueError("bitcoinj wallet is not encrypted")
if pb_wallet.encryption_type != Wallet.ENCRYPTED_SCRYPT_AES:
raise NotImplementedError("Unsupported bitcoinj encryption type " + str(pb_wallet.encryption_type))
if not pb_wallet.HasField("encryption_parameters"):
raise ValueError("bitcoinj wallet is missing its scrypt encryption parameters")
for key in pb_wallet.key:
if key.type in (Key.ENCRYPTED_SCRYPT_AES, Key.DETERMINISTIC_KEY) and key.HasField("encrypted_data"):
encrypted_len = len(key.encrypted_data.encrypted_private_key)
if encrypted_len == 48:
# only need the final 2 encrypted blocks (half of it padding) plus the scrypt parameters
part_encrypted_key = key.encrypted_data.encrypted_private_key[-32:]
salt = pb_wallet.encryption_parameters.salt
n = pb_wallet.encryption_parameters.n
r = pb_wallet.encryption_parameters.r
p = pb_wallet.encryption_parameters.p
encrypted_data = binascii.hexlify(part_encrypted_key).decode("ascii")
salt = binascii.hexlify(salt).decode("ascii")
sys.stdout.write("%s:$bisq$%d*%s*%s*%s*%s*%s\n" % (bname, version, n, r, p, salt, encrypted_data))
return
return
pdata = b"".join(data.split())
if len(pdata) < 64:
sys.stderr.write("%s: Short length for a bisq wallet file!\n" % bname)
return
try:
pdata = base64.b64decode(pdata[:64])
if pdata.startswith(b"Salted__"):
version = 1 # MultiBit Classic
else:
version = 2 # MultiBit HD possibly? We need more tests!
except:
version = 2 # MultiBit HD possibly?
if version == 1:
encrypted_data = pdata[16:48] # two AES blocks
salt = pdata[8:16]
encrypted_data = binascii.hexlify(encrypted_data).decode("ascii")
salt = binascii.hexlify(salt).decode("ascii")
sys.stdout.write("%s:$bisq$%d*%s*%s\n" % (bname, version, salt, encrypted_data))
return
else:
version = 2
# sanity check but not a very good one
if "wallet" not in bname and "aes" not in bname:
sys.stderr.write("%s: Make sure that this is a Bisq wallet!\n" % bname)
# two possibilities
iv = data[:16] # v0.5.0+
block_iv = data[16:32] # v0.5.0+
block_noiv = data[:16] # encrypted using hardcoded iv, < v0.5.0
iv = binascii.hexlify(iv).decode("ascii")
block_iv = binascii.hexlify(block_iv).decode("ascii")
block_noiv = binascii.hexlify(block_noiv).decode("ascii")
sys.stdout.write("%s:$bisq$%d*%s*%s*%s\n" % (bname, version, iv, block_iv, block_noiv))
return
f.close()
if __name__ == "__main__":
    # At least one wallet path is required on the command line.
    # (MultiBit Classic: for a wallet named 'xyz', the files under
    # xyz-data/key-backup/ or xyz-data/wallet-backup/ are the targets.)
    if len(sys.argv) < 2:
        sys.stderr.write("Usage: %s [Bisq wallet file (.wallet)]\n" % sys.argv[0])
        sys.exit(1)

    for wallet_path in sys.argv[1:]:
        process_file(wallet_path)

File diff suppressed because one or more lines are too long

@ -0,0 +1,135 @@
#!/usr/bin/env perl
##
## Author......: See docs/credits.txt
## License.....: MIT
##
use strict;
use warnings;
use Crypt::ScryptKDF qw (scrypt_raw);
use Encode;
use Crypt::CBC;
# Constraint tuples: [pw_min, pw_max], [salt_min, salt_max], then three
# unused pairs — the salt is always exactly 8 bytes for this mode.
sub module_constraints { [[0, 256], [8, 8], [-1, -1], [-1, -1], [-1, -1]] }

# Fixed scrypt cost parameters emitted into every generated hash below.
my $SCRYPT_N = 32768;
my $SCRYPT_R = 8;
my $SCRYPT_P = 6;

# Known 16-byte plaintext block (sixteen 0x10 bytes); module_generate_hash
# compares the decryption result against it to verify a candidate password.
my $DATA_FIXED = "\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10";
sub module_generate_hash
{
  my $word = shift;
  my $salt = shift;
  my $iv   = shift // random_bytes (16);
  my $data = shift;

  # Bisq derives the AES-256 key from the UTF-16BE encoded password via scrypt
  my $key = scrypt_raw (encode ('UTF-16BE', $word), $salt, $SCRYPT_N, $SCRYPT_R, $SCRYPT_P, 32);

  my $cipher = Crypt::CBC->new ({
    cipher      => "Crypt::Rijndael",
    iv          => $iv,
    key         => $key,
    keysize     => 32,
    literal_key => 1,
    header      => "none",
    padding     => "none"
  });

  my $data_block = "";

  if (defined ($data)) # verify path: keep the block only if it decrypts to the fixed plaintext
  {
    $data_block = $data if ($cipher->decrypt ($data) eq $DATA_FIXED);
  }
  else # generate path: encrypt the fixed plaintext block
  {
    $data_block = $cipher->encrypt ($DATA_FIXED);
  }

  return sprintf ("\$bisq\$3*%d*%d*%d*%s*%s", $SCRYPT_N, $SCRYPT_R, $SCRYPT_P, unpack ("H*", $salt), unpack ("H*", $iv . $data_block));
}
# Split a potfile line "<hash>:<word>" for mode 29800, re-derive the hash
# from <word> with the salt/IV/data taken from <hash>, and return both so
# the caller can compare.  Returns empty on any malformed input.
sub module_verify_hash
{
  my $line = shift;

  # only accept hashes produced by this module ('$bisq$3*' is 8 chars)
  return unless (substr ($line, 0, 8) eq '$bisq$3*');

  # split hash and word:
  my $idx1 = index ($line, ":", 12);

  return if ($idx1 < 1);

  my $hash = substr ($line, 0, $idx1);
  my $word = substr ($line, $idx1 + 1);

  # scrypt parameters:
  # NOTE(review): the scan starts at offset 12, which assumes scrypt N has at
  # least 4 digits; with the standard N=32768 this extracts only the digits
  # from offset 12 onward ("8") rather than the full value.  Harmless here,
  # because module_generate_hash uses the module-level constants and the
  # relative '*' scans below still land on the correct field boundaries —
  # but confirm before reusing this parser elsewhere.
  my $idx2 = index ($hash, "*", 12);

  return if ($idx2 < 0);

  my $scrypt_n = substr ($hash, 12, $idx2 - 12);

  $idx1 = index ($hash, "*", $idx2 + 1);

  return if ($idx1 < 0);

  my $scrypt_r = substr ($hash, $idx2 + 1, $idx1 - $idx2 - 1);

  $idx2 = index ($hash, "*", $idx1 + 1);

  return if ($idx2 < 0);

  my $scrypt_p = substr ($hash, $idx1 + 1, $idx2 - $idx1 - 1);

  # salt:
  $idx1 = index ($hash, "*", $idx2 + 1);

  return if ($idx1 < 0);

  my $salt = substr ($hash, $idx2 + 1, $idx1 - $idx2 - 1);

  # IV: first 32 hex chars of the final field
  my $iv = substr ($hash, $idx1 + 1, 32);

  # data: next 32 hex chars (the encrypted verification block)
  my $data = substr ($hash, $idx1 + 1 + 32, 32);

  # strict hex validation: 8-byte salt, 16-byte IV, 16-byte data
  return unless $salt =~ m/^[0-9a-fA-F]{16}$/;
  return unless $iv   =~ m/^[0-9a-fA-F]{32}$/;
  return unless $data =~ m/^[0-9a-fA-F]{32}$/;

  # hex to binary/raw:
  $salt = pack ("H*", $salt);
  $iv   = pack ("H*", $iv);
  $data = pack ("H*", $data);

  # candidate words may arrive as $HEX[...] notation
  $word = pack_if_HEX_notation ($word);

  my $new_hash = module_generate_hash ($word, $salt, $iv, $data);

  return ($new_hash, $word);
}
1;

@ -0,0 +1,27 @@
#
# Find the right -n value for your GPU:
# =====================================
#
# 1. For example, to find the value for 27700, first create a valid hash for 27700 as follows:
#
# $ ./hashcat --example-hashes -m 27700 | grep Example.Hash | grep -v Format | cut -b 25- > tmp.hash.27700
#
# 2. Now let it iterate through all -n values up to a certain point. In this case, I'm using 200, but in general it should be a value at least twice the GPU's multiprocessor count. If in doubt you can just leave it as it is; the run only takes a little longer.
#
# $ export i=1; while [ $i -ne 201 ]; do echo $i; ./hashcat --quiet tmp.hash.27700 --keep-guessing --self-test-disable --markov-disable --restore-disable --outfile-autohex-disable --wordlist-autohex-disable --potfile-disable --logfile-disable --hwmon-disable --status --status-timer 1 --runtime 28 --machine-readable --optimized-kernel-enable --workload-profile 3 --hash-type 27700 --attack-mode 3 ?b?b?b?b?b?b?b --backend-devices 1 --force -n $i; i=$(($i+1)); done | tee x
#
# 3. Determine the highest measured H/s speed. But don't just use the highest value. Instead, use the number that seems most stable, usually at the beginning.
#
# $ grep "$(printf 'STATUS\t3')" x | cut -f4 -d$'\t' | sort -n | tail
#
# 4. To match the speed you have chosen to the correct -n value, search for that speed in the 'x' file. Then scroll up slightly from the block where you found it; the -n value is the single number printed just before the start of that block. If multiple blocks show the same speed, choose the lowest value for -n
#Device Attack Hash Vector Kernel Kernel
#Name Mode Type Width Accel Loops
DEVICE_TYPE_CPU * 29800 1 N A
DEVICE_TYPE_GPU * 29800 1 N A
GeForce_GTX_1070_Ti * 29800 1 14 A
GeForce_GTX_1080 * 29800 1 14 A
GeForce_GTX_1080_Ti * 29800 1 20 A
Loading…
Cancel
Save