@@ -128,335 +128,174 @@ DECLSPEC void aes256_scrt_format (PRIVATE_AS u32 *aes_ks, PRIVATE_AS u32 *pw, co
   AES256_encrypt (aes_ks, hash, out, s_te0, s_te1, s_te2, s_te3, s_te4);
 }

-DECLSPEC void aes256_scrt_format_VV(PRIVATE_AS u32 *aes_ks, PRIVATE_AS u32x *w, const u32 pw_len, PRIVATE_AS u32x *hash, PRIVATE_AS u32x *out, SHM_TYPE u32 *s_te0, SHM_TYPE u32 *s_te1, SHM_TYPE u32 *s_te2, SHM_TYPE u32 *s_te3, SHM_TYPE u32 *s_te4)
+DECLSPEC void aes256_scrt_format_VV (PRIVATE_AS u32 *aes_ks, PRIVATE_AS u32x *w, const u32 pw_len, PRIVATE_AS u32x *h, PRIVATE_AS u32x *out, SHM_TYPE u32 *s_te0, SHM_TYPE u32 *s_te1, SHM_TYPE u32 *s_te2, SHM_TYPE u32 *s_te3, SHM_TYPE u32 *s_te4)
 {
   #if VECT_SIZE == 1

-  aes256_scrt_format(aes_ks, w, pw_len, hash, out, s_te0, s_te1, s_te2, s_te3, s_te4);
+  aes256_scrt_format (aes_ks, w, pw_len, h, out, s_te0, s_te1, s_te2, s_te3, s_te4);

   #endif

   #if VECT_SIZE >= 2

-  u32 tmp_w[4];
-  u32 tmp_hash[4];
+  u32 tmp_w[64];
+  u32 tmp_h[8];
   u32 tmp_out[4];

   //s0

-  tmp_w[0] = w[0].s0;
-  tmp_w[1] = w[1].s0;
-  tmp_w[2] = w[2].s0;
-  tmp_w[3] = w[3].s0;
-
-  tmp_hash[0] = hash[0].s0;
-  tmp_hash[1] = hash[1].s0;
-  tmp_hash[2] = hash[2].s0;
-  tmp_hash[3] = hash[3].s0;
+  for (u32 i = 0; i < 64; i++) tmp_w[i] = w[i].s0;
+  for (u32 i = 0; i < 8; i++) tmp_h[i] = h[i].s0;

-  aes256_scrt_format(aes_ks, tmp_w, pw_len, tmp_hash, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);
+  aes256_scrt_format (aes_ks, tmp_w, pw_len, tmp_h, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);

-  out[0].s0 = tmp_out[0];
-  out[1].s0 = tmp_out[1];
-  out[2].s0 = tmp_out[2];
-  out[3].s0 = tmp_out[3];
+  for (u32 i = 0; i < 4; i++) out[i].s0 = tmp_out[i];

   //s1

-  tmp_w[0] = w[0].s1;
-  tmp_w[1] = w[1].s1;
-  tmp_w[2] = w[2].s1;
-  tmp_w[3] = w[3].s1;
-
-  tmp_hash[0] = hash[0].s1;
-  tmp_hash[1] = hash[1].s1;
-  tmp_hash[2] = hash[2].s1;
-  tmp_hash[3] = hash[3].s1;
+  for (u32 i = 0; i < 64; i++) tmp_w[i] = w[i].s1;
+  for (u32 i = 0; i < 8; i++) tmp_h[i] = h[i].s1;

-  aes256_scrt_format(aes_ks, tmp_w, pw_len, tmp_hash, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);
+  aes256_scrt_format (aes_ks, tmp_w, pw_len, tmp_h, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);

-  out[0].s1 = tmp_out[0];
-  out[1].s1 = tmp_out[1];
-  out[2].s1 = tmp_out[2];
-  out[3].s1 = tmp_out[3];
+  for (u32 i = 0; i < 4; i++) out[i].s1 = tmp_out[i];

   #endif

   #if VECT_SIZE >= 4

   //s2

-  tmp_w[0] = w[0].s2;
-  tmp_w[1] = w[1].s2;
-  tmp_w[2] = w[2].s2;
-  tmp_w[3] = w[3].s2;
-
-  tmp_hash[0] = hash[0].s2;
-  tmp_hash[1] = hash[1].s2;
-  tmp_hash[2] = hash[2].s2;
-  tmp_hash[3] = hash[3].s2;
+  for (u32 i = 0; i < 64; i++) tmp_w[i] = w[i].s2;
+  for (u32 i = 0; i < 8; i++) tmp_h[i] = h[i].s2;

-  aes256_scrt_format(aes_ks, tmp_w, pw_len, tmp_hash, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);
+  aes256_scrt_format (aes_ks, tmp_w, pw_len, tmp_h, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);

-  out[0].s2 = tmp_out[0];
-  out[1].s2 = tmp_out[1];
-  out[2].s2 = tmp_out[2];
-  out[3].s2 = tmp_out[3];
+  for (u32 i = 0; i < 4; i++) out[i].s2 = tmp_out[i];

   //s3

-  tmp_w[0] = w[0].s3;
-  tmp_w[1] = w[1].s3;
-  tmp_w[2] = w[2].s3;
-  tmp_w[3] = w[3].s3;
-
-  tmp_hash[0] = hash[0].s3;
-  tmp_hash[1] = hash[1].s3;
-  tmp_hash[2] = hash[2].s3;
-  tmp_hash[3] = hash[3].s3;
+  for (u32 i = 0; i < 64; i++) tmp_w[i] = w[i].s3;
+  for (u32 i = 0; i < 8; i++) tmp_h[i] = h[i].s3;

-  aes256_scrt_format(aes_ks, tmp_w, pw_len, tmp_hash, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);
+  aes256_scrt_format (aes_ks, tmp_w, pw_len, tmp_h, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);

-  out[0].s3 = tmp_out[0];
-  out[1].s3 = tmp_out[1];
-  out[2].s3 = tmp_out[2];
-  out[3].s3 = tmp_out[3];
+  for (u32 i = 0; i < 4; i++) out[i].s3 = tmp_out[i];

   #endif

   #if VECT_SIZE >= 8

   //s4

-  tmp_w[0] = w[0].s4;
-  tmp_w[1] = w[1].s4;
-  tmp_w[2] = w[2].s4;
-  tmp_w[3] = w[3].s4;
-
-  tmp_hash[0] = hash[0].s4;
-  tmp_hash[1] = hash[1].s4;
-  tmp_hash[2] = hash[2].s4;
-  tmp_hash[3] = hash[3].s4;
+  for (u32 i = 0; i < 64; i++) tmp_w[i] = w[i].s4;
+  for (u32 i = 0; i < 8; i++) tmp_h[i] = h[i].s4;

-  aes256_scrt_format(aes_ks, tmp_w, pw_len, tmp_hash, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);
+  aes256_scrt_format (aes_ks, tmp_w, pw_len, tmp_h, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);

-  out[0].s4 = tmp_out[0];
-  out[1].s4 = tmp_out[1];
-  out[2].s4 = tmp_out[2];
-  out[3].s4 = tmp_out[3];
+  for (u32 i = 0; i < 4; i++) out[i].s4 = tmp_out[i];

   //s5

-  tmp_w[0] = w[0].s5;
-  tmp_w[1] = w[1].s5;
-  tmp_w[2] = w[2].s5;
-  tmp_w[3] = w[3].s5;
-
-  tmp_hash[0] = hash[0].s5;
-  tmp_hash[1] = hash[1].s5;
-  tmp_hash[2] = hash[2].s5;
-  tmp_hash[3] = hash[3].s5;
+  for (u32 i = 0; i < 64; i++) tmp_w[i] = w[i].s5;
+  for (u32 i = 0; i < 8; i++) tmp_h[i] = h[i].s5;

-  aes256_scrt_format(aes_ks, tmp_w, pw_len, tmp_hash, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);
+  aes256_scrt_format (aes_ks, tmp_w, pw_len, tmp_h, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);

-  out[0].s5 = tmp_out[0];
-  out[1].s5 = tmp_out[1];
-  out[2].s5 = tmp_out[2];
-  out[3].s5 = tmp_out[3];
+  for (u32 i = 0; i < 4; i++) out[i].s5 = tmp_out[i];

   //s6

-  tmp_w[0] = w[0].s6;
-  tmp_w[1] = w[1].s6;
-  tmp_w[2] = w[2].s6;
-  tmp_w[3] = w[3].s6;
-
-  tmp_hash[0] = hash[0].s6;
-  tmp_hash[1] = hash[1].s6;
-  tmp_hash[2] = hash[2].s6;
-  tmp_hash[3] = hash[3].s6;
+  for (u32 i = 0; i < 64; i++) tmp_w[i] = w[i].s6;
+  for (u32 i = 0; i < 8; i++) tmp_h[i] = h[i].s6;

-  aes256_scrt_format(aes_ks, tmp_w, pw_len, tmp_hash, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);
+  aes256_scrt_format (aes_ks, tmp_w, pw_len, tmp_h, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);

-  out[0].s6 = tmp_out[0];
-  out[1].s6 = tmp_out[1];
-  out[2].s6 = tmp_out[2];
-  out[3].s6 = tmp_out[3];
+  for (u32 i = 0; i < 4; i++) out[i].s6 = tmp_out[i];

   //s7

-  tmp_w[0] = w[0].s7;
-  tmp_w[1] = w[1].s7;
-  tmp_w[2] = w[2].s7;
-  tmp_w[3] = w[3].s7;
-
-  tmp_hash[0] = hash[0].s7;
-  tmp_hash[1] = hash[1].s7;
-  tmp_hash[2] = hash[2].s7;
-  tmp_hash[3] = hash[3].s7;
+  for (u32 i = 0; i < 64; i++) tmp_w[i] = w[i].s7;
+  for (u32 i = 0; i < 8; i++) tmp_h[i] = h[i].s7;

-  aes256_scrt_format(aes_ks, tmp_w, pw_len, tmp_hash, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);
+  aes256_scrt_format (aes_ks, tmp_w, pw_len, tmp_h, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);

-  out[0].s7 = tmp_out[0];
-  out[1].s7 = tmp_out[1];
-  out[2].s7 = tmp_out[2];
-  out[3].s7 = tmp_out[3];
+  for (u32 i = 0; i < 4; i++) out[i].s7 = tmp_out[i];

   #endif

   #if VECT_SIZE >= 16

   //s8

-  tmp_w[0] = w[0].s8;
-  tmp_w[1] = w[1].s8;
-  tmp_w[2] = w[2].s8;
-  tmp_w[3] = w[3].s8;
-
-  tmp_hash[0] = hash[0].s8;
-  tmp_hash[1] = hash[1].s8;
-  tmp_hash[2] = hash[2].s8;
-  tmp_hash[3] = hash[3].s8;
+  for (u32 i = 0; i < 64; i++) tmp_w[i] = w[i].s8;
+  for (u32 i = 0; i < 8; i++) tmp_h[i] = h[i].s8;

-  aes256_scrt_format(aes_ks, tmp_w, pw_len, tmp_hash, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);
+  aes256_scrt_format (aes_ks, tmp_w, pw_len, tmp_h, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);

-  out[0].s8 = tmp_out[0];
-  out[1].s8 = tmp_out[1];
-  out[2].s8 = tmp_out[2];
-  out[3].s8 = tmp_out[3];
+  for (u32 i = 0; i < 4; i++) out[i].s8 = tmp_out[i];

   //s9

-  tmp_w[0] = w[0].s9;
-  tmp_w[1] = w[1].s9;
-  tmp_w[2] = w[2].s9;
-  tmp_w[3] = w[3].s9;
-
-  tmp_hash[0] = hash[0].s9;
-  tmp_hash[1] = hash[1].s9;
-  tmp_hash[2] = hash[2].s9;
-  tmp_hash[3] = hash[3].s9;
+  for (u32 i = 0; i < 64; i++) tmp_w[i] = w[i].s9;
+  for (u32 i = 0; i < 8; i++) tmp_h[i] = h[i].s9;

-  aes256_scrt_format(aes_ks, tmp_w, pw_len, tmp_hash, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);
+  aes256_scrt_format (aes_ks, tmp_w, pw_len, tmp_h, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);

-  out[0].s9 = tmp_out[0];
-  out[1].s9 = tmp_out[1];
-  out[2].s9 = tmp_out[2];
-  out[3].s9 = tmp_out[3];
+  for (u32 i = 0; i < 4; i++) out[i].s9 = tmp_out[i];

-  //s10
+  //sa

-  tmp_w[0] = w[0].sa;
-  tmp_w[1] = w[1].sa;
-  tmp_w[2] = w[2].sa;
-  tmp_w[3] = w[3].sa;
-
-  tmp_hash[0] = hash[0].sa;
-  tmp_hash[1] = hash[1].sa;
-  tmp_hash[2] = hash[2].sa;
-  tmp_hash[3] = hash[3].sa;
+  for (u32 i = 0; i < 64; i++) tmp_w[i] = w[i].sa;
+  for (u32 i = 0; i < 8; i++) tmp_h[i] = h[i].sa;

-  aes256_scrt_format(aes_ks, tmp_w, pw_len, tmp_hash, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);
+  aes256_scrt_format (aes_ks, tmp_w, pw_len, tmp_h, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);

-  out[0].sa = tmp_out[0];
-  out[1].sa = tmp_out[1];
-  out[2].sa = tmp_out[2];
-  out[3].sa = tmp_out[3];
+  for (u32 i = 0; i < 4; i++) out[i].sa = tmp_out[i];

-  //s11
+  //sb

-  tmp_w[0] = w[0].sb;
-  tmp_w[1] = w[1].sb;
-  tmp_w[2] = w[2].sb;
-  tmp_w[3] = w[3].sb;
-
-  tmp_hash[0] = hash[0].sb;
-  tmp_hash[1] = hash[1].sb;
-  tmp_hash[2] = hash[2].sb;
-  tmp_hash[3] = hash[3].sb;
+  for (u32 i = 0; i < 64; i++) tmp_w[i] = w[i].sb;
+  for (u32 i = 0; i < 8; i++) tmp_h[i] = h[i].sb;

-  aes256_scrt_format(aes_ks, tmp_w, pw_len, tmp_hash, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);
+  aes256_scrt_format (aes_ks, tmp_w, pw_len, tmp_h, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);

-  out[0].sb = tmp_out[0];
-  out[1].sb = tmp_out[1];
-  out[2].sb = tmp_out[2];
-  out[3].sb = tmp_out[3];
+  for (u32 i = 0; i < 4; i++) out[i].sb = tmp_out[i];

-  //s12
+  //sc

-  tmp_w[0] = w[0].sc;
-  tmp_w[1] = w[1].sc;
-  tmp_w[2] = w[2].sc;
-  tmp_w[3] = w[3].sc;
-
-  tmp_hash[0] = hash[0].sc;
-  tmp_hash[1] = hash[1].sc;
-  tmp_hash[2] = hash[2].sc;
-  tmp_hash[3] = hash[3].sc;
+  for (u32 i = 0; i < 64; i++) tmp_w[i] = w[i].sc;
+  for (u32 i = 0; i < 8; i++) tmp_h[i] = h[i].sc;

-  aes256_scrt_format(aes_ks, tmp_w, pw_len, tmp_hash, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);
+  aes256_scrt_format (aes_ks, tmp_w, pw_len, tmp_h, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);

-  out[0].sc = tmp_out[0];
-  out[1].sc = tmp_out[1];
-  out[2].sc = tmp_out[2];
-  out[3].sc = tmp_out[3];
+  for (u32 i = 0; i < 4; i++) out[i].sc = tmp_out[i];

-  //s13
+  //sd

-  tmp_w[0] = w[0].sd;
-  tmp_w[1] = w[1].sd;
-  tmp_w[2] = w[2].sd;
-  tmp_w[3] = w[3].sd;
-
-  tmp_hash[0] = hash[0].sd;
-  tmp_hash[1] = hash[1].sd;
-  tmp_hash[2] = hash[2].sd;
-  tmp_hash[3] = hash[3].sd;
+  for (u32 i = 0; i < 64; i++) tmp_w[i] = w[i].sd;
+  for (u32 i = 0; i < 8; i++) tmp_h[i] = h[i].sd;

-  aes256_scrt_format(aes_ks, tmp_w, pw_len, tmp_hash, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);
+  aes256_scrt_format (aes_ks, tmp_w, pw_len, tmp_h, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);

-  out[0].sd = tmp_out[0];
-  out[1].sd = tmp_out[1];
-  out[2].sd = tmp_out[2];
-  out[3].sd = tmp_out[3];
+  for (u32 i = 0; i < 4; i++) out[i].sd = tmp_out[i];

-  //s14
+  //se

-  tmp_w[0] = w[0].se;
-  tmp_w[1] = w[1].se;
-  tmp_w[2] = w[2].se;
-  tmp_w[3] = w[3].se;
-
-  tmp_hash[0] = hash[0].se;
-  tmp_hash[1] = hash[1].se;
-  tmp_hash[2] = hash[2].se;
-  tmp_hash[3] = hash[3].se;
+  for (u32 i = 0; i < 64; i++) tmp_w[i] = w[i].se;
+  for (u32 i = 0; i < 8; i++) tmp_h[i] = h[i].se;

-  aes256_scrt_format(aes_ks, tmp_w, pw_len, tmp_hash, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);
+  aes256_scrt_format (aes_ks, tmp_w, pw_len, tmp_h, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);

-  out[0].se = tmp_out[0];
-  out[1].se = tmp_out[1];
-  out[2].se = tmp_out[2];
-  out[3].se = tmp_out[3];
+  for (u32 i = 0; i < 4; i++) out[i].se = tmp_out[i];

-  //s15
+  //sf

-  tmp_w[0] = w[0].sf;
-  tmp_w[1] = w[1].sf;
-  tmp_w[2] = w[2].sf;
-  tmp_w[3] = w[3].sf;
-
-  tmp_hash[0] = hash[0].sf;
-  tmp_hash[1] = hash[1].sf;
-  tmp_hash[2] = hash[2].sf;
-  tmp_hash[3] = hash[3].sf;
+  for (u32 i = 0; i < 64; i++) tmp_w[i] = w[i].sf;
+  for (u32 i = 0; i < 8; i++) tmp_h[i] = h[i].sf;

-  aes256_scrt_format(aes_ks, tmp_w, pw_len, tmp_hash, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);
+  aes256_scrt_format (aes_ks, tmp_w, pw_len, tmp_h, tmp_out, s_te0, s_te1, s_te2, s_te3, s_te4);

-  out[0].sf = tmp_out[0];
-  out[1].sf = tmp_out[1];
-  out[2].sf = tmp_out[2];
-  out[3].sf = tmp_out[3];
+  for (u32 i = 0; i < 4; i++) out[i].sf = tmp_out[i];

   #endif
 }
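The rewrite above replaces the per-component copy blocks with gather/scatter loops over whole vector lanes: each lane of the u32x inputs is peeled into scalar temporaries (now sized tmp_w[64] and tmp_h[8], covering the full password block and the 8-word hash state instead of only four words), fed through the scalar aes256_scrt_format, and written back into the same lane of out. This per-lane round trip is needed because the scalar AES routine indexes lookup tables and cannot consume vector types directly. A minimal sketch of the same gather/scatter pattern in plain C; the 4-lane vector type and the transform are stand-ins for illustration, not the hashcat API:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef struct { u32 s[4]; } u32x4; /* stand-in for an OpenCL u32x with VECT_SIZE == 4 */

/* placeholder for a scalar-only routine such as aes256_scrt_format */
static void scalar_transform (const u32 *in, u32 *out, int n)
{
  for (int i = 0; i < n; i++) out[i] = in[i] ^ 0xdeadbeefu;
}

/* run the scalar routine once per lane: gather -> transform -> scatter */
static void vector_transform (const u32x4 *in, u32x4 *out, int n)
{
  u32 tmp_in[64];
  u32 tmp_out[64];

  for (int lane = 0; lane < 4; lane++)
  {
    for (int i = 0; i < n; i++) tmp_in[i] = in[i].s[lane];    /* gather one lane  */

    scalar_transform (tmp_in, tmp_out, n);

    for (int i = 0; i < n; i++) out[i].s[lane] = tmp_out[i];  /* scatter it back */
  }
}

int main (void)
{
  u32x4 in[2] = { { { 1, 2, 3, 4 } }, { { 5, 6, 7, 8 } } };
  u32x4 out[2];

  vector_transform (in, out, 2);

  printf ("%08x\n", out[0].s[0]); /* deadbeee */

  return 0;
}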
@@ -502,8 +341,7 @@ KERNEL_FQ void m31400_mxx (KERN_ATTR_VECTOR())

   #endif

-  if (gid >= GID_CNT)
-    return;
+  if (gid >= GID_CNT) return;

   /**
    * base
@@ -518,8 +356,6 @@ KERNEL_FQ void m31400_mxx (KERN_ATTR_VECTOR())
     w[idx] = pws[gid].i[idx];
   }

-  u32 aes_ks[60];
-
   /**
    * loop
    */
@@ -544,6 +380,8 @@ KERNEL_FQ void m31400_mxx (KERN_ATTR_VECTOR())

     u32x out[4] = {0};

+    u32 aes_ks[60];
+
     aes256_scrt_format_VV (aes_ks, w, pw_len, ctx.h, out, s_te0, s_te1, s_te2, s_te3, s_te4);

     const u32x r0 = out[DGST_R0];
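Alongside the kernel cleanups, the u32 aes_ks[60] key-schedule scratch buffer moves from function scope into the per-candidate loop (the same move appears again in m31400_sxx below). The schedule is rebuilt for every candidate by the call it feeds, so narrowing the declaration is behavior-preserving and limits the array's live range to a single iteration. A small self-contained sketch of that scope change in plain C; the names and the key-expansion stand-in are illustrative, not the hashcat implementation:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* placeholder for AES-256 key expansion into a 60-word schedule */
static void build_ks (u32 *ks, u32 seed)
{
  for (int i = 0; i < 60; i++) ks[i] = seed * 0x9e3779b9u + (u32) i;
}

int main (void)
{
  u32 acc = 0;

  for (u32 il_pos = 0; il_pos < 4; il_pos++)
  {
    u32 aes_ks[60]; /* was function-scope; now lives only inside one iteration */

    build_ks (aes_ks, il_pos);

    acc ^= aes_ks[59];
  }

  printf ("%08x\n", acc);

  return 0;
}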
@@ -598,8 +436,8 @@ KERNEL_FQ void m31400_sxx (KERN_ATTR_VECTOR())

   #endif

-  if (gid >= GID_CNT)
-    return;
+  if (gid >= GID_CNT) return;

   /**
    * digest
    */
@@ -609,7 +447,8 @@ KERNEL_FQ void m31400_sxx (KERN_ATTR_VECTOR())
     digests_buf[DIGESTS_OFFSET_HOST].digest_buf[DGST_R0],
     digests_buf[DIGESTS_OFFSET_HOST].digest_buf[DGST_R1],
     digests_buf[DIGESTS_OFFSET_HOST].digest_buf[DGST_R2],
-    digests_buf[DIGESTS_OFFSET_HOST].digest_buf[DGST_R3]};
+    digests_buf[DIGESTS_OFFSET_HOST].digest_buf[DGST_R3]
+  };

   /**
    * base
@@ -628,7 +467,6 @@ KERNEL_FQ void m31400_sxx (KERN_ATTR_VECTOR())
    * loop
    */

-  u32 aes_ks[60];
-
   u32x w0l = w[0];

   for (u32 il_pos = 0; il_pos < IL_CNT; il_pos += VECT_SIZE)
@@ -649,6 +487,8 @@ KERNEL_FQ void m31400_sxx (KERN_ATTR_VECTOR())

     u32x out[4] = {0};

+    u32 aes_ks[60];
+
     aes256_scrt_format_VV (aes_ks, w, pw_len, ctx.h, out, s_te0, s_te1, s_te2, s_te3, s_te4);

     const u32x r0 = out[DGST_R0];