
Faster PBKDF2 by reusing intermediate results.

The old implementation needed 6 SHA-512 transformations per iteration:

- 2 for computing the SHA-512 of the seed,
- 2 for computing the digests of the ipad/opad blocks,
- 2 for computing the digests of the intermediate hashes.

The first four transformations are the same in every iteration, so we cache
them. A new function, hmac_sha512_prepare, computes these digests.
We make sha512_Transform visible to pbkdf2 and avoid unnecessary
big/little-endian conversions back and forth.
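
For illustration, a minimal sketch (hypothetical helper name; it assumes the idig/odig/g/f fields added to pbkdf2.h in this commit) of the per-iteration work that remains: two sha512_Transform calls plus the XOR accumulation.

#include "sha2.h"

/* Sketch, not the exact library code: one PBKDF2-HMAC-SHA512 iteration once
 * hmac_sha512_prepare() has cached the ipad/opad states in idig/odig.
 * g holds U_{i-1} in words 0..7, with the HMAC padding pre-filled in words
 * 8..15; f accumulates F = U_1 ^ U_2 ^ ... */
static void pbkdf2_one_iteration(const uint64_t idig[8], const uint64_t odig[8],
                                 uint64_t g[16], uint64_t f[8])
{
	sha512_Transform(idig, g, g);   /* inner hash: one transform */
	sha512_Transform(odig, g, g);   /* outer hash: one transform */
	for (int j = 0; j < 8; j++) {
		f[j] ^= g[j];
	}
}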
Jochen Hoenicke 2016-07-12 13:43:13 +02:00
parent 4912ac550c
commit 32bda8d1d9
6 changed files with 340 additions and 252 deletions

hmac.c

@ -69,6 +69,34 @@ void hmac_sha256(const uint8_t *key, const uint32_t keylen, const uint8_t *msg,
hmac_sha256_Final(&hctx, hmac);
}
void hmac_sha256_prepare(const uint8_t *key, const uint32_t keylen, uint32_t *opad_digest, uint32_t *ipad_digest)
{
int i;
uint32_t buf[SHA256_BLOCK_LENGTH/sizeof(uint32_t)];
uint32_t o_key_pad[16], i_key_pad[16];
memset(buf, 0, SHA256_BLOCK_LENGTH);
if (keylen > SHA256_BLOCK_LENGTH) {
sha256_Raw(key, keylen, (uint8_t*) buf);
} else {
memcpy(buf, key, keylen);
}
for (i = 0; i < 16; i++) {
uint32_t data;
#if BYTE_ORDER == LITTLE_ENDIAN
REVERSE32(buf[i], data);
#else
data = buf[i];
#endif
o_key_pad[i] = data ^ 0x5c5c5c5c;
i_key_pad[i] = data ^ 0x36363636;
}
sha256_Transform(sha256_initial_hash_value, o_key_pad, opad_digest);
sha256_Transform(sha256_initial_hash_value, i_key_pad, ipad_digest);
}
void hmac_sha512_Init(HMAC_SHA512_CTX *hctx, const uint8_t *key, const uint32_t keylen)
{
uint8_t i_key_pad[SHA512_BLOCK_LENGTH];
@ -111,3 +139,31 @@ void hmac_sha512(const uint8_t *key, const uint32_t keylen, const uint8_t *msg,
hmac_sha512_Update(&hctx, msg, msglen);
hmac_sha512_Final(&hctx, hmac);
}
void hmac_sha512_prepare(const uint8_t *key, const uint32_t keylen, uint64_t *opad_digest, uint64_t *ipad_digest)
{
int i;
uint64_t buf[SHA512_BLOCK_LENGTH/sizeof(uint64_t)];
uint64_t o_key_pad[16], i_key_pad[16];
memset(buf, 0, SHA512_BLOCK_LENGTH);
if (keylen > SHA512_BLOCK_LENGTH) {
sha512_Raw(key, keylen, (uint8_t*)buf);
} else {
memcpy(buf, key, keylen);
}
for (i = 0; i < 16; i++) {
uint64_t data;
#if BYTE_ORDER == LITTLE_ENDIAN
REVERSE64(buf[i], data);
#else
data = buf[i];
#endif
o_key_pad[i] = data ^ 0x5c5c5c5c5c5c5c5c;
i_key_pad[i] = data ^ 0x3636363636363636;
}
sha512_Transform(sha512_initial_hash_value, o_key_pad, opad_digest);
sha512_Transform(sha512_initial_hash_value, i_key_pad, ipad_digest);
}
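
As a hedged usage sketch of hmac_sha512_prepare (the helper name and variables are hypothetical, not part of this commit): with the cached digests, a fixed-key HMAC-SHA512 of one 64-byte, host-order message block takes just two transforms; the padded block is built by hand exactly as pbkdf2.c does below.

#include <string.h>
#include "sha2.h"

/* Hypothetical example: HMAC-SHA512 of a 64-byte message (8 host-order words)
 * using states cached by hmac_sha512_prepare(). The result is the 8-word HMAC
 * state, still in host byte order. */
static void hmac_sha512_cached(const uint64_t idig[8], const uint64_t odig[8],
                               const uint64_t msg[8], uint64_t mac[8])
{
	uint64_t w[SHA512_BLOCK_LENGTH / sizeof(uint64_t)] = {0};
	memcpy(w, msg, SHA512_DIGEST_LENGTH);
	w[8]  = 0x8000000000000000;                                /* padding: leading 1 bit */
	w[15] = (SHA512_BLOCK_LENGTH + SHA512_DIGEST_LENGTH) * 8;  /* message length in bits */
	sha512_Transform(idig, w, w);   /* inner hash; the key block is already absorbed */
	sha512_Transform(odig, w, w);   /* outer hash */
	memcpy(mac, w, SHA512_DIGEST_LENGTH);
}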

hmac.h

@ -41,10 +41,12 @@ void hmac_sha256_Init(HMAC_SHA256_CTX *hctx, const uint8_t *key, const uint32_t
void hmac_sha256_Update(HMAC_SHA256_CTX *hctx, const uint8_t *msg, const uint32_t msglen);
void hmac_sha256_Final(HMAC_SHA256_CTX *hctx, uint8_t *hmac);
void hmac_sha256(const uint8_t *key, const uint32_t keylen, const uint8_t *msg, const uint32_t msglen, uint8_t *hmac);
void hmac_sha256_prepare(const uint8_t *key, const uint32_t keylen, uint32_t *opad_digest, uint32_t *ipad_digest);
void hmac_sha512_Init(HMAC_SHA512_CTX *hctx, const uint8_t *key, const uint32_t keylen);
void hmac_sha512_Update(HMAC_SHA512_CTX *hctx, const uint8_t *msg, const uint32_t msglen);
void hmac_sha512_Final(HMAC_SHA512_CTX *hctx, uint8_t *hmac);
void hmac_sha512(const uint8_t *key, const uint32_t keylen, const uint8_t *msg, const uint32_t msglen, uint8_t *hmac);
void hmac_sha512_prepare(const uint8_t *key, const uint32_t keylen, uint64_t *opad_digest, uint64_t *ipad_digest);
#endif

pbkdf2.c

@ -24,26 +24,43 @@
#include <string.h>
#include "pbkdf2.h"
#include "hmac.h"
#include "sha2.h"
#include "macros.h"
void pbkdf2_hmac_sha256_Init(PBKDF2_HMAC_SHA256_CTX *pctx, const uint8_t *pass, int passlen, const uint8_t *salt, int saltlen)
{
HMAC_SHA256_CTX hctx;
hmac_sha256_Init(&hctx, pass, passlen);
hmac_sha256_Update(&hctx, salt, saltlen);
hmac_sha256_Update(&hctx, (const uint8_t *)"\x00\x00\x00\x01", 4);
hmac_sha256_Final(&hctx, pctx->g);
SHA256_CTX ctx;
uint32_t blocknr = 1;
#if BYTE_ORDER == LITTLE_ENDIAN
REVERSE32(blocknr, blocknr);
#endif
hmac_sha256_prepare(pass, passlen, pctx->odig, pctx->idig);
memset(pctx->g, 0, sizeof(pctx->g));
pctx->g[8] = 0x80000000;
pctx->g[15] = (SHA256_BLOCK_LENGTH + SHA256_DIGEST_LENGTH) * 8;
memcpy (ctx.state, pctx->idig, sizeof(pctx->idig));
ctx.bitcount = SHA256_BLOCK_LENGTH * 8;
sha256_Update(&ctx, salt, saltlen);
sha256_Update(&ctx, (uint8_t*)&blocknr, sizeof(blocknr));
sha256_Final(&ctx, (uint8_t*)pctx->g);
#if BYTE_ORDER == LITTLE_ENDIAN
for (uint32_t k = 0; k < SHA256_DIGEST_LENGTH / sizeof(uint32_t); k++) {
REVERSE32(pctx->g[k], pctx->g[k]);
}
#endif
sha256_Transform(pctx->odig, pctx->g, pctx->g);
memcpy(pctx->f, pctx->g, SHA256_DIGEST_LENGTH);
pctx->pass = pass;
pctx->passlen = passlen;
pctx->first = 1;
}
void pbkdf2_hmac_sha256_Update(PBKDF2_HMAC_SHA256_CTX *pctx, uint32_t iterations)
{
for (uint32_t i = pctx->first; i < iterations; i++) {
hmac_sha256(pctx->pass, pctx->passlen, pctx->g, SHA256_DIGEST_LENGTH, pctx->g);
for (uint32_t j = 0; j < SHA256_DIGEST_LENGTH; j++) {
sha256_Transform(pctx->idig, pctx->g, pctx->g);
sha256_Transform(pctx->odig, pctx->g, pctx->g);
for (uint32_t j = 0; j < SHA256_DIGEST_LENGTH/sizeof(uint32_t); j++) {
pctx->f[j] ^= pctx->g[j];
}
}
@ -52,6 +69,11 @@ void pbkdf2_hmac_sha256_Update(PBKDF2_HMAC_SHA256_CTX *pctx, uint32_t iterations
void pbkdf2_hmac_sha256_Final(PBKDF2_HMAC_SHA256_CTX *pctx, uint8_t *key)
{
#if BYTE_ORDER == LITTLE_ENDIAN
for (uint32_t k = 0; k < SHA256_DIGEST_LENGTH/sizeof(uint32_t); k++) {
REVERSE32(pctx->f[k], pctx->f[k]);
}
#endif
memcpy(key, pctx->f, SHA256_DIGEST_LENGTH);
MEMSET_BZERO(pctx, sizeof(PBKDF2_HMAC_SHA256_CTX));
}
@ -66,22 +88,39 @@ void pbkdf2_hmac_sha256(const uint8_t *pass, int passlen, const uint8_t *salt, i
void pbkdf2_hmac_sha512_Init(PBKDF2_HMAC_SHA512_CTX *pctx, const uint8_t *pass, int passlen, const uint8_t *salt, int saltlen)
{
HMAC_SHA512_CTX hctx;
hmac_sha512_Init(&hctx, pass, passlen);
hmac_sha512_Update(&hctx, salt, saltlen);
hmac_sha512_Update(&hctx, (const uint8_t *)"\x00\x00\x00\x01", 4);
hmac_sha512_Final(&hctx, pctx->g);
SHA512_CTX ctx;
uint32_t blocknr = 1;
#if BYTE_ORDER == LITTLE_ENDIAN
REVERSE32(blocknr, blocknr);
#endif
hmac_sha512_prepare(pass, passlen, pctx->odig, pctx->idig);
memset(pctx->g, 0, sizeof(pctx->g));
pctx->g[8] = 0x8000000000000000;
pctx->g[15] = (SHA512_BLOCK_LENGTH + SHA512_DIGEST_LENGTH) * 8;
memcpy (ctx.state, pctx->idig, sizeof(pctx->idig));
ctx.bitcount[0] = SHA512_BLOCK_LENGTH * 8;
ctx.bitcount[1] = 0;
sha512_Update(&ctx, salt, saltlen);
sha512_Update(&ctx, (uint8_t*)&blocknr, sizeof(blocknr));
sha512_Final(&ctx, (uint8_t*)pctx->g);
#if BYTE_ORDER == LITTLE_ENDIAN
for (uint32_t k = 0; k < SHA512_DIGEST_LENGTH / sizeof(uint64_t); k++) {
REVERSE64(pctx->g[k], pctx->g[k]);
}
#endif
sha512_Transform(pctx->odig, pctx->g, pctx->g);
memcpy(pctx->f, pctx->g, SHA512_DIGEST_LENGTH);
pctx->pass = pass;
pctx->passlen = passlen;
pctx->first = 1;
}
void pbkdf2_hmac_sha512_Update(PBKDF2_HMAC_SHA512_CTX *pctx, uint32_t iterations)
{
for (uint32_t i = pctx->first; i < iterations; i++) {
hmac_sha512(pctx->pass, pctx->passlen, pctx->g, SHA512_DIGEST_LENGTH, pctx->g);
for (uint32_t j = 0; j < SHA512_DIGEST_LENGTH; j++) {
sha512_Transform(pctx->idig, pctx->g, pctx->g);
sha512_Transform(pctx->odig, pctx->g, pctx->g);
for (uint32_t j = 0; j < SHA512_DIGEST_LENGTH / sizeof(uint64_t); j++) {
pctx->f[j] ^= pctx->g[j];
}
}
@ -90,6 +129,11 @@ void pbkdf2_hmac_sha512_Update(PBKDF2_HMAC_SHA512_CTX *pctx, uint32_t iterations
void pbkdf2_hmac_sha512_Final(PBKDF2_HMAC_SHA512_CTX *pctx, uint8_t *key)
{
#if BYTE_ORDER == LITTLE_ENDIAN
for (uint32_t k = 0; k < SHA512_DIGEST_LENGTH/sizeof(uint64_t); k++) {
REVERSE64(pctx->f[k], pctx->f[k]);
}
#endif
memcpy(key, pctx->f, SHA512_DIGEST_LENGTH);
MEMSET_BZERO(pctx, sizeof(PBKDF2_HMAC_SHA512_CTX));
}

pbkdf2.h

@ -28,18 +28,18 @@
#include "sha2.h"
typedef struct _PBKDF2_HMAC_SHA256_CTX {
uint8_t f[SHA256_DIGEST_LENGTH];
uint8_t g[SHA256_DIGEST_LENGTH];
const uint8_t *pass;
int passlen;
uint32_t odig[SHA256_DIGEST_LENGTH / sizeof(uint32_t)];
uint32_t idig[SHA256_DIGEST_LENGTH / sizeof(uint32_t)];
uint32_t f[SHA256_DIGEST_LENGTH / sizeof(uint32_t)];
uint32_t g[SHA256_BLOCK_LENGTH / sizeof(uint32_t)];
char first;
} PBKDF2_HMAC_SHA256_CTX;
typedef struct _PBKDF2_HMAC_SHA512_CTX {
uint8_t f[SHA512_DIGEST_LENGTH];
uint8_t g[SHA512_DIGEST_LENGTH];
const uint8_t *pass;
int passlen;
uint64_t odig[SHA512_DIGEST_LENGTH / sizeof(uint64_t)];
uint64_t idig[SHA512_DIGEST_LENGTH / sizeof(uint64_t)];
uint64_t f[SHA512_DIGEST_LENGTH / sizeof(uint64_t)];
uint64_t g[SHA512_BLOCK_LENGTH / sizeof(uint64_t)];
char first;
} PBKDF2_HMAC_SHA512_CTX;
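
A hedged usage sketch of the incremental PBKDF2-HMAC-SHA512 API (the password, salt, and 2048-iteration count are illustrative, in the style of BIP-39 seed stretching):

#include <string.h>
#include "pbkdf2.h"

/* Illustrative only: derive a 64-byte key. The iteration count may also be
 * split across several Update calls, e.g. to report progress. */
static void derive_key_example(uint8_t key[SHA512_DIGEST_LENGTH])
{
	const char *pass = "password";
	const char *salt = "salt";
	PBKDF2_HMAC_SHA512_CTX pctx;
	pbkdf2_hmac_sha512_Init(&pctx, (const uint8_t *)pass, (int)strlen(pass),
	                        (const uint8_t *)salt, (int)strlen(salt));
	pbkdf2_hmac_sha512_Update(&pctx, 2048);
	pbkdf2_hmac_sha512_Final(&pctx, key);   /* copies the key and wipes the context */
}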

sha2.c

@ -82,15 +82,6 @@
* made).
*/
#ifndef LITTLE_ENDIAN
#define LITTLE_ENDIAN 1234
#define BIG_ENDIAN 4321
#endif
#ifndef BYTE_ORDER
#define BYTE_ORDER LITTLE_ENDIAN
#endif
#if !defined(BYTE_ORDER) || (BYTE_ORDER != LITTLE_ENDIAN && BYTE_ORDER != BIG_ENDIAN)
#error Define BYTE_ORDER to be equal to either LITTLE_ENDIAN or BIG_ENDIAN
#endif
@ -104,24 +95,6 @@ typedef uint64_t sha2_word64; /* Exactly 8 bytes */
#define SHA256_SHORT_BLOCK_LENGTH (SHA256_BLOCK_LENGTH - 8)
#define SHA512_SHORT_BLOCK_LENGTH (SHA512_BLOCK_LENGTH - 16)
/*** ENDIAN REVERSAL MACROS *******************************************/
#if BYTE_ORDER == LITTLE_ENDIAN
#define REVERSE32(w,x) { \
sha2_word32 tmp = (w); \
tmp = (tmp >> 16) | (tmp << 16); \
(x) = ((tmp & 0xff00ff00UL) >> 8) | ((tmp & 0x00ff00ffUL) << 8); \
}
#define REVERSE64(w,x) { \
sha2_word64 tmp = (w); \
tmp = (tmp >> 32) | (tmp << 32); \
tmp = ((tmp & 0xff00ff00ff00ff00ULL) >> 8) | \
((tmp & 0x00ff00ff00ff00ffULL) << 8); \
(x) = ((tmp & 0xffff0000ffff0000ULL) >> 16) | \
((tmp & 0x0000ffff0000ffffULL) << 16); \
}
#endif /* BYTE_ORDER == LITTLE_ENDIAN */
/*
* Macro for incrementally adding the unsigned 64-bit integer n to the
* unsigned 128-bit integer (represented using a two-element array of
@ -174,9 +147,7 @@ typedef uint64_t sha2_word64; /* Exactly 8 bytes */
* library -- they are intended for private internal visibility/use
* only.
*/
void sha512_Last(SHA512_CTX*);
void sha256_Transform(SHA256_CTX*, const sha2_word32*);
void sha512_Transform(SHA512_CTX*, const sha2_word64*);
static void sha512_Last(SHA512_CTX*);
/*** SHA-XYZ INITIAL HASH VALUES AND CONSTANTS ************************/
@ -201,7 +172,7 @@ static const sha2_word32 K256[64] = {
};
/* Initial hash value H for SHA-256: */
static const sha2_word32 sha256_initial_hash_value[8] = {
const sha2_word32 sha256_initial_hash_value[8] = {
0x6a09e667UL,
0xbb67ae85UL,
0x3c6ef372UL,
@ -257,7 +228,7 @@ static const sha2_word64 K512[80] = {
};
/* Initial hash value H for SHA-512 */
static const sha2_word64 sha512_initial_hash_value[8] = {
const sha2_word64 sha512_initial_hash_value[8] = {
0x6a09e667f3bcc908ULL,
0xbb67ae8584caa73bULL,
0x3c6ef372fe94f82bULL,
@ -292,7 +263,7 @@ void sha256_Init(SHA256_CTX* context) {
#if BYTE_ORDER == LITTLE_ENDIAN
#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) \
REVERSE32(*data++, W256[j]); \
W256[j] = *data++; \
T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \
K256[j] + W256[j]; \
(d) += T1; \
@ -322,22 +293,21 @@ void sha256_Init(SHA256_CTX* context) {
(h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
j++
void sha256_Transform(SHA256_CTX* context, const sha2_word32* data) {
void sha256_Transform(const sha2_word32* state_in, const sha2_word32* data, sha2_word32* state_out) {
sha2_word32 a, b, c, d, e, f, g, h, s0, s1;
sha2_word32 T1, *W256;
sha2_word32 T1;
sha2_word32 W256[16];
int j;
W256 = (sha2_word32*)context->buffer;
/* Initialize registers with the prev. intermediate value */
a = context->state[0];
b = context->state[1];
c = context->state[2];
d = context->state[3];
e = context->state[4];
f = context->state[5];
g = context->state[6];
h = context->state[7];
a = state_in[0];
b = state_in[1];
c = state_in[2];
d = state_in[3];
e = state_in[4];
f = state_in[5];
g = state_in[6];
h = state_in[7];
j = 0;
do {
@ -365,14 +335,14 @@ void sha256_Transform(SHA256_CTX* context, const sha2_word32* data) {
} while (j < 64);
/* Compute the current intermediate hash value */
context->state[0] += a;
context->state[1] += b;
context->state[2] += c;
context->state[3] += d;
context->state[4] += e;
context->state[5] += f;
context->state[6] += g;
context->state[7] += h;
state_out[0] = state_in[0] + a;
state_out[1] = state_in[1] + b;
state_out[2] = state_in[2] + c;
state_out[3] = state_in[3] + d;
state_out[4] = state_in[4] + e;
state_out[5] = state_in[5] + f;
state_out[6] = state_in[6] + g;
state_out[7] = state_in[7] + h;
/* Clean up */
a = b = c = d = e = f = g = h = T1 = 0;
@ -380,34 +350,25 @@ void sha256_Transform(SHA256_CTX* context, const sha2_word32* data) {
#else /* SHA2_UNROLL_TRANSFORM */
void sha256_Transform(SHA256_CTX* context, const sha2_word32* data) {
void sha256_Transform(const sha2_word32* state_in, const sha2_word32* data, sha2_word32* state_out) {
sha2_word32 a, b, c, d, e, f, g, h, s0, s1;
sha2_word32 T1, T2, *W256;
sha2_word32 T1, T2, W256[16];
int j;
W256 = (sha2_word32*)(void*)context->buffer;
/* Initialize registers with the prev. intermediate value */
a = context->state[0];
b = context->state[1];
c = context->state[2];
d = context->state[3];
e = context->state[4];
f = context->state[5];
g = context->state[6];
h = context->state[7];
a = state_in[0];
b = state_in[1];
c = state_in[2];
d = state_in[3];
e = state_in[4];
f = state_in[5];
g = state_in[6];
h = state_in[7];
j = 0;
do {
#if BYTE_ORDER == LITTLE_ENDIAN
/* Copy data while converting to host byte order */
REVERSE32(*data++,W256[j]);
/* Apply the SHA-256 compression function to update a..h */
T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + W256[j];
#else /* BYTE_ORDER == LITTLE_ENDIAN */
/* Apply the SHA-256 compression function to update a..h with copy */
T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + (W256[j] = *data++);
#endif /* BYTE_ORDER == LITTLE_ENDIAN */
T2 = Sigma0_256(a) + Maj(a, b, c);
h = g;
g = f;
@ -445,14 +406,14 @@ void sha256_Transform(SHA256_CTX* context, const sha2_word32* data) {
} while (j < 64);
/* Compute the current intermediate hash value */
context->state[0] += a;
context->state[1] += b;
context->state[2] += c;
context->state[3] += d;
context->state[4] += e;
context->state[5] += f;
context->state[6] += g;
context->state[7] += h;
state_out[0] = state_in[0] + a;
state_out[1] = state_in[1] + b;
state_out[2] = state_in[2] + c;
state_out[3] = state_in[3] + d;
state_out[4] = state_in[4] + e;
state_out[5] = state_in[5] + f;
state_out[6] = state_in[6] + g;
state_out[7] = state_in[7] + h;
/* Clean up */
a = b = c = d = e = f = g = h = T1 = T2 = 0;
@ -475,14 +436,20 @@ void sha256_Update(SHA256_CTX* context, const sha2_byte *data, size_t len) {
if (len >= freespace) {
/* Fill the buffer completely and process it */
MEMCPY_BCOPY(&context->buffer[usedspace], data, freespace);
MEMCPY_BCOPY(((uint8_t*)context->buffer) + usedspace, data, freespace);
context->bitcount += freespace << 3;
len -= freespace;
data += freespace;
sha256_Transform(context, (sha2_word32*)(void*)context->buffer);
#if BYTE_ORDER == LITTLE_ENDIAN
/* Convert TO host byte order */
for (int j = 0; j < 16; j++) {
REVERSE32(context->buffer[j],context->buffer[j]);
}
#endif
sha256_Transform(context->state, context->buffer, context->state);
} else {
/* The buffer is not yet full */
MEMCPY_BCOPY(&context->buffer[usedspace], data, len);
MEMCPY_BCOPY(((uint8_t*)context->buffer) + usedspace, data, len);
context->bitcount += len << 3;
/* Clean up: */
usedspace = freespace = 0;
@ -491,7 +458,14 @@ void sha256_Update(SHA256_CTX* context, const sha2_byte *data, size_t len) {
}
while (len >= SHA256_BLOCK_LENGTH) {
/* Process as many complete blocks as we can */
sha256_Transform(context, (sha2_word32*)(void*)data);
MEMCPY_BCOPY(context->buffer, data, SHA256_BLOCK_LENGTH);
#if BYTE_ORDER == LITTLE_ENDIAN
/* Convert TO host byte order */
for (int j = 0; j < 16; j++) {
REVERSE32(context->buffer[j],context->buffer[j]);
}
#endif
sha256_Transform(context->state, context->buffer, context->state);
context->bitcount += SHA256_BLOCK_LENGTH << 3;
len -= SHA256_BLOCK_LENGTH;
data += SHA256_BLOCK_LENGTH;
@ -512,53 +486,49 @@ void sha256_Final(SHA256_CTX* context, sha2_byte digest[]) {
/* If no digest buffer is passed, we don't bother doing this: */
if (digest != (sha2_byte*)0) {
usedspace = (context->bitcount >> 3) % SHA256_BLOCK_LENGTH;
#if BYTE_ORDER == LITTLE_ENDIAN
/* Convert FROM host byte order */
REVERSE64(context->bitcount,context->bitcount);
#endif
if (usedspace > 0) {
/* Begin padding with a 1 bit: */
context->buffer[usedspace++] = 0x80;
if (usedspace <= SHA256_SHORT_BLOCK_LENGTH) {
/* Set-up for the last transform: */
MEMSET_BZERO(&context->buffer[usedspace], SHA256_SHORT_BLOCK_LENGTH - usedspace);
} else {
if (usedspace < SHA256_BLOCK_LENGTH) {
MEMSET_BZERO(&context->buffer[usedspace], SHA256_BLOCK_LENGTH - usedspace);
}
/* Do second-to-last transform: */
sha256_Transform(context, (sha2_word32*)(void*)context->buffer);
/* And set-up for the last transform: */
MEMSET_BZERO(context->buffer, SHA256_SHORT_BLOCK_LENGTH);
}
} else {
/* Begin padding with a 1 bit: */
((uint8_t*)context->buffer)[usedspace++] = 0x80;
if (usedspace <= SHA256_SHORT_BLOCK_LENGTH) {
/* Set-up for the last transform: */
MEMSET_BZERO(((uint8_t*)context->buffer) + usedspace, SHA256_SHORT_BLOCK_LENGTH - usedspace);
} else {
if (usedspace < SHA256_BLOCK_LENGTH) {
MEMSET_BZERO(((uint8_t*)context->buffer) + usedspace, SHA256_BLOCK_LENGTH - usedspace);
}
#if BYTE_ORDER == LITTLE_ENDIAN
/* Convert TO host byte order */
for (int j = 0; j < 16; j++) {
REVERSE32(context->buffer[j],context->buffer[j]);
}
#endif
/* Do second-to-last transform: */
sha256_Transform(context->state, context->buffer, context->state);
/* And set-up for the last transform: */
MEMSET_BZERO(context->buffer, SHA256_SHORT_BLOCK_LENGTH);
/* Begin padding with a 1 bit: */
*context->buffer = 0x80;
}
#if BYTE_ORDER == LITTLE_ENDIAN
/* Convert TO host byte order */
for (int j = 0; j < 14; j++) {
REVERSE32(context->buffer[j],context->buffer[j]);
}
#endif
/* Set the bit count: */
sha2_word64 *t = (sha2_word64 *)(void*)&context->buffer[SHA256_SHORT_BLOCK_LENGTH];
*t = context->bitcount;
context->buffer[14] = context->bitcount >> 32;
context->buffer[15] = context->bitcount & 0xffffffff;
/* Final transform: */
sha256_Transform(context, (sha2_word32*)(void*)context->buffer);
sha256_Transform(context->state, context->buffer, context->state);
#if BYTE_ORDER == LITTLE_ENDIAN
{
/* Convert TO host byte order */
int j;
for (j = 0; j < 8; j++) {
REVERSE32(context->state[j],context->state[j]);
*d++ = context->state[j];
}
/* Convert FROM host byte order */
for (int j = 0; j < 8; j++) {
REVERSE32(context->state[j],context->state[j]);
}
#else
MEMCPY_BCOPY(d, context->state, SHA256_DIGEST_LENGTH);
#endif
MEMCPY_BCOPY(d, context->state, SHA256_DIGEST_LENGTH);
}
/* Clean up state data: */
@ -615,19 +585,6 @@ void sha512_Init(SHA512_CTX* context) {
#ifdef SHA2_UNROLL_TRANSFORM
/* Unrolled SHA-512 round macros: */
#if BYTE_ORDER == LITTLE_ENDIAN
#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
REVERSE64(*data++, W512[j]); \
T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \
K512[j] + W512[j]; \
(d) += T1, \
(h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)), \
j++
#else /* BYTE_ORDER == LITTLE_ENDIAN */
#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \
K512[j] + (W512[j] = *data++); \
@ -635,8 +592,6 @@ void sha512_Init(SHA512_CTX* context) {
(h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
j++
#endif /* BYTE_ORDER == LITTLE_ENDIAN */
#define ROUND512(a,b,c,d,e,f,g,h) \
s0 = W512[(j+1)&0x0f]; \
s0 = sigma0_512(s0); \
@ -648,20 +603,20 @@ void sha512_Init(SHA512_CTX* context) {
(h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
j++
void sha512_Transform(SHA512_CTX* context, const sha2_word64* data) {
void sha512_Transform(const sha2_word64* state_in, const sha2_word64* data, sha2_word64* state_out) {
sha2_word64 a, b, c, d, e, f, g, h, s0, s1;
sha2_word64 T1, *W512 = (sha2_word64*)context->buffer;
sha2_word64 T1, W512[16];
int j;
/* Initialize registers with the prev. intermediate value */
a = context->state[0];
b = context->state[1];
c = context->state[2];
d = context->state[3];
e = context->state[4];
f = context->state[5];
g = context->state[6];
h = context->state[7];
a = state_in[0];
b = state_in[1];
c = state_in[2];
d = state_in[3];
e = state_in[4];
f = state_in[5];
g = state_in[6];
h = state_in[7];
j = 0;
do {
@ -688,14 +643,14 @@ void sha512_Transform(SHA512_CTX* context, const sha2_word64* data) {
} while (j < 80);
/* Compute the current intermediate hash value */
context->state[0] += a;
context->state[1] += b;
context->state[2] += c;
context->state[3] += d;
context->state[4] += e;
context->state[5] += f;
context->state[6] += g;
context->state[7] += h;
state_out[0] = state_in[0] + a;
state_out[1] = state_in[1] + b;
state_out[2] = state_in[2] + c;
state_out[3] = state_in[3] + d;
state_out[4] = state_in[4] + e;
state_out[5] = state_in[5] + f;
state_out[6] = state_in[6] + g;
state_out[7] = state_in[7] + h;
/* Clean up */
a = b = c = d = e = f = g = h = T1 = 0;
@ -703,32 +658,25 @@ void sha512_Transform(SHA512_CTX* context, const sha2_word64* data) {
#else /* SHA2_UNROLL_TRANSFORM */
void sha512_Transform(SHA512_CTX* context, const sha2_word64* data) {
void sha512_Transform(const sha2_word64* state_in, const sha2_word64* data, sha2_word64* state_out) {
sha2_word64 a, b, c, d, e, f, g, h, s0, s1;
sha2_word64 T1, T2, *W512 = (sha2_word64*)(void*)context->buffer;
sha2_word64 T1, T2, W512[16];
int j;
/* Initialize registers with the prev. intermediate value */
a = context->state[0];
b = context->state[1];
c = context->state[2];
d = context->state[3];
e = context->state[4];
f = context->state[5];
g = context->state[6];
h = context->state[7];
a = state_in[0];
b = state_in[1];
c = state_in[2];
d = state_in[3];
e = state_in[4];
f = state_in[5];
g = state_in[6];
h = state_in[7];
j = 0;
do {
#if BYTE_ORDER == LITTLE_ENDIAN
/* Convert TO host byte order */
REVERSE64(*data++, W512[j]);
/* Apply the SHA-512 compression function to update a..h */
T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + W512[j];
#else /* BYTE_ORDER == LITTLE_ENDIAN */
/* Apply the SHA-512 compression function to update a..h with copy */
T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + (W512[j] = *data++);
#endif /* BYTE_ORDER == LITTLE_ENDIAN */
T2 = Sigma0_512(a) + Maj(a, b, c);
h = g;
g = f;
@ -766,14 +714,14 @@ void sha512_Transform(SHA512_CTX* context, const sha2_word64* data) {
} while (j < 80);
/* Compute the current intermediate hash value */
context->state[0] += a;
context->state[1] += b;
context->state[2] += c;
context->state[3] += d;
context->state[4] += e;
context->state[5] += f;
context->state[6] += g;
context->state[7] += h;
state_out[0] = state_in[0] + a;
state_out[1] = state_in[1] + b;
state_out[2] = state_in[2] + c;
state_out[3] = state_in[3] + d;
state_out[4] = state_in[4] + e;
state_out[5] = state_in[5] + f;
state_out[6] = state_in[6] + g;
state_out[7] = state_in[7] + h;
/* Clean up */
a = b = c = d = e = f = g = h = T1 = T2 = 0;
@ -796,14 +744,20 @@ void sha512_Update(SHA512_CTX* context, const sha2_byte *data, size_t len) {
if (len >= freespace) {
/* Fill the buffer completely and process it */
MEMCPY_BCOPY(&context->buffer[usedspace], data, freespace);
MEMCPY_BCOPY(((uint8_t*)context->buffer) + usedspace, data, freespace);
ADDINC128(context->bitcount, freespace << 3);
len -= freespace;
data += freespace;
sha512_Transform(context, (sha2_word64*)(void*)context->buffer);
#if BYTE_ORDER == LITTLE_ENDIAN
/* Convert TO host byte order */
for (int j = 0; j < 16; j++) {
REVERSE64(context->buffer[j],context->buffer[j]);
}
#endif
sha512_Transform(context->state, context->buffer, context->state);
} else {
/* The buffer is not yet full */
MEMCPY_BCOPY(&context->buffer[usedspace], data, len);
MEMCPY_BCOPY(((uint8_t*)context->buffer) + usedspace, data, len);
ADDINC128(context->bitcount, len << 3);
/* Clean up: */
usedspace = freespace = 0;
@ -812,7 +766,14 @@ void sha512_Update(SHA512_CTX* context, const sha2_byte *data, size_t len) {
}
while (len >= SHA512_BLOCK_LENGTH) {
/* Process as many complete blocks as we can */
sha512_Transform(context, (sha2_word64*)(void*)data);
MEMCPY_BCOPY(context->buffer, data, SHA512_BLOCK_LENGTH);
#if BYTE_ORDER == LITTLE_ENDIAN
/* Convert TO host byte order */
for (int j = 0; j < 16; j++) {
REVERSE64(((sha2_word64*)data)[j],context->buffer[j]);
}
#endif
sha512_Transform(context->state, context->buffer, context->state);
ADDINC128(context->bitcount, SHA512_BLOCK_LENGTH << 3);
len -= SHA512_BLOCK_LENGTH;
data += SHA512_BLOCK_LENGTH;
@ -826,48 +787,47 @@ void sha512_Update(SHA512_CTX* context, const sha2_byte *data, size_t len) {
usedspace = freespace = 0;
}
void sha512_Last(SHA512_CTX* context) {
static void sha512_Last(SHA512_CTX* context) {
unsigned int usedspace;
usedspace = (context->bitcount[0] >> 3) % SHA512_BLOCK_LENGTH;
#if BYTE_ORDER == LITTLE_ENDIAN
/* Convert FROM host byte order */
REVERSE64(context->bitcount[0],context->bitcount[0]);
REVERSE64(context->bitcount[1],context->bitcount[1]);
#endif
if (usedspace > 0) {
/* Begin padding with a 1 bit: */
context->buffer[usedspace++] = 0x80;
if (usedspace <= SHA512_SHORT_BLOCK_LENGTH) {
/* Set-up for the last transform: */
MEMSET_BZERO(&context->buffer[usedspace], SHA512_SHORT_BLOCK_LENGTH - usedspace);
} else {
if (usedspace < SHA512_BLOCK_LENGTH) {
MEMSET_BZERO(&context->buffer[usedspace], SHA512_BLOCK_LENGTH - usedspace);
}
/* Do second-to-last transform: */
sha512_Transform(context, (sha2_word64*)(void*)context->buffer);
/* And set-up for the last transform: */
MEMSET_BZERO(context->buffer, SHA512_BLOCK_LENGTH - 2);
}
/* Begin padding with a 1 bit: */
((uint8_t*)context->buffer)[usedspace++] = 0x80;
if (usedspace <= SHA512_SHORT_BLOCK_LENGTH) {
/* Set-up for the last transform: */
MEMSET_BZERO(((uint8_t*)context->buffer) + usedspace, SHA512_SHORT_BLOCK_LENGTH - usedspace);
} else {
/* Prepare for final transform: */
MEMSET_BZERO(context->buffer, SHA512_SHORT_BLOCK_LENGTH);
if (usedspace < SHA512_BLOCK_LENGTH) {
MEMSET_BZERO(((uint8_t*)context->buffer) + usedspace, SHA512_BLOCK_LENGTH - usedspace);
}
#if BYTE_ORDER == LITTLE_ENDIAN
/* Convert TO host byte order */
for (int j = 0; j < 16; j++) {
REVERSE64(context->buffer[j],context->buffer[j]);
}
#endif
/* Do second-to-last transform: */
sha512_Transform(context->state, context->buffer, context->state);
/* Begin padding with a 1 bit: */
*context->buffer = 0x80;
/* And set-up for the last transform: */
MEMSET_BZERO(context->buffer, SHA512_BLOCK_LENGTH - 2);
}
#if BYTE_ORDER == LITTLE_ENDIAN
/* Convert TO host byte order */
for (int j = 0; j < 16; j++) {
REVERSE64(context->buffer[j],context->buffer[j]);
}
#endif
/* Store the length of input data (in bits): */
sha2_word64 *t;
t = (sha2_word64 *)(void*)&context->buffer[SHA512_SHORT_BLOCK_LENGTH];
*t = context->bitcount[1];
t = (sha2_word64 *)(void*)&context->buffer[SHA512_SHORT_BLOCK_LENGTH+8];
*t = context->bitcount[0];
t = &context->buffer[SHA512_SHORT_BLOCK_LENGTH/sizeof(sha2_word64)];
t[0] = context->bitcount[1];
t[1] = context->bitcount[0];
/* Final transform: */
sha512_Transform(context, (sha2_word64*)(void*)context->buffer);
sha512_Transform(context->state, context->buffer, context->state);
}
void sha512_Final(SHA512_CTX* context, sha2_byte digest[]) {
@ -879,17 +839,12 @@ void sha512_Final(SHA512_CTX* context, sha2_byte digest[]) {
/* Save the hash data for output: */
#if BYTE_ORDER == LITTLE_ENDIAN
{
/* Convert TO host byte order */
int j;
for (j = 0; j < 8; j++) {
REVERSE64(context->state[j],context->state[j]);
*d++ = context->state[j];
}
/* Convert FROM host byte order */
for (int j = 0; j < 8; j++) {
REVERSE64(context->state[j],context->state[j]);
}
#else
MEMCPY_BCOPY(d, context->state, SHA512_DIGEST_LENGTH);
#endif
MEMCPY_BCOPY(d, context->state, SHA512_DIGEST_LENGTH);
}
/* Zero out state data */
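
To summarize the new calling convention with a hedged sketch (the helper name is hypothetical): the Transform functions now consume 16 host-order words and leave all big-endian conversion to the caller, which is exactly what sha256_Update/sha512_Update do above.

#include <string.h>
#include "sha2.h"

/* Hypothetical helper, not part of this commit: compress one raw 64-byte
 * block into an 8-word SHA-256 state using the new Transform signature. */
static void sha256_compress_block(uint32_t state[8], const uint8_t block[SHA256_BLOCK_LENGTH])
{
	uint32_t w[SHA256_BLOCK_LENGTH / sizeof(uint32_t)];
	memcpy(w, block, SHA256_BLOCK_LENGTH);
#if BYTE_ORDER == LITTLE_ENDIAN
	for (int j = 0; j < 16; j++) {
		REVERSE32(w[j], w[j]);      /* message words are big-endian in SHA-256 */
	}
#endif
	sha256_Transform(state, w, state);  /* state_in and state_out may alias */
}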

sha2.h

@ -44,14 +44,44 @@
typedef struct _SHA256_CTX {
uint32_t state[8];
uint64_t bitcount;
uint8_t buffer[SHA256_BLOCK_LENGTH];
uint32_t buffer[SHA256_BLOCK_LENGTH/sizeof(uint32_t)];
} SHA256_CTX;
typedef struct _SHA512_CTX {
uint64_t state[8];
uint64_t bitcount[2];
uint8_t buffer[SHA512_BLOCK_LENGTH];
uint64_t buffer[SHA512_BLOCK_LENGTH/sizeof(uint64_t)];
} SHA512_CTX;
/*** ENDIAN REVERSAL MACROS *******************************************/
#ifndef LITTLE_ENDIAN
#define LITTLE_ENDIAN 1234
#define BIG_ENDIAN 4321
#endif
#ifndef BYTE_ORDER
#define BYTE_ORDER LITTLE_ENDIAN
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
#define REVERSE32(w,x) { \
uint32_t tmp = (w); \
tmp = (tmp >> 16) | (tmp << 16); \
(x) = ((tmp & 0xff00ff00UL) >> 8) | ((tmp & 0x00ff00ffUL) << 8); \
}
#define REVERSE64(w,x) { \
uint64_t tmp = (w); \
tmp = (tmp >> 32) | (tmp << 32); \
tmp = ((tmp & 0xff00ff00ff00ff00ULL) >> 8) | \
((tmp & 0x00ff00ff00ff00ffULL) << 8); \
(x) = ((tmp & 0xffff0000ffff0000ULL) >> 16) | \
((tmp & 0x0000ffff0000ffffULL) << 16); \
}
#endif /* BYTE_ORDER == LITTLE_ENDIAN */
extern const uint32_t sha256_initial_hash_value[8];
extern const uint64_t sha512_initial_hash_value[8];
void sha256_Transform(const uint32_t* state_in, const uint32_t* data, uint32_t* state_out);
void sha256_Init(SHA256_CTX *);
void sha256_Update(SHA256_CTX*, const uint8_t*, size_t);
void sha256_Final(SHA256_CTX*, uint8_t[SHA256_DIGEST_LENGTH]);
@ -59,6 +89,7 @@ char* sha256_End(SHA256_CTX*, char[SHA256_DIGEST_STRING_LENGTH]);
void sha256_Raw(const uint8_t*, size_t, uint8_t[SHA256_DIGEST_LENGTH]);
char* sha256_Data(const uint8_t*, size_t, char[SHA256_DIGEST_STRING_LENGTH]);
void sha512_Transform(const uint64_t* state_in, const uint64_t* data, uint64_t* state_out);
void sha512_Init(SHA512_CTX*);
void sha512_Update(SHA512_CTX*, const uint8_t*, size_t);
void sha512_Final(SHA512_CTX*, uint8_t[SHA512_DIGEST_LENGTH]);
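
Finally, a hedged sketch of the newly exported pieces (the wrapper name is hypothetical): sha512_initial_hash_value is now visible to callers, and the state_in/state_out arguments of the Transform functions may point to the same buffer, which is how both sha2.c and pbkdf2.c call them in this commit.

#include <string.h>
#include "sha2.h"

/* Hypothetical wrapper: start a SHA-512 state from the exported initial hash
 * value and fold in one 128-byte block already converted to host byte order. */
static void sha512_absorb_first_block(uint64_t state[8], const uint64_t block[16])
{
	memcpy(state, sha512_initial_hash_value, 8 * sizeof(uint64_t));
	sha512_Transform(state, block, state);   /* in-place update of the running state */
}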