Andrew Kozlik 1 month ago committed by GitHub
commit bc0f4b0410

@ -65,7 +65,7 @@ yaml_check: ## check yaml formatting
yamllint .
editor_check: ## check editorconfig formatting
editorconfig-checker -exclude '.*\.(so|dat|toif|der)'
editorconfig-checker -exclude '.*\.(so|dat|toif|der)|^crypto/aes/'
cstyle_check: ## run code style check on low-level C code
clang-format --version

@ -19,6 +19,7 @@ FEATURE_FLAGS = {
"RDI": True,
"SECP256K1_ZKP": True, # required for trezor.crypto.curve.bip340 (BIP340/Taproot)
"SYSTEM_VIEW": False,
"AES_GCM": False,
}
FEATURES_WANTED = ["input", "sbu", "sd_card", "rgb_led", "dma2d", "consumption_mask", "usb", "optiga", "haptic"]
@ -177,6 +178,17 @@ if FEATURE_FLAGS["SECP256K1_ZKP"]:
'vendor/trezor-crypto/zkp_bip340.c',
]
# AES-GCM
if FEATURE_FLAGS["AES_GCM"]:
CPPDEFINES_MOD += [
'USE_AES_GCM',
'AES_VAR',
]
SOURCE_MOD += [
'vendor/trezor-crypto/aes/gf128mul.c',
'vendor/trezor-crypto/aes/aesgcm.c',
]
# modtrezorio
SOURCE_MOD += [
'embed/extmod/modtrezorio/modtrezorio.c',

@ -24,6 +24,7 @@ if TREZOR_MODEL in ('DISC1', 'DISC2'):
FEATURE_FLAGS = {
"SECP256K1_ZKP": True, # required for trezor.crypto.curve.bip340 (BIP340/Taproot)
"AES_GCM": True,
}
CCFLAGS_MOD = ''
@ -174,6 +175,17 @@ if FEATURE_FLAGS["SECP256K1_ZKP"]:
'vendor/trezor-crypto/zkp_bip340.c',
]
# AES-GCM
if FEATURE_FLAGS["AES_GCM"]:
CPPDEFINES_MOD += [
'USE_AES_GCM',
'AES_VAR',
]
SOURCE_MOD += [
'vendor/trezor-crypto/aes/gf128mul.c',
'vendor/trezor-crypto/aes/aesgcm.c',
]
# modtrezorio
SOURCE_MOD += [
'embed/extmod/modtrezorio/modtrezorio.c',

@ -62,10 +62,8 @@ STATIC mp_obj_t mod_trezorcrypto_AES_make_new(const mp_obj_type_t *type,
size_t n_args, size_t n_kw,
const mp_obj_t *args) {
mp_arg_check_num(n_args, n_kw, 2, 3, false);
mp_obj_AES_t *o = m_new_obj_with_finaliser(mp_obj_AES_t);
o->base.type = type;
o->mode = mp_obj_get_int(args[0]);
if (o->mode < ECB || o->mode > CTR) {
mp_int_t mode = mp_obj_get_int(args[0]);
if (mode < ECB || mode > CTR) {
mp_raise_ValueError("Invalid AES mode");
}
mp_buffer_info_t key = {0};
@ -74,13 +72,19 @@ STATIC mp_obj_t mod_trezorcrypto_AES_make_new(const mp_obj_type_t *type,
mp_raise_ValueError(
"Invalid length of key (has to be 128, 192 or 256 bits)");
}
mp_buffer_info_t iv = {0};
if (n_args > 2) {
mp_buffer_info_t iv = {0};
mp_get_buffer_raise(args[2], &iv, MP_BUFFER_READ);
if (iv.len != AES_BLOCK_SIZE) {
mp_raise_ValueError(
"Invalid length of initialization vector (has to be 128 bits)");
}
}
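// Validate all arguments before allocating the object, so that raising
// ValueError cannot leave a half-initialised object with a registered
// finaliser behind. (The other constructors touched by this commit follow
// the same pattern, or free the object with m_del_obj() before raising.)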
mp_obj_AES_t *o = m_new_obj_with_finaliser(mp_obj_AES_t);
o->base.type = type;
o->mode = mode;
if (iv.len != 0) {
memcpy(o->iv, iv.buf, AES_BLOCK_SIZE);
} else {
memzero(o->iv, AES_BLOCK_SIZE);

@ -0,0 +1,213 @@
/*
* This file is part of the Trezor project, https://trezor.io/
*
* Copyright (c) SatoshiLabs
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "py/objstr.h"
#include "aes/aesgcm.h"
#include "memzero.h"
/// package: trezorcrypto.__init__
/// class aesgcm:
/// """
/// AES-GCM context.
/// """
typedef struct _mp_obj_AesGcm_t {
mp_obj_base_t base;
gcm_ctx ctx;
enum {
STATE_INIT,
STATE_ENCRYPTING,
STATE_DECRYPTING,
STATE_FINISHED,
STATE_FAILED,
} state;
} mp_obj_AesGcm_t;
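// The state field enforces the streaming contract: encrypt() and decrypt()
// must not be mixed within a single message, auth() may be called at any
// point before finish(), and after finish() no further data is accepted
// until reset() starts a new message.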
/// def __init__(self, key: bytes, iv: bytes) -> None:
/// """
/// Initialize the AES-GCM context for encryption or decryption.
/// """
STATIC mp_obj_t mod_trezorcrypto_AesGcm_make_new(const mp_obj_type_t *type,
size_t n_args, size_t n_kw,
const mp_obj_t *args) {
mp_arg_check_num(n_args, n_kw, 2, 2, false);
mp_buffer_info_t key = {0}, iv = {0};
mp_get_buffer_raise(args[0], &key, MP_BUFFER_READ);
mp_get_buffer_raise(args[1], &iv, MP_BUFFER_READ);
if (key.len != 16 && key.len != 24 && key.len != 32) {
mp_raise_ValueError(
"Invalid length of key (has to be 128, 192 or 256 bits)");
}
mp_obj_AesGcm_t *o = m_new_obj_with_finaliser(mp_obj_AesGcm_t);
o->base.type = type;
o->state = STATE_INIT;
if (gcm_init_and_key(key.buf, key.len, &(o->ctx)) != RETURN_GOOD ||
gcm_init_message(iv.buf, iv.len, &(o->ctx)) != RETURN_GOOD) {
m_del_obj(mp_obj_AesGcm_t, o);
mp_raise_type(&mp_type_RuntimeError);
}
return MP_OBJ_FROM_PTR(o);
}
/// def reset(self, iv: bytes) -> None:
/// """
/// Reset the IV for encryption or decryption.
/// """
STATIC mp_obj_t mod_trezorcrypto_AesGcm_reset(mp_obj_t self, mp_obj_t iv) {
mp_obj_AesGcm_t *o = MP_OBJ_TO_PTR(self);
mp_buffer_info_t in = {0};
mp_get_buffer_raise(iv, &in, MP_BUFFER_READ);
if (gcm_init_message(in.buf, in.len, &(o->ctx)) != RETURN_GOOD) {
o->state = STATE_FAILED;
mp_raise_type(&mp_type_RuntimeError);
}
o->state = STATE_INIT;
return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(mod_trezorcrypto_AesGcm_reset_obj,
mod_trezorcrypto_AesGcm_reset);
/// def encrypt(self, data: bytes) -> bytes:
/// """
/// Encrypt data chunk.
/// """
STATIC mp_obj_t mod_trezorcrypto_AesGcm_encrypt(mp_obj_t self, mp_obj_t data) {
mp_obj_AesGcm_t *o = MP_OBJ_TO_PTR(self);
if (o->state != STATE_INIT && o->state != STATE_ENCRYPTING) {
mp_raise_msg(&mp_type_RuntimeError, "Invalid state.");
}
o->state = STATE_ENCRYPTING;
mp_buffer_info_t in = {0};
mp_get_buffer_raise(data, &in, MP_BUFFER_READ);
vstr_t vstr = {0};
vstr_init_len(&vstr, in.len);
memcpy(vstr.buf, in.buf, in.len);
if (gcm_encrypt((unsigned char *)vstr.buf, in.len, &(o->ctx)) !=
RETURN_GOOD) {
o->state = STATE_FAILED;
mp_raise_type(&mp_type_RuntimeError);
}
return mp_obj_new_str_from_vstr(&mp_type_bytes, &vstr);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(mod_trezorcrypto_AesGcm_encrypt_obj,
mod_trezorcrypto_AesGcm_encrypt);
/// def decrypt(self, data: bytes) -> bytes:
/// """
/// Decrypt data chunk.
/// """
STATIC mp_obj_t mod_trezorcrypto_AesGcm_decrypt(mp_obj_t self, mp_obj_t data) {
mp_obj_AesGcm_t *o = MP_OBJ_TO_PTR(self);
if (o->state != STATE_INIT && o->state != STATE_DECRYPTING) {
mp_raise_msg(&mp_type_RuntimeError, "Invalid state.");
}
o->state = STATE_DECRYPTING;
mp_buffer_info_t in = {0};
mp_get_buffer_raise(data, &in, MP_BUFFER_READ);
vstr_t vstr = {0};
vstr_init_len(&vstr, in.len);
memcpy(vstr.buf, in.buf, in.len);
if (gcm_decrypt((unsigned char *)vstr.buf, in.len, &(o->ctx)) !=
RETURN_GOOD) {
o->state = STATE_FAILED;
mp_raise_type(&mp_type_RuntimeError);
}
return mp_obj_new_str_from_vstr(&mp_type_bytes, &vstr);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(mod_trezorcrypto_AesGcm_decrypt_obj,
mod_trezorcrypto_AesGcm_decrypt);
/// def auth(self, data: bytes) -> None:
/// """
/// Include authenticated data chunk in the GCM authentication tag. This can
/// be called repeatedly to add authenticated data at any point before
/// finish().
/// """
STATIC mp_obj_t mod_trezorcrypto_AesGcm_auth(mp_obj_t self, mp_obj_t data) {
mp_obj_AesGcm_t *o = MP_OBJ_TO_PTR(self);
if (o->state != STATE_INIT && o->state != STATE_ENCRYPTING &&
o->state != STATE_DECRYPTING) {
mp_raise_msg(&mp_type_RuntimeError, "Invalid state.");
}
mp_buffer_info_t in = {0};
mp_get_buffer_raise(data, &in, MP_BUFFER_READ);
if (gcm_auth_header(in.buf, in.len, &(o->ctx)) != RETURN_GOOD) {
o->state = STATE_FAILED;
mp_raise_type(&mp_type_RuntimeError);
}
return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(mod_trezorcrypto_AesGcm_auth_obj,
mod_trezorcrypto_AesGcm_auth);
/// def finish(self) -> bytes:
/// """
/// Compute GCM authentication tag.
/// """
STATIC mp_obj_t mod_trezorcrypto_AesGcm_finish(mp_obj_t self) {
mp_obj_AesGcm_t *o = MP_OBJ_TO_PTR(self);
if (o->state != STATE_INIT && o->state != STATE_ENCRYPTING &&
o->state != STATE_DECRYPTING) {
mp_raise_msg(&mp_type_RuntimeError, "Invalid state.");
}
o->state = STATE_FINISHED;
vstr_t tag = {0};
vstr_init_len(&tag, 16);
if (gcm_compute_tag((unsigned char *)tag.buf, tag.len, &(o->ctx)) !=
RETURN_GOOD) {
o->state = STATE_FAILED;
mp_raise_type(&mp_type_RuntimeError);
}
return mp_obj_new_str_from_vstr(&mp_type_bytes, &tag);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(mod_trezorcrypto_AesGcm_finish_obj,
mod_trezorcrypto_AesGcm_finish);
STATIC mp_obj_t mod_trezorcrypto_AesGcm___del__(mp_obj_t self) {
mp_obj_AesGcm_t *o = MP_OBJ_TO_PTR(self);
memzero(&(o->ctx), sizeof(gcm_ctx));
return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(mod_trezorcrypto_AesGcm___del___obj,
mod_trezorcrypto_AesGcm___del__);
STATIC const mp_rom_map_elem_t mod_trezorcrypto_AesGcm_locals_dict_table[] = {
{MP_ROM_QSTR(MP_QSTR_reset),
MP_ROM_PTR(&mod_trezorcrypto_AesGcm_reset_obj)},
{MP_ROM_QSTR(MP_QSTR_encrypt),
MP_ROM_PTR(&mod_trezorcrypto_AesGcm_encrypt_obj)},
{MP_ROM_QSTR(MP_QSTR_decrypt),
MP_ROM_PTR(&mod_trezorcrypto_AesGcm_decrypt_obj)},
{MP_ROM_QSTR(MP_QSTR_auth), MP_ROM_PTR(&mod_trezorcrypto_AesGcm_auth_obj)},
{MP_ROM_QSTR(MP_QSTR_finish),
MP_ROM_PTR(&mod_trezorcrypto_AesGcm_finish_obj)},
{MP_ROM_QSTR(MP_QSTR___del__),
MP_ROM_PTR(&mod_trezorcrypto_AesGcm___del___obj)},
};
STATIC MP_DEFINE_CONST_DICT(mod_trezorcrypto_AesGcm_locals_dict,
mod_trezorcrypto_AesGcm_locals_dict_table);
STATIC const mp_obj_type_t mod_trezorcrypto_AesGcm_type = {
{&mp_type_type},
.name = MP_QSTR_AesGcm,
.make_new = mod_trezorcrypto_AesGcm_make_new,
.locals_dict = (void *)&mod_trezorcrypto_AesGcm_locals_dict,
};

@ -98,6 +98,7 @@ STATIC mp_obj_t mod_trezorcrypto_Blake2b_make_new(const mp_obj_type_t *type,
}
if (res < 0) {
m_del_obj(mp_obj_Blake2b_t, o);
mp_raise_ValueError("Invalid Blake2b parameters");
}

@ -98,6 +98,7 @@ STATIC mp_obj_t mod_trezorcrypto_Blake2s_make_new(const mp_obj_type_t *type,
}
if (res < 0) {
m_del_obj(mp_obj_Blake2s_t, o);
mp_raise_ValueError("Invalid Blake2s parameters");
}

@ -113,6 +113,7 @@ STATIC mp_obj_t mod_trezorcrypto_from_secret(mp_obj_t secret) {
o->base.type = &mod_trezorcrypto_HDNode_type;
const int res = hdnode_from_secret_cardano(bufinfo.buf, &o->hdnode);
if (res != 1) {
m_del_obj(mp_obj_HDNode_t, o);
mp_raise_msg(&mp_type_RuntimeError,
"Unexpected failure in constructing Cardano node.");
}

@ -43,9 +43,6 @@ STATIC mp_obj_t mod_trezorcrypto_ChaCha20Poly1305_make_new(
const mp_obj_type_t *type, size_t n_args, size_t n_kw,
const mp_obj_t *args) {
mp_arg_check_num(n_args, n_kw, 2, 2, false);
mp_obj_ChaCha20Poly1305_t *o =
m_new_obj_with_finaliser(mp_obj_ChaCha20Poly1305_t);
o->base.type = type;
mp_buffer_info_t key = {0}, nonce = {0};
mp_get_buffer_raise(args[0], &key, MP_BUFFER_READ);
mp_get_buffer_raise(args[1], &nonce, MP_BUFFER_READ);
@ -55,7 +52,10 @@ STATIC mp_obj_t mod_trezorcrypto_ChaCha20Poly1305_make_new(
if (nonce.len != 12) {
mp_raise_ValueError("Invalid length of nonce");
}
mp_obj_ChaCha20Poly1305_t *o =
m_new_obj_with_finaliser(mp_obj_ChaCha20Poly1305_t);
rfc7539_init(&(o->ctx), key.buf, nonce.buf);
o->base.type = type;
o->alen = 0;
o->plen = 0;
return MP_OBJ_FROM_PTR(o);

@ -57,8 +57,6 @@ STATIC mp_obj_t mod_trezorcrypto_Hmac_make_new(const mp_obj_type_t *type,
size_t n_args, size_t n_kw,
const mp_obj_t *args) {
mp_arg_check_num(n_args, n_kw, 2, 3, false);
mp_obj_Hmac_t *o = m_new_obj_with_finaliser(mp_obj_Hmac_t);
o->base.type = type;
mp_buffer_info_t key = {0};
mp_get_buffer_raise(args[1], &key, MP_BUFFER_READ);
@ -67,12 +65,15 @@ STATIC mp_obj_t mod_trezorcrypto_Hmac_make_new(const mp_obj_type_t *type,
key.buf = "";
}
mp_obj_Hmac_t *o = m_new_obj_with_finaliser(mp_obj_Hmac_t);
o->base.type = type;
o->hashtype = trezor_obj_get_uint(args[0]);
if (o->hashtype == SHA256) {
hmac_sha256_Init(&(o->ctx256), key.buf, key.len);
} else if (o->hashtype == SHA512) {
hmac_sha512_Init(&(o->ctx512), key.buf, key.len);
} else {
m_del_obj(mp_obj_Hmac_t, o);
mp_raise_ValueError("Invalid hashtype");
}
// constructor called with message as third parameter

@ -160,6 +160,7 @@ STATIC mp_obj_t mod_trezorcrypto_monero_ge25519_make_new(
} else if (n_args == 1 && MP_OBJ_IS_STR_OR_BYTES(args[0])) {
mp_unpack_ge25519(&o->p, args[0], 0);
} else {
m_del_obj(mp_obj_ge25519_t, o);
mp_raise_ValueError("Invalid ge25519 constructor");
}
@ -202,6 +203,7 @@ STATIC mp_obj_t mod_trezorcrypto_monero_bignum256modm_make_new(
uint64_t v = trezor_obj_get_uint64(args[0]);
set256_modm(o->p, v);
} else {
m_del_obj(mp_obj_bignum256modm_t, o);
mp_raise_ValueError("Invalid scalar constructor");
}

@ -59,8 +59,6 @@ STATIC mp_obj_t mod_trezorcrypto_Pbkdf2_make_new(const mp_obj_type_t *type,
size_t n_args, size_t n_kw,
const mp_obj_t *args) {
mp_arg_check_num(n_args, n_kw, 3, 4, false);
mp_obj_Pbkdf2_t *o = m_new_obj_with_finaliser(mp_obj_Pbkdf2_t);
o->base.type = type;
mp_buffer_info_t password = {0};
mp_get_buffer_raise(args[1], &password, MP_BUFFER_READ);
@ -79,7 +77,10 @@ STATIC mp_obj_t mod_trezorcrypto_Pbkdf2_make_new(const mp_obj_type_t *type,
blocknr = trezor_obj_get_uint(args[4]);
}
o->prf = trezor_obj_get_uint(args[0]);
mp_uint_t prf = trezor_obj_get_uint(args[0]);
mp_obj_Pbkdf2_t *o = m_new_obj_with_finaliser(mp_obj_Pbkdf2_t);
o->base.type = type;
o->prf = prf;
if (o->prf == PRF_HMAC_SHA256) {
pbkdf2_hmac_sha256_Init(&(o->ctx256), password.buf, password.len, salt.buf,
salt.len, blocknr);
@ -87,6 +88,7 @@ STATIC mp_obj_t mod_trezorcrypto_Pbkdf2_make_new(const mp_obj_type_t *type,
pbkdf2_hmac_sha512_Init(&(o->ctx512), password.buf, password.len, salt.buf,
salt.len, blocknr);
} else {
m_del_obj(mp_obj_Pbkdf2_t, o);
mp_raise_ValueError("Invalid PRF");
}
// constructor called with iterations as fourth parameter

@ -50,11 +50,6 @@ STATIC mp_obj_t mod_trezorcrypto_Sha3_256_make_new(const mp_obj_type_t *type,
size_t n_args, size_t n_kw,
const mp_obj_t *args) {
mp_arg_check_num(n_args, n_kw, 0, 1, true);
mp_obj_Sha3_256_t *o = m_new_obj_with_finaliser(mp_obj_Sha3_256_t);
o->base.type = type;
o->keccak = 0;
sha3_256_Init(&(o->ctx));
STATIC const mp_arg_t allowed_args[] = {
{MP_QSTR_data, MP_ARG_OBJ, {.u_obj = mp_const_none}},
{MP_QSTR_keccak, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_obj = MP_OBJ_NULL}},
@ -62,6 +57,11 @@ STATIC mp_obj_t mod_trezorcrypto_Sha3_256_make_new(const mp_obj_type_t *type,
mp_arg_val_t vals[MP_ARRAY_SIZE(allowed_args)] = {0};
mp_arg_parse_all_kw_array(n_args, n_kw, args, MP_ARRAY_SIZE(allowed_args),
allowed_args, vals);
mp_obj_Sha3_256_t *o = m_new_obj_with_finaliser(mp_obj_Sha3_256_t);
o->base.type = type;
o->keccak = 0;
sha3_256_Init(&(o->ctx));
if (vals[1].u_obj != MP_OBJ_NULL) {
o->keccak = mp_obj_is_true(vals[1].u_obj);
}

@ -50,11 +50,6 @@ STATIC mp_obj_t mod_trezorcrypto_Sha3_512_make_new(const mp_obj_type_t *type,
size_t n_args, size_t n_kw,
const mp_obj_t *args) {
mp_arg_check_num(n_args, n_kw, 0, 1, true);
mp_obj_Sha3_512_t *o = m_new_obj_with_finaliser(mp_obj_Sha3_512_t);
o->base.type = type;
o->keccak = 0;
sha3_512_Init(&(o->ctx));
STATIC const mp_arg_t allowed_args[] = {
{MP_QSTR_data, MP_ARG_OBJ, {.u_obj = mp_const_none}},
{MP_QSTR_keccak, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_obj = MP_OBJ_NULL}},
@ -62,6 +57,11 @@ STATIC mp_obj_t mod_trezorcrypto_Sha3_512_make_new(const mp_obj_type_t *type,
mp_arg_val_t vals[MP_ARRAY_SIZE(allowed_args)] = {0};
mp_arg_parse_all_kw_array(n_args, n_kw, args, MP_ARRAY_SIZE(allowed_args),
allowed_args, vals);
mp_obj_Sha3_512_t *o = m_new_obj_with_finaliser(mp_obj_Sha3_512_t);
o->base.type = type;
o->keccak = 0;
sha3_512_Init(&(o->ctx));
if (vals[1].u_obj != MP_OBJ_NULL) {
o->keccak = mp_obj_is_true(vals[1].u_obj);
}

@ -39,6 +39,9 @@ static void wrapped_ui_wait_callback(uint32_t current, uint32_t total) {
}
#include "modtrezorcrypto-aes.h"
#ifdef USE_AES_GCM
#include "modtrezorcrypto-aesgcm.h"
#endif
#include "modtrezorcrypto-bech32.h"
#include "modtrezorcrypto-bip32.h"
#ifdef USE_SECP256K1_ZKP
@ -78,6 +81,9 @@ static void wrapped_ui_wait_callback(uint32_t current, uint32_t total) {
STATIC const mp_rom_map_elem_t mp_module_trezorcrypto_globals_table[] = {
{MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_trezorcrypto)},
{MP_ROM_QSTR(MP_QSTR_aes), MP_ROM_PTR(&mod_trezorcrypto_AES_type)},
#if USE_AES_GCM
{MP_ROM_QSTR(MP_QSTR_aesgcm), MP_ROM_PTR(&mod_trezorcrypto_AesGcm_type)},
#endif
{MP_ROM_QSTR(MP_QSTR_bech32), MP_ROM_PTR(&mod_trezorcrypto_bech32_module)},
{MP_ROM_QSTR(MP_QSTR_bip32), MP_ROM_PTR(&mod_trezorcrypto_bip32_module)},
{MP_ROM_QSTR(MP_QSTR_bip39), MP_ROM_PTR(&mod_trezorcrypto_bip39_module)},

@ -33,6 +33,45 @@ class aes:
"""
# extmod/modtrezorcrypto/modtrezorcrypto-aesgcm.h
class aesgcm:
"""
AES-GCM context.
"""
def __init__(self, key: bytes, iv: bytes) -> None:
"""
Initialize the AES-GCM context for encryption or decryption.
"""
def reset(self, iv: bytes) -> None:
"""
Reset the IV for encryption or decryption.
"""
def encrypt(self, data: bytes) -> bytes:
"""
Encrypt data chunk.
"""
def decrypt(self, data: bytes) -> bytes:
"""
Decrypt data chunk.
"""
def auth(self, data: bytes) -> None:
"""
Include authenticated data chunk in the GCM authentication tag. This can
be called repeatedly to add authenticated data at any point before
finish().
"""
def finish(self) -> bytes:
"""
Compute GCM authentication tag.
"""
# extmod/modtrezorcrypto/modtrezorcrypto-blake256.h
class blake256:
"""

@ -9,6 +9,11 @@ from trezorcrypto import ( # noqa: F401
random,
)
try:
from trezorcrypto import aesgcm # noqa: F401
except Exception:
pass
from trezor import utils
if not utils.BITCOIN_ONLY:

@ -0,0 +1,88 @@
from common import * # isort:skip
from trezor.crypto import aesgcm
class TestCryptoAes(unittest.TestCase):
# test vectors from
# https://csrc.nist.gov/CSRC/media/Projects/Cryptographic-Algorithm-Validation-Program/documents/mac/gcmtestvectors.zip
vectors = [
(
"11754cd72aec309bf52f7687212e8957",
"3c819d9a9bed087615030b65",
"",
"",
"",
"250327c674aaf477aef2675748cf6971",
),
(
"fe9bb47deb3a61e423c2231841cfd1fb",
"4d328eb776f500a2f7fb47aa",
"f1cc3818e421876bb6b8bbd6c9",
"",
"b88c5c1977b35b517b0aeae967",
"43fd4727fe5cdb4b5b42818dea7ef8c9",
),
(
"6f44f52c2f62dae4e8684bd2bc7d16ee7c557330305a790d",
"9ae35825d7c7edc9a39a0732",
"37222d30895eb95884bbbbaee4d9cae1",
"1b4236b846fc2a0f782881ba48a067e9",
"a54b5da33fc1196a8ef31a5321bfcaeb",
"1c198086450ae1834dd6c2636796bce2",
),
(
"05f714021372ae1c8d72c98e6307fbddb26ee27615860a9fb48ba4c3ea360a00",
"c0",
"ec3afbaa1447e47ce068bffb787bd0cadc9f0deceb11fa78e981271390578ae95891f26664b5e62d1fd5fd0d0767a54da5f86f",
"faf9fa457a8e70ea709da28545f18f041351e8d5",
"c8c5816ba9e7e0d20820dc0064a519a277889f5ac9661c9882b5a9896fd12836c6721514e885b1d34f5e888d1d85abce8c2ebb",
"0856f211fade7d26d64478ca46025a3c",
),
]
def test_gcm(self):
for vector in self.vectors:
key, iv, pt, aad, ct, tag = map(unhexlify, vector)
# Test encryption.
ctx = aesgcm(key, iv)
if aad:
ctx.auth(aad)
self.assertEqual(ctx.encrypt(pt), ct)
self.assertEqual(ctx.finish(), tag)
# Test decryption.
ctx.reset(iv)
if aad:
ctx.auth(aad)
self.assertEqual(ctx.decrypt(ct), pt)
self.assertEqual(ctx.finish(), tag)
def test_gcm_chunks(self):
for vector in self.vectors:
key, iv, pt, aad, ct, tag = map(unhexlify, vector)
chunk1 = len(pt) // 3
# Decrypt by chunks and add authenticated data by chunks.
ctx = aesgcm(key, iv)
self.assertEqual(ctx.decrypt(ct[:chunk1]), pt[:chunk1])
ctx.auth(aad[:17])
self.assertEqual(ctx.decrypt(ct[chunk1:]), pt[chunk1:])
ctx.auth(aad[17:])
self.assertEqual(ctx.finish(), tag)
# Encrypt by chunks and add authenticated data by chunks.
ctx.reset(iv)
ctx.auth(aad[:7])
self.assertEqual(ctx.encrypt(pt[:chunk1]), ct[:chunk1])
ctx.auth(aad[7:])
self.assertEqual(ctx.encrypt(pt[chunk1:]), ct[chunk1:])
self.assertEqual(ctx.finish(), tag)
if __name__ == "__main__":
unittest.main()

@ -84,6 +84,8 @@ CFLAGS += -DUSE_NEM=1
CFLAGS += -DUSE_CARDANO=1
CFLAGS += -DUSE_INSECURE_PRNG=1
CFLAGS += -DAES_128
CFLAGS += -DAES_192
CFLAGS += -DAES_VAR
CFLAGS += $(shell pkg-config --cflags openssl)
# disable certain optimizations and features when small footprint is required
@ -98,7 +100,7 @@ SRCS += ripemd160.c
SRCS += sha2.c
SRCS += sha3.c
SRCS += hasher.c
SRCS += aes/aesccm.c aes/aescrypt.c aes/aeskey.c aes/aestab.c aes/aes_modes.c
SRCS += aes/aesccm.c aes/aescrypt.c aes/aesgcm.c aes/aeskey.c aes/aestab.c aes/aes_modes.c aes/gf128mul.c
SRCS += ed25519-donna/curve25519-donna-32bit.c ed25519-donna/curve25519-donna-helpers.c ed25519-donna/modm-donna-32bit.c
SRCS += ed25519-donna/ed25519-donna-basepoint-table.c ed25519-donna/ed25519-donna-32bit-tables.c ed25519-donna/ed25519-donna-impl-base.c
SRCS += ed25519-donna/ed25519.c ed25519-donna/curve25519-donna-scalarmult-base.c ed25519-donna/ed25519-sha3.c ed25519-donna/ed25519-keccak.c
@ -157,7 +159,7 @@ tests/test_openssl: tests/test_openssl.o $(OBJS)
$(CC) $(CFLAGS) tests/test_openssl.o $(OBJS) $(TESTSSLLIBS) -o tests/test_openssl
tests/libtrezor-crypto.so: $(SRCS) secp256k1-zkp.o precomputed_ecmult.o precomputed_ecmult_gen.o
$(CC) $(CFLAGS) -DAES_128 -DAES_192 -fPIC -shared $(SRCS) secp256k1-zkp.o precomputed_ecmult.o precomputed_ecmult_gen.o -o tests/libtrezor-crypto.so
$(CC) $(CFLAGS) -fPIC -shared $(SRCS) secp256k1-zkp.o precomputed_ecmult.o precomputed_ecmult_gen.o -o tests/libtrezor-crypto.so
tools: tools/xpubaddrgen tools/mktable tools/bip39bruteforce

@ -27,7 +27,6 @@ Issue Date: 02/08/2018
#include <stdlib.h>
#include <stdint.h>
#define VOID_RETURN void
#define INT_RETURN int
#define ALIGN_OFFSET(x,n) (((intptr_t)(x)) & ((n) - 1))
#define ALIGN_FLOOR(x,n) ((uint8_t*)(x) - ( ((intptr_t)(x)) & ((n) - 1)))

@ -107,7 +107,7 @@ aligned_array(unsigned long, dec_hybrid_table, 12, 16) = NEH_DEC_HYBRID_DATA;
/* test the code for detecting and setting pointer alignment */
AES_RETURN aes_test_alignment_detection(unsigned int n) /* 4 <= n <= 16 */
{ uint8_t p[16];
{ uint8_t p[16] = {0};
uint32_t i = 0, count_eq = 0, count_neq = 0;
if(n < 4 || n > 16)
@ -357,7 +357,7 @@ AES_RETURN aes_cbc_encrypt(const unsigned char *ibuf, unsigned char *obuf,
AES_RETURN aes_cbc_decrypt(const unsigned char *ibuf, unsigned char *obuf,
int len, unsigned char *iv, const aes_decrypt_ctx ctx[1])
{ unsigned char tmp[AES_BLOCK_SIZE];
{ unsigned char tmp[AES_BLOCK_SIZE] = {0};
int nb = len >> AES_BLOCK_SIZE_P2;
if(len & (AES_BLOCK_SIZE - 1))
@ -456,7 +456,7 @@ AES_RETURN aes_cbc_decrypt(const unsigned char *ibuf, unsigned char *obuf,
AES_RETURN aes_cfb_encrypt(const unsigned char *ibuf, unsigned char *obuf,
int len, unsigned char *iv, aes_encrypt_ctx ctx[1])
{ int cnt = 0, b_pos = (int)ctx->inf.b[2], nb;
{ int cnt = 0, b_pos = (int)ctx->inf.b[2], nb = 0;
if(b_pos) /* complete any partial block */
{
@ -474,7 +474,7 @@ AES_RETURN aes_cfb_encrypt(const unsigned char *ibuf, unsigned char *obuf,
#if defined( USE_VIA_ACE_IF_PRESENT )
if(ctx->inf.b[1] == 0xff)
{ int m;
{ int m = 0;
uint8_t *ksp = (uint8_t*)(ctx->ks), *ivp = iv;
aligned_auto(uint8_t, liv, AES_BLOCK_SIZE, 16);
via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192);
@ -581,10 +581,10 @@ AES_RETURN aes_cfb_encrypt(const unsigned char *ibuf, unsigned char *obuf,
AES_RETURN aes_cfb_decrypt(const unsigned char *ibuf, unsigned char *obuf,
int len, unsigned char *iv, aes_encrypt_ctx ctx[1])
{ int cnt = 0, b_pos = (int)ctx->inf.b[2], nb;
{ int cnt = 0, b_pos = (int)ctx->inf.b[2], nb = 0;
if(b_pos) /* complete any partial block */
{ uint8_t t;
{ uint8_t t = 0;
while(b_pos < AES_BLOCK_SIZE && cnt < len)
{
@ -602,7 +602,7 @@ AES_RETURN aes_cfb_decrypt(const unsigned char *ibuf, unsigned char *obuf,
#if defined( USE_VIA_ACE_IF_PRESENT )
if(ctx->inf.b[1] == 0xff)
{ int m;
{ int m = 0;
uint8_t *ksp = (uint8_t*)(ctx->ks), *ivp = iv;
aligned_auto(uint8_t, liv, AES_BLOCK_SIZE, 16);
via_cwd(cwd, hybrid, dec, 2 * ctx->inf.b[0] - 192);
@ -655,7 +655,7 @@ AES_RETURN aes_cfb_decrypt(const unsigned char *ibuf, unsigned char *obuf,
# ifdef FAST_BUFFER_OPERATIONS
if(!ALIGN_OFFSET( ibuf, 4 ) && !ALIGN_OFFSET( obuf, 4 ) &&!ALIGN_OFFSET( iv, 4 ))
while(cnt + AES_BLOCK_SIZE <= len)
{ uint32_t t;
{ uint32_t t = 0;
assert(b_pos == 0);
if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS)
@ -671,7 +671,7 @@ AES_RETURN aes_cfb_decrypt(const unsigned char *ibuf, unsigned char *obuf,
else
# endif
while(cnt + AES_BLOCK_SIZE <= len)
{ uint8_t t;
{ uint8_t t = 0;
assert(b_pos == 0);
if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS)
@ -700,7 +700,7 @@ AES_RETURN aes_cfb_decrypt(const unsigned char *ibuf, unsigned char *obuf,
}
while(cnt < len)
{ uint8_t t;
{ uint8_t t = 0;
if(!b_pos && aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS)
return EXIT_FAILURE;
@ -722,7 +722,7 @@ AES_RETURN aes_cfb_decrypt(const unsigned char *ibuf, unsigned char *obuf,
AES_RETURN aes_ofb_crypt(const unsigned char *ibuf, unsigned char *obuf,
int len, unsigned char *iv, aes_encrypt_ctx ctx[1])
{ int cnt = 0, b_pos = (int)ctx->inf.b[2], nb;
{ int cnt = 0, b_pos = (int)ctx->inf.b[2], nb = 0;
if(b_pos) /* complete any partial block */
{
@ -740,7 +740,7 @@ AES_RETURN aes_ofb_crypt(const unsigned char *ibuf, unsigned char *obuf,
#if defined( USE_VIA_ACE_IF_PRESENT )
if(ctx->inf.b[1] == 0xff)
{ int m;
{ int m = 0;
uint8_t *ksp = (uint8_t*)(ctx->ks), *ivp = iv;
aligned_auto(uint8_t, liv, AES_BLOCK_SIZE, 16);
via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192);
@ -849,7 +849,7 @@ AES_RETURN aes_ofb_crypt(const unsigned char *ibuf, unsigned char *obuf,
AES_RETURN aes_ctr_crypt(const unsigned char *ibuf, unsigned char *obuf,
int len, unsigned char *cbuf, cbuf_inc ctr_inc, aes_encrypt_ctx ctx[1])
{ unsigned char *ip;
{ unsigned char *ip = NULL;
int i = 0, blen = 0, b_pos = (int)(ctx->inf.b[2]);
#if defined( USE_VIA_ACE_IF_PRESENT )

@ -37,9 +37,9 @@ extern "C"
#define so(y,x,c) word_out(y, c, s(x,c))
#if defined(ARRAYS)
#define locals(y,x) x[4],y[4]
#define locals(y,x) x[4] = {0}, y[4] = {0}
#else
#define locals(y,x) x##0,x##1,x##2,x##3,y##0,y##1,y##2,y##3
#define locals(y,x) x##0=0,x##1=0,x##2=0,x##3=0,y##0=0,y##1=0,y##2=0,y##3=0
#endif
#define l_copy(y, x) s(y,0) = s(x,0); s(y,1) = s(x,1); \
@ -138,7 +138,7 @@ AES_RETURN aes_xi(encrypt)(const unsigned char *in, unsigned char *out, const ae
#else
#if (ENC_UNROLL == PARTIAL)
{ uint32_t rnd;
{ uint32_t rnd = 0;
for(rnd = 0; rnd < (cx->inf.b[0] >> 5) - 1; ++rnd)
{
kp += N_COLS;
@ -149,7 +149,7 @@ AES_RETURN aes_xi(encrypt)(const unsigned char *in, unsigned char *out, const ae
kp += N_COLS;
round(fwd_rnd, b1, b0, kp);
#else
{ uint32_t rnd;
{ uint32_t rnd = 0;
for(rnd = 0; rnd < (cx->inf.b[0] >> 4) - 1; ++rnd)
{
kp += N_COLS;
@ -272,7 +272,7 @@ AES_RETURN aes_xi(decrypt)(const unsigned char *in, unsigned char *out, const ae
#else
#if (DEC_UNROLL == PARTIAL)
{ uint32_t rnd;
{ uint32_t rnd = 0;
for(rnd = 0; rnd < (cx->inf.b[0] >> 5) - 1; ++rnd)
{
kp = rnd_key(1);
@ -283,7 +283,7 @@ AES_RETURN aes_xi(decrypt)(const unsigned char *in, unsigned char *out, const ae
kp = rnd_key(1);
round(inv_rnd, b1, b0, kp);
#else
{ uint32_t rnd;
{ uint32_t rnd = 0;
for(rnd = 0; rnd < (cx->inf.b[0] >> 4) - 1; ++rnd)
{
kp = rnd_key(1);

@ -0,0 +1,547 @@
/*
---------------------------------------------------------------------------
Copyright (c) 1998-2010, Brian Gladman, Worcester, UK. All rights reserved.
The redistribution and use of this software (with or without changes)
is allowed without the payment of fees or royalties provided that:
source code distributions include the above copyright notice, this
list of conditions and the following disclaimer;
binary distributions include the above copyright notice, this list
of conditions and the following disclaimer in their documentation.
This software is provided 'as is' with no explicit or implied warranties
in respect of its operation, including, but not limited to, correctness
and fitness for purpose.
---------------------------------------------------------------------------
Issue Date: 30/03/2011
My thanks to:
Colin Sinclair for finding an error and suggesting a number of
improvements to this code.
John Viega and David McGrew for their support in the development
of this code and to David for testing it on a big-endian system.
Mark Rodenkirch and Jason Papadopoulos for their help in finding
a bug in the fast buffer operations on big endian systems.
*/
#include "aesgcm.h"
#include "mode_hdr.h"
/* This GCM implementation needs a Galois Field multiplier for GF(2^128),
which operates on field elements using a polynomial representation
a_127.x^127 + a_126.x^126 + ... + a_1.x + a_0 in which the bits of a bit
sequence are numbered by the power of x that they represent. GCM uses the
polynomial x^128 + x^7 + x^2 + x + 1 as its basis for representation.
The obvious way of representing this in a computer system is to map GF
'x' to the binary integer '2' - but this was way too obvious for any
cryptographer to adopt!
Here bytes are numbered in memory order and bits within bytes according
to their integer numeric significance. The term 'little endian' is then
used to describe mappings in which numeric (power of 2) or field (power
of x) significance increase with increasing bit or byte numbers with
'big endian' being used to describe the inverse situation.
GCM uses little endian byte ordering and big endian bit ordering, a
representation that will be described as LB. Hence the low end of the
field polynomial is in byte[0], which has the value 0xe1 rather than
0x87 in the more obvious mappings.
The related field multiplier can use this mapping but if you want to
use an alternative (e.g. hardware) multiplier that uses a different
polynomial field representation, you can do so by changing the form
used for the field elements when this alternative multiplier is used.
If GF_REPRESENTATION is defined as one of:
REVERSE_BITS // change to LL
REVERSE_BYTES | REVERSE_BITS // change to BL
REVERSE_NONE // no change
REVERSE_BYTES // change to BB
then an appropriate change of representation will occur before and
after calls to your revised field multiplier. To use this you need
to add gf_convert.c to your application.
*/
#if defined(__cplusplus)
extern "C"
{
#endif
#if 1
# undef GF_REPRESENTATION
#elif 0
# define GF_REPRESENTATION REVERSE_BITS
#elif 0
# define GF_REPRESENTATION REVERSE_BYTES | REVERSE_BITS
#elif 0
# define GF_REPRESENTATION REVERSE_NONE
#elif 0
# define GF_REPRESENTATION REVERSE_BYTES
#endif
#define BLOCK_SIZE GCM_BLOCK_SIZE /* block length */
#define BLK_ADR_MASK (BLOCK_SIZE - 1) /* mask for 'in block' address */
#define CTR_POS 12
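/* increment the last four bytes of the counter block as a big-endian
   integer, leaving the 12-byte IV prefix (bytes 0..CTR_POS-1) untouched */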
#define inc_ctr(x) \
{ int i = BLOCK_SIZE; while(i-- > CTR_POS && !++(UI8_PTR(x)[i])) ; }
ret_type gcm_init_and_key( /* initialise mode and set key */
const unsigned char key[], /* the key value */
unsigned long key_len, /* and its length in bytes */
gcm_ctx ctx[1]) /* the mode context */
{
memset(ctx->ghash_h, 0, sizeof(ctx->ghash_h));
/* set the AES key */
aes_encrypt_key(key, key_len, ctx->aes);
/* compute E(0) (for the hash function) */
aes_encrypt(UI8_PTR(ctx->ghash_h), UI8_PTR(ctx->ghash_h), ctx->aes);
#if defined( GF_REPRESENTATION )
convert_representation(ctx->ghash_h, ctx->ghash_h, GF_REPRESENTATION);
#endif
#if defined( TABLES_64K )
init_64k_table(ctx->ghash_h, ctx->gf_t64k);
#elif defined( TABLES_8K )
init_8k_table(ctx->ghash_h, ctx->gf_t8k);
#elif defined( TABLES_4K )
init_4k_table(ctx->ghash_h, ctx->gf_t4k);
#elif defined( TABLES_256 )
init_256_table(ctx->ghash_h, ctx->gf_t256);
#endif
#if defined( GF_REPRESENTATION )
convert_representation(ctx->ghash_h, ctx->ghash_h, GF_REPRESENTATION);
#endif
return RETURN_GOOD;
}
void gf_mul_hh(gf_t a, gcm_ctx ctx[1])
{
#if defined( GF_REPRESENTATION ) || !defined( NO_TABLES )
gf_t scr = {0};
#endif
#if defined( GF_REPRESENTATION )
convert_representation(a, a, GF_REPRESENTATION);
#endif
#if defined( TABLES_64K )
gf_mul_64k(a, ctx->gf_t64k, scr);
#elif defined( TABLES_8K )
gf_mul_8k(a, ctx->gf_t8k, scr);
#elif defined( TABLES_4K )
gf_mul_4k(a, ctx->gf_t4k, scr);
#elif defined( TABLES_256 )
gf_mul_256(a, ctx->gf_t256, scr);
#else
# if defined( GF_REPRESENTATION )
convert_representation(scr, ctx->ghash_h, GF_REPRESENTATION);
gf_mul(a, scr);
# else
gf_mul(a, ctx->ghash_h);
# endif
#endif
#if defined( GF_REPRESENTATION )
convert_representation(a, a, GF_REPRESENTATION);
#endif
}
ret_type gcm_init_message( /* initialise a new message */
const unsigned char iv[], /* the initialisation vector */
unsigned long iv_len, /* and its length in bytes */
gcm_ctx ctx[1]) /* the mode context */
{ uint32_t i = 0, n_pos = 0;
uint8_t *p = NULL;
memset(ctx->ctr_val, 0, BLOCK_SIZE);
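/* per the GCM specification: a 96-bit IV is used directly with the 32-bit
   block counter starting at 1; any other IV length is folded into the
   initial counter value with GHASH, ending with a block that encodes the
   IV length in bits */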
if(iv_len == CTR_POS)
{
memcpy(ctx->ctr_val, iv, CTR_POS); UI8_PTR(ctx->ctr_val)[15] = 0x01;
}
else
{ n_pos = iv_len;
while(n_pos >= BLOCK_SIZE)
{
xor_block_aligned(ctx->ctr_val, ctx->ctr_val, iv);
n_pos -= BLOCK_SIZE;
iv += BLOCK_SIZE;
gf_mul_hh(ctx->ctr_val, ctx);
}
if(n_pos)
{
p = UI8_PTR(ctx->ctr_val);
while(n_pos-- > 0)
*p++ ^= *iv++;
gf_mul_hh(ctx->ctr_val, ctx);
}
n_pos = (iv_len << 3);
for(i = BLOCK_SIZE - 1; n_pos; --i, n_pos >>= 8)
UI8_PTR(ctx->ctr_val)[i] ^= (unsigned char)n_pos;
gf_mul_hh(ctx->ctr_val, ctx);
}
ctx->y0_val = *UI32_PTR(UI8_PTR(ctx->ctr_val) + CTR_POS);
memset(ctx->hdr_ghv, 0, BLOCK_SIZE);
memset(ctx->txt_ghv, 0, BLOCK_SIZE);
ctx->hdr_cnt = 0;
ctx->txt_ccnt = ctx->txt_acnt = 0;
return RETURN_GOOD;
}
ret_type gcm_auth_header( /* authenticate the header */
const unsigned char hdr[], /* the header buffer */
unsigned long hdr_len, /* and its length in bytes */
gcm_ctx ctx[1]) /* the mode context */
{ uint32_t cnt = 0, b_pos = (uint32_t)ctx->hdr_cnt & BLK_ADR_MASK;
if(!hdr_len)
return RETURN_GOOD;
if(ctx->hdr_cnt && b_pos == 0)
gf_mul_hh(ctx->hdr_ghv, ctx);
if(!((hdr - (UI8_PTR(ctx->hdr_ghv) + b_pos)) & BUF_ADRMASK))
{
while(cnt < hdr_len && (b_pos & BUF_ADRMASK))
UI8_PTR(ctx->hdr_ghv)[b_pos++] ^= hdr[cnt++];
while(cnt + BUF_INC <= hdr_len && b_pos <= BLOCK_SIZE - BUF_INC)
{
*UNIT_PTR(UI8_PTR(ctx->hdr_ghv) + b_pos) ^= *UNIT_PTR(hdr + cnt);
cnt += BUF_INC; b_pos += BUF_INC;
}
while(cnt + BLOCK_SIZE <= hdr_len)
{
gf_mul_hh(ctx->hdr_ghv, ctx);
xor_block_aligned(ctx->hdr_ghv, ctx->hdr_ghv, hdr + cnt);
cnt += BLOCK_SIZE;
}
}
else
{
while(cnt < hdr_len && b_pos < BLOCK_SIZE)
UI8_PTR(ctx->hdr_ghv)[b_pos++] ^= hdr[cnt++];
while(cnt + BLOCK_SIZE <= hdr_len)
{
gf_mul_hh(ctx->hdr_ghv, ctx);
xor_block(ctx->hdr_ghv, ctx->hdr_ghv, hdr + cnt);
cnt += BLOCK_SIZE;
}
}
while(cnt < hdr_len)
{
if(b_pos == BLOCK_SIZE)
{
gf_mul_hh(ctx->hdr_ghv, ctx);
b_pos = 0;
}
UI8_PTR(ctx->hdr_ghv)[b_pos++] ^= hdr[cnt++];
}
ctx->hdr_cnt += cnt;
return RETURN_GOOD;
}
ret_type gcm_auth_data( /* authenticate ciphertext data */
const unsigned char data[], /* the data buffer */
unsigned long data_len, /* and its length in bytes */
gcm_ctx ctx[1]) /* the mode context */
{ uint32_t cnt = 0, b_pos = (uint32_t)ctx->txt_acnt & BLK_ADR_MASK;
if(!data_len)
return RETURN_GOOD;
if(ctx->txt_acnt && b_pos == 0)
gf_mul_hh(ctx->txt_ghv, ctx);
if(!((data - (UI8_PTR(ctx->txt_ghv) + b_pos)) & BUF_ADRMASK))
{
while(cnt < data_len && (b_pos & BUF_ADRMASK))
UI8_PTR(ctx->txt_ghv)[b_pos++] ^= data[cnt++];
while(cnt + BUF_INC <= data_len && b_pos <= BLOCK_SIZE - BUF_INC)
{
*UNIT_PTR(UI8_PTR(ctx->txt_ghv) + b_pos) ^= *UNIT_PTR(data + cnt);
cnt += BUF_INC; b_pos += BUF_INC;
}
while(cnt + BLOCK_SIZE <= data_len)
{
gf_mul_hh(ctx->txt_ghv, ctx);
xor_block_aligned(ctx->txt_ghv, ctx->txt_ghv, data + cnt);
cnt += BLOCK_SIZE;
}
}
else
{
while(cnt < data_len && b_pos < BLOCK_SIZE)
UI8_PTR(ctx->txt_ghv)[b_pos++] ^= data[cnt++];
while(cnt + BLOCK_SIZE <= data_len)
{
gf_mul_hh(ctx->txt_ghv, ctx);
xor_block(ctx->txt_ghv, ctx->txt_ghv, data + cnt);
cnt += BLOCK_SIZE;
}
}
while(cnt < data_len)
{
if(b_pos == BLOCK_SIZE)
{
gf_mul_hh(ctx->txt_ghv, ctx);
b_pos = 0;
}
UI8_PTR(ctx->txt_ghv)[b_pos++] ^= data[cnt++];
}
ctx->txt_acnt += cnt;
return RETURN_GOOD;
}
ret_type gcm_crypt_data( /* encrypt or decrypt data */
unsigned char data[], /* the data buffer */
unsigned long data_len, /* and its length in bytes */
gcm_ctx ctx[1]) /* the mode context */
{ uint32_t cnt = 0, b_pos = (uint32_t)ctx->txt_ccnt & BLK_ADR_MASK;
if(!data_len)
return RETURN_GOOD;
if(!((data - (UI8_PTR(ctx->enc_ctr) + b_pos)) & BUF_ADRMASK))
{
if(b_pos)
{
while(cnt < data_len && (b_pos & BUF_ADRMASK))
data[cnt++] ^= UI8_PTR(ctx->enc_ctr)[b_pos++];
while(cnt + BUF_INC <= data_len && b_pos <= BLOCK_SIZE - BUF_INC)
{
*UNIT_PTR(data + cnt) ^= *UNIT_PTR(UI8_PTR(ctx->enc_ctr) + b_pos);
cnt += BUF_INC; b_pos += BUF_INC;
}
}
while(cnt + BLOCK_SIZE <= data_len)
{
inc_ctr(ctx->ctr_val);
aes_encrypt(UI8_PTR(ctx->ctr_val), UI8_PTR(ctx->enc_ctr), ctx->aes);
xor_block_aligned(data + cnt, data + cnt, ctx->enc_ctr);
cnt += BLOCK_SIZE;
}
}
else
{
if(b_pos)
while(cnt < data_len && b_pos < BLOCK_SIZE)
data[cnt++] ^= UI8_PTR(ctx->enc_ctr)[b_pos++];
while(cnt + BLOCK_SIZE <= data_len)
{
inc_ctr(ctx->ctr_val);
aes_encrypt(UI8_PTR(ctx->ctr_val), UI8_PTR(ctx->enc_ctr), ctx->aes);
xor_block(data + cnt, data + cnt, ctx->enc_ctr);
cnt += BLOCK_SIZE;
}
}
while(cnt < data_len)
{
if(b_pos == BLOCK_SIZE || !b_pos)
{
inc_ctr(ctx->ctr_val);
aes_encrypt(UI8_PTR(ctx->ctr_val), UI8_PTR(ctx->enc_ctr), ctx->aes);
b_pos = 0;
}
data[cnt++] ^= UI8_PTR(ctx->enc_ctr)[b_pos++];
}
ctx->txt_ccnt += cnt;
return RETURN_GOOD;
}
ret_type gcm_compute_tag( /* compute authentication tag */
unsigned char tag[], /* the buffer for the tag */
unsigned long tag_len, /* and its length in bytes */
gcm_ctx ctx[1]) /* the mode context */
{ uint32_t i = 0, ln = 0;
gf_t tbuf = {0};
if(ctx->txt_acnt != ctx->txt_ccnt && ctx->txt_ccnt > 0)
return RETURN_ERROR;
gf_mul_hh(ctx->hdr_ghv, ctx);
gf_mul_hh(ctx->txt_ghv, ctx);
if(ctx->hdr_cnt)
{
ln = (uint32_t)((ctx->txt_acnt + BLOCK_SIZE - 1) / BLOCK_SIZE);
if(ln)
{
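/* advance the header hash past the ln ciphertext blocks by multiplying it
   by H^ln, computed here by binary (square-and-multiply) exponentiation,
   so that the header and ciphertext hashes line up as one GHASH stream */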
#if 1 /* alternative versions of the exponentiation operation */
memcpy(tbuf, ctx->ghash_h, BLOCK_SIZE);
# if defined( GF_REPRESENTATION )
convert_representation(tbuf, tbuf, GF_REPRESENTATION);
convert_representation(ctx->hdr_ghv, ctx->hdr_ghv, GF_REPRESENTATION);
# endif
for( ; ; )
{
if(ln & 1)
{
gf_mul((void*)ctx->hdr_ghv, tbuf);
}
if(!(ln >>= 1))
break;
gf_mul(tbuf, tbuf);
}
#else /* this one seems slower on x86 and x86_64 :-( */
i = ln | ln >> 1; i |= i >> 2; i |= i >> 4;
i |= i >> 8; i |= i >> 16; i &= ~(i >> 1);
memset(tbuf, 0, BLOCK_SIZE);
UI8_PTR(tbuf)[0] = 0x80;
while(i)
{
# if defined( GF_REPRESENTATION )
convert_representation(tbuf, tbuf, GF_REPRESENTATION);
# endif
gf_mul(tbuf, tbuf);
# if defined( GF_REPRESENTATION )
convert_representation(tbuf, tbuf, GF_REPRESENTATION);
# endif
if(i & ln)
gf_mul_hh(tbuf, ctx);
i >>= 1;
}
# if defined( GF_REPRESENTATION )
convert_representation(tbuf, tbuf, GF_REPRESENTATION);
convert_representation(ctx->hdr_ghv, ctx->hdr_ghv, GF_REPRESENTATION);
# endif
gf_mul((void*)ctx->hdr_ghv, tbuf);
#endif
#if defined( GF_REPRESENTATION )
convert_representation(ctx->hdr_ghv, ctx->hdr_ghv, GF_REPRESENTATION);
# endif
}
}
i = BLOCK_SIZE;
#ifdef BRG_UI64
{ uint64_t tm = ((uint64_t)ctx->txt_acnt) << 3;
while(i-- > 0)
{
UI8_PTR(ctx->hdr_ghv)[i] ^= UI8_PTR(ctx->txt_ghv)[i] ^ (unsigned char)tm;
tm = (i == 8 ? (((uint64_t)ctx->hdr_cnt) << 3) : tm >> 8);
}
}
#else
{ uint32_t tm = ctx->txt_acnt << 3;
while(i-- > 0)
{
UI8_PTR(ctx->hdr_ghv)[i] ^= UI8_PTR(ctx->txt_ghv)[i] ^ (unsigned char)tm;
if(i & 3)
tm >>= 8;
else if(i == 4)
tm = ctx->txt_acnt >> 29;
else if(i == 8)
tm = ctx->hdr_cnt << 3;
else
tm = ctx->hdr_cnt >> 29;
}
}
#endif
gf_mul_hh(ctx->hdr_ghv, ctx);
memcpy(ctx->enc_ctr, ctx->ctr_val, BLOCK_SIZE);
*UI32_PTR(UI8_PTR(ctx->enc_ctr) + CTR_POS) = ctx->y0_val;
aes_encrypt(UI8_PTR(ctx->enc_ctr), UI8_PTR(ctx->enc_ctr), ctx->aes);
for(i = 0; i < (unsigned int)tag_len; ++i)
tag[i] = (unsigned char)(UI8_PTR(ctx->hdr_ghv)[i] ^ UI8_PTR(ctx->enc_ctr)[i]);
return (ctx->txt_ccnt == ctx->txt_acnt ? RETURN_GOOD : RETURN_WARN);
}
ret_type gcm_end( /* clean up and end operation */
gcm_ctx ctx[1]) /* the mode context */
{
memset(ctx, 0, sizeof(gcm_ctx));
return RETURN_GOOD;
}
ret_type gcm_encrypt( /* encrypt & authenticate data */
unsigned char data[], /* the data buffer */
unsigned long data_len, /* and its length in bytes */
gcm_ctx ctx[1]) /* the mode context */
{
gcm_crypt_data(data, data_len, ctx);
gcm_auth_data(data, data_len, ctx);
return RETURN_GOOD;
}
ret_type gcm_decrypt( /* authenticate & decrypt data */
unsigned char data[], /* the data buffer */
unsigned long data_len, /* and its length in bytes */
gcm_ctx ctx[1]) /* the mode context */
{
gcm_auth_data(data, data_len, ctx);
gcm_crypt_data(data, data_len, ctx);
return RETURN_GOOD;
}
ret_type gcm_encrypt_message( /* encrypt an entire message */
const unsigned char iv[], /* the initialisation vector */
unsigned long iv_len, /* and its length in bytes */
const unsigned char hdr[], /* the header buffer */
unsigned long hdr_len, /* and its length in bytes */
unsigned char msg[], /* the message buffer */
unsigned long msg_len, /* and its length in bytes */
unsigned char tag[], /* the buffer for the tag */
unsigned long tag_len, /* and its length in bytes */
gcm_ctx ctx[1]) /* the mode context */
{
gcm_init_message(iv, iv_len, ctx);
gcm_auth_header(hdr, hdr_len, ctx);
gcm_encrypt(msg, msg_len, ctx);
return gcm_compute_tag(tag, tag_len, ctx) ? RETURN_ERROR : RETURN_GOOD;
}
ret_type gcm_decrypt_message( /* decrypt an entire message */
const unsigned char iv[], /* the initialisation vector */
unsigned long iv_len, /* and its length in bytes */
const unsigned char hdr[], /* the header buffer */
unsigned long hdr_len, /* and its length in bytes */
unsigned char msg[], /* the message buffer */
unsigned long msg_len, /* and its length in bytes */
const unsigned char tag[], /* the buffer for the tag */
unsigned long tag_len, /* and its length in bytes */
gcm_ctx ctx[1]) /* the mode context */
{ uint8_t local_tag[BLOCK_SIZE] = {0};
ret_type rr = 0;
gcm_init_message(iv, iv_len, ctx);
gcm_auth_header(hdr, hdr_len, ctx);
gcm_decrypt(msg, msg_len, ctx);
rr = gcm_compute_tag(local_tag, tag_len, ctx);
return (rr != RETURN_GOOD || memcmp(tag, local_tag, tag_len)) ? RETURN_ERROR : RETURN_GOOD;
}
#if defined(__cplusplus)
}
#endif

@ -0,0 +1,233 @@
/*
---------------------------------------------------------------------------
Copyright (c) 1998-2010, Brian Gladman, Worcester, UK. All rights reserved.
The redistribution and use of this software (with or without changes)
is allowed without the payment of fees or royalties provided that:
source code distributions include the above copyright notice, this
list of conditions and the following disclaimer;
binary distributions include the above copyright notice, this list
of conditions and the following disclaimer in their documentation.
This software is provided 'as is' with no explicit or implied warranties
in respect of its operation, including, but not limited to, correctness
and fitness for purpose.
---------------------------------------------------------------------------
Issue Date: 11/01/2011
I am grateful for the work done by Mark Rodenkirch and Jason Papadopoulos
in helping to remove a bug in the operation of this code on big endian
systems when fast buffer operations are enabled.
---------------------------------------------------------------------------
*/
#ifndef _GCM_H
#define _GCM_H
#include "aes.h"
#include "gf128mul.h"
/* USER DEFINABLE OPTIONS (Further options need to be set in gf128mul.h) */
/* UNIT_BITS sets the size of variables used to process 16-byte buffers
   when the buffer alignment allows this. When buffers are processed
   in bytes, 16 individual operations are involved. But if, say, such
   a buffer is divided into four 32-bit variables, it can then be
   processed in four operations, making the code typically much faster.
   In general it will pay to use the longest natively supported size,
   which will probably be 32 or 64 bits on 32- and 64-bit systems
   respectively.
*/
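/* For illustration (a sketch, not part of this header): with UNIT_BITS
   set to 32, XORing one 16-byte block into another drops from sixteen
   byte operations to four word operations, along these lines:

       static void xor_block_32(uint32_t r[4],
                                const uint32_t a[4], const uint32_t b[4])
       {   r[0] = a[0] ^ b[0]; r[1] = a[1] ^ b[1];
           r[2] = a[2] ^ b[2]; r[3] = a[3] ^ b[3];
       }
*/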
#if defined( UNIT_BITS )
# undef UNIT_BITS
#endif
#if !defined( UNIT_BITS )
# if PLATFORM_BYTE_ORDER == IS_BIG_ENDIAN
# if 0
# define UNIT_BITS 8
# elif 0
# define UNIT_BITS 32
# elif 1
# define UNIT_BITS 64
# endif
# elif defined( _WIN64 )
# define UNIT_BITS 64
# else
# define UNIT_BITS 32
# endif
#endif
#if UNIT_BITS == 64 && !defined( NEED_UINT_64T )
# define NEED_UINT_64T
#endif
/* END OF USER DEFINABLE OPTIONS */
/* After encryption or decryption operations the return value of
'compute tag' will be one of the values RETURN_GOOD, RETURN_WARN
or RETURN_ERROR, the latter indicating an error. A return value
RETURN_GOOD indicates that both encryption and authentication
have taken place and resulted in the returned tag value. If
the returned value is RETURN_WARN, the tag value is the result
of authentication alone without encryption (CCM) or decryption
(GCM and EAX).
*/
#ifndef RETURN_GOOD
# define RETURN_WARN 1
# define RETURN_GOOD 0
# define RETURN_ERROR -1
#endif
#if defined(__cplusplus)
extern "C"
{
#endif
#ifndef RET_TYPE_DEFINED
typedef int ret_type;
#endif
UNIT_TYPEDEF(gcm_unit_t, UNIT_BITS);
BUFR_TYPEDEF(gcm_buf_t, UNIT_BITS, AES_BLOCK_SIZE);
#define GCM_BLOCK_SIZE AES_BLOCK_SIZE
/* The GCM-AES context */
typedef struct
{
#if defined( TABLES_64K )
gf_t64k_a gf_t64k;
#endif
#if defined( TABLES_8K )
gf_t8k_a gf_t8k;
#endif
#if defined( TABLES_4K )
gf_t4k_a gf_t4k;
#endif
#if defined( TABLES_256 )
gf_t256_a gf_t256;
#endif
gcm_buf_t ctr_val; /* CTR counter value */
gcm_buf_t enc_ctr; /* encrypted CTR block */
gcm_buf_t hdr_ghv; /* ghash buffer (header) */
gcm_buf_t txt_ghv; /* ghash buffer (ciphertext) */
gf_t ghash_h; /* ghash H value */
aes_encrypt_ctx aes[1]; /* AES encryption context */
uint32_t y0_val; /* initial counter value */
uint32_t hdr_cnt; /* header bytes so far */
uint32_t txt_ccnt; /* text bytes so far (encrypt) */
uint32_t txt_acnt; /* text bytes so far (auth) */
} gcm_ctx;
/* The following calls handle mode initialisation, keying and completion */
ret_type gcm_init_and_key( /* initialise mode and set key */
const unsigned char key[], /* the key value */
unsigned long key_len, /* and its length in bytes */
gcm_ctx ctx[1]); /* the mode context */
ret_type gcm_end( /* clean up and end operation */
gcm_ctx ctx[1]); /* the mode context */
/* The following calls handle complete messages in memory as one operation */
ret_type gcm_encrypt_message( /* encrypt an entire message */
const unsigned char iv[], /* the initialisation vector */
unsigned long iv_len, /* and its length in bytes */
const unsigned char hdr[], /* the header buffer */
unsigned long hdr_len, /* and its length in bytes */
unsigned char msg[], /* the message buffer */
unsigned long msg_len, /* and its length in bytes */
unsigned char tag[], /* the buffer for the tag */
unsigned long tag_len, /* and its length in bytes */
gcm_ctx ctx[1]); /* the mode context */
/* RETURN_GOOD is returned if the input tag */
/* matches that for the decrypted message */
ret_type gcm_decrypt_message( /* decrypt an entire message */
const unsigned char iv[], /* the initialisation vector */
unsigned long iv_len, /* and its length in bytes */
const unsigned char hdr[], /* the header buffer */
unsigned long hdr_len, /* and its length in bytes */
unsigned char msg[], /* the message buffer */
unsigned long msg_len, /* and its length in bytes */
const unsigned char tag[], /* the buffer for the tag */
unsigned long tag_len, /* and its length in bytes */
gcm_ctx ctx[1]); /* the mode context */
/* The following calls handle messages in a sequence of operations followed */
/* by tag computation after the sequence has been completed. In these calls */
/* the user is responsible for verifying the computed tag on decryption */
ret_type gcm_init_message( /* initialise a new message */
const unsigned char iv[], /* the initialisation vector */
unsigned long iv_len, /* and its length in bytes */
gcm_ctx ctx[1]); /* the mode context */
ret_type gcm_auth_header( /* authenticate the header */
const unsigned char hdr[], /* the header buffer */
unsigned long hdr_len, /* and its length in bytes */
gcm_ctx ctx[1]); /* the mode context */
ret_type gcm_encrypt( /* encrypt & authenticate data */
unsigned char data[], /* the data buffer */
unsigned long data_len, /* and its length in bytes */
gcm_ctx ctx[1]); /* the mode context */
ret_type gcm_decrypt( /* authenticate & decrypt data */
unsigned char data[], /* the data buffer */
unsigned long data_len, /* and its length in bytes */
gcm_ctx ctx[1]); /* the mode context */
ret_type gcm_compute_tag( /* compute authentication tag */
unsigned char tag[], /* the buffer for the tag */
unsigned long tag_len, /* and its length in bytes */
gcm_ctx ctx[1]); /* the mode context */
/* The use of the following calls should be avoided if possible because
their use requires a very good understanding of the way this encryption
mode works and the way in which this code implements it in order to use
them correctly.
The gcm_auth_data routine is used to authenticate encrypted message data.
In message encryption gcm_crypt_data must be called before gcm_auth_data
is called since it is encrypted data that is authenticated. In message
decryption authentication must occur before decryption and data can be
authenticated without being decrypted if necessary.
If these calls are used it is up to the user to ensure that these routines
are called in the correct order and that the correct data is passed to
them.
When gcm_compute_tag is called it is assumed that an error in use has
occurred if both encryption (or decryption) and authentication have taken
place but the total lengths of the message data respectively authenticated
and encrypted are not the same. If authentication has taken place but
there has been no corresponding encryption or decryption operations (none
at all) only a warning is issued. This should be treated as an error if it
occurs during encryption but it is only signalled as a warning as it might
be intentional when decryption operations are involved (this avoids having
different compute tag functions for encryption and decryption). Decryption
operations can be undertaken freely after authentication but if the tag is
computed after such operations an error will be signalled if the lengths
of the data authenticated and decrypted don't match.
*/
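/* Order of the low-level calls for one message (a sketch matching what
   gcm_encrypt() and gcm_decrypt() above do internally):

       gcm_init_message(iv, iv_len, ctx);
       gcm_auth_header(hdr, hdr_len, ctx);
       gcm_crypt_data(buf, len, ctx);   // encrypt: crypt first, then ...
       gcm_auth_data(buf, len, ctx);    // ... authenticate the ciphertext
       gcm_compute_tag(tag, 16, ctx);

   for decryption call gcm_auth_data() on the ciphertext before
   gcm_crypt_data(), then verify the computed tag yourself. */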
ret_type gcm_auth_data( /* authenticate ciphertext data */
const unsigned char data[], /* the data buffer */
unsigned long data_len, /* and its length in bytes */
gcm_ctx ctx[1]); /* the mode context */
ret_type gcm_crypt_data( /* encrypt or decrypt data */
unsigned char data[], /* the data buffer */
unsigned long data_len, /* and its length in bytes */
gcm_ctx ctx[1]); /* the mode context */
#if defined(__cplusplus)
}
#endif
#endif

@ -80,7 +80,7 @@ extern "C"
}
AES_RETURN aes_xi(encrypt_key128)(const unsigned char *key, aes_encrypt_ctx cx[1])
{ uint32_t ss[4];
{ uint32_t ss[4] = {0};
cx->ks[0] = ss[0] = word_in(key, 0);
cx->ks[1] = ss[1] = word_in(key, 1);
@ -94,7 +94,7 @@ AES_RETURN aes_xi(encrypt_key128)(const unsigned char *key, aes_encrypt_ctx cx[1
ke4(cx->ks, 6); ke4(cx->ks, 7);
ke4(cx->ks, 8);
#else
{ uint32_t i;
{ uint32_t i = 0;
for(i = 0; i < 9; ++i)
ke4(cx->ks, i);
}
@ -128,7 +128,7 @@ AES_RETURN aes_xi(encrypt_key128)(const unsigned char *key, aes_encrypt_ctx cx[1
}
AES_RETURN aes_xi(encrypt_key192)(const unsigned char *key, aes_encrypt_ctx cx[1])
{ uint32_t ss[6];
{ uint32_t ss[6] = {0};
cx->ks[0] = ss[0] = word_in(key, 0);
cx->ks[1] = ss[1] = word_in(key, 1);
@ -143,7 +143,7 @@ AES_RETURN aes_xi(encrypt_key192)(const unsigned char *key, aes_encrypt_ctx cx[1
ke6(cx->ks, 4); ke6(cx->ks, 5);
ke6(cx->ks, 6);
#else
{ uint32_t i;
{ uint32_t i = 0;
for(i = 0; i < 7; ++i)
ke6(cx->ks, i);
}
@ -179,7 +179,7 @@ AES_RETURN aes_xi(encrypt_key192)(const unsigned char *key, aes_encrypt_ctx cx[1
}
AES_RETURN aes_xi(encrypt_key256)(const unsigned char *key, aes_encrypt_ctx cx[1])
{ uint32_t ss[8];
{ uint32_t ss[8] = {0};
cx->ks[0] = ss[0] = word_in(key, 0);
cx->ks[1] = ss[1] = word_in(key, 1);
@ -195,7 +195,7 @@ AES_RETURN aes_xi(encrypt_key256)(const unsigned char *key, aes_encrypt_ctx cx[1
ke8(cx->ks, 2); ke8(cx->ks, 3);
ke8(cx->ks, 4); ke8(cx->ks, 5);
#else
{ uint32_t i;
{ uint32_t i = 0;
for(i = 0; i < 6; ++i)
ke8(cx->ks, i);
}
@ -302,7 +302,7 @@ AES_RETURN aes_xi(encrypt_key256)(const unsigned char *key, aes_encrypt_ctx cx[1
#endif
AES_RETURN aes_xi(decrypt_key128)(const unsigned char *key, aes_decrypt_ctx cx[1])
{ uint32_t ss[5];
{ uint32_t ss[5] = {0};
#if defined( d_vars )
d_vars;
#endif
@ -319,7 +319,7 @@ AES_RETURN aes_xi(decrypt_key128)(const unsigned char *key, aes_decrypt_ctx cx[1
kd4(cx->ks, 6); kd4(cx->ks, 7);
kd4(cx->ks, 8); kdl4(cx->ks, 9);
#else
{ uint32_t i;
{ uint32_t i = 0;
for(i = 0; i < 10; ++i)
k4e(cx->ks, i);
#if !(DEC_ROUND == NO_TABLES)
@ -382,7 +382,7 @@ AES_RETURN aes_xi(decrypt_key128)(const unsigned char *key, aes_decrypt_ctx cx[1
}
AES_RETURN aes_xi(decrypt_key192)(const unsigned char *key, aes_decrypt_ctx cx[1])
{ uint32_t ss[7];
{ uint32_t ss[7] = {0};
#if defined( d_vars )
d_vars;
#endif
@ -404,7 +404,7 @@ AES_RETURN aes_xi(decrypt_key192)(const unsigned char *key, aes_decrypt_ctx cx[1
#else
cx->ks[v(48,(4))] = ss[4] = word_in(key, 4);
cx->ks[v(48,(5))] = ss[5] = word_in(key, 5);
{ uint32_t i;
{ uint32_t i = 0;
for(i = 0; i < 7; ++i)
k6e(cx->ks, i);
@ -476,7 +476,7 @@ AES_RETURN aes_xi(decrypt_key192)(const unsigned char *key, aes_decrypt_ctx cx[1
}
AES_RETURN aes_xi(decrypt_key256)(const unsigned char *key, aes_decrypt_ctx cx[1])
{ uint32_t ss[9];
{ uint32_t ss[9] = {0};
#if defined( d_vars )
d_vars;
#endif
@ -504,7 +504,7 @@ AES_RETURN aes_xi(decrypt_key256)(const unsigned char *key, aes_decrypt_ctx cx[1
cx->ks[v(56,(5))] = ss[5] = word_in(key, 5);
cx->ks[v(56,(6))] = ss[6] = word_in(key, 6);
cx->ks[v(56,(7))] = ss[7] = word_in(key, 7);
{ uint32_t i;
{ uint32_t i = 0;
for(i = 0; i < 6; ++i)
k8e(cx->ks, i);

@ -95,9 +95,7 @@ Issue Date: 20/12/2007
/* PLATFORM SPECIFIC INCLUDES */
#define IS_BIG_ENDIAN 4321
#define IS_LITTLE_ENDIAN 1234
#define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
#include "brg_endian.h"
/* CONFIGURATION - THE USE OF DEFINES

@ -268,7 +268,7 @@ uint8_t inv_affine(const uint8_t x)
static int init = 0;
AES_RETURN aes_init(void)
{ uint32_t i, w;
{ uint32_t i = 0, w = 0;
#if defined(FF_TABLES)
@ -303,7 +303,7 @@ AES_RETURN aes_init(void)
}
for(i = 0; i < 256; ++i)
{ uint8_t b;
{ uint8_t b = 0;
b = fwd_affine(gf_inv((uint8_t)i));
w = bytes2word(f2(b), b, b, f3(b));

@ -57,7 +57,7 @@ void out_state(long s0, long s1, long s2, long s3)
}
void oblk(char m[], unsigned char v[], unsigned long n)
{ unsigned long i;
{ unsigned long i = 0;
printf("\n%s", m);
@ -117,9 +117,9 @@ void cycles(volatile uint64_t *rtn)
}
int main(void)
{ unsigned char out[32], ret[32], err = 0;
f_ectx alge[1];
f_dctx algd[1];
{ unsigned char out[32] = {0}, ret[32] = {0}, err = 0;
f_ectx alge[1] = {0};
f_dctx algd[1] = {0};
aes_init();

@ -0,0 +1,29 @@
/*
---------------------------------------------------------------------------
Copyright (c) 1998-2013, Brian Gladman, Worcester, UK. All rights reserved.
The redistribution and use of this software (with or without changes)
is allowed without the payment of fees or royalties provided that:
source code distributions include the above copyright notice, this
list of conditions and the following disclaimer;
binary distributions include the above copyright notice, this list
of conditions and the following disclaimer in their documentation.
This software is provided 'as is' with no explicit or implied warranties
in respect of its operation, including, but not limited to, correctness
and fitness for purpose.
---------------------------------------------------------------------------
Issue Date: 10/09/2018
*/
#ifndef _BRG_ENDIAN_H
#define _BRG_ENDIAN_H
#define IS_BIG_ENDIAN 4321 /* byte 0 is most significant (mc68k) */
#define IS_LITTLE_ENDIAN 1234 /* byte 0 is least significant (i386) */
#define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
#endif

@ -0,0 +1,471 @@
/*
---------------------------------------------------------------------------
Copyright (c) 1998-2010, Brian Gladman, Worcester, UK. All rights reserved.
The redistribution and use of this software (with or without changes)
is allowed without the payment of fees or royalties provided that:
source code distributions include the above copyright notice, this
list of conditions and the following disclaimer;
binary distributions include the above copyright notice, this list
of conditions and the following disclaimer in their documentation.
This software is provided 'as is' with no explicit or implied warranties
in respect of its operation, including, but not limited to, correctness
and fitness for purpose.
---------------------------------------------------------------------------
Issue Date: 20/12/2007
This file provides fast multiplication in GF(128) as required by several
cryptographic authentication modes (see gf128mul.h).
*/
/* Speed critical loops can be unrolled to gain speed but consume more memory */
#if 1
# define UNROLL_LOOPS
#endif
/* The order of these includes matters */
#include "mode_hdr.h"
#include "gf128mul.h"
#include "gf_mul_lo.h"
#if defined( GF_MODE_LL )
# define mode _ll
#elif defined( GF_MODE_BL )
# define mode _bl
#elif defined( GF_MODE_LB )
# define mode _lb
#elif defined( GF_MODE_BB )
# define mode _bb
#else
# error mode is not defined
#endif
#if defined( GF_MODE_LL) || defined( GF_MODE_LB )
# define GF_INDEX(i) (i)
#else
# define GF_INDEX(i) (15 - (i))
#endif
/* A slow field multiplier */
void gf_mul(gf_t a, const gf_t b)
{ gf_t p[8] = {0};
uint8_t *q = NULL, ch = 0;
int i = 0;
copy_block_aligned(p[0], a);
for(i = 0; i < 7; ++i)
gf_mulx1(mode)(p[i + 1], p[i]);
q = (uint8_t*)(a == b ? p[0] : b);
memset(a, 0, GF_BYTE_LEN);
for(i = 15 ; ; )
{
ch = q[GF_INDEX(i)];
if(ch & X_0)
xor_block_aligned(a, a, p[0]);
if(ch & X_1)
xor_block_aligned(a, a, p[1]);
if(ch & X_2)
xor_block_aligned(a, a, p[2]);
if(ch & X_3)
xor_block_aligned(a, a, p[3]);
if(ch & X_4)
xor_block_aligned(a, a, p[4]);
if(ch & X_5)
xor_block_aligned(a, a, p[5]);
if(ch & X_6)
xor_block_aligned(a, a, p[6]);
if(ch & X_7)
xor_block_aligned(a, a, p[7]);
if(!i--)
break;
gf_mulx8(mode)(a);
}
}
#if defined( TABLES_64K )
/* This version uses 64k bytes of table space on the stack.
An input variable field value in a[] has to be multiplied
by a key value in g[] that changes far less frequently.
To do this a[] is split up into 16 smaller field values,
each one byte in length. For the 256 values of each of
these smaller values, we can precompute the result of
multiplying g by this field value. We can then combine
these values to provide the full multiply. So for each
of 16 bytes we have a table of 256 field values each of
16 bytes - 64k bytes in total.
*/
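Restated as a formula (a hedged paraphrase of the comment above; which byte of a plays the role of $a_i$ depends on the GF mode in use): writing $a = \bigoplus_{i=0}^{15} a_i x^{8i}$ with each $a_i$ one byte,
$$a \cdot g \;=\; \bigoplus_{i=0}^{15} T_i[a_i], \qquad T_i[b] \;=\; (b \cdot g)\, x^{8i} \quad \text{precomputed for } b = 0, \ldots, 255.$$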
void init_64k_table(const gf_t g, gf_t64k_t t)
{ int i = 0, j = 0, k = 0;
/*
depending on the representation we have to process bits
within bytes high to low (0xe1 style) or low to high
(0x87 style). We start by producing the powers x, x^2
.. x^7 and put them in t[0][1], t[0][2] .. t[0][128] or in
t[0][128], t[0][64] .. t[0][1] depending on the bit order in use.
*/
/* clear the element for the zero field element */
memset(t[0][0], 0, GF_BYTE_LEN);
#if defined( GF_MODE_LL ) || defined( GF_MODE_BL )
/* g -> t[0][1], generate t[0][2] ... */
memcpy(t[0][1], g, GF_BYTE_LEN);
for(j = 1; j <= 64; j <<= 1)
gf_mulx1(mode)(t[0][j + j], t[0][j]);
#else
/* g -> t[0][128], generate t[0][64] ... */
memcpy(t[0][128], g, GF_BYTE_LEN);
for(j = 64; j >= 1; j >>= 1)
gf_mulx1(mode)(t[0][j], t[0][j + j]);
#endif
for( ; ; )
{
/* if { n } stands for the field value represented by
the integer n, we can express higher multiplies in
the table as follows:
1. g * { 3} = g * {2} ^ g * {1}
2. g * { 5} = g * {4} ^ g * {1}
g * { 6} = g * {4} ^ g * {2}
g * { 7} = g * {4} ^ g * {3}
3. g * { 9} = g * {8} ^ g * {1}
g * {10} = g * {8} ^ g * {2}
....
and so on. This is what the following loops do.
*/
for(j = 2; j < 256; j += j)
for(k = 1; k < j; ++k)
xor_block_aligned(t[i][j + k], t[i][j], t[i][k]);
if(++i == GF_BYTE_LEN) /* all 16 byte positions done */
return;
/* We now move to the next byte up and set up its eight
starting values by multiplying the values in the
lower table by x^8
*/
memset(t[i][0], 0, GF_BYTE_LEN);
for(j = 128; j > 0; j >>= 1)
{
memcpy(t[i][j], t[i - 1][j], GF_BYTE_LEN);
gf_mulx8(mode)(t[i][j]);
}
}
}
#define xor_64k(i,ap,t,r) xor_block_aligned(r, r, t[i][ap[GF_INDEX(i)]])
#if defined( UNROLL_LOOPS )
void gf_mul_64k(gf_t a, const gf_t64k_t t, gf_t r)
{ uint8_t *ap = (uint8_t*)a;
memset(r, 0, GF_BYTE_LEN);
xor_64k(15, ap, t, r); xor_64k(14, ap, t, r);
xor_64k(13, ap, t, r); xor_64k(12, ap, t, r);
xor_64k(11, ap, t, r); xor_64k(10, ap, t, r);
xor_64k( 9, ap, t, r); xor_64k( 8, ap, t, r);
xor_64k( 7, ap, t, r); xor_64k( 6, ap, t, r);
xor_64k( 5, ap, t, r); xor_64k( 4, ap, t, r);
xor_64k( 3, ap, t, r); xor_64k( 2, ap, t, r);
xor_64k( 1, ap, t, r); xor_64k( 0, ap, t, r);
copy_block_aligned(a, r);
}
#else
void gf_mul_64k(gf_t a, const gf_t64k_t t, gf_t r)
{ int i = 0;
uint8_t *ap = (uint8_t*)a;
memset(r, 0, GF_BYTE_LEN);
for(i = 15; i >= 0; --i)
{
xor_64k(i,ap,t,r);
}
copy_block_aligned(a, r);
}
#endif
#endif
#if defined( TABLES_8K )
/* This version uses 8k bytes of table space on the stack.
An input field value in a[] has to be multiplied by a
key value in g[]. To do this a[] is split up into 32
smaller field values each 4-bits in length. For the
16 values of each of these smaller field values we can
precompute the result of multiplying g[] by the field
value in question. So for each of 32 nibbles we have a
table of 16 field values, each of 16 bytes - 8k bytes
in total.
*/
void init_8k_table(const gf_t g, gf_t8k_t t)
{ int i = 0, j = 0, k = 0;
/* do the low 4-bit nibble first - t[0][16] - and note
that the unit multiplier sits at 0x01 - t[0][1] in
the table. Then multiplies by x go at 2, 4, 8
*/
/* set the table elements for a zero multiplier */
memset(t[0][0], 0, GF_BYTE_LEN);
memset(t[1][0], 0, GF_BYTE_LEN);
#if defined( GF_MODE_LL ) || defined( GF_MODE_BL )
/* t[0][1] = g, compute t[0][2], t[0][4], t[0][8] */
memcpy(t[0][1], g, GF_BYTE_LEN);
for(j = 1; j <= 4; j <<= 1)
gf_mulx1(mode)(t[0][j + j], t[0][j]);
/* t[1][1] = t[0][1] * x^4 = t[0][8] * x */
gf_mulx1(mode)(t[1][1], t[0][8]);
for(j = 1; j <= 4; j <<= 1)
gf_mulx1(mode)(t[1][j + j], t[1][j]);
#else
/* g -> t[0][8], compute t[0][4], t[0][2], t[0][1] */
memcpy(t[1][8], g, GF_BYTE_LEN);
for(j = 4; j >= 1; j >>= 1)
gf_mulx1(mode)(t[1][j], t[1][j + j]);
/* t[1][1] = t[0][1] * x^4 = t[0][8] * x */
gf_mulx1(mode)(t[0][8], t[1][1]);
for(j = 4; j >= 1; j >>= 1)
gf_mulx1(mode)(t[0][j], t[0][j + j]);
#endif
for( ; ; )
{
for(j = 2; j < 16; j += j)
for(k = 1; k < j; ++k)
xor_block_aligned(t[i][j + k], t[i][j], t[i][k]);
if(++i == 2 * GF_BYTE_LEN)
return;
if(i > 1)
{
memset(t[i][0], 0, GF_BYTE_LEN);
for(j = 8; j > 0; j >>= 1)
{
memcpy(t[i][j], t[i - 2][j], GF_BYTE_LEN);
gf_mulx8(mode)(t[i][j]);
}
}
}
}
#define xor_8k(i,ap,t,r) \
xor_block_aligned(r, r, t[i + i][ap[GF_INDEX(i)] & 15]); \
xor_block_aligned(r, r, t[i + i + 1][ap[GF_INDEX(i)] >> 4])
#if defined( UNROLL_LOOPS )
void gf_mul_8k(gf_t a, const gf_t8k_t t, gf_t r)
{ uint8_t *ap = (uint8_t*)a;
memset(r, 0, GF_BYTE_LEN);
xor_8k(15, ap, t, r); xor_8k(14, ap, t, r);
xor_8k(13, ap, t, r); xor_8k(12, ap, t, r);
xor_8k(11, ap, t, r); xor_8k(10, ap, t, r);
xor_8k( 9, ap, t, r); xor_8k( 8, ap, t, r);
xor_8k( 7, ap, t, r); xor_8k( 6, ap, t, r);
xor_8k( 5, ap, t, r); xor_8k( 4, ap, t, r);
xor_8k( 3, ap, t, r); xor_8k( 2, ap, t, r);
xor_8k( 1, ap, t, r); xor_8k( 0, ap, t, r);
copy_block_aligned(a, r);
}
#else
void gf_mul_8k(gf_t a, const gf_t8k_t t, gf_t r)
{ int i = 0;
uint8_t *ap = (uint8_t*)a;
memset(r, 0, GF_BYTE_LEN);
for(i = 15; i >= 0; --i)
{
xor_8k(i,ap,t,r);
}
copy_block_aligned(a, r);
}
#endif
#endif
#if defined( TABLES_4K )
/* This version uses 4k bytes of table space on the stack.
A 16 byte buffer has to be multiplied by a 16 byte key
value in GF(128). If we consider a GF(128) value in a
single byte, we can construct a table of the 256 16
byte values that result from multiplying g by the 256
values of this byte. This requires 4096 bytes.
If we take the highest byte in the buffer and use this
table to multiply it by g, we then have to multiply it
by x^120 to get the final value. For the next highest
byte the result has to be multiplied by x^112 and so on.
But we can do this by accumulating the result in an
accumulator starting with the result for the top byte.
We repeatedly multiply the accumulator value by x^8 and
then add in (i.e. xor) the 16 bytes of the next lower
byte in the buffer, stopping when we reach the lowest
byte. This requires a 4096 byte table.
*/
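The accumulation described above is Horner's rule over GF(2^128) (a hedged paraphrase; $T[b] = b \cdot g$ is the 4k table, byte order again per the GF mode):
$$a \cdot g \;=\; \Big( \cdots \big( T[a_{15}]\, x^{8} \oplus T[a_{14}] \big)\, x^{8} \oplus \cdots \Big)\, x^{8} \oplus T[a_0].$$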
void init_4k_table(const gf_t g, gf_t4k_t t)
{ int j = 0, k = 0;
memset(t[0], 0, GF_BYTE_LEN);
#if defined( GF_MODE_LL ) || defined( GF_MODE_BL )
memcpy(t[1], g, GF_BYTE_LEN);
for(j = 1; j <= 64; j <<= 1)
gf_mulx1(mode)(t[j + j], t[j]);
#else
memcpy(t[128], g, GF_BYTE_LEN);
for(j = 64; j >= 1; j >>= 1)
gf_mulx1(mode)(t[j], t[j + j]);
#endif
for(j = 2; j < 256; j += j)
for(k = 1; k < j; ++k)
xor_block_aligned(t[j + k], t[j], t[k]);
}
#define xor_4k(i,ap,t,r) gf_mulx8(mode)(r); xor_block_aligned(r, r, t[ap[GF_INDEX(i)]])
#if defined( UNROLL_LOOPS )
void gf_mul_4k(gf_t a, const gf_t4k_t t, gf_t r)
{ uint8_t *ap = (uint8_t*)a;
memset(r, 0, GF_BYTE_LEN);
xor_4k(15, ap, t, r); xor_4k(14, ap, t, r);
xor_4k(13, ap, t, r); xor_4k(12, ap, t, r);
xor_4k(11, ap, t, r); xor_4k(10, ap, t, r);
xor_4k( 9, ap, t, r); xor_4k( 8, ap, t, r);
xor_4k( 7, ap, t, r); xor_4k( 6, ap, t, r);
xor_4k( 5, ap, t, r); xor_4k( 4, ap, t, r);
xor_4k( 3, ap, t, r); xor_4k( 2, ap, t, r);
xor_4k( 1, ap, t, r); xor_4k( 0, ap, t, r);
copy_block_aligned(a, r);
}
#else
void gf_mul_4k(gf_t a, const gf_t4k_t t, gf_t r)
{ int i = 15;
uint8_t *ap = (uint8_t*)a;
memset(r, 0, GF_BYTE_LEN);
for(i = 15; i >= 0; --i)
{
xor_4k(i, ap, t, r);
}
copy_block_aligned(a, r);
}
#endif
#endif
#if defined( TABLES_256 )
/* This version uses 256 bytes of table space on the stack.
A 16 byte buffer has to be multiplied by a 16 byte key
value in GF(128). If we consider a GF(128) value in a
single 4-bit nibble, we can construct a table of the 16
16 byte values that result from the 16 values of this
byte. This requires 256 bytes. If we take the highest
4-bit nibble in the buffer and use this table to get the
result, we then have to multiply by x^124 to get the
final value. For the next highest byte the result has to
be multiplied by x^120 and so on. But we can do this by
accumulating the result in an accumulator starting with
the result for the top nibble. We repeatedly multiply
the accumulator value by x^4 and then add in (i.e. xor)
the 16 bytes of the next lower nibble in the buffer,
stopping when we reach the lowest nibble. This uses a
256 byte table.
*/
void init_256_table(const gf_t g, gf_t256_t t)
{ int j = 0, k = 0;
memset(t[0], 0, GF_BYTE_LEN);
#if defined( GF_MODE_LL ) || defined( GF_MODE_BL )
memcpy(t[1], g, GF_BYTE_LEN);
for(j = 1; j <= 4; j <<= 1)
gf_mulx1(mode)(t[j + j], t[j]);
#else
memcpy(t[8], g, GF_BYTE_LEN);
for(j = 4; j >= 1; j >>= 1)
gf_mulx1(mode)(t[j], t[j + j]);
#endif
for(j = 2; j < 16; j += j)
for(k = 1; k < j; ++k)
xor_block_aligned(t[j + k], t[j], t[k]);
}
#define x_lo(i,ap,t,r) gf_mulx4(mode)(r); xor_block_aligned(r, r, t[ap[GF_INDEX(i)] & 0x0f])
#define x_hi(i,ap,t,r) gf_mulx4(mode)(r); xor_block_aligned(r, r, t[ap[GF_INDEX(i)] >> 4])
#if defined( GF_MODE_LL ) || defined( GF_MODE_BL )
#define xor_256(a,b,c,d) x_hi(a,b,c,d); x_lo(a,b,c,d)
#else
#define xor_256(a,b,c,d) x_lo(a,b,c,d); x_hi(a,b,c,d)
#endif
#if defined( UNROLL_LOOPS )
void gf_mul_256(gf_t a, const gf_t256_t t, gf_t r)
{ uint8_t *ap = (uint8_t*)a;
memset(r, 0, GF_BYTE_LEN);
xor_256(15, ap, t, r); xor_256(14, ap, t, r);
xor_256(13, ap, t, r); xor_256(12, ap, t, r);
xor_256(11, ap, t, r); xor_256(10, ap, t, r);
xor_256( 9, ap, t, r); xor_256( 8, ap, t, r);
xor_256( 7, ap, t, r); xor_256( 6, ap, t, r);
xor_256( 5, ap, t, r); xor_256( 4, ap, t, r);
xor_256( 3, ap, t, r); xor_256( 2, ap, t, r);
xor_256( 1, ap, t, r); xor_256( 0, ap, t, r);
copy_block_aligned(a, r);
}
#else
void gf_mul_256(gf_t a, const gf_t256_t t, gf_t r)
{ int i = 0;
uint8_t *ap = (uint8_t*)a;
memset(r, 0, GF_BYTE_LEN);
for(i = 15; i >= 0; --i)
{
xor_256(i, ap, t, r);
}
copy_block_aligned(a, r);
}
#endif
#endif

@ -0,0 +1,215 @@
/*
---------------------------------------------------------------------------
Copyright (c) 1998-2010, Brian Gladman, Worcester, UK. All rights reserved.
The redistribution and use of this software (with or without changes)
is allowed without the payment of fees or royalties provided that:
source code distributions include the above copyright notice, this
list of conditions and the following disclaimer;
binary distributions include the above copyright notice, this list
of conditions and the following disclaimer in their documentation.
This software is provided 'as is' with no explicit or implied warranties
in respect of its operation, including, but not limited to, correctness
and fitness for purpose.
---------------------------------------------------------------------------
Issue Date: 11/01/2011
I am grateful for the work done by Mark Rodenkirch and Jason Papadopoulos
in helping to remove a bug in the operation of this code on big endian
systems when fast buffer operations are enabled.
---------------------------------------------------------------------------
An implementation of field multiplication in the Galois Field GF(2^128)
A polynomial representation is used for the field with the coefficients
held in bit sequences in which the bit numbers are the powers of x that
a bit represents. The field polynomial used is (x^128+x^7+x^2+x+1).
The obvious way of representing field elements in a computer system is
to map 'x' in the field to the binary integer '2'. But this was way too
obvious for cryptographers!
Here bytes are numbered in their memory order and bits within bytes are
numbered according to their integer numeric significance (that is as is
now normal with bit 0 representing unity). The term 'little endian'
will then be used to describe mappings where numeric (power of 2) or field
(power of x) significance increases with increasing bit or byte numbers
with 'big endian' being used to describe the inverse situation.
The GF bit sequence can then be mapped onto 8-bit bytes in computer
memory in one of four simple ways:
A mapping in which x maps to the integer 2 in little endian
form for both bytes and bits within bytes:
LL: bit for x^n ==> bit for 2^(n % 8) in byte[n / 8]
A mapping in which x maps to the integer 2 in big endian form
for both bytes and bits within bytes:
BL: bit for x^n ==> bit for 2^(n % 8) in byte[15 - n / 8]
A little endian mapping for bytes but with the bits within
bytes in reverse order (big endian bytes):
LB: bit for x^n ==> bit for 2^(7 - n % 8) in byte[n / 8]
A big endian mapping for bytes but with the bits within
bytes in reverse order (big endian bytes):
BB: bit for x^n ==> bit for 2^(7 - n % 8) in byte[15 - n / 8]
128-bit field elements are represented by 16 byte buffers but for
processing efficiency reasons it is often desirable to process arrays
of bytes using longer types such as, for example, unsigned long values.
The type used for these processing units will be called a 'gf_unit_t'
and the buffer itself will be referred to as a 'gf_t' type.
The field multiplier is based on the assumption that one of the two
field elements involved in multiplication will change only relatively
infrequently, making it worthwhile to precompute tables to speed up
multiplication by this value.
*/
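To make the mappings concrete, here is a small illustrative sketch (not part of the commit) encoding the unit element and the element x in the LL and LB (GCM) representations, following the bit-placement rules quoted above:
#include <stdint.h>
/* Sketch: x^n maps to bit 2^(n % 8) (LL) or bit 2^(7 - n % 8) (LB)
   of byte[n / 8]; the remaining bytes are zero. */
static const uint8_t one_ll[16] = {0x01}; /* x^0: bit 0 of byte[0] */
static const uint8_t one_lb[16] = {0x80}; /* x^0: bit 7 of byte[0] */
static const uint8_t x_ll[16] = {0x02};   /* x^1: bit 1 of byte[0] */
static const uint8_t x_lb[16] = {0x40};   /* x^1: bit 6 of byte[0] */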
#ifndef _GF128MUL_H
#define _GF128MUL_H
#include <stdlib.h>
#include <string.h>
#include "brg_endian.h"
/* USER DEFINABLE OPTIONS */
/* UNIT_BITS sets the size of variables used to process 16 byte buffers
when the buffer alignment allows this. When buffers are processed
in bytes, 16 individual operations are involved. But if, say, such
a buffer is divided into four 32-bit variables, it can then be processed
in 4 operations, making the code typically much faster. In general
it will pay to use the longest natively supported size, which will
probably be 32 or 64 bits in 32 and 64 bit systems respectively.
*/
#if defined( UNIT_BITS )
# undef UNIT_BITS
#endif
#if !defined( UNIT_BITS )
# if PLATFORM_BYTE_ORDER == IS_BIG_ENDIAN
# if 0
# define UNIT_BITS 8
# elif 0
# define UNIT_BITS 32
# elif 1
# define UNIT_BITS 64
# endif
# elif defined( _WIN64 )
# define UNIT_BITS 64
# else
# define UNIT_BITS 32
# endif
#endif
#if UNIT_BITS == 64 && !defined( NEED_UINT_64T )
# define NEED_UINT_64T
#endif
#include "mode_hdr.h"
/* Choose the Galois Field representation to use (see above) */
#if 0
# define GF_MODE_LL
#elif 0
# define GF_MODE_BL
#elif 1
# define GF_MODE_LB /* the representation used by GCM */
#elif 0
# define GF_MODE_BB
#else
# error mode is not defined
#endif
/* Table sizes for GF(128) Multiply. Normally larger tables give
higher speed but cache loading might change this. Normally only
one table size (or none at all) will be specified here
*/
#if 0
# define TABLES_64K
#endif
#if 0
# define TABLES_8K
#endif
#if 0
# define TABLES_4K
#endif
#if 0
# define TABLES_256
#endif
/* END OF USER DEFINABLE OPTIONS */
#if !(defined( TABLES_64K ) || defined( TABLES_8K ) \
|| defined( TABLES_4K ) || defined( TABLES_256 ))
# define NO_TABLES
#endif
#if defined(__cplusplus)
extern "C"
{
#endif
#define GF_BYTE_LEN 16
#define GF_UNIT_LEN (GF_BYTE_LEN / (UNIT_BITS >> 3))
UNIT_TYPEDEF(gf_unit_t, UNIT_BITS);
BUFR_TYPEDEF(gf_t, UNIT_BITS, GF_BYTE_LEN);
/* Code for conversion between the four different galois field representations
is optionally available using gf_convert.c
*/
typedef enum { REVERSE_NONE = 0, REVERSE_BITS = 1, REVERSE_BYTES = 2 } transform;
void convert_representation(gf_t dest, const gf_t source, transform rev);
void gf_mul(gf_t a, const gf_t b); /* slow field multiply */
/* types and calls for 64k table driven field multiplier */
typedef gf_t gf_t64k_a[16][256];
typedef gf_t (*gf_t64k_t)[256];
void init_64k_table(const gf_t g, gf_t64k_t t);
void gf_mul_64k(gf_t a, const gf_t64k_t t, gf_t r);
/* types and calls for 8k table driven field multiplier */
typedef gf_t gf_t8k_a[32][16];
typedef gf_t (*gf_t8k_t)[16];
void init_8k_table(const gf_t g, gf_t8k_t t);
void gf_mul_8k(gf_t a, const gf_t8k_t t, gf_t r);
/* types and calls for 4k table driven field multiplier */
typedef gf_t gf_t4k_a[256];
typedef gf_t (*gf_t4k_t);
void init_4k_table(const gf_t g, gf_t4k_t t);
void gf_mul_4k(gf_t a, const gf_t4k_t t, gf_t r);
/* types and calls for 256 byte table driven field multiplier */
typedef gf_t gf_t256_a[16];
typedef gf_t (*gf_t256_t);
void init_256_table(const gf_t g, gf_t256_t t);
void gf_mul_256(gf_t a, const gf_t256_t t, gf_t r);
#if defined(__cplusplus)
}
#endif
#endif
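A hedged usage sketch for the table-driven API above (assumes gf128mul.c is built with TABLES_4K enabled; the helper below is illustrative, not part of the commit): precompute a table for the slowly-changing operand, then check the fast multiply against the slow one.
#include <string.h>
#include "gf128mul.h"
/* Sketch: compute h * v once via gf_mul and once via the 4k table
   and report whether the two results agree. */
int gf_mul_4k_selftest(const gf_t h, const gf_t v) {
  gf_t slow, fast, r;
  gf_t4k_a tab;
  memcpy(slow, v, GF_BYTE_LEN);
  gf_mul(slow, h);         /* slow path: slow *= h */
  init_4k_table(h, tab);   /* per-key precomputation */
  memcpy(fast, v, GF_BYTE_LEN);
  gf_mul_4k(fast, tab, r); /* fast path: fast *= h, r is scratch */
  return memcmp(slow, fast, GF_BYTE_LEN) == 0;
}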

@ -0,0 +1,773 @@
/*
---------------------------------------------------------------------------
Copyright (c) 1998-2010, Brian Gladman, Worcester, UK. All rights reserved.
The redistribution and use of this software (with or without changes)
is allowed without the payment of fees or royalties provided that:
source code distributions include the above copyright notice, this
list of conditions and the following disclaimer;
binary distributions include the above copyright notice, this list
of conditions and the following disclaimer in their documentation.
This software is provided 'as is' with no explicit or implied warranties
in respect of its operation, including, but not limited to, correctness
and fitness for purpose.
---------------------------------------------------------------------------
Issue Date: 18/02/2014
This file provides the low level primitives needed for Galois Field
operations in GF(2^128) for the four most likely field representations.
*/
#ifndef _GF_MUL_LO_H
#define _GF_MUL_LO_H
#if defined( USE_INLINING )
# if defined( _MSC_VER )
# define gf_decl __inline
# elif defined( __GNUC__ ) || defined( __GNU_LIBRARY__ )
# define gf_decl static inline
# else
# define gf_decl static
# endif
#endif
#if 0 /* used for testing only: t1(UNIT_BITS), t2(UNIT_BITS) */
# define _t1(n) bswap ## n ## _block(x, x)
# define t1(n) _t1(n)
# define _t2(n) bswap ## n ## _block(x, x); bswap ## n ## _block(r, r)
# define t2(n) _t2(n)
#endif
#define gf_m(n,x) gf_mulx ## n ## x
#define gf_mulx1(x) gf_m(1,x)
#define gf_mulx4(x) gf_m(4,x)
#define gf_mulx8(x) gf_m(8,x)
#define MASK(x) ((x) * (UNIT_CAST(-1,UNIT_BITS) / 0xff))
#define DATA_256(q) {\
q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
q(0x48), q(0x49), q(0x4a), q(0x4b), q(0x4c), q(0x4d), q(0x4e), q(0x4f),\
q(0x50), q(0x51), q(0x52), q(0x53), q(0x54), q(0x55), q(0x56), q(0x57),\
q(0x58), q(0x59), q(0x5a), q(0x5b), q(0x5c), q(0x5d), q(0x5e), q(0x5f),\
q(0x60), q(0x61), q(0x62), q(0x63), q(0x64), q(0x65), q(0x66), q(0x67),\
q(0x68), q(0x69), q(0x6a), q(0x6b), q(0x6c), q(0x6d), q(0x6e), q(0x6f),\
q(0x70), q(0x71), q(0x72), q(0x73), q(0x74), q(0x75), q(0x76), q(0x77),\
q(0x78), q(0x79), q(0x7a), q(0x7b), q(0x7c), q(0x7d), q(0x7e), q(0x7f),\
q(0x80), q(0x81), q(0x82), q(0x83), q(0x84), q(0x85), q(0x86), q(0x87),\
q(0x88), q(0x89), q(0x8a), q(0x8b), q(0x8c), q(0x8d), q(0x8e), q(0x8f),\
q(0x90), q(0x91), q(0x92), q(0x93), q(0x94), q(0x95), q(0x96), q(0x97),\
q(0x98), q(0x99), q(0x9a), q(0x9b), q(0x9c), q(0x9d), q(0x9e), q(0x9f),\
q(0xa0), q(0xa1), q(0xa2), q(0xa3), q(0xa4), q(0xa5), q(0xa6), q(0xa7),\
q(0xa8), q(0xa9), q(0xaa), q(0xab), q(0xac), q(0xad), q(0xae), q(0xaf),\
q(0xb0), q(0xb1), q(0xb2), q(0xb3), q(0xb4), q(0xb5), q(0xb6), q(0xb7),\
q(0xb8), q(0xb9), q(0xba), q(0xbb), q(0xbc), q(0xbd), q(0xbe), q(0xbf),\
q(0xc0), q(0xc1), q(0xc2), q(0xc3), q(0xc4), q(0xc5), q(0xc6), q(0xc7),\
q(0xc8), q(0xc9), q(0xca), q(0xcb), q(0xcc), q(0xcd), q(0xce), q(0xcf),\
q(0xd0), q(0xd1), q(0xd2), q(0xd3), q(0xd4), q(0xd5), q(0xd6), q(0xd7),\
q(0xd8), q(0xd9), q(0xda), q(0xdb), q(0xdc), q(0xdd), q(0xde), q(0xdf),\
q(0xe0), q(0xe1), q(0xe2), q(0xe3), q(0xe4), q(0xe5), q(0xe6), q(0xe7),\
q(0xe8), q(0xe9), q(0xea), q(0xeb), q(0xec), q(0xed), q(0xee), q(0xef),\
q(0xf0), q(0xf1), q(0xf2), q(0xf3), q(0xf4), q(0xf5), q(0xf6), q(0xf7),\
q(0xf8), q(0xf9), q(0xfa), q(0xfb), q(0xfc), q(0xfd), q(0xfe), q(0xff) }
/* Within the 16 bytes of the field element the top and bottom field bits
are within bytes as follows (bit numbers in bytes 0 from ls up) for
each of the four field representations supported (see gf128mul.txt):
GF_BIT 127 126 125 124 123 122 121 120 ..... 7 6 5 4 3 2 1 0
0x87 1 0 0 0 0 1 1 1
BL x[ 0] 7 6 5 4 3 2 1 0 x[15] 7 6 5 4 3 2 1 0
LL x[15] 7 6 5 4 3 2 1 0 x[ 0] 7 6 5 4 3 2 1 0
GF_BIT 120 121 122 123 124 125 126 127 ..... 0 1 2 3 4 5 6 7
0xc1 1 1 1 0 0 0 0 1
BB x[ 0] 7 6 5 4 3 2 1 0 x[15] 7 6 5 4 3 2 1 0
LB x[15] 7 6 5 4 3 2 1 0 x[ 0] 7 6 5 4 3 2 1 0
When the field element is multiplied by x^n, the high bits overflow
and are used to form an overflow byte. For the BL and LL modes this
byte has the lowest overflow bit in bit 0 whereas for the BB and LB
modes this bit is in bit 7. So we have for this byte:
bit (bit n = 2^n) 7 6 5 4 3 2 1 0
BL and LL x^7 x^6 x^5 x^4 x^3 x^2 x^1 x^0
BB and LB x^0 x^1 x^2 x^3 x^4 x^5 x^6 x^7
This byte then has to be multiplied by the low bits of the field
polynomial, which produces a value of 16 bits to be xored into the
left shifted field value. For the BL and LL modes bit 0 gives the
word value 0x0087, bit 1 gives 0x010e (0x87 left shifted 1), 0x021c
(0x87 left shifted 2), ... For the BB and LB modes, bit 7 gives the
value 0x00e1, bit 6 gives 0x8070, bit 5 gives 0x4038, ... Each bit
in the overflow byte is expanded in this way and is xored into the
overall result, so each of the 256 byte values will produce a
corresponding word value that is computed by the gf_uint16_xor(i)
macros below.
These word values have to be xored into the low 16 bits of the
field value. If the byte endianness of the mode matches that of
the architecture xoring the word value will be correct. But if
the mode has the opposite endianness, the word value has to be
xored in byte reversed order. This is done by the ord() macro.
*/
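As a quick sanity check of the word values quoted above (an illustrative sketch, not part of the commit): for the BL/LL bit order, overflow bit k contributes the polynomial's low byte 0x87 shifted left by k.
#include <assert.h>
#include <stdint.h>
/* Sketch: reproduce three of the BL/LL entries quoted above. */
static void check_overflow_words(void) {
  assert((uint16_t)(0x87u << 0) == 0x0087u);
  assert((uint16_t)(0x87u << 1) == 0x010eu);
  assert((uint16_t)(0x87u << 2) == 0x021cu);
}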
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN \
&& (defined( GF_MODE_LB ) || defined( GF_MODE_LL )) || \
PLATFORM_BYTE_ORDER == IS_BIG_ENDIAN \
&& (defined( GF_MODE_BB ) || defined( GF_MODE_BL ))
# define ord(hi, lo) 0x##hi##lo
#else
# define ord(hi, lo) 0x##lo##hi
#endif
#if defined( GF_MODE_BL ) || defined( GF_MODE_LL )
/* field and numeric bit significance correspond */
#define gf_uint16_xor(i) ( \
(i & 0x01 ? ord(00,87) : 0) ^ (i & 0x02 ? ord(01,0e) : 0) ^ \
(i & 0x04 ? ord(02,1c) : 0) ^ (i & 0x08 ? ord(04,38) : 0) ^ \
(i & 0x10 ? ord(08,70) : 0) ^ (i & 0x20 ? ord(10,e0) : 0) ^ \
(i & 0x40 ? ord(21,c0) : 0) ^ (i & 0x80 ? ord(43,80) : 0) )
enum x_bit
{
X_0 = 0x01, X_1 = 0x02, X_2 = 0x04, X_3 = 0x08,
X_4 = 0x10, X_5 = 0x20, X_6 = 0x40, X_7 = 0x80
};
#elif defined( GF_MODE_BB ) || defined( GF_MODE_LB )
/* field and numeric bit significance are in reverse */
#define gf_uint16_xor(i) ( \
(i & 0x80 ? ord(00,e1) : 0) ^ (i & 0x40 ? ord(80,70) : 0) ^ \
(i & 0x20 ? ord(40,38) : 0) ^ (i & 0x10 ? ord(20,1c) : 0) ^ \
(i & 0x08 ? ord(10,0e) : 0) ^ (i & 0x04 ? ord(08,07) : 0) ^ \
(i & 0x02 ? ord(84,03) : 0) ^ (i & 0x01 ? ord(c2,01) : 0) )
enum x_bit
{
X_0 = 0x80, X_1 = 0x40, X_2 = 0x20, X_3 = 0x10,
X_4 = 0x08, X_5 = 0x04, X_6 = 0x02, X_7 = 0x01
};
#else
#error Galois Field representation has not been set
#endif
const uint16_t gf_tab[256] = DATA_256(gf_uint16_xor);
/* LL Mode Galois Field operations
x[0] x[1] x[2] x[3] x[4] x[5] x[6] x[7]
ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
10000111 ........ ........ ........ ........ ........ ........ ........
07....00 15....08 23....16 31....24 39....32 47....40 55....48 63....56
x[8] x[9] x[10] x[11] x[12] x[13] x[14] x[15]
ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
........ ........ ........ ........ ........ ........ ........ M.......
71....64 79....72 87....80 95....88 103...96 111..104 119..112 127..120
*/
#if UNIT_BITS == 64
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
#define f1_ll(n,r,x) r[n] = (x[n] << 1) | (n ? x[n-1] >> 63 : 0)
#define f4_ll(n,r,x) r[n] = (x[n] << 4) | (n ? x[n-1] >> 60 : 0)
#define f8_ll(n,r,x) r[n] = (x[n] << 8) | (n ? x[n-1] >> 56 : 0)
#else
#define f1_ll(n,r,x) r[n] = ((x[n] << 1) & ~MASK(0x01)) | (((x[n] >> 15) \
| (n ? x[n-1] << 49 : 0)) & MASK(0x01))
#define f4_ll(n,r,x) r[n] = ((x[n] << 4) & ~MASK(0x0f)) | (((x[n] >> 12) \
| (n ? x[n-1] << 52 : 0)) & MASK(0x0f))
#define f8_ll(n,r,x) r[n] = (x[n] >> 8) | (n ? x[n-1] << 56 : 0)
#endif
gf_decl void gf_mulx1_ll(gf_t r, const gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = gf_tab[(UNIT_PTR(x)[1] >> 63) & 0x01];
#else
_tt = ((gf_unit_t)(gf_tab[(UNIT_PTR(x)[1] >> 7) & 0x01])) << 48;
#endif
rep2_d2(f1_ll, UNIT_PTR(r), UNIT_PTR(x));
UNIT_PTR(r)[0] ^= _tt;
}
gf_decl void gf_mulx4_ll(gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = gf_tab[(UNIT_PTR(x)[1] >> 60) & 0x0f];
#else
_tt = ((gf_unit_t)(gf_tab[(UNIT_PTR(x)[1] >> 4) & 0x0f])) << 48;
#endif
rep2_d2(f4_ll, UNIT_PTR(x), UNIT_PTR(x));
UNIT_PTR(x)[0] ^= _tt;
}
gf_decl void gf_mulx8_ll(gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = gf_tab[UNIT_PTR(x)[1] >> 56];
#else
_tt = ((gf_unit_t)(gf_tab[UNIT_PTR(x)[1] & 0xff])) << 48;
#endif
rep2_d2(f8_ll, UNIT_PTR(x), UNIT_PTR(x));
UNIT_PTR(x)[0] ^= _tt;
}
#elif UNIT_BITS == 32
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
#define f1_ll(n,r,x) r[n] = (x[n] << 1) | (n ? x[n-1] >> 31 : 0)
#define f4_ll(n,r,x) r[n] = (x[n] << 4) | (n ? x[n-1] >> 28 : 0)
#define f8_ll(n,r,x) r[n] = (x[n] << 8) | (n ? x[n-1] >> 24 : 0)
#else
#define f1_ll(n,r,x) r[n] = ((x[n] << 1) & ~MASK(0x01)) | (((x[n] >> 15) \
| (n ? x[n-1] << 17 : 0)) & MASK(0x01))
#define f4_ll(n,r,x) r[n] = ((x[n] << 4) & ~MASK(0x0f)) | (((x[n] >> 12) \
| (n ? x[n-1] << 20 : 0)) & MASK(0x0f))
#define f8_ll(n,r,x) r[n] = (x[n] >> 8) | (n ? x[n-1] << 24 : 0)
#endif
gf_decl void gf_mulx1_ll(gf_t r, const gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = gf_tab[(UNIT_PTR(x)[3] >> 31) & 0x01];
#else
_tt = ((gf_unit_t)(gf_tab[(UNIT_PTR(x)[3] >> 7) & 0x01])) << 16;
#endif
rep2_d4(f1_ll, UNIT_PTR(r), UNIT_PTR(x));
UNIT_PTR(r)[0] ^= _tt;
}
gf_decl void gf_mulx4_ll(gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = gf_tab[(UNIT_PTR(x)[3] >> 28) & 0x0f];
#else
_tt = ((gf_unit_t)(gf_tab[(UNIT_PTR(x)[3] >> 4) & 0x0f])) << 16;
#endif
rep2_d4(f4_ll, UNIT_PTR(x), UNIT_PTR(x));
UNIT_PTR(x)[0] ^= _tt;
}
gf_decl void gf_mulx8_ll(gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = gf_tab[UNIT_PTR(x)[3] >> 24];
#else
_tt = ((gf_unit_t)(gf_tab[UNIT_PTR(x)[3] & 0xff])) << 16;
#endif
rep2_d4(f8_ll, UNIT_PTR(x), UNIT_PTR(x));
UNIT_PTR(x)[0] ^= _tt;
}
#else
#define f1_ll(n,r,x) r[n] = (x[n] << 1) | (n ? x[n-1] >> 7 : 0)
#define f4_ll(n,r,x) r[n] = (x[n] << 4) | (n ? x[n-1] >> 4 : 0)
gf_decl void gf_mulx1_ll(gf_t r, const gf_t x)
{ uint16_t _tt = 0;
_tt = gf_tab[(UNIT_PTR(x)[15] >> 7) & 0x01];
rep2_d16(f1_ll, UNIT_PTR(r), UNIT_PTR(x));
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
UNIT_PTR(r)[0] ^= _tt & 0xff;
#else
UNIT_PTR(r)[0] ^= _tt >> 8;
#endif
}
gf_decl void gf_mulx4_ll(gf_t x)
{ uint16_t _tt = 0;
_tt = gf_tab[(UNIT_PTR(x)[15] >> 4) & 0x0f];
rep2_d16(f4_ll, UNIT_PTR(x), UNIT_PTR(x));
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
UNIT_PTR(x)[1] ^= _tt >> 8;
UNIT_PTR(x)[0] ^= _tt & 0xff;
#else
UNIT_PTR(x)[1] ^= _tt & 0xff;
UNIT_PTR(x)[0] ^= _tt >> 8;
#endif
}
gf_decl void gf_mulx8_ll(gf_t x)
{ uint16_t _tt = 0;
_tt = gf_tab[UNIT_PTR(x)[15]];
memmove(UNIT_PTR(x) + 1, UNIT_PTR(x), 15);
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
UNIT_PTR(x)[1] ^= _tt >> 8;
UNIT_PTR(x)[0] = _tt & 0xff;
#else
UNIT_PTR(x)[1] ^= _tt & 0xff;
UNIT_PTR(x)[0] = _tt >> 8;
#endif
}
#endif
/* BL Mode Galois Field operations
x[0] x[1] x[2] x[3] x[4] x[5] x[6] x[7]
ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
M....... ........ ........ ........ ........ ........ ........ ........
127..120 119..112 111..104 103...96 95....88 87....80 79....72 71....64
x[8] x[9] x[10] x[11] x[12] x[13] x[14] x[15]
ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
........ ........ ........ ........ ........ ........ ........ 10000111
63....56 55....48 47....40 39....32 31....24 23....16 15....08 07....00
*/
#if UNIT_BITS == 64
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
#define f1_bl(n,r,x) r[n] = ((x[n] << 1) & ~MASK(0x01)) | (((x[n] >> 15) \
| (!n ? x[n+1] << 49 : 0)) & MASK(0x01))
#define f4_bl(n,r,x) r[n] = ((x[n] << 4) & ~MASK(0x0f)) | (((x[n] >> 12) \
| (!n ? x[n+1] << 52 : 0)) & MASK(0x0f))
#define f8_bl(n,r,x) r[n] = (x[n] >> 8) | (!n ? x[n+1] << 56 : 0)
#else
#define f1_bl(n,r,x) r[n] = (x[n] << 1) | (!n ? x[n+1] >> 63 : 0)
#define f4_bl(n,r,x) r[n] = (x[n] << 4) | (!n ? x[n+1] >> 60 : 0)
#define f8_bl(n,r,x) r[n] = (x[n] << 8) | (!n ? x[n+1] >> 56 : 0)
#endif
gf_decl void gf_mulx1_bl(gf_t r, const gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = ((gf_unit_t)(gf_tab[(UNIT_PTR(x)[0] >> 7) & 0x01])) << 48;
#else
_tt = gf_tab[(UNIT_PTR(x)[0] >> 63) & 0x01];
#endif
rep2_u2(f1_bl, UNIT_PTR(r), UNIT_PTR(x));
UNIT_PTR(r)[1] ^= _tt;
}
gf_decl void gf_mulx4_bl(gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = ((gf_unit_t)(gf_tab[(UNIT_PTR(x)[0] >> 4) & 0x0f])) << 48;
#else
_tt = gf_tab[(UNIT_PTR(x)[0] >> 60) & 0x0f];
#endif
rep2_u2(f4_bl, UNIT_PTR(x), UNIT_PTR(x));
UNIT_PTR(x)[1] ^= _tt;
}
gf_decl void gf_mulx8_bl(gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = ((gf_unit_t)(gf_tab[UNIT_PTR(x)[0] & 0xff])) << 48;
#else
_tt = gf_tab[(UNIT_PTR(x)[0] >> 56) & 0xff];
#endif
rep2_u2(f8_bl, UNIT_PTR(x), UNIT_PTR(x));
UNIT_PTR(x)[1] ^= _tt;
}
#elif UNIT_BITS == 32
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
#define f1_bl(n,r,x) r[n] = ((x[n] << 1) & ~MASK(0x01)) | (((x[n] >> 15) \
| (n < 3 ? x[n+1] << 17 : 0)) & MASK(0x01))
#define f4_bl(n,r,x) r[n] = ((x[n] << 4) & ~MASK(0x0f)) | (((x[n] >> 12) \
| (n < 3 ? x[n+1] << 20 : 0)) & MASK(0x0f))
#define f8_bl(n,r,x) r[n] = (x[n] >> 8) | (n < 3 ? x[n+1] << 24 : 0)
#else
#define f1_bl(n,r,x) r[n] = (x[n] << 1) | (n < 3 ? x[n+1] >> 31 : 0)
#define f4_bl(n,r,x) r[n] = (x[n] << 4) | (n < 3 ? x[n+1] >> 28 : 0)
#define f8_bl(n,r,x) r[n] = (x[n] << 8) | (n < 3 ? x[n+1] >> 24 : 0)
#endif
gf_decl void gf_mulx1_bl(gf_t r, const gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = ((gf_unit_t)(gf_tab[(UNIT_PTR(x)[0] >> 7) & 0x01])) << 16;
#else
_tt = gf_tab[(UNIT_PTR(x)[0] >> 31) & 0x01];
#endif
rep2_u4(f1_bl, UNIT_PTR(r), UNIT_PTR(x));
UNIT_PTR(r)[3] ^= _tt;
}
gf_decl void gf_mulx4_bl(gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = ((gf_unit_t)(gf_tab[(UNIT_PTR(x)[0] >> 4) & 0x0f])) << 16;
#else
_tt = gf_tab[(UNIT_PTR(x)[0] >> 28) & 0x0f];
#endif
rep2_u4(f4_bl, UNIT_PTR(x), UNIT_PTR(x));
UNIT_PTR(x)[3] ^= _tt;
}
gf_decl void gf_mulx8_bl(gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = ((gf_unit_t)(gf_tab[UNIT_PTR(x)[0] & 0xff])) << 16;
#else
_tt = gf_tab[(UNIT_PTR(x)[0] >> 24) & 0xff];
#endif
rep2_u4(f8_bl, UNIT_PTR(x), UNIT_PTR(x));
UNIT_PTR(x)[3] ^= _tt;
}
#else
#define f1_bl(n,r,x) r[n] = (x[n] << 1) | (n < 15 ? x[n+1] >> 7 : 0)
#define f4_bl(n,r,x) r[n] = (x[n] << 4) | (n < 15 ? x[n+1] >> 4 : 0)
gf_decl void gf_mulx1_bl(gf_t r, const gf_t x)
{ uint16_t _tt = 0;
_tt = gf_tab[(UNIT_PTR(x)[0] >> 7) & 0x01];
rep2_u16(f1_bl, UNIT_PTR(r), UNIT_PTR(x));
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
UNIT_PTR(r)[15] ^= _tt >> 8;
#else
UNIT_PTR(r)[15] ^= _tt & 0xff;
#endif
}
gf_decl void gf_mulx4_bl(gf_t x)
{ uint16_t _tt = 0;
_tt = gf_tab[(UNIT_PTR(x)[0] >> 4) & 0x0f];
rep2_u16(f4_bl, UNIT_PTR(x), UNIT_PTR(x));
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
UNIT_PTR(x)[14] ^= _tt & 0xff;
UNIT_PTR(x)[15] ^= _tt >> 8;
#else
UNIT_PTR(x)[14] ^= _tt >> 8;
UNIT_PTR(x)[15] ^= _tt & 0xff;
#endif
}
gf_decl void gf_mulx8_bl(gf_t x)
{ uint16_t _tt = 0;
_tt = gf_tab[UNIT_PTR(x)[0]];
memmove(UNIT_PTR(x), UNIT_PTR(x) + 1, 15);
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
UNIT_PTR(x)[14] ^= _tt & 0xff;
UNIT_PTR(x)[15] = _tt >> 8;
#else
UNIT_PTR(x)[14] ^= _tt >> 8;
UNIT_PTR(x)[15] = _tt & 0xff;
#endif
}
#endif
/* LB Mode Galois Field operations
x[0] x[1] x[2] x[3] x[4] x[5] x[6] x[7]
ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
11100001 ........ ........ ........ ........ ........ ........ ........
00....07 08....15 16....23 24....31 32....39 40....47 48....55 56....63
x[8] x[9] x[10] x[11] x[12] x[13] x[14] x[15]
ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
........ ........ ........ ........ ........ ........ ........ .......M
64....71 72....79 80....87 88....95 96...103 104..111 112..119 120..127
*/
#if UNIT_BITS == 64
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
#define f1_lb(n,r,x) r[n] = ((x[n] >> 1) & ~MASK(0x80)) | (((x[n] << 15) \
| (n ? x[n-1] >> 49 : 0)) & MASK(0x80))
#define f4_lb(n,r,x) r[n] = ((x[n] >> 4) & ~MASK(0xf0)) | (((x[n] << 12) \
| (n ? x[n-1] >> 52 : 0)) & MASK(0xf0))
#define f8_lb(n,r,x) r[n] = (x[n] << 8) | (n ? x[n-1] >> 56 : 0)
#else
#define f1_lb(n,r,x) r[n] = (x[n] >> 1) | (n ? x[n-1] << 63 : 0)
#define f4_lb(n,r,x) r[n] = (x[n] >> 4) | (n ? x[n-1] << 60 : 0)
#define f8_lb(n,r,x) r[n] = (x[n] >> 8) | (n ? x[n-1] << 56 : 0)
#endif
gf_decl void gf_mulx1_lb(gf_t r, const gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = gf_tab[(UNIT_PTR(x)[1] >> 49) & MASK(0x80)];
#else
_tt = ((gf_unit_t)(gf_tab[(UNIT_PTR(x)[1] << 7) & 0xff])) << 48;
#endif
rep2_d2(f1_lb, UNIT_PTR(r), UNIT_PTR(x));
UNIT_PTR(r)[0] ^= _tt;
}
gf_decl void gf_mulx4_lb(gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = gf_tab[(UNIT_PTR(x)[1] >> 52) & MASK(0xf0)];
#else
_tt = ((gf_unit_t)(gf_tab[(UNIT_PTR(x)[1] << 4) & 0xff])) << 48;
#endif
rep2_d2(f4_lb, UNIT_PTR(x), UNIT_PTR(x));
UNIT_PTR(x)[0] ^= _tt;
}
gf_decl void gf_mulx8_lb(gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = gf_tab[UNIT_PTR(x)[1] >> 56];
#else
_tt = ((gf_unit_t)(gf_tab[UNIT_PTR(x)[1] & 0xff])) << 48;
#endif
rep2_d2(f8_lb, UNIT_PTR(x), UNIT_PTR(x));
UNIT_PTR(x)[0] ^= _tt;
}
#elif UNIT_BITS == 32
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
#define f1_lb(n,r,x) r[n] = ((x[n] >> 1) & ~MASK(0x80)) | (((x[n] << 15) \
| (n ? x[n-1] >> 17 : 0)) & MASK(0x80))
#define f4_lb(n,r,x) r[n] = ((x[n] >> 4) & ~MASK(0xf0)) | (((x[n] << 12) \
| (n ? x[n-1] >> 20 : 0)) & MASK(0xf0))
#define f8_lb(n,r,x) r[n] = (x[n] << 8) | (n ? x[n-1] >> 24 : 0)
#else
#define f1_lb(n,r,x) r[n] = (x[n] >> 1) | (n ? x[n-1] << 31 : 0)
#define f4_lb(n,r,x) r[n] = (x[n] >> 4) | (n ? x[n-1] << 28 : 0)
#define f8_lb(n,r,x) r[n] = (x[n] >> 8) | (n ? x[n-1] << 24 : 0)
#endif
gf_decl void gf_mulx1_lb(gf_t r, const gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = gf_tab[(UNIT_PTR(x)[3] >> 17) & MASK(0x80)];
#else
_tt = ((gf_unit_t)(gf_tab[(UNIT_PTR(x)[3] << 7) & 0xff])) << 16;
#endif
rep2_d4(f1_lb, UNIT_PTR(r), UNIT_PTR(x));
UNIT_PTR(r)[0] ^= _tt;
}
gf_decl void gf_mulx4_lb(gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = gf_tab[(UNIT_PTR(x)[3] >> 20) & MASK(0xf0)];
#else
_tt = ((gf_unit_t)(gf_tab[(UNIT_PTR(x)[3] << 4) & 0xff])) << 16;
#endif
rep2_d4(f4_lb, UNIT_PTR(x), UNIT_PTR(x));
UNIT_PTR(x)[0] ^= _tt;
}
gf_decl void gf_mulx8_lb(gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = gf_tab[UNIT_PTR(x)[3] >> 24];
#else
_tt = ((gf_unit_t)(gf_tab[UNIT_PTR(x)[3] & 0xff])) << 16;
#endif
rep2_d4(f8_lb, UNIT_PTR(x), UNIT_PTR(x));
UNIT_PTR(x)[0] ^= _tt;
}
#else
#define f1_lb(n,r,x) r[n] = (x[n] >> 1) | (n ? x[n-1] << 7 : 0)
#define f4_lb(n,r,x) r[n] = (x[n] >> 4) | (n ? x[n-1] << 4 : 0)
gf_decl void gf_mulx1_lb(gf_t r, const gf_t x)
{ uint16_t _tt = 0;
_tt = gf_tab[(UNIT_PTR(x)[15] << 7) & 0x80];
rep2_d16(f1_lb, UNIT_PTR(r), UNIT_PTR(x));
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
UNIT_PTR(r)[0] ^= _tt;
#else
UNIT_PTR(r)[0] ^= _tt >> 8;
#endif
}
gf_decl void gf_mulx4_lb(gf_t x)
{ uint16_t _tt = 0;
_tt = gf_tab[(UNIT_PTR(x)[15] << 4) & 0xf0];
rep2_d16(f4_lb, UNIT_PTR(x), UNIT_PTR(x));
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
UNIT_PTR(x)[1] ^= _tt >> 8;
UNIT_PTR(x)[0] ^= _tt & 0xff;
#else
UNIT_PTR(x)[1] ^= _tt & 0xff;
UNIT_PTR(x)[0] ^= _tt >> 8;
#endif
}
gf_decl void gf_mulx8_lb(gf_t x)
{ uint16_t _tt = 0;
_tt = gf_tab[UNIT_PTR(x)[15]];
memmove(UNIT_PTR(x) + 1, UNIT_PTR(x), 15);
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
UNIT_PTR(x)[1] ^= _tt >> 8;
UNIT_PTR(x)[0] = _tt & 0xff;
#else
UNIT_PTR(x)[1] ^= _tt & 0xff;
UNIT_PTR(x)[0] = _tt >> 8;
#endif
}
#endif
/* BB Mode Galois Field operations
x[0] x[1] x[2] x[3] x[4] x[5] x[6] x[7]
ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
.......M ........ ........ ........ ........ ........ ........ ........
120..127 112..119 104..111 96...103 88....95 80....87 72....79 64....71
x[8] x[9] x[10] x[11] x[12] x[13] x[14] x[15]
ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
........ ........ ........ ........ ........ ........ ........ 11100001
56....63 48....55 40....47 32....39 24....31 16....23 08....15 00....07
*/
#if UNIT_BITS == 64
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
#define f1_bb(n,r,x) r[n] = (x[n] >> 1) | (!n ? x[n+1] << 63 : 0)
#define f4_bb(n,r,x) r[n] = (x[n] >> 4) | (!n ? x[n+1] << 60 : 0)
#define f8_bb(n,r,x) r[n] = (x[n] >> 8) | (!n ? x[n+1] << 56 : 0)
#else
#define f1_bb(n,r,x) r[n] = ((x[n] >> 1) & ~MASK(0x80)) | (((x[n] << 15) \
| (!n ? x[n+1] >> 49 : 0)) & MASK(0x80))
#define f4_bb(n,r,x) r[n] = ((x[n] >> 4) & ~MASK(0xf0)) | (((x[n] << 12) \
| (!n ? x[n+1] >> 52 : 0)) & MASK(0xf0))
#define f8_bb(n,r,x) r[n] = (x[n] << 8) | (!n ? x[n+1] >> 56 : 0)
#endif
gf_decl void gf_mulx1_bb(gf_t r, const gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = ((gf_unit_t)(gf_tab[(UNIT_PTR(x)[0] << 7) & 0x80])) << 48;
#else
_tt = gf_tab[(UNIT_PTR(x)[0] >> 49) & 0x80];
#endif
rep2_u2(f1_bb, UNIT_PTR(r), UNIT_PTR(x));
UNIT_PTR(r)[1] ^= _tt;
}
gf_decl void gf_mulx4_bb(gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = ((gf_unit_t)(gf_tab[(UNIT_PTR(x)[0] << 4) & 0xf0])) << 48;
#else
_tt = gf_tab[(UNIT_PTR(x)[0] >> 52) & 0xf0];
#endif
rep2_u2(f4_bb, UNIT_PTR(x), UNIT_PTR(x));
UNIT_PTR(x)[1] ^= _tt;
}
gf_decl void gf_mulx8_bb(gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = ((gf_unit_t)(gf_tab[UNIT_PTR(x)[0] & 0xff])) << 48;
#else
_tt = gf_tab[(UNIT_PTR(x)[0] >> 56) & 0xff];
#endif
rep2_u2(f8_bb, UNIT_PTR(x), UNIT_PTR(x));
UNIT_PTR(x)[1] ^= _tt;
}
#elif UNIT_BITS == 32
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
#define f1_bb(n,r,x) r[n] = (x[n] >> 1) | (n < 3 ? x[n+1] << 31 : 0)
#define f4_bb(n,r,x) r[n] = (x[n] >> 4) | (n < 3 ? x[n+1] << 28 : 0)
#define f8_bb(n,r,x) r[n] = (x[n] >> 8) | (n < 3 ? x[n+1] << 24 : 0)
#else
#define f1_bb(n,r,x) r[n] = ((x[n] >> 1) & ~MASK(0x80)) | (((x[n] << 15) \
| (n < 3 ? x[n+1] >> 17 : 0)) & MASK(0x80))
#define f4_bb(n,r,x) r[n] = ((x[n] >> 4) & ~MASK(0xf0)) | (((x[n] << 12) \
| (n < 3 ? x[n+1] >> 20 : 0)) & MASK(0xf0))
#define f8_bb(n,r,x) r[n] = (x[n] << 8) | (n < 3 ? x[n+1] >> 24 : 0)
#endif
gf_decl void gf_mulx1_bb(gf_t r, const gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = ((gf_unit_t)(gf_tab[(UNIT_PTR(x)[0] << 7) & 0x80])) << 16;
#else
_tt = gf_tab[(UNIT_PTR(x)[0] >> 17) & 0x80];
#endif
rep2_u4(f1_bb, UNIT_PTR(r), UNIT_PTR(x));
UNIT_PTR(r)[3] ^= _tt;
}
gf_decl void gf_mulx4_bb(gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = ((gf_unit_t)(gf_tab[(UNIT_PTR(x)[0] << 4) & 0xf0])) << 16;
#else
_tt = gf_tab[(UNIT_PTR(x)[0] >> 20) & 0xf0];
#endif
rep2_u4(f4_bb, UNIT_PTR(x), UNIT_PTR(x));
UNIT_PTR(x)[3] ^= _tt;
}
gf_decl void gf_mulx8_bb(gf_t x)
{ gf_unit_t _tt = 0;
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
_tt = ((gf_unit_t)(gf_tab[UNIT_PTR(x)[0] & 0xff])) << 16;
#else
_tt = gf_tab[(UNIT_PTR(x)[0] >> 24) & 0xff];
#endif
rep2_u4(f8_bb, UNIT_PTR(x), UNIT_PTR(x));
UNIT_PTR(x)[3] ^= _tt;
}
#else
#define f1_bb(n,r,x) r[n] = (x[n] >> 1) | (n < 15 ? x[n+1] << 7 : 0)
#define f4_bb(n,r,x) r[n] = (x[n] >> 4) | (n < 15 ? x[n+1] << 4 : 0)
gf_decl void gf_mulx1_bb(gf_t r, const gf_t x)
{ uint16_t _tt = 0;
_tt = gf_tab[(UNIT_PTR(x)[0] << 7) & 0x80];
rep2_u16(f1_bb, UNIT_PTR(r), UNIT_PTR(x));
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
UNIT_PTR(r)[15] ^= _tt >> 8;
#else
UNIT_PTR(r)[15] ^= _tt;
#endif
}
gf_decl void gf_mulx4_bb(gf_t x)
{ uint16_t _tt = 0;
_tt = gf_tab[(UNIT_PTR(x)[0] << 4) & 0xf0];
rep2_u16(f4_bb, UNIT_PTR(x), UNIT_PTR(x));
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
UNIT_PTR(x)[14] ^= _tt & 0xff;
UNIT_PTR(x)[15] ^= _tt >> 8;
#else
UNIT_PTR(x)[14] ^= _tt >> 8;
UNIT_PTR(x)[15] ^= _tt & 0xff;
#endif
}
gf_decl void gf_mulx8_bb(gf_t x)
{ uint16_t _tt = 0;
_tt = gf_tab[UNIT_PTR(x)[0]];
memmove(UNIT_PTR(x), UNIT_PTR(x) + 1, 15);
#if PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN
UNIT_PTR(x)[14] ^= _tt & 0xff;
UNIT_PTR(x)[15] = _tt >> 8;
#else
UNIT_PTR(x)[14] ^= _tt >> 8;
UNIT_PTR(x)[15] = _tt & 0xff;
#endif
}
#endif
#endif
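For reference, the LB-mode multiply-by-x that gf_mulx1_lb implements can also be written as a plain byte loop (a self-contained sketch of the textbook GHASH shift-and-reduce, assuming GCM's 0xe1 reduction byte; the unit-width versions above are faster equivalents):
#include <stdint.h>
/* Sketch: multiply a GF(2^128) element by x in the LB (GCM)
   representation. Shift the 128-bit value right by one bit, with
   byte 0 holding the lowest field powers, and fold the bit shifted
   out of byte 15 back in as the reduction byte 0xe1. In-place use
   (r == x) is safe. */
static void mulx1_lb_ref(uint8_t r[16], const uint8_t x[16]) {
  uint8_t carry = 0;
  for (int i = 0; i < 16; ++i) {
    uint8_t next = (uint8_t)(x[i] & 1); /* bit moving into the next byte */
    r[i] = (uint8_t)((x[i] >> 1) | (carry << 7));
    carry = next;
  }
  if (carry) /* x^128 = x^7 + x^2 + x + 1 lands in byte 0 as 0xe1 */
    r[0] ^= 0xe1;
}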

@ -0,0 +1,334 @@
/*
---------------------------------------------------------------------------
Copyright (c) 1998-2014, Brian Gladman, Worcester, UK. All rights reserved.
The redistribution and use of this software (with or without changes)
is allowed without the payment of fees or royalties provided that:
source code distributions include the above copyright notice, this
list of conditions and the following disclaimer;
binary distributions include the above copyright notice, this list
of conditions and the following disclaimer in their documentation.
This software is provided 'as is' with no explicit or implied warranties
in respect of its operation, including, but not limited to, correctness
and fitness for purpose.
---------------------------------------------------------------------------
Issue Date: 18/02/2014
This header file is an INTERNAL file which supports mode implementation
*/
#ifndef _MODE_HDR_H
#define _MODE_HDR_H
#include <string.h>
#include <limits.h>
#include "brg_endian.h"
/* This define sets the units in which buffers are processed. The code
can provide significant speed gains if buffers are processed in 32
or 64 bit chunks rather than in bytes, so buffers will be accessed
in the widest units possible
*/
#if !defined( UNIT_BITS )
# if PLATFORM_BYTE_ORDER == IS_BIG_ENDIAN
# if 0
# define UNIT_BITS 32
# elif 1
# define UNIT_BITS 64
# endif
# elif defined( _WIN64 )
# define UNIT_BITS 64
# else
# define UNIT_BITS 32
# endif
#endif
#if UNIT_BITS == 64 && !defined( NEED_UINT_64T )
# define NEED_UINT_64T
#endif
#include <stdint.h>
#define UI_TYPE(size) uint##size##_t
#define UNIT_TYPEDEF(x,size) typedef UI_TYPE(size) x
#define BUFR_TYPEDEF(x,size,bsize) typedef UI_TYPE(size) x[bsize / (size >> 3)]
#define UNIT_CAST(x,size) ((UI_TYPE(size) )(x))
#define UPTR_CAST(x,size) ((UI_TYPE(size)*)(x))
/* Use of inlines is preferred but code blocks can also be expanded inline
using 'defines'. But the latter approach will typically generate a LOT
of code and is not recommended.
*/
#if 1 && !defined( USE_INLINING )
# define USE_INLINING
#endif
#if defined( _MSC_VER )
# if _MSC_VER >= 1400
# include <stdlib.h>
# include <intrin.h>
# pragma intrinsic(memset)
# pragma intrinsic(memcpy)
# define rotl32 _rotl
# define rotr32 _rotr
# define rotl64 _rotl64
# define rotr64 _rotr64
# define bswap_16(x) _byteswap_ushort(x)
# define bswap_32(x) _byteswap_ulong(x)
# define bswap_64(x) _byteswap_uint64(x)
# else
# define rotl32 _lrotl
# define rotr32 _lrotr
# endif
#endif
#if defined( USE_INLINING )
# if defined( _MSC_VER )
# define mh_decl __inline
# elif defined( __GNUC__ ) || defined( __GNU_LIBRARY__ )
# define mh_decl static inline
# else
# define mh_decl static
# endif
#endif
#if defined(__cplusplus)
extern "C" {
#endif
#define UI8_PTR(x) UPTR_CAST(x, 8)
#define UI16_PTR(x) UPTR_CAST(x, 16)
#define UI32_PTR(x) UPTR_CAST(x, 32)
#define UI64_PTR(x) UPTR_CAST(x, 64)
#define UNIT_PTR(x) UPTR_CAST(x, UNIT_BITS)
#define UI8_VAL(x) UNIT_CAST(x, 8)
#define UI16_VAL(x) UNIT_CAST(x, 16)
#define UI32_VAL(x) UNIT_CAST(x, 32)
#define UI64_VAL(x) UNIT_CAST(x, 64)
#define UNIT_VAL(x) UNIT_CAST(x, UNIT_BITS)
#define BUF_INC (UNIT_BITS >> 3)
#define BUF_ADRMASK ((UNIT_BITS >> 3) - 1)
#define rep2_u2(f,r,x) f( 0,r,x); f( 1,r,x)
#define rep2_u4(f,r,x) f( 0,r,x); f( 1,r,x); f( 2,r,x); f( 3,r,x)
#define rep2_u16(f,r,x) f( 0,r,x); f( 1,r,x); f( 2,r,x); f( 3,r,x); \
f( 4,r,x); f( 5,r,x); f( 6,r,x); f( 7,r,x); \
f( 8,r,x); f( 9,r,x); f(10,r,x); f(11,r,x); \
f(12,r,x); f(13,r,x); f(14,r,x); f(15,r,x)
#define rep2_d2(f,r,x) f( 1,r,x); f( 0,r,x)
#define rep2_d4(f,r,x) f( 3,r,x); f( 2,r,x); f( 1,r,x); f( 0,r,x)
#define rep2_d16(f,r,x) f(15,r,x); f(14,r,x); f(13,r,x); f(12,r,x); \
f(11,r,x); f(10,r,x); f( 9,r,x); f( 8,r,x); \
f( 7,r,x); f( 6,r,x); f( 5,r,x); f( 4,r,x); \
f( 3,r,x); f( 2,r,x); f( 1,r,x); f( 0,r,x)
#define rep3_u2(f,r,x,y,c) f( 0,r,x,y,c); f( 1,r,x,y,c)
#define rep3_u4(f,r,x,y,c) f( 0,r,x,y,c); f( 1,r,x,y,c); f( 2,r,x,y,c); f( 3,r,x,y,c)
#define rep3_u16(f,r,x,y,c) f( 0,r,x,y,c); f( 1,r,x,y,c); f( 2,r,x,y,c); f( 3,r,x,y,c); \
f( 4,r,x,y,c); f( 5,r,x,y,c); f( 6,r,x,y,c); f( 7,r,x,y,c); \
f( 8,r,x,y,c); f( 9,r,x,y,c); f(10,r,x,y,c); f(11,r,x,y,c); \
f(12,r,x,y,c); f(13,r,x,y,c); f(14,r,x,y,c); f(15,r,x,y,c)
#define rep3_d2(f,r,x,y,c) f( 1,r,x,y,c); f( 0,r,x,y,c)
#define rep3_d4(f,r,x,y,c) f( 3,r,x,y,c); f( 2,r,x,y,c); f( 1,r,x,y,c); f( 0,r,x,y,c)
#define rep3_d16(f,r,x,y,c) f(15,r,x,y,c); f(14,r,x,y,c); f(13,r,x,y,c); f(12,r,x,y,c); \
f(11,r,x,y,c); f(10,r,x,y,c); f( 9,r,x,y,c); f( 8,r,x,y,c); \
f( 7,r,x,y,c); f( 6,r,x,y,c); f( 5,r,x,y,c); f( 4,r,x,y,c); \
f( 3,r,x,y,c); f( 2,r,x,y,c); f( 1,r,x,y,c); f( 0,r,x,y,c)
/* function pointers might be used for fast XOR operations */
typedef void (*xor_function)(void* r, const void* p, const void* q);
/* left and right rotates on 32 and 64 bit variables */
#if !defined( rotl32 ) /* NOTE: 0 <= n <= 32 ASSUMED */
mh_decl uint32_t rotl32(uint32_t x, int n)
{
return (((x) << n) | ((x) >> (32 - n)));
}
#endif
#if !defined( rotr32 ) /* NOTE: 0 <= n <= 32 ASSUMED */
mh_decl uint32_t rotr32(uint32_t x, int n)
{
return (((x) >> n) | ((x) << (32 - n)));
}
#endif
#if ( UNIT_BITS == 64 ) && !defined( rotl64 ) /* NOTE: 0 <= n <= 64 ASSUMED */
mh_decl uint64_t rotl64(uint64_t x, int n)
{
return (((x) << n) | ((x) >> (64 - n)));
}
#endif
#if ( UNIT_BITS == 64 ) && !defined( rotr64 ) /* NOTE: 0 <= n <= 64 ASSUMED */
mh_decl uint64_t rotr64(uint64_t x, int n)
{
return (((x) >> n) | ((x) << (64 - n)));
}
#endif
/* byte order inversions for 16, 32 and 64 bit variables */
#if !defined(bswap_16)
mh_decl uint16_t bswap_16(uint16_t x)
{
return (uint16_t)((x >> 8) | (x << 8));
}
#endif
#if !defined(bswap_32)
mh_decl uint32_t bswap_32(uint32_t x)
{
return ((rotr32((x), 24) & 0x00ff00ff) | (rotr32((x), 8) & 0xff00ff00));
}
#endif
#if ( UNIT_BITS == 64 ) && !defined(bswap_64)
mh_decl uint64_t bswap_64(uint64_t x)
{
return bswap_32((uint32_t)(x >> 32)) | ((uint64_t)bswap_32((uint32_t)x) << 32);
}
#endif
/* support for fast aligned buffer move, xor and byte swap operations -
source and destination buffers for move and xor operations must not
overlap, those for byte order reversal must either not overlap or
must be identical
*/
#define f_copy(n,p,q) p[n] = q[n]
#define f_xor(n,r,p,q,c) r[n] = c(p[n] ^ q[n])
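For readability, one concrete expansion of these repeaters (illustrative only):
/* rep2_u4(f_copy, UNIT_PTR(p), UNIT_PTR(q)) expands to:
   UNIT_PTR(p)[0] = UNIT_PTR(q)[0]; UNIT_PTR(p)[1] = UNIT_PTR(q)[1];
   UNIT_PTR(p)[2] = UNIT_PTR(q)[2]; UNIT_PTR(p)[3] = UNIT_PTR(q)[3];
*/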
mh_decl void copy_block(void* p, const void* q)
{
memcpy(p, q, 16);
}
mh_decl void copy_block_aligned(void *p, const void *q)
{
#if UNIT_BITS == 8
memcpy(p, q, 16);
#elif UNIT_BITS == 32
rep2_u4(f_copy,UNIT_PTR(p),UNIT_PTR(q));
#else
rep2_u2(f_copy,UNIT_PTR(p),UNIT_PTR(q));
#endif
}
mh_decl void xor_block(void *r, const void* p, const void* q)
{
rep3_u16(f_xor, UI8_PTR(r), UI8_PTR(p), UI8_PTR(q), UI8_VAL);
}
mh_decl void xor_block_aligned(void *r, const void *p, const void *q)
{
#if UNIT_BITS == 8
rep3_u16(f_xor, UNIT_PTR(r), UNIT_PTR(p), UNIT_PTR(q), UNIT_VAL);
#elif UNIT_BITS == 32
rep3_u4(f_xor, UNIT_PTR(r), UNIT_PTR(p), UNIT_PTR(q), UNIT_VAL);
#else
rep3_u2(f_xor, UNIT_PTR(r), UNIT_PTR(p), UNIT_PTR(q), UNIT_VAL);
#endif
}
/* byte swap within 32-bit words in a 16 byte block; don't move 32-bit words */
mh_decl void bswap32_block(void *d, const void* s)
{
#if UNIT_BITS == 8
uint8_t t = 0;
t = UNIT_PTR(s)[ 0]; UNIT_PTR(d)[ 0] = UNIT_PTR(s)[ 3]; UNIT_PTR(d)[ 3] = t;
t = UNIT_PTR(s)[ 1]; UNIT_PTR(d)[ 1] = UNIT_PTR(s)[ 2]; UNIT_PTR(d)[ 2] = t;
t = UNIT_PTR(s)[ 4]; UNIT_PTR(d)[ 4] = UNIT_PTR(s)[ 7]; UNIT_PTR(d)[ 7] = t;
t = UNIT_PTR(s)[ 5]; UNIT_PTR(d)[ 5] = UNIT_PTR(s)[ 6]; UNIT_PTR(d)[ 6] = t;
t = UNIT_PTR(s)[ 8]; UNIT_PTR(d)[ 8] = UNIT_PTR(s)[11]; UNIT_PTR(d)[11] = t;
t = UNIT_PTR(s)[ 9]; UNIT_PTR(d)[ 9] = UNIT_PTR(s)[10]; UNIT_PTR(d)[10] = t;
t = UNIT_PTR(s)[12]; UNIT_PTR(d)[12] = UNIT_PTR(s)[15]; UNIT_PTR(d)[15] = t;
t = UNIT_PTR(s)[13]; UNIT_PTR(d)[13] = UNIT_PTR(s)[14]; UNIT_PTR(d)[14] = t;
#elif UNIT_BITS == 32
UNIT_PTR(d)[0] = bswap_32(UNIT_PTR(s)[0]); UNIT_PTR(d)[1] = bswap_32(UNIT_PTR(s)[1]);
UNIT_PTR(d)[2] = bswap_32(UNIT_PTR(s)[2]); UNIT_PTR(d)[3] = bswap_32(UNIT_PTR(s)[3]);
#else
UI32_PTR(d)[0] = bswap_32(UI32_PTR(s)[0]); UI32_PTR(d)[1] = bswap_32(UI32_PTR(s)[1]);
UI32_PTR(d)[2] = bswap_32(UI32_PTR(s)[2]); UI32_PTR(d)[3] = bswap_32(UI32_PTR(s)[3]);
#endif
}
/* byte swap within 64-bit words in a 16 byte block; don't move 64-bit words */
mh_decl void bswap64_block(void *d, const void* s)
{
#if UNIT_BITS == 8
uint8_t t = 0;
t = UNIT_PTR(s)[ 0]; UNIT_PTR(d)[ 0] = UNIT_PTR(s)[ 7]; UNIT_PTR(d)[ 7] = t;
t = UNIT_PTR(s)[ 1]; UNIT_PTR(d)[ 1] = UNIT_PTR(s)[ 6]; UNIT_PTR(d)[ 6] = t;
t = UNIT_PTR(s)[ 2]; UNIT_PTR(d)[ 2] = UNIT_PTR(s)[ 5]; UNIT_PTR(d)[ 5] = t;
t = UNIT_PTR(s)[ 3]; UNIT_PTR(d)[ 3] = UNIT_PTR(s)[ 4]; UNIT_PTR(d)[ 4] = t;
t = UNIT_PTR(s)[ 8]; UNIT_PTR(d)[ 8] = UNIT_PTR(s)[15]; UNIT_PTR(d)[15] = t;
t = UNIT_PTR(s)[ 9]; UNIT_PTR(d)[ 9] = UNIT_PTR(s)[14]; UNIT_PTR(d)[14] = t;
t = UNIT_PTR(s)[10]; UNIT_PTR(d)[10] = UNIT_PTR(s)[13]; UNIT_PTR(d)[13] = t;
t = UNIT_PTR(s)[11]; UNIT_PTR(d)[11] = UNIT_PTR(s)[12]; UNIT_PTR(d)[12] = t;
#elif UNIT_BITS == 32
uint32_t t = 0;
t = bswap_32(UNIT_PTR(s)[0]); UNIT_PTR(d)[0] = bswap_32(UNIT_PTR(s)[1]); UNIT_PTR(d)[1] = t;
t = bswap_32(UNIT_PTR(s)[2]); UNIT_PTR(d)[2] = bswap_32(UNIT_PTR(s)[3]); UNIT_PTR(d)[3] = t;
#else
UNIT_PTR(d)[0] = bswap_64(UNIT_PTR(s)[0]); UNIT_PTR(d)[1] = bswap_64(UNIT_PTR(s)[1]);
#endif
}
mh_decl void bswap128_block(void *d, const void* s)
{
#if UNIT_BITS == 8
uint8_t t = 0;
t = UNIT_PTR(s)[0]; UNIT_PTR(d)[0] = UNIT_PTR(s)[15]; UNIT_PTR(d)[15] = t;
t = UNIT_PTR(s)[1]; UNIT_PTR(d)[1] = UNIT_PTR(s)[14]; UNIT_PTR(d)[14] = t;
t = UNIT_PTR(s)[2]; UNIT_PTR(d)[2] = UNIT_PTR(s)[13]; UNIT_PTR(d)[13] = t;
t = UNIT_PTR(s)[3]; UNIT_PTR(d)[3] = UNIT_PTR(s)[12]; UNIT_PTR(d)[12] = t;
t = UNIT_PTR(s)[4]; UNIT_PTR(d)[4] = UNIT_PTR(s)[11]; UNIT_PTR(d)[11] = t;
t = UNIT_PTR(s)[5]; UNIT_PTR(d)[5] = UNIT_PTR(s)[10]; UNIT_PTR(d)[10] = t;
t = UNIT_PTR(s)[6]; UNIT_PTR(d)[6] = UNIT_PTR(s)[ 9]; UNIT_PTR(d)[ 9] = t;
t = UNIT_PTR(s)[7]; UNIT_PTR(d)[7] = UNIT_PTR(s)[ 8]; UNIT_PTR(d)[ 8] = t;
#elif UNIT_BITS == 32
uint32_t t = 0;
t = bswap_32(UNIT_PTR(s)[0]); UNIT_PTR(d)[0] = bswap_32(UNIT_PTR(s)[3]); UNIT_PTR(d)[3] = t;
t = bswap_32(UNIT_PTR(s)[1]); UNIT_PTR(d)[1] = bswap_32(UNIT_PTR(s)[2]); UNIT_PTR(d)[2] = t;
#else
uint64_t t = 0;
t = bswap_64(UNIT_PTR(s)[0]); UNIT_PTR(d)[0] = bswap_64(UNIT_PTR(s)[1]); UNIT_PTR(d)[1] = t;
#endif
}
/* platform byte order to big or little endian order for 16, 32 and 64 bit variables */
#if PLATFORM_BYTE_ORDER == IS_BIG_ENDIAN
# define uint16_t_to_le(x) (x) = bswap_16((x))
# define uint32_t_to_le(x) (x) = bswap_32((x))
# define uint64_t_to_le(x) (x) = bswap_64((x))
# define uint16_t_to_be(x)
# define uint32_t_to_be(x)
# define uint64_t_to_be(x)
#else
# define uint16_t_to_le(x)
# define uint32_t_to_le(x)
# define uint64_t_to_le(x)
# define uint16_t_to_be(x) (x) = bswap_16((x))
# define uint32_t_to_be(x) (x) = bswap_32((x))
# define uint64_t_to_be(x) (x) = bswap_64((x))
#endif
#if defined(__cplusplus)
}
#endif
#endif
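The block-swap helpers above are easy to get wrong by a single index, so a reference check is worthwhile; a hedged self-test sketch (not part of the commit):
#include <assert.h>
#include <stdint.h>
#include <string.h>
#include "mode_hdr.h"
/* Sketch: bswap128_block must agree with a plain 16-byte reversal. */
static void check_bswap128(void) {
  /* the union forces the alignment that UNIT_PTR access assumes */
  union { uint8_t b[16]; uint64_t a[2]; } buf;
  uint8_t want[16];
  for (int i = 0; i < 16; ++i) {
    buf.b[i] = (uint8_t)i;
    want[15 - i] = (uint8_t)i; /* reference: reverse all 16 bytes */
  }
  bswap128_block(buf.b, buf.b); /* identical buffers are allowed */
  assert(memcmp(buf.b, want, 16) == 0);
}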

@ -42,6 +42,7 @@
#include "address.h"
#include "aes/aes.h"
#include "aes/aesccm.h"
#include "aes/aesgcm.h"
#include "base32.h"
#include "base58.h"
#include "bignum.h"
@ -4406,6 +4407,273 @@ START_TEST(test_aesccm) {
}
END_TEST
// test vectors from
// https://csrc.nist.rip/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-spec.pdf
START_TEST(test_aesgcm) {
struct {
char *key;
char *iv;
char *aad;
char *plaintext;
char *ciphertext;
char *tag;
} vectors[] = {
// Test case 1
{
"00000000000000000000000000000000",
"000000000000000000000000",
"",
"",
"",
"58e2fccefa7e3061367f1d57a4e7455a",
},
// Test case 2
{
"00000000000000000000000000000000",
"000000000000000000000000",
"",
"00000000000000000000000000000000",
"0388dace60b6a392f328c2b971b2fe78",
"ab6e47d42cec13bdf53a67b21257bddf",
},
// Test case 3
{
"feffe9928665731c6d6a8f9467308308",
"cafebabefacedbaddecaf888",
"",
"d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c"
"0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b391aafd255",
"42831ec2217774244b7221b784d0d49ce3aa212f2c02a4e035c17e2329aca12e21d5"
"14b25466931c7d8f6a5aac84aa051ba30b396a0aac973d58e091473f5985",
"4d5c2af327cd64a62cf35abd2ba6fab4",
},
// Test case 4
{
"feffe9928665731c6d6a8f9467308308",
"cafebabefacedbaddecaf888",
"feedfacedeadbeeffeedfacedeadbeefabaddad2",
"d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c"
"0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39",
"42831ec2217774244b7221b784d0d49ce3aa212f2c02a4e035c17e2329aca12e21d5"
"14b25466931c7d8f6a5aac84aa051ba30b396a0aac973d58e091",
"5bc94fbc3221a5db94fae95ae7121a47",
},
// Test case 5
{
"feffe9928665731c6d6a8f9467308308",
"cafebabefacedbad",
"feedfacedeadbeeffeedfacedeadbeefabaddad2",
"d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c"
"0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39",
"61353b4c2806934a777ff51fa22a4755699b2a714fcdc6f83766e5f97b6c74237380"
"6900e49f24b22b097544d4896b424989b5e1ebac0f07c23f4598",
"3612d2e79e3b0785561be14aaca2fccb",
},
// Test case 6
{
"feffe9928665731c6d6a8f9467308308",
"9313225df88406e555909c5aff5269aa6a7a9538534f7da1e4c303d2a318a728c3c0"
"c95156809539fcf0e2429a6b525416aedbf5a0de6a57a637b39b",
"feedfacedeadbeeffeedfacedeadbeefabaddad2",
"d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c"
"0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39",
"8ce24998625615b603a033aca13fb894be9112a5c3a211a8ba262a3cca7e2ca701e4"
"a9a4fba43c90ccdcb281d48c7c6fd62875d2aca417034c34aee5",
"619cc5aefffe0bfa462af43c1699d050",
},
// Test case 7
{
"000000000000000000000000000000000000000000000000",
"000000000000000000000000",
"",
"",
"",
"cd33b28ac773f74ba00ed1f312572435",
},
// Test case 8
{
"000000000000000000000000000000000000000000000000",
"000000000000000000000000",
"",
"00000000000000000000000000000000",
"98e7247c07f0fe411c267e4384b0f600",
"2ff58d80033927ab8ef4d4587514f0fb",
},
// Test case 9
{
"feffe9928665731c6d6a8f9467308308feffe9928665731c",
"cafebabefacedbaddecaf888",
"",
"d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c"
"0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b391aafd255",
"3980ca0b3c00e841eb06fac4872a2757859e1ceaa6efd984628593b40ca1e19c7d77"
"3d00c144c525ac619d18c84a3f4718e2448b2fe324d9ccda2710acade256",
"9924a7c8587336bfb118024db8674a14",
},
// Test case 10
{
"feffe9928665731c6d6a8f9467308308feffe9928665731c",
"cafebabefacedbaddecaf888",
"feedfacedeadbeeffeedfacedeadbeefabaddad2",
"d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c"
"0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39",
"3980ca0b3c00e841eb06fac4872a2757859e1ceaa6efd984628593b40ca1e19c7d77"
"3d00c144c525ac619d18c84a3f4718e2448b2fe324d9ccda2710",
"2519498e80f1478f37ba55bd6d27618c",
},
// Test case 11
{
"feffe9928665731c6d6a8f9467308308feffe9928665731c",
"cafebabefacedbad",
"feedfacedeadbeeffeedfacedeadbeefabaddad2",
"d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c"
"0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39",
"0f10f599ae14a154ed24b36e25324db8c566632ef2bbb34f8347280fc4507057fddc"
"29df9a471f75c66541d4d4dad1c9e93a19a58e8b473fa0f062f7",
"65dcc57fcf623a24094fcca40d3533f8",
},
// Test case 12
{
"feffe9928665731c6d6a8f9467308308feffe9928665731c",
"9313225df88406e555909c5aff5269aa6a7a9538534f7da1e4c303d2a318a728c3c0"
"c95156809539fcf0e2429a6b525416aedbf5a0de6a57a637b39b",
"feedfacedeadbeeffeedfacedeadbeefabaddad2",
"d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c"
"0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39",
"d27e88681ce3243c4830165a8fdcf9ff1de9a1d8e6b447ef6ef7b79828666e4581e7"
"9012af34ddd9e2f037589b292db3e67c036745fa22e7e9b7373b",
"dcf566ff291c25bbb8568fc3d376a6d9",
},
// Test case 13
{
"0000000000000000000000000000000000000000000000000000000000000000",
"000000000000000000000000",
"",
"",
"",
"530f8afbc74536b9a963b4f1c4cb738b",
},
// Test case 14
{
"0000000000000000000000000000000000000000000000000000000000000000",
"000000000000000000000000",
"",
"00000000000000000000000000000000",
"cea7403d4d606b6e074ec5d3baf39d18",
"d0d1c8a799996bf0265b98b5d48ab919",
},
// Test case 15
{
"feffe9928665731c6d6a8f9467308308feffe9928665731c6d6a8f9467308308",
"cafebabefacedbaddecaf888",
"",
"d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c"
"0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b391aafd255",
"522dc1f099567d07f47f37a32a84427d643a8cdcbfe5c0c97598a2bd2555d1aa8cb0"
"8e48590dbb3da7b08b1056828838c5f61e6393ba7a0abcc9f662898015ad",
"b094dac5d93471bdec1a502270e3cc6c",
},
// Test case 16
{
"feffe9928665731c6d6a8f9467308308feffe9928665731c6d6a8f9467308308",
"cafebabefacedbaddecaf888",
"feedfacedeadbeeffeedfacedeadbeefabaddad2",
"d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c"
"0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39",
"522dc1f099567d07f47f37a32a84427d643a8cdcbfe5c0c97598a2bd2555d1aa8cb0"
"8e48590dbb3da7b08b1056828838c5f61e6393ba7a0abcc9f662",
"76fc6ece0f4e1768cddf8853bb2d551b",
},
// Test case 17
{
"feffe9928665731c6d6a8f9467308308feffe9928665731c6d6a8f9467308308",
"cafebabefacedbad",
"feedfacedeadbeeffeedfacedeadbeefabaddad2",
"d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c"
"0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39",
"c3762df1ca787d32ae47c13bf19844cbaf1ae14d0b976afac52ff7d79bba9de0feb5"
"82d33934a4f0954cc2363bc73f7862ac430e64abe499f47c9b1f",
"3a337dbf46a792c45e454913fe2ea8f2",
},
// Test case 18
{
"feffe9928665731c6d6a8f9467308308feffe9928665731c6d6a8f9467308308",
"9313225df88406e555909c5aff5269aa6a7a9538534f7da1e4c303d2a318a728c3c0"
"c95156809539fcf0e2429a6b525416aedbf5a0de6a57a637b39b",
"feedfacedeadbeeffeedfacedeadbeefabaddad2",
"d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c"
"0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39",
"5a8def2f0c9e53f1f75d7853659e2a20eeb2b22aafde6419a058ab4f6f746bf40fc0"
"c3b780f244452da3ebf1c5d82cdea2418997200ef82e44ae7e3f",
"a44a8266ee1c8eb0c8b5d4cf5ae9f19a",
},
};
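// Working buffers, sized for the largest vectors above; every encrypt and
// decrypt operation below runs in place on msg.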
uint8_t iv[60] = {0};
uint8_t aad[20] = {0};
uint8_t msg[64] = {0};
uint8_t tag[16] = {0};
for (size_t i = 0; i < sizeof(vectors) / sizeof(vectors[0]); ++i) {
size_t iv_len = strlen(vectors[i].iv) / 2;
memcpy(iv, fromhex(vectors[i].iv), iv_len);
size_t aad_len = strlen(vectors[i].aad) / 2;
memcpy(aad, fromhex(vectors[i].aad), aad_len);
size_t msg_len = strlen(vectors[i].plaintext) / 2;
memcpy(msg, fromhex(vectors[i].plaintext), msg_len);
size_t tag_len = strlen(vectors[i].tag) / 2;
// Set key.
gcm_ctx ctx = {0};
ret_type ret = gcm_init_and_key(fromhex(vectors[i].key),
strlen(vectors[i].key) / 2, &ctx);
ck_assert_int_eq(ret, RETURN_GOOD);
// Test encryption.
ret = gcm_encrypt_message(iv, iv_len, aad, aad_len, msg, msg_len, tag,
tag_len, &ctx);
ck_assert_int_eq(ret, RETURN_GOOD);
ck_assert_mem_eq(msg, fromhex(vectors[i].ciphertext), msg_len);
ck_assert_mem_eq(tag, fromhex(vectors[i].tag), tag_len);
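// msg now holds the ciphertext, so the decryption below restores the
// original plaintext in place.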
// Test decryption.
ret = gcm_decrypt_message(iv, iv_len, aad, aad_len, msg, msg_len, tag,
tag_len, &ctx);
ck_assert_int_eq(ret, RETURN_GOOD);
ck_assert_mem_eq(msg, fromhex(vectors[i].plaintext), msg_len);
// Test encryption in three chunks.
size_t chunk1_len = msg_len / 6;
size_t chunk2_len = msg_len / 2;
size_t chunk3_len = msg_len - chunk1_len - chunk2_len;
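// The three chunk lengths sum to msg_len; the uneven split exercises
// non-block-aligned chunk boundaries.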
ck_assert_int_eq(gcm_init_message(iv, iv_len, &ctx), RETURN_GOOD);
ck_assert_int_eq(gcm_auth_header(aad, aad_len, &ctx), RETURN_GOOD);
ck_assert_int_eq(gcm_encrypt(msg, chunk1_len, &ctx), RETURN_GOOD);
ret = gcm_encrypt(&msg[chunk1_len], chunk2_len, &ctx);
ck_assert_int_eq(ret, RETURN_GOOD);
ret = gcm_encrypt(&msg[chunk1_len + chunk2_len], chunk3_len, &ctx);
ck_assert_int_eq(ret, RETURN_GOOD);
ck_assert_int_eq(gcm_compute_tag(tag, tag_len, &ctx), RETURN_GOOD);
ck_assert_mem_eq(msg, fromhex(vectors[i].ciphertext), msg_len);
ck_assert_mem_eq(tag, fromhex(vectors[i].tag), tag_len);
// Test decryption in three chunks.
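// The chunk lengths are consumed in the reverse order (chunk3, chunk2,
// chunk1), so the split points differ from encryption; the result must
// not depend on where the message is split.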
ck_assert_int_eq(gcm_init_message(iv, iv_len, &ctx), RETURN_GOOD);
ck_assert_int_eq(gcm_auth_header(aad, aad_len, &ctx), RETURN_GOOD);
ck_assert_int_eq(gcm_decrypt(msg, chunk3_len, &ctx), RETURN_GOOD);
ret = gcm_decrypt(&msg[chunk3_len], chunk2_len, &ctx);
ck_assert_int_eq(ret, RETURN_GOOD);
ret = gcm_decrypt(&msg[chunk3_len + chunk2_len], chunk1_len, &ctx);
ck_assert_int_eq(ret, RETURN_GOOD);
ck_assert_int_eq(gcm_compute_tag(tag, tag_len, &ctx), RETURN_GOOD);
ck_assert_mem_eq(msg, fromhex(vectors[i].plaintext), msg_len);
ck_assert_mem_eq(tag, fromhex(vectors[i].tag), tag_len);
// Clean up.
ck_assert_int_eq(gcm_end(&ctx), RETURN_GOOD);
}
}
END_TEST
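// For orientation, a minimal one-shot usage sketch of the API exercised
// above. Buffer sizes and contents are illustrative assumptions, not a
// test vector:
//
//   gcm_ctx ctx = {0};
//   uint8_t key[16] = {0};  // 128-bit key
//   uint8_t iv[12] = {0};   // 96-bit IV, the recommended length for GCM
//   uint8_t aad[1] = {0};   // no AAD in this sketch (length 0)
//   uint8_t buf[32] = {0};  // plaintext in, ciphertext out (in place)
//   uint8_t tag[16] = {0};
//   gcm_init_and_key(key, sizeof(key), &ctx);
//   gcm_encrypt_message(iv, sizeof(iv), aad, 0, buf, sizeof(buf), tag,
//                       sizeof(tag), &ctx);
//   gcm_end(&ctx);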
#define TEST1 "abc"
#define TEST2_1 "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"
#define TEST2_2a "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"
@ -10274,6 +10542,10 @@ Suite *test_suite(void) {
tcase_add_test(tc, test_aesccm);
suite_add_tcase(s, tc);
tc = tcase_create("aes_gcm");
tcase_add_test(tc, test_aesgcm);
suite_add_tcase(s, tc);
tc = tcase_create("sha2");
tcase_add_test(tc, test_sha1);
tcase_add_test(tc, test_sha256);

@ -1,5 +1,5 @@
^\./core/embed/bootloader/protob/
^\./crypto/aes/aes\(\|crypt\|key\|_modes\|opt\|tab\|tst\)\.
^\./crypto/aes/
^\./crypto/chacha20poly1305/
^\./crypto/ed25519-donna/
^\./crypto/gui/
