/*
 * Argon2 reference source code package - reference C implementations
 *
 * Copyright 2015
 * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
 *
 * You may use this work under the terms of a Creative Commons CC0 1.0
 * License/Waiver or the Apache Public License 2.0, at your option. The terms of
 * these licenses can be found at:
 *
 * - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
 * - Apache 2.0        : http://www.apache.org/licenses/LICENSE-2.0
 *
 * You should have received a copy of both of these licenses along with this
 * software. If not, they may be obtained at the above URLs.
 */

/* For memory wiping */
#ifdef _MSC_VER
#include <windows.h>
#include <winbase.h> /* For SecureZeroMemory */
#endif
#if defined __STDC_LIB_EXT1__
#define __STDC_WANT_LIB_EXT1__ 1
#endif
#define VC_GE_2005(version) (version >= 1400)

/* for explicit_bzero() on glibc */
#define _DEFAULT_SOURCE

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "core.h"
#include "blake2/blake2.h"
#include "blake2/blake2-impl.h"

#if defined(__clang__)
#if __has_attribute(optnone)
#define NOT_OPTIMIZED __attribute__((optnone))
#endif
#elif defined(__GNUC__)
#define GCC_VERSION                                                            \
    (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
#if GCC_VERSION >= 40400
#define NOT_OPTIMIZED __attribute__((optimize("O0")))
#endif
#endif
#ifndef NOT_OPTIMIZED
#define NOT_OPTIMIZED
#endif

/***************Instance and Position constructors**********/
void init_block_value(block *b, uint8_t in) { memset(b->v, in, sizeof(b->v)); }

void copy_block(block *dst, const block *src) {
    memcpy(dst->v, src->v, sizeof(uint64_t) * ARGON2_QWORDS_IN_BLOCK);
}

void xor_block(block *dst, const block *src) {
    int i;
    for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
        dst->v[i] ^= src->v[i];
    }
}

static void load_block(block *dst, const void *input) {
    unsigned i;
    for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
        dst->v[i] = load64((const uint8_t *)input + i * sizeof(dst->v[i]));
    }
}

static void store_block(void *output, const block *src) {
    unsigned i;
    for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
        store64((uint8_t *)output + i * sizeof(src->v[i]), src->v[i]);
    }
}

/***************Memory functions*****************/

void finalize(const argon2_context *context, argon2_instance_t *instance) {
    if (context != NULL && instance != NULL) {
        block blockhash;
        uint32_t l;

        copy_block(&blockhash, instance->memory + instance->lane_length - 1);

        /* XOR the last blocks */
        for (l = 1; l < instance->lanes; ++l) {
            uint32_t last_block_in_lane =
                l * instance->lane_length + (instance->lane_length - 1);
            xor_block(&blockhash, instance->memory + last_block_in_lane);
        }

        /* Hash the result */
        {
            uint8_t blockhash_bytes[ARGON2_BLOCK_SIZE];
            store_block(blockhash_bytes, &blockhash);
            blake2b_long(context->out, context->outlen, blockhash_bytes,
                         ARGON2_BLOCK_SIZE);
        }
    }
}
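/*
 * Illustrative sketch (an editorial aside, not part of the reference code):
 * index_alpha() below maps the 32-bit pseudo_rand value onto a window of
 * reference_area_size candidate blocks with a quadratic bias. Extracted on
 * its own, the mapping is:
 *
 *     uint64_t x   = (uint64_t)pseudo_rand * pseudo_rand >> 32;
 *     uint64_t rel = reference_area_size - 1 -
 *                    (reference_area_size * x >> 32);
 *
 * Squaring a uniform 32-bit value skews x towards small numbers, so rel is
 * skewed towards reference_area_size - 1, i.e. recently created blocks are
 * referenced more often than old ones.
 */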
uint32_t index_alpha(const argon2_instance_t *instance,
                     const argon2_position_t *position, uint32_t pseudo_rand,
                     int same_lane) {
    /*
     * Pass 0:
     *      This lane : all already finished segments plus already constructed
     *                  blocks in this segment
     *      Other lanes : all already finished segments
     * Pass 1+:
     *      This lane : (SYNC_POINTS - 1) last segments plus already constructed
     *                  blocks in this segment
     *      Other lanes : (SYNC_POINTS - 1) last segments
     */
    uint32_t reference_area_size;
    uint64_t relative_position;
    uint32_t start_position, absolute_position;

    if (0 == position->pass) {
        /* First pass */
        if (0 == position->slice) {
            /* First slice */
            reference_area_size =
                position->index - 1; /* all but the previous */
        } else {
            if (same_lane) {
                /* The same lane => add current segment */
                reference_area_size =
                    position->slice * instance->segment_length +
                    position->index - 1;
            } else {
                reference_area_size =
                    position->slice * instance->segment_length +
                    ((position->index == 0) ? (-1) : 0);
            }
        }
    } else {
        /* Second pass */
        if (same_lane) {
            reference_area_size = instance->lane_length -
                                  instance->segment_length + position->index -
                                  1;
        } else {
            reference_area_size = instance->lane_length -
                                  instance->segment_length +
                                  ((position->index == 0) ? (-1) : 0);
        }
    }

    /* 1.2.4. Mapping pseudo_rand to 0..<reference_area_size-1> and produce
     * relative position */
    relative_position = pseudo_rand;
    relative_position = relative_position * relative_position >> 32;
    relative_position = reference_area_size - 1 -
                        (reference_area_size * relative_position >> 32);

    /* 1.2.5 Computing starting position */
    start_position = 0;

    if (0 != position->pass) {
        start_position = (position->slice == ARGON2_SYNC_POINTS - 1)
                             ? 0
                             : (position->slice + 1) * instance->segment_length;
    }

    /* 1.2.6. Computing absolute position */
    absolute_position = (start_position + relative_position) %
                        instance->lane_length; /* absolute position */
    return absolute_position;
}

/* Single-threaded version for p=1 case */
int fill_memory_blocks(argon2_instance_t *instance) {
    uint32_t r, s, l;

    for (r = 0; r < instance->passes; ++r) {
        for (s = 0; s < ARGON2_SYNC_POINTS; ++s) {
            for (l = 0; l < instance->lanes; ++l) {
                argon2_position_t position = {r, l, (uint8_t)s, 0};
                fill_segment(instance, position);
            }
        }
    }
    return ARGON2_OK;
}

void fill_first_blocks(uint8_t *blockhash, const argon2_instance_t *instance) {
    uint32_t l;
    /* Make the first and second block in each lane as G(H0||0||i) or
       G(H0||1||i) */
    uint8_t blockhash_bytes[ARGON2_BLOCK_SIZE];
    for (l = 0; l < instance->lanes; ++l) {

        store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 0);
        store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH + 4, l);
        blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
                     ARGON2_PREHASH_SEED_LENGTH);
        load_block(&instance->memory[l * instance->lane_length + 0],
                   blockhash_bytes);

        store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 1);
        blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
                     ARGON2_PREHASH_SEED_LENGTH);
        load_block(&instance->memory[l * instance->lane_length + 1],
                   blockhash_bytes);
    }
}
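/*
 * Layout note (a sketch of the buffer contract, derived from the code above
 * and below): initial_hash() writes the ARGON2_PREHASH_DIGEST_LENGTH-byte
 * digest H0 at the start of the seed buffer, and fill_first_blocks() reuses
 * the trailing 8 bytes of the ARGON2_PREHASH_SEED_LENGTH-byte buffer for the
 * block and lane indices:
 *
 *     blockhash[0 .. ARGON2_PREHASH_DIGEST_LENGTH - 1]    H0
 *     blockhash[ARGON2_PREHASH_DIGEST_LENGTH .. +3]       block index (0 or 1)
 *     blockhash[ARGON2_PREHASH_DIGEST_LENGTH + 4 .. +7]   lane index l
 *
 * so that B[l][0] = H'(H0 || 0 || l) and B[l][1] = H'(H0 || 1 || l), where
 * H' is blake2b_long() expanded to ARGON2_BLOCK_SIZE bytes.
 */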
void initial_hash(uint8_t *blockhash, argon2_context *context,
                  argon2_type type) {
    blake2b_state BlakeHash;
    uint8_t value[sizeof(uint32_t)];

    if (NULL == context || NULL == blockhash) {
        return;
    }

    blake2b_init(&BlakeHash, ARGON2_PREHASH_DIGEST_LENGTH);

    store32(&value, context->lanes);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    store32(&value, context->outlen);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    store32(&value, context->m_cost);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    store32(&value, context->t_cost);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    store32(&value, context->version);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    store32(&value, (uint32_t)type);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    store32(&value, context->pwdlen);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    if (context->pwd != NULL) {
        blake2b_update(&BlakeHash, (const uint8_t *)context->pwd,
                       context->pwdlen);
    }

    store32(&value, context->saltlen);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    if (context->salt != NULL) {
        blake2b_update(&BlakeHash, (const uint8_t *)context->salt,
                       context->saltlen);
    }

    store32(&value, context->secretlen);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    if (context->secret != NULL) {
        blake2b_update(&BlakeHash, (const uint8_t *)context->secret,
                       context->secretlen);
    }

    store32(&value, context->adlen);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    if (context->ad != NULL) {
        blake2b_update(&BlakeHash, (const uint8_t *)context->ad,
                       context->adlen);
    }

    blake2b_final(&BlakeHash, blockhash, ARGON2_PREHASH_DIGEST_LENGTH);
}

int initialize(argon2_instance_t *instance, argon2_context *context) {
    uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH];

    instance->context_ptr = context;

    /* 1. Memory allocation */
    instance->memory = context->memory;

    /* 2. Initial hashing */
    /* H_0 + 8 extra bytes to produce the first blocks */
    /* uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH]; */
    /* Hashing all inputs */
    initial_hash(blockhash, context, instance->type);
    /* The 8 extra bytes are filled in per lane by fill_first_blocks() */

    /* 3. Creating first blocks, we always have at least two blocks in a
       slice */
    fill_first_blocks(blockhash, instance);

    return ARGON2_OK;
}
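/*
 * Typical call sequence (a sketch; the driver that performs parameter
 * validation and memory allocation lives outside this file). A caller that
 * has filled in an argon2_context, including context->memory, and the
 * matching argon2_instance_t is expected to run:
 *
 *     initialize(&instance, &context);   // H0 and first two blocks per lane
 *     fill_memory_blocks(&instance);     // all passes over the memory matrix
 *     finalize(&context, &instance);     // XOR last blocks, hash into out
 *
 * in that order.
 */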