diff --git a/Makefile b/Makefile
index 65275c370..bcd923695 100644
--- a/Makefile
+++ b/Makefile
@@ -26,7 +26,7 @@ CFLAGS += $(OPTFLAGS) \
 # disable sequence point warning because of AES code
 CFLAGS += -Wno-sequence-point
-CFLAGS += -Ied25519-donna -Icurve25519-donna -I.
+CFLAGS += -Ied25519-donna -I.
 CFLAGS += -DUSE_ETHEREUM=1
 CFLAGS += -DUSE_GRAPHENE=1
@@ -42,8 +42,7 @@ SRCS += ripemd160.c
 SRCS += sha2.c
 SRCS += sha3.c
 SRCS += aescrypt.c aeskey.c aestab.c aes_modes.c
-SRCS += ed25519-donna/ed25519.c
-SRCS += curve25519-donna/curve25519.c
+SRCS += ed25519-donna/ed25519.c ed25519-donna/curve25519.c
 SRCS += blake2b.c blake2s.c
 OBJS = $(SRCS:.c=.o)
@@ -80,5 +79,5 @@ tools/bip39bruteforce: tools/bip39bruteforce.o $(OBJS)
 	$(CC) tools/bip39bruteforce.o $(OBJS) -o tools/bip39bruteforce
 clean:
-	rm -f *.o ed25519-donna/*.o curve25519-donna/*.o tests test_speed test-openssl libtrezor-crypto.so
+	rm -f *.o ed25519-donna/*.o tests test_speed test-openssl libtrezor-crypto.so
 	rm -f tools/*.o tools/xpubaddrgen tools/mktable tools/bip39bruteforce
diff --git a/curve25519-donna/README.md b/curve25519-donna/README.md
deleted file mode 100644
index 4d7dfb7af..000000000
--- a/curve25519-donna/README.md
+++ /dev/null
@@ -1,107 +0,0 @@
-[curve25519](http://cr.yp.to/ecdh.html) is an elliptic curve, developed by
-[Dan Bernstein](http://cr.yp.to/djb.html), for fast
-[Diffie-Hellman](http://en.wikipedia.org/wiki/Diffie-Hellman) key agreement.
-DJB's [original implementation](http://cr.yp.to/ecdh.html) was written in a
-language of his own devising called [qhasm](http://cr.yp.to/qhasm.html).
-The original qhasm source isn't available, only the x86 32-bit assembly output.
-
-This project provides performant, portable 32-bit & 64-bit implementations.
-All implementations are of course constant time in regard to secret data.
-
-#### Performance
-
-Compiler versions are gcc 4.6.3, icc 13.1.1, clang 3.4-1~exp1.
-
-Counts are in thousands of cycles.
-
-Note that SSE2 performance may be less impressive on AMD & older CPUs with slower SSE ops!
-
-##### E5200 @ 2.5ghz, march=core2
-
-| Version     | gcc   | icc  | clang |
-|-------------|-------|------|-------|
-| 64-bit SSE2 | 278k  | 265k | 302k  |
-| 64-bit      | 273k  | 271k | 377k  |
-| 32-bit SSE2 | 304k  | 289k | 317k  |
-| 32-bit      | 1417k | 845k | 981k  |
-
-##### E3-1270 @ 3.4ghz, march=corei7-avx
-
-| Version     | gcc   | icc  | clang |
-|-------------|-------|------|-------|
-| 64-bit      | 201k  | 192k | 233k  |
-| 64-bit SSE2 | 201k  | 201k | 261k  |
-| 32-bit SSE2 | 238k  | 225k | 250k  |
-| 32-bit      | 1293k | 822k | 848k  |
-
-#### Compilation
-
-No configuration is needed.
-
-##### 32-bit
-
-    gcc curve25519.c -m32 -O3 -c
-
-##### 64-bit
-
-    gcc curve25519.c -m64 -O3 -c
-
-##### SSE2
-
-    gcc curve25519.c -m32 -O3 -c -DCURVE25519_SSE2 -msse2
-    gcc curve25519.c -m64 -O3 -c -DCURVE25519_SSE2
-
-clang, icc, and msvc are also supported.
-
-##### Named Versions
-
-Define CURVE25519_SUFFIX to append a suffix to public functions, e.g.
-`-DCURVE25519_SUFFIX=_sse2` to create curve25519_donna_sse2 and
-curve25519_donna_basepoint_sse2.
-
-#### Usage
-
-To use the code, link against `curve25519.o` and:
-
-    #include "curve25519.h"
-
-To generate a private/secret key, generate 32 cryptographically random bytes:
-
-    curve25519_key sk;
-    randombytes(sk, sizeof(curve25519_key));
-
-Manual clamping is not needed, and it is actually not possible to use unclamped
-keys due to the code taking advantage of the clamped bits internally.
-
-To generate the public key from the private/secret key:
-
-    curve25519_key pk;
-    curve25519_donna_basepoint(pk, sk);
-
-To generate a shared key with your private/secret key and someone else's public key:
-
-    curve25519_key shared;
-    curve25519_donna(shared, mysk, yourpk);
-
-Hash `shared` with a cryptographic hash before using it, or e.g. pass `shared` through
-HSalsa20/HChacha as NaCl does.
-
-#### Testing
-
-Fuzzing against a reference implementation is now available. See [fuzz/README](fuzz/README.md).
-
-Building `curve25519.c` and linking with `test.c` will run basic sanity tests and benchmark curve25519_donna.
-
-#### Papers
-
-[djb's curve25519 paper](http://cr.yp.to/ecdh/curve25519-20060209.pdf)
-
-#### License
-
-Public Domain, or MIT
\ No newline at end of file
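For reference, the usage snippets in the deleted README combine into the following end-to-end agreement check. This is a minimal sketch, not part of the library: it assumes a `randombytes()` CSPRNG as in the snippets (stubbed here with a hypothetical signature) and the `curve25519_key`, `curve25519_donna`, and `curve25519_donna_basepoint` declarations from `curve25519.h`:

    #include <string.h>
    #include "curve25519.h"

    /* hypothetical stub; supply your platform's CSPRNG */
    extern void randombytes(unsigned char *buf, size_t len);

    int main(void) {
        curve25519_key alice_sk, alice_pk, bob_sk, bob_pk;
        curve25519_key alice_shared, bob_shared;

        /* each party generates a secret key and derives its public key */
        randombytes(alice_sk, sizeof(curve25519_key));
        randombytes(bob_sk, sizeof(curve25519_key));
        curve25519_donna_basepoint(alice_pk, alice_sk);
        curve25519_donna_basepoint(bob_pk, bob_sk);

        /* both sides compute the same shared point */
        curve25519_donna(alice_shared, alice_sk, bob_pk);
        curve25519_donna(bob_shared, bob_sk, alice_pk);

        /* hash the result (e.g. HSalsa20 as in NaCl) before using it as a key;
           returns 0 when the two shared secrets agree */
        return memcmp(alice_shared, bob_shared, sizeof(curve25519_key)) != 0;
    }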
diff --git a/curve25519-donna/curve25519-donna-32bit.h b/curve25519-donna/curve25519-donna-32bit.h
deleted file mode 100644
index 5ef91a202..000000000
--- a/curve25519-donna/curve25519-donna-32bit.h
+++ /dev/null
@@ -1,466 +0,0 @@
-typedef uint32_t bignum25519[10];
-
-static const uint32_t reduce_mask_26 = (1 << 26) - 1;
-static const uint32_t reduce_mask_25 = (1 << 25) - 1;
-
-/* out = in */
-DONNA_INLINE static void
-curve25519_copy(bignum25519 out, const bignum25519 in) {
-  out[0] = in[0];
-  out[1] = in[1];
-  out[2] = in[2];
-  out[3] = in[3];
-  out[4] = in[4];
-  out[5] = in[5];
-  out[6] = in[6];
-  out[7] = in[7];
-  out[8] = in[8];
-  out[9] = in[9];
-}
-
-/* out = a + b */
-DONNA_INLINE static void
-curve25519_add(bignum25519 out, const bignum25519 a, const bignum25519 b) {
-  out[0] = a[0] + b[0];
-  out[1] = a[1] + b[1];
-  out[2] = a[2] + b[2];
-  out[3] = a[3] + b[3];
-  out[4] = a[4] + b[4];
-  out[5] = a[5] + b[5];
-  out[6] = a[6] + b[6];
-  out[7] = a[7] + b[7];
-  out[8] = a[8] + b[8];
-  out[9] = a[9] + b[9];
-}
-
-/* out = a - b */
-DONNA_INLINE static void
-curve25519_sub(bignum25519 out, const bignum25519 a, const bignum25519 b) {
-  uint32_t c;
-  out[0] = 0x7ffffda + a[0] - b[0]    ; c = (out[0] >> 26); out[0] &= reduce_mask_26;
-  out[1] = 0x3fffffe + a[1] - b[1] + c; c = (out[1] >> 25); out[1] &= reduce_mask_25;
-  out[2] = 0x7fffffe + a[2] - b[2] + c; c = (out[2] >> 26); out[2] &= reduce_mask_26;
-  out[3] = 0x3fffffe + a[3] - b[3] + c; c = (out[3] >> 25); out[3] &= reduce_mask_25;
-  out[4] = 0x7fffffe + a[4] - b[4] + c; c = (out[4] >> 26); out[4] &= reduce_mask_26;
-  out[5] = 0x3fffffe + a[5] - b[5] + c; c = (out[5] >> 25); out[5] &= reduce_mask_25;
-  out[6] = 0x7fffffe + a[6] - b[6] + c; c = (out[6] >> 26); out[6] &= reduce_mask_26;
-  out[7] = 0x3fffffe + a[7] - b[7] + c; c = (out[7] >> 25); out[7] &= reduce_mask_25;
-  out[8] = 0x7fffffe + a[8] - b[8] + c; c = (out[8] >> 26); out[8] &= reduce_mask_26;
-  out[9] = 0x3fffffe + a[9] - b[9] + c; c = (out[9] >> 25); out[9] &= reduce_mask_25;
-  out[0] += 19 * c;
-}
-
-/* out = in * scalar */
-DONNA_INLINE static void
-curve25519_scalar_product(bignum25519 out, const bignum25519 in, const uint32_t scalar) {
-  uint64_t a;
-  uint32_t c;
-  a = mul32x32_64(in[0], scalar);     out[0] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26);
-  a = mul32x32_64(in[1], scalar) + c; out[1] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25);
-  a = mul32x32_64(in[2], scalar) + c; out[2] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26);
-  a = mul32x32_64(in[3], scalar) + c; out[3] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25);
-  a = mul32x32_64(in[4], scalar) + c; out[4] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26);
-  a = mul32x32_64(in[5], scalar) + c; out[5] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25);
-  a = mul32x32_64(in[6], scalar) + c; out[6] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26);
-  a = mul32x32_64(in[7], scalar) + c; out[7] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25);
-  a = mul32x32_64(in[8], scalar) + c; out[8] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26);
-  a = mul32x32_64(in[9], scalar) + c; out[9] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25);
-  out[0] += c * 19;
-}
-
-/* out = a * b */
-DONNA_INLINE static void
-curve25519_mul(bignum25519 out, const bignum25519 a, const bignum25519 b) {
-  uint32_t r0,r1,r2,r3,r4,r5,r6,r7,r8,r9;
-  uint32_t s0,s1,s2,s3,s4,s5,s6,s7,s8,s9;
-  uint64_t m0,m1,m2,m3,m4,m5,m6,m7,m8,m9,c;
-  uint32_t p;
-
-  r0 = b[0];
-  r1 = b[1];
-  r2 = b[2];
-  r3 = b[3];
-  r4 = b[4];
-  r5 = b[5];
-  r6 = b[6];
-  r7 = b[7];
-  r8 = b[8];
-  r9 = b[9];
-
-  s0 = a[0];
-  s1 = a[1];
-  s2 = a[2];
-  s3 = a[3];
-  s4 = a[4];
-  s5 = a[5];
-  s6 = a[6];
-  s7 = a[7];
-  s8 = a[8];
-  s9 = a[9];
-
-  m1 = mul32x32_64(r0, s1) + mul32x32_64(r1, s0);
-  m3 = mul32x32_64(r0, s3) + mul32x32_64(r1, s2) + mul32x32_64(r2, s1) + mul32x32_64(r3, s0);
-  m5 = mul32x32_64(r0, s5) + mul32x32_64(r1, s4) + mul32x32_64(r2, s3) + mul32x32_64(r3, s2) + mul32x32_64(r4, s1) + mul32x32_64(r5, s0);
-  m7 = mul32x32_64(r0, s7) + mul32x32_64(r1, s6) + mul32x32_64(r2, s5) + mul32x32_64(r3, s4) + mul32x32_64(r4, s3) + mul32x32_64(r5, s2) + mul32x32_64(r6, s1) + mul32x32_64(r7, s0);
-  m9 = mul32x32_64(r0, s9) + mul32x32_64(r1, s8) + mul32x32_64(r2, s7) + mul32x32_64(r3, s6) + mul32x32_64(r4, s5) + mul32x32_64(r5, s4) + mul32x32_64(r6, s3) + mul32x32_64(r7, s2) + mul32x32_64(r8, s1) + mul32x32_64(r9, s0);
-
-  r1 *= 2;
-  r3 *= 2;
-  r5 *= 2;
-  r7 *= 2;
-
-  m0 = mul32x32_64(r0, s0);
-  m2 = mul32x32_64(r0, s2) + mul32x32_64(r1, s1) + mul32x32_64(r2, s0);
-  m4 = mul32x32_64(r0, s4) + mul32x32_64(r1, s3) + mul32x32_64(r2, s2) + mul32x32_64(r3, s1) + mul32x32_64(r4, s0);
-  m6 = mul32x32_64(r0, s6) + mul32x32_64(r1, s5) + mul32x32_64(r2, s4) + mul32x32_64(r3, s3) + mul32x32_64(r4, s2) + mul32x32_64(r5, s1) + mul32x32_64(r6, s0);
-  m8 = mul32x32_64(r0, s8) + mul32x32_64(r1, s7) + mul32x32_64(r2, s6) + mul32x32_64(r3, s5) + mul32x32_64(r4, s4) + mul32x32_64(r5, s3) + mul32x32_64(r6, s2) + mul32x32_64(r7, s1) + mul32x32_64(r8, s0);
-
-  r1 *= 19;
-  r2 *= 19;
-  r3 = (r3 / 2) * 19;
-  r4 *= 19;
-  r5 = (r5 / 2) * 19;
-  r6 *= 19;
-  r7 = (r7 / 2) * 19;
-  r8 *= 19;
-  r9 *= 19;
-
-  m1 += (mul32x32_64(r9, s2) + mul32x32_64(r8, s3) + mul32x32_64(r7, s4) + mul32x32_64(r6, s5) + mul32x32_64(r5, s6) + mul32x32_64(r4, s7) + mul32x32_64(r3, s8) +
mul32x32_64(r2, s9)); - m3 += (mul32x32_64(r9, s4) + mul32x32_64(r8, s5) + mul32x32_64(r7, s6) + mul32x32_64(r6, s7) + mul32x32_64(r5, s8) + mul32x32_64(r4, s9)); - m5 += (mul32x32_64(r9, s6) + mul32x32_64(r8, s7) + mul32x32_64(r7, s8) + mul32x32_64(r6, s9)); - m7 += (mul32x32_64(r9, s8) + mul32x32_64(r8, s9)); - - r3 *= 2; - r5 *= 2; - r7 *= 2; - r9 *= 2; - - m0 += (mul32x32_64(r9, s1) + mul32x32_64(r8, s2) + mul32x32_64(r7, s3) + mul32x32_64(r6, s4) + mul32x32_64(r5, s5) + mul32x32_64(r4, s6) + mul32x32_64(r3, s7) + mul32x32_64(r2, s8) + mul32x32_64(r1, s9)); - m2 += (mul32x32_64(r9, s3) + mul32x32_64(r8, s4) + mul32x32_64(r7, s5) + mul32x32_64(r6, s6) + mul32x32_64(r5, s7) + mul32x32_64(r4, s8) + mul32x32_64(r3, s9)); - m4 += (mul32x32_64(r9, s5) + mul32x32_64(r8, s6) + mul32x32_64(r7, s7) + mul32x32_64(r6, s8) + mul32x32_64(r5, s9)); - m6 += (mul32x32_64(r9, s7) + mul32x32_64(r8, s8) + mul32x32_64(r7, s9)); - m8 += (mul32x32_64(r9, s9)); - - r0 = (uint32_t)m0 & reduce_mask_26; c = (m0 >> 26); - m1 += c; r1 = (uint32_t)m1 & reduce_mask_25; c = (m1 >> 25); - m2 += c; r2 = (uint32_t)m2 & reduce_mask_26; c = (m2 >> 26); - m3 += c; r3 = (uint32_t)m3 & reduce_mask_25; c = (m3 >> 25); - m4 += c; r4 = (uint32_t)m4 & reduce_mask_26; c = (m4 >> 26); - m5 += c; r5 = (uint32_t)m5 & reduce_mask_25; c = (m5 >> 25); - m6 += c; r6 = (uint32_t)m6 & reduce_mask_26; c = (m6 >> 26); - m7 += c; r7 = (uint32_t)m7 & reduce_mask_25; c = (m7 >> 25); - m8 += c; r8 = (uint32_t)m8 & reduce_mask_26; c = (m8 >> 26); - m9 += c; r9 = (uint32_t)m9 & reduce_mask_25; p = (uint32_t)(m9 >> 25); - m0 = r0 + mul32x32_64(p,19); r0 = (uint32_t)m0 & reduce_mask_26; p = (uint32_t)(m0 >> 26); - r1 += p; - - out[0] = r0; - out[1] = r1; - out[2] = r2; - out[3] = r3; - out[4] = r4; - out[5] = r5; - out[6] = r6; - out[7] = r7; - out[8] = r8; - out[9] = r9; -} - -/* out = in * in */ -DONNA_INLINE static void -curve25519_square(bignum25519 out, const bignum25519 in) { - uint32_t r0,r1,r2,r3,r4,r5,r6,r7,r8,r9; - uint32_t d6,d7,d8,d9; - uint64_t m0,m1,m2,m3,m4,m5,m6,m7,m8,m9,c; - uint32_t p; - - r0 = in[0]; - r1 = in[1]; - r2 = in[2]; - r3 = in[3]; - r4 = in[4]; - r5 = in[5]; - r6 = in[6]; - r7 = in[7]; - r8 = in[8]; - r9 = in[9]; - - - m0 = mul32x32_64(r0, r0); - r0 *= 2; - m1 = mul32x32_64(r0, r1); - m2 = mul32x32_64(r0, r2) + mul32x32_64(r1, r1 * 2); - r1 *= 2; - m3 = mul32x32_64(r0, r3) + mul32x32_64(r1, r2 ); - m4 = mul32x32_64(r0, r4) + mul32x32_64(r1, r3 * 2) + mul32x32_64(r2, r2); - r2 *= 2; - m5 = mul32x32_64(r0, r5) + mul32x32_64(r1, r4 ) + mul32x32_64(r2, r3); - m6 = mul32x32_64(r0, r6) + mul32x32_64(r1, r5 * 2) + mul32x32_64(r2, r4) + mul32x32_64(r3, r3 * 2); - r3 *= 2; - m7 = mul32x32_64(r0, r7) + mul32x32_64(r1, r6 ) + mul32x32_64(r2, r5) + mul32x32_64(r3, r4 ); - m8 = mul32x32_64(r0, r8) + mul32x32_64(r1, r7 * 2) + mul32x32_64(r2, r6) + mul32x32_64(r3, r5 * 2) + mul32x32_64(r4, r4 ); - m9 = mul32x32_64(r0, r9) + mul32x32_64(r1, r8 ) + mul32x32_64(r2, r7) + mul32x32_64(r3, r6 ) + mul32x32_64(r4, r5 * 2); - - d6 = r6 * 19; - d7 = r7 * 2 * 19; - d8 = r8 * 19; - d9 = r9 * 2 * 19; - - m0 += (mul32x32_64(d9, r1 ) + mul32x32_64(d8, r2 ) + mul32x32_64(d7, r3 ) + mul32x32_64(d6, r4 * 2) + mul32x32_64(r5, r5 * 2 * 19)); - m1 += (mul32x32_64(d9, r2 / 2) + mul32x32_64(d8, r3 ) + mul32x32_64(d7, r4 ) + mul32x32_64(d6, r5 * 2)); - m2 += (mul32x32_64(d9, r3 ) + mul32x32_64(d8, r4 * 2) + mul32x32_64(d7, r5 * 2) + mul32x32_64(d6, r6 )); - m3 += (mul32x32_64(d9, r4 ) + mul32x32_64(d8, r5 * 2) + mul32x32_64(d7, r6 )); - m4 += 
(mul32x32_64(d9, r5 * 2) + mul32x32_64(d8, r6 * 2) + mul32x32_64(d7, r7 )); - m5 += (mul32x32_64(d9, r6 ) + mul32x32_64(d8, r7 * 2)); - m6 += (mul32x32_64(d9, r7 * 2) + mul32x32_64(d8, r8 )); - m7 += (mul32x32_64(d9, r8 )); - m8 += (mul32x32_64(d9, r9 )); - - r0 = (uint32_t)m0 & reduce_mask_26; c = (m0 >> 26); - m1 += c; r1 = (uint32_t)m1 & reduce_mask_25; c = (m1 >> 25); - m2 += c; r2 = (uint32_t)m2 & reduce_mask_26; c = (m2 >> 26); - m3 += c; r3 = (uint32_t)m3 & reduce_mask_25; c = (m3 >> 25); - m4 += c; r4 = (uint32_t)m4 & reduce_mask_26; c = (m4 >> 26); - m5 += c; r5 = (uint32_t)m5 & reduce_mask_25; c = (m5 >> 25); - m6 += c; r6 = (uint32_t)m6 & reduce_mask_26; c = (m6 >> 26); - m7 += c; r7 = (uint32_t)m7 & reduce_mask_25; c = (m7 >> 25); - m8 += c; r8 = (uint32_t)m8 & reduce_mask_26; c = (m8 >> 26); - m9 += c; r9 = (uint32_t)m9 & reduce_mask_25; p = (uint32_t)(m9 >> 25); - m0 = r0 + mul32x32_64(p,19); r0 = (uint32_t)m0 & reduce_mask_26; p = (uint32_t)(m0 >> 26); - r1 += p; - - out[0] = r0; - out[1] = r1; - out[2] = r2; - out[3] = r3; - out[4] = r4; - out[5] = r5; - out[6] = r6; - out[7] = r7; - out[8] = r8; - out[9] = r9; -} - -/* out = in^(2 * count) */ -static void -curve25519_square_times(bignum25519 out, const bignum25519 in, int count) { - uint32_t r0,r1,r2,r3,r4,r5,r6,r7,r8,r9; - uint32_t d6,d7,d8,d9; - uint64_t m0,m1,m2,m3,m4,m5,m6,m7,m8,m9,c; - uint32_t p; - - r0 = in[0]; - r1 = in[1]; - r2 = in[2]; - r3 = in[3]; - r4 = in[4]; - r5 = in[5]; - r6 = in[6]; - r7 = in[7]; - r8 = in[8]; - r9 = in[9]; - - do { - m0 = mul32x32_64(r0, r0); - r0 *= 2; - m1 = mul32x32_64(r0, r1); - m2 = mul32x32_64(r0, r2) + mul32x32_64(r1, r1 * 2); - r1 *= 2; - m3 = mul32x32_64(r0, r3) + mul32x32_64(r1, r2 ); - m4 = mul32x32_64(r0, r4) + mul32x32_64(r1, r3 * 2) + mul32x32_64(r2, r2); - r2 *= 2; - m5 = mul32x32_64(r0, r5) + mul32x32_64(r1, r4 ) + mul32x32_64(r2, r3); - m6 = mul32x32_64(r0, r6) + mul32x32_64(r1, r5 * 2) + mul32x32_64(r2, r4) + mul32x32_64(r3, r3 * 2); - r3 *= 2; - m7 = mul32x32_64(r0, r7) + mul32x32_64(r1, r6 ) + mul32x32_64(r2, r5) + mul32x32_64(r3, r4 ); - m8 = mul32x32_64(r0, r8) + mul32x32_64(r1, r7 * 2) + mul32x32_64(r2, r6) + mul32x32_64(r3, r5 * 2) + mul32x32_64(r4, r4 ); - m9 = mul32x32_64(r0, r9) + mul32x32_64(r1, r8 ) + mul32x32_64(r2, r7) + mul32x32_64(r3, r6 ) + mul32x32_64(r4, r5 * 2); - - d6 = r6 * 19; - d7 = r7 * 2 * 19; - d8 = r8 * 19; - d9 = r9 * 2 * 19; - - m0 += (mul32x32_64(d9, r1 ) + mul32x32_64(d8, r2 ) + mul32x32_64(d7, r3 ) + mul32x32_64(d6, r4 * 2) + mul32x32_64(r5, r5 * 2 * 19)); - m1 += (mul32x32_64(d9, r2 / 2) + mul32x32_64(d8, r3 ) + mul32x32_64(d7, r4 ) + mul32x32_64(d6, r5 * 2)); - m2 += (mul32x32_64(d9, r3 ) + mul32x32_64(d8, r4 * 2) + mul32x32_64(d7, r5 * 2) + mul32x32_64(d6, r6 )); - m3 += (mul32x32_64(d9, r4 ) + mul32x32_64(d8, r5 * 2) + mul32x32_64(d7, r6 )); - m4 += (mul32x32_64(d9, r5 * 2) + mul32x32_64(d8, r6 * 2) + mul32x32_64(d7, r7 )); - m5 += (mul32x32_64(d9, r6 ) + mul32x32_64(d8, r7 * 2)); - m6 += (mul32x32_64(d9, r7 * 2) + mul32x32_64(d8, r8 )); - m7 += (mul32x32_64(d9, r8 )); - m8 += (mul32x32_64(d9, r9 )); - - r0 = (uint32_t)m0 & reduce_mask_26; c = (m0 >> 26); - m1 += c; r1 = (uint32_t)m1 & reduce_mask_25; c = (m1 >> 25); - m2 += c; r2 = (uint32_t)m2 & reduce_mask_26; c = (m2 >> 26); - m3 += c; r3 = (uint32_t)m3 & reduce_mask_25; c = (m3 >> 25); - m4 += c; r4 = (uint32_t)m4 & reduce_mask_26; c = (m4 >> 26); - m5 += c; r5 = (uint32_t)m5 & reduce_mask_25; c = (m5 >> 25); - m6 += c; r6 = (uint32_t)m6 & reduce_mask_26; c = (m6 >> 26); - m7 += 
c; r7 = (uint32_t)m7 & reduce_mask_25; c = (m7 >> 25); - m8 += c; r8 = (uint32_t)m8 & reduce_mask_26; c = (m8 >> 26); - m9 += c; r9 = (uint32_t)m9 & reduce_mask_25; p = (uint32_t)(m9 >> 25); - m0 = r0 + mul32x32_64(p,19); r0 = (uint32_t)m0 & reduce_mask_26; p = (uint32_t)(m0 >> 26); - r1 += p; - } while (--count); - - out[0] = r0; - out[1] = r1; - out[2] = r2; - out[3] = r3; - out[4] = r4; - out[5] = r5; - out[6] = r6; - out[7] = r7; - out[8] = r8; - out[9] = r9; -} - - -/* Take a little-endian, 32-byte number and expand it into polynomial form */ -static void -curve25519_expand(bignum25519 out, const unsigned char in[32]) { - static const union { uint8_t b[2]; uint16_t s; } endian_check = {{1,0}}; - uint32_t x0,x1,x2,x3,x4,x5,x6,x7; - - if (endian_check.s == 1) { - x0 = *(uint32_t *)(in + 0); - x1 = *(uint32_t *)(in + 4); - x2 = *(uint32_t *)(in + 8); - x3 = *(uint32_t *)(in + 12); - x4 = *(uint32_t *)(in + 16); - x5 = *(uint32_t *)(in + 20); - x6 = *(uint32_t *)(in + 24); - x7 = *(uint32_t *)(in + 28); - } else { - #define F(s) \ - ((((uint32_t)in[s + 0]) ) | \ - (((uint32_t)in[s + 1]) << 8) | \ - (((uint32_t)in[s + 2]) << 16) | \ - (((uint32_t)in[s + 3]) << 24)) - x0 = F(0); - x1 = F(4); - x2 = F(8); - x3 = F(12); - x4 = F(16); - x5 = F(20); - x6 = F(24); - x7 = F(28); - #undef F - } - - out[0] = ( x0 ) & reduce_mask_26; - out[1] = ((((uint64_t)x1 << 32) | x0) >> 26) & reduce_mask_25; - out[2] = ((((uint64_t)x2 << 32) | x1) >> 19) & reduce_mask_26; - out[3] = ((((uint64_t)x3 << 32) | x2) >> 13) & reduce_mask_25; - out[4] = (( x3) >> 6) & reduce_mask_26; - out[5] = ( x4 ) & reduce_mask_25; - out[6] = ((((uint64_t)x5 << 32) | x4) >> 25) & reduce_mask_26; - out[7] = ((((uint64_t)x6 << 32) | x5) >> 19) & reduce_mask_25; - out[8] = ((((uint64_t)x7 << 32) | x6) >> 12) & reduce_mask_26; - out[9] = (( x7) >> 6) & reduce_mask_25; /* ignore the top bit */ -} - -/* Take a fully reduced polynomial form number and contract it into a little-endian, 32-byte array */ -static void -curve25519_contract(unsigned char out[32], const bignum25519 in) { - bignum25519 f; - curve25519_copy(f, in); - - #define carry_pass() \ - f[1] += f[0] >> 26; f[0] &= reduce_mask_26; \ - f[2] += f[1] >> 25; f[1] &= reduce_mask_25; \ - f[3] += f[2] >> 26; f[2] &= reduce_mask_26; \ - f[4] += f[3] >> 25; f[3] &= reduce_mask_25; \ - f[5] += f[4] >> 26; f[4] &= reduce_mask_26; \ - f[6] += f[5] >> 25; f[5] &= reduce_mask_25; \ - f[7] += f[6] >> 26; f[6] &= reduce_mask_26; \ - f[8] += f[7] >> 25; f[7] &= reduce_mask_25; \ - f[9] += f[8] >> 26; f[8] &= reduce_mask_26; - - #define carry_pass_full() \ - carry_pass() \ - f[0] += 19 * (f[9] >> 25); f[9] &= reduce_mask_25; - - #define carry_pass_final() \ - carry_pass() \ - f[9] &= reduce_mask_25; - - carry_pass_full() - carry_pass_full() - - /* now t is between 0 and 2^255-1, properly carried. */ - /* case 1: between 0 and 2^255-20. case 2: between 2^255-19 and 2^255-1. */ - f[0] += 19; - carry_pass_full() - - /* now between 19 and 2^255-1 in both cases, and offset by 19. */ - f[0] += (1 << 26) - 19; - f[1] += (1 << 25) - 1; - f[2] += (1 << 26) - 1; - f[3] += (1 << 25) - 1; - f[4] += (1 << 26) - 1; - f[5] += (1 << 25) - 1; - f[6] += (1 << 26) - 1; - f[7] += (1 << 25) - 1; - f[8] += (1 << 26) - 1; - f[9] += (1 << 25) - 1; - - /* now between 2^255 and 2^256-20, and offset by 2^255. 
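The final carry pass below masks f[9] back to 25 bits without folding the top carry into f[0]; discarding bit 255 this way subtracts the 2^255 offset again and leaves the canonical representative mod 2^255-19.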
*/ - carry_pass_final() - - #undef carry_pass - #undef carry_full - #undef carry_final - - f[1] <<= 2; - f[2] <<= 3; - f[3] <<= 5; - f[4] <<= 6; - f[6] <<= 1; - f[7] <<= 3; - f[8] <<= 4; - f[9] <<= 6; - - #define F(i, s) \ - out[s+0] |= (unsigned char )(f[i] & 0xff); \ - out[s+1] = (unsigned char )((f[i] >> 8) & 0xff); \ - out[s+2] = (unsigned char )((f[i] >> 16) & 0xff); \ - out[s+3] = (unsigned char )((f[i] >> 24) & 0xff); - - out[0] = 0; - out[16] = 0; - F(0,0); - F(1,3); - F(2,6); - F(3,9); - F(4,12); - F(5,16); - F(6,19); - F(7,22); - F(8,25); - F(9,28); - #undef F -} - -/* - * Swap the contents of [qx] and [qpx] iff @swap is non-zero - */ -DONNA_INLINE static void -curve25519_swap_conditional(bignum25519 x, bignum25519 qpx, uint32_t iswap) { - const uint32_t swap = (uint32_t)(-(int32_t)iswap); - uint32_t x0,x1,x2,x3,x4,x5,x6,x7,x8,x9; - - x0 = swap & (x[0] ^ qpx[0]); x[0] ^= x0; qpx[0] ^= x0; - x1 = swap & (x[1] ^ qpx[1]); x[1] ^= x1; qpx[1] ^= x1; - x2 = swap & (x[2] ^ qpx[2]); x[2] ^= x2; qpx[2] ^= x2; - x3 = swap & (x[3] ^ qpx[3]); x[3] ^= x3; qpx[3] ^= x3; - x4 = swap & (x[4] ^ qpx[4]); x[4] ^= x4; qpx[4] ^= x4; - x5 = swap & (x[5] ^ qpx[5]); x[5] ^= x5; qpx[5] ^= x5; - x6 = swap & (x[6] ^ qpx[6]); x[6] ^= x6; qpx[6] ^= x6; - x7 = swap & (x[7] ^ qpx[7]); x[7] ^= x7; qpx[7] ^= x7; - x8 = swap & (x[8] ^ qpx[8]); x[8] ^= x8; qpx[8] ^= x8; - x9 = swap & (x[9] ^ qpx[9]); x[9] ^= x9; qpx[9] ^= x9; -} - diff --git a/curve25519-donna/curve25519-donna-64bit.h b/curve25519-donna/curve25519-donna-64bit.h deleted file mode 100644 index ec4df526b..000000000 --- a/curve25519-donna/curve25519-donna-64bit.h +++ /dev/null @@ -1,345 +0,0 @@ -typedef uint64_t bignum25519[5]; - -static const uint64_t reduce_mask_51 = ((uint64_t)1 << 51) - 1; -static const uint64_t reduce_mask_52 = ((uint64_t)1 << 52) - 1; - -/* out = in */ -DONNA_INLINE static void -curve25519_copy(bignum25519 out, const bignum25519 in) { - out[0] = in[0]; - out[1] = in[1]; - out[2] = in[2]; - out[3] = in[3]; - out[4] = in[4]; -} - -/* out = a + b */ -DONNA_INLINE static void -curve25519_add(bignum25519 out, const bignum25519 a, const bignum25519 b) { - out[0] = a[0] + b[0]; - out[1] = a[1] + b[1]; - out[2] = a[2] + b[2]; - out[3] = a[3] + b[3]; - out[4] = a[4] + b[4]; -} - -static const uint64_t two54m152 = (((uint64_t)1) << 54) - 152; -static const uint64_t two54m8 = (((uint64_t)1) << 54) - 8; - -/* out = a - b */ -DONNA_INLINE static void -curve25519_sub(bignum25519 out, const bignum25519 a, const bignum25519 b) { - out[0] = a[0] + two54m152 - b[0]; - out[1] = a[1] + two54m8 - b[1]; - out[2] = a[2] + two54m8 - b[2]; - out[3] = a[3] + two54m8 - b[3]; - out[4] = a[4] + two54m8 - b[4]; -} - - -/* out = (in * scalar) */ -DONNA_INLINE static void -curve25519_scalar_product(bignum25519 out, const bignum25519 in, const uint64_t scalar) { - uint128_t a; - uint64_t c; - -#if defined(HAVE_NATIVE_UINT128) - a = ((uint128_t) in[0]) * scalar; out[0] = (uint64_t)a & reduce_mask_51; c = (uint64_t)(a >> 51); - a = ((uint128_t) in[1]) * scalar + c; out[1] = (uint64_t)a & reduce_mask_51; c = (uint64_t)(a >> 51); - a = ((uint128_t) in[2]) * scalar + c; out[2] = (uint64_t)a & reduce_mask_51; c = (uint64_t)(a >> 51); - a = ((uint128_t) in[3]) * scalar + c; out[3] = (uint64_t)a & reduce_mask_51; c = (uint64_t)(a >> 51); - a = ((uint128_t) in[4]) * scalar + c; out[4] = (uint64_t)a & reduce_mask_51; c = (uint64_t)(a >> 51); - out[0] += c * 19; -#else - mul64x64_128(a, in[0], scalar) out[0] = lo128(a) & reduce_mask_51; shr128(c, a, 51); - mul64x64_128(a, 
in[1], scalar) add128_64(a, c) out[1] = lo128(a) & reduce_mask_51; shr128(c, a, 51); - mul64x64_128(a, in[2], scalar) add128_64(a, c) out[2] = lo128(a) & reduce_mask_51; shr128(c, a, 51); - mul64x64_128(a, in[3], scalar) add128_64(a, c) out[3] = lo128(a) & reduce_mask_51; shr128(c, a, 51); - mul64x64_128(a, in[4], scalar) add128_64(a, c) out[4] = lo128(a) & reduce_mask_51; shr128(c, a, 51); - out[0] += c * 19; -#endif -} - -/* out = a * b */ -DONNA_INLINE static void -curve25519_mul(bignum25519 out, const bignum25519 a, const bignum25519 b) { -#if !defined(HAVE_NATIVE_UINT128) - uint128_t mul; -#endif - uint128_t t[5]; - uint64_t r0,r1,r2,r3,r4,s0,s1,s2,s3,s4,c; - - r0 = b[0]; - r1 = b[1]; - r2 = b[2]; - r3 = b[3]; - r4 = b[4]; - - s0 = a[0]; - s1 = a[1]; - s2 = a[2]; - s3 = a[3]; - s4 = a[4]; - -#if defined(HAVE_NATIVE_UINT128) - t[0] = ((uint128_t) r0) * s0; - t[1] = ((uint128_t) r0) * s1 + ((uint128_t) r1) * s0; - t[2] = ((uint128_t) r0) * s2 + ((uint128_t) r2) * s0 + ((uint128_t) r1) * s1; - t[3] = ((uint128_t) r0) * s3 + ((uint128_t) r3) * s0 + ((uint128_t) r1) * s2 + ((uint128_t) r2) * s1; - t[4] = ((uint128_t) r0) * s4 + ((uint128_t) r4) * s0 + ((uint128_t) r3) * s1 + ((uint128_t) r1) * s3 + ((uint128_t) r2) * s2; -#else - mul64x64_128(t[0], r0, s0) - mul64x64_128(t[1], r0, s1) mul64x64_128(mul, r1, s0) add128(t[1], mul) - mul64x64_128(t[2], r0, s2) mul64x64_128(mul, r2, s0) add128(t[2], mul) mul64x64_128(mul, r1, s1) add128(t[2], mul) - mul64x64_128(t[3], r0, s3) mul64x64_128(mul, r3, s0) add128(t[3], mul) mul64x64_128(mul, r1, s2) add128(t[3], mul) mul64x64_128(mul, r2, s1) add128(t[3], mul) - mul64x64_128(t[4], r0, s4) mul64x64_128(mul, r4, s0) add128(t[4], mul) mul64x64_128(mul, r3, s1) add128(t[4], mul) mul64x64_128(mul, r1, s3) add128(t[4], mul) mul64x64_128(mul, r2, s2) add128(t[4], mul) -#endif - - r1 *= 19; - r2 *= 19; - r3 *= 19; - r4 *= 19; - -#if defined(HAVE_NATIVE_UINT128) - t[0] += ((uint128_t) r4) * s1 + ((uint128_t) r1) * s4 + ((uint128_t) r2) * s3 + ((uint128_t) r3) * s2; - t[1] += ((uint128_t) r4) * s2 + ((uint128_t) r2) * s4 + ((uint128_t) r3) * s3; - t[2] += ((uint128_t) r4) * s3 + ((uint128_t) r3) * s4; - t[3] += ((uint128_t) r4) * s4; -#else - mul64x64_128(mul, r4, s1) add128(t[0], mul) mul64x64_128(mul, r1, s4) add128(t[0], mul) mul64x64_128(mul, r2, s3) add128(t[0], mul) mul64x64_128(mul, r3, s2) add128(t[0], mul) - mul64x64_128(mul, r4, s2) add128(t[1], mul) mul64x64_128(mul, r2, s4) add128(t[1], mul) mul64x64_128(mul, r3, s3) add128(t[1], mul) - mul64x64_128(mul, r4, s3) add128(t[2], mul) mul64x64_128(mul, r3, s4) add128(t[2], mul) - mul64x64_128(mul, r4, s4) add128(t[3], mul) -#endif - - r0 = lo128(t[0]) & reduce_mask_51; shr128(c, t[0], 51); - add128_64(t[1], c) r1 = lo128(t[1]) & reduce_mask_51; shr128(c, t[1], 51); - add128_64(t[2], c) r2 = lo128(t[2]) & reduce_mask_51; shr128(c, t[2], 51); - add128_64(t[3], c) r3 = lo128(t[3]) & reduce_mask_51; shr128(c, t[3], 51); - add128_64(t[4], c) r4 = lo128(t[4]) & reduce_mask_51; shr128(c, t[4], 51); - r0 += c * 19; c = r0 >> 51; r0 = r0 & reduce_mask_51; - r1 += c; - - out[0] = r0; - out[1] = r1; - out[2] = r2; - out[3] = r3; - out[4] = r4; -} - -/* out = in^(2 * count) */ -DONNA_INLINE static void -curve25519_square_times(bignum25519 out, const bignum25519 in, uint64_t count) { -#if !defined(HAVE_NATIVE_UINT128) - uint128_t mul; -#endif - uint128_t t[5]; - uint64_t r0,r1,r2,r3,r4,c; - uint64_t d0,d1,d2,d4,d419; - - r0 = in[0]; - r1 = in[1]; - r2 = in[2]; - r3 = in[3]; - r4 = in[4]; - - do { - d0 = r0 * 2; - 
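/* precompute the doubled and 19-scaled limbs (2*r0, 2*r1, 38*r2, 19*r4, 38*r4) shared by the symmetric product terms below */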
d1 = r1 * 2; - d2 = r2 * 2 * 19; - d419 = r4 * 19; - d4 = d419 * 2; - -#if defined(HAVE_NATIVE_UINT128) - t[0] = ((uint128_t) r0) * r0 + ((uint128_t) d4) * r1 + (((uint128_t) d2) * (r3 )); - t[1] = ((uint128_t) d0) * r1 + ((uint128_t) d4) * r2 + (((uint128_t) r3) * (r3 * 19)); - t[2] = ((uint128_t) d0) * r2 + ((uint128_t) r1) * r1 + (((uint128_t) d4) * (r3 )); - t[3] = ((uint128_t) d0) * r3 + ((uint128_t) d1) * r2 + (((uint128_t) r4) * (d419 )); - t[4] = ((uint128_t) d0) * r4 + ((uint128_t) d1) * r3 + (((uint128_t) r2) * (r2 )); -#else - mul64x64_128(t[0], r0, r0) mul64x64_128(mul, d4, r1) add128(t[0], mul) mul64x64_128(mul, d2, r3) add128(t[0], mul) - mul64x64_128(t[1], d0, r1) mul64x64_128(mul, d4, r2) add128(t[1], mul) mul64x64_128(mul, r3, r3 * 19) add128(t[1], mul) - mul64x64_128(t[2], d0, r2) mul64x64_128(mul, r1, r1) add128(t[2], mul) mul64x64_128(mul, d4, r3) add128(t[2], mul) - mul64x64_128(t[3], d0, r3) mul64x64_128(mul, d1, r2) add128(t[3], mul) mul64x64_128(mul, r4, d419) add128(t[3], mul) - mul64x64_128(t[4], d0, r4) mul64x64_128(mul, d1, r3) add128(t[4], mul) mul64x64_128(mul, r2, r2) add128(t[4], mul) -#endif - - r0 = lo128(t[0]) & reduce_mask_51; shr128(c, t[0], 51); - add128_64(t[1], c) r1 = lo128(t[1]) & reduce_mask_51; shr128(c, t[1], 51); - add128_64(t[2], c) r2 = lo128(t[2]) & reduce_mask_51; shr128(c, t[2], 51); - add128_64(t[3], c) r3 = lo128(t[3]) & reduce_mask_51; shr128(c, t[3], 51); - add128_64(t[4], c) r4 = lo128(t[4]) & reduce_mask_51; shr128(c, t[4], 51); - r0 += c * 19; c = r0 >> 51; r0 = r0 & reduce_mask_51; - r1 += c; - } while(--count); - - out[0] = r0; - out[1] = r1; - out[2] = r2; - out[3] = r3; - out[4] = r4; -} - -DONNA_INLINE static void -curve25519_square(bignum25519 out, const bignum25519 in) { -#if !defined(HAVE_NATIVE_UINT128) - uint128_t mul; -#endif - uint128_t t[5]; - uint64_t r0,r1,r2,r3,r4,c; - uint64_t d0,d1,d2,d4,d419; - - r0 = in[0]; - r1 = in[1]; - r2 = in[2]; - r3 = in[3]; - r4 = in[4]; - - d0 = r0 * 2; - d1 = r1 * 2; - d2 = r2 * 2 * 19; - d419 = r4 * 19; - d4 = d419 * 2; - -#if defined(HAVE_NATIVE_UINT128) - t[0] = ((uint128_t) r0) * r0 + ((uint128_t) d4) * r1 + (((uint128_t) d2) * (r3 )); - t[1] = ((uint128_t) d0) * r1 + ((uint128_t) d4) * r2 + (((uint128_t) r3) * (r3 * 19)); - t[2] = ((uint128_t) d0) * r2 + ((uint128_t) r1) * r1 + (((uint128_t) d4) * (r3 )); - t[3] = ((uint128_t) d0) * r3 + ((uint128_t) d1) * r2 + (((uint128_t) r4) * (d419 )); - t[4] = ((uint128_t) d0) * r4 + ((uint128_t) d1) * r3 + (((uint128_t) r2) * (r2 )); -#else - mul64x64_128(t[0], r0, r0) mul64x64_128(mul, d4, r1) add128(t[0], mul) mul64x64_128(mul, d2, r3) add128(t[0], mul) - mul64x64_128(t[1], d0, r1) mul64x64_128(mul, d4, r2) add128(t[1], mul) mul64x64_128(mul, r3, r3 * 19) add128(t[1], mul) - mul64x64_128(t[2], d0, r2) mul64x64_128(mul, r1, r1) add128(t[2], mul) mul64x64_128(mul, d4, r3) add128(t[2], mul) - mul64x64_128(t[3], d0, r3) mul64x64_128(mul, d1, r2) add128(t[3], mul) mul64x64_128(mul, r4, d419) add128(t[3], mul) - mul64x64_128(t[4], d0, r4) mul64x64_128(mul, d1, r3) add128(t[4], mul) mul64x64_128(mul, r2, r2) add128(t[4], mul) -#endif - - r0 = lo128(t[0]) & reduce_mask_51; shr128(c, t[0], 51); - add128_64(t[1], c) r1 = lo128(t[1]) & reduce_mask_51; shr128(c, t[1], 51); - add128_64(t[2], c) r2 = lo128(t[2]) & reduce_mask_51; shr128(c, t[2], 51); - add128_64(t[3], c) r3 = lo128(t[3]) & reduce_mask_51; shr128(c, t[3], 51); - add128_64(t[4], c) r4 = lo128(t[4]) & reduce_mask_51; shr128(c, t[4], 51); - r0 += c * 19; c = r0 >> 51; r0 = r0 & 
reduce_mask_51; - r1 += c; - - out[0] = r0; - out[1] = r1; - out[2] = r2; - out[3] = r3; - out[4] = r4; -} - - -/* Take a little-endian, 32-byte number and expand it into polynomial form */ -DONNA_INLINE static void -curve25519_expand(bignum25519 out, const unsigned char *in) { - static const union { uint8_t b[2]; uint16_t s; } endian_check = {{1,0}}; - uint64_t x0,x1,x2,x3; - - if (endian_check.s == 1) { - x0 = *(uint64_t *)(in + 0); - x1 = *(uint64_t *)(in + 8); - x2 = *(uint64_t *)(in + 16); - x3 = *(uint64_t *)(in + 24); - } else { - #define F(s) \ - ((((uint64_t)in[s + 0]) ) | \ - (((uint64_t)in[s + 1]) << 8) | \ - (((uint64_t)in[s + 2]) << 16) | \ - (((uint64_t)in[s + 3]) << 24) | \ - (((uint64_t)in[s + 4]) << 32) | \ - (((uint64_t)in[s + 5]) << 40) | \ - (((uint64_t)in[s + 6]) << 48) | \ - (((uint64_t)in[s + 7]) << 56)) - - x0 = F(0); - x1 = F(8); - x2 = F(16); - x3 = F(24); - } - - out[0] = x0 & reduce_mask_51; x0 = (x0 >> 51) | (x1 << 13); - out[1] = x0 & reduce_mask_51; x1 = (x1 >> 38) | (x2 << 26); - out[2] = x1 & reduce_mask_51; x2 = (x2 >> 25) | (x3 << 39); - out[3] = x2 & reduce_mask_51; x3 = (x3 >> 12); - out[4] = x3 & reduce_mask_51; /* ignore the top bit */ -} - -/* Take a fully reduced polynomial form number and contract it into a - * little-endian, 32-byte array - */ -DONNA_INLINE static void -curve25519_contract(unsigned char *out, const bignum25519 input) { - uint64_t t[5]; - uint64_t f, i; - - t[0] = input[0]; - t[1] = input[1]; - t[2] = input[2]; - t[3] = input[3]; - t[4] = input[4]; - - #define curve25519_contract_carry() \ - t[1] += t[0] >> 51; t[0] &= reduce_mask_51; \ - t[2] += t[1] >> 51; t[1] &= reduce_mask_51; \ - t[3] += t[2] >> 51; t[2] &= reduce_mask_51; \ - t[4] += t[3] >> 51; t[3] &= reduce_mask_51; - - #define curve25519_contract_carry_full() curve25519_contract_carry() \ - t[0] += 19 * (t[4] >> 51); t[4] &= reduce_mask_51; - - #define curve25519_contract_carry_final() curve25519_contract_carry() \ - t[4] &= reduce_mask_51; - - curve25519_contract_carry_full() - curve25519_contract_carry_full() - - /* now t is between 0 and 2^255-1, properly carried. */ - /* case 1: between 0 and 2^255-20. case 2: between 2^255-19 and 2^255-1. */ - t[0] += 19; - curve25519_contract_carry_full() - - /* now between 19 and 2^255-1 in both cases, and offset by 19. */ - t[0] += 0x8000000000000 - 19; - t[1] += 0x8000000000000 - 1; - t[2] += 0x8000000000000 - 1; - t[3] += 0x8000000000000 - 1; - t[4] += 0x8000000000000 - 1; - - /* now between 2^255 and 2^256-20, and offset by 2^255. 
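As in the 32-bit contract, the final masked carry leaves t[4] at 51 bits and discards bit 255, cancelling the 2^255 offset and leaving the canonical value mod p.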
*/ - curve25519_contract_carry_final() - - #define write51full(n,shift) \ - f = ((t[n] >> shift) | (t[n+1] << (51 - shift))); \ - for (i = 0; i < 8; i++, f >>= 8) *out++ = (unsigned char)f; - #define write51(n) write51full(n,13*n) - - write51(0) - write51(1) - write51(2) - write51(3) - - #undef curve25519_contract_carry - #undef curve25519_contract_carry_full - #undef curve25519_contract_carry_final - #undef write51full - #undef write51 -} - -/* - * Swap the contents of [qx] and [qpx] iff @swap is non-zero - */ -DONNA_INLINE static void -curve25519_swap_conditional(bignum25519 x, bignum25519 qpx, uint64_t iswap) { - const uint64_t swap = (uint64_t)(-(int64_t)iswap); - uint64_t x0,x1,x2,x3,x4; - - x0 = swap & (x[0] ^ qpx[0]); x[0] ^= x0; qpx[0] ^= x0; - x1 = swap & (x[1] ^ qpx[1]); x[1] ^= x1; qpx[1] ^= x1; - x2 = swap & (x[2] ^ qpx[2]); x[2] ^= x2; qpx[2] ^= x2; - x3 = swap & (x[3] ^ qpx[3]); x[3] ^= x3; qpx[3] ^= x3; - x4 = swap & (x[4] ^ qpx[4]); x[4] ^= x4; qpx[4] ^= x4; - -} - diff --git a/curve25519-donna/curve25519-donna-common.h b/curve25519-donna/curve25519-donna-common.h deleted file mode 100644 index 6b3ed2ad6..000000000 --- a/curve25519-donna/curve25519-donna-common.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * In: b = 2^5 - 2^0 - * Out: b = 2^250 - 2^0 - */ -static void -curve25519_pow_two5mtwo0_two250mtwo0(bignum25519 b) { - bignum25519 ALIGN(16) t0,c; - - /* 2^5 - 2^0 */ /* b */ - /* 2^10 - 2^5 */ curve25519_square_times(t0, b, 5); - /* 2^10 - 2^0 */ curve25519_mul(b, t0, b); - /* 2^20 - 2^10 */ curve25519_square_times(t0, b, 10); - /* 2^20 - 2^0 */ curve25519_mul(c, t0, b); - /* 2^40 - 2^20 */ curve25519_square_times(t0, c, 20); - /* 2^40 - 2^0 */ curve25519_mul(t0, t0, c); - /* 2^50 - 2^10 */ curve25519_square_times(t0, t0, 10); - /* 2^50 - 2^0 */ curve25519_mul(b, t0, b); - /* 2^100 - 2^50 */ curve25519_square_times(t0, b, 50); - /* 2^100 - 2^0 */ curve25519_mul(c, t0, b); - /* 2^200 - 2^100 */ curve25519_square_times(t0, c, 100); - /* 2^200 - 2^0 */ curve25519_mul(t0, t0, c); - /* 2^250 - 2^50 */ curve25519_square_times(t0, t0, 50); - /* 2^250 - 2^0 */ curve25519_mul(b, t0, b); -} - -/* - * z^(p - 2) = z(2^255 - 21) - */ -static void -curve25519_recip(bignum25519 out, const bignum25519 z) { - bignum25519 ALIGN(16) a,t0,b; - - /* 2 */ curve25519_square(a, z); /* a = 2 */ - /* 8 */ curve25519_square_times(t0, a, 2); - /* 9 */ curve25519_mul(b, t0, z); /* b = 9 */ - /* 11 */ curve25519_mul(a, b, a); /* a = 11 */ - /* 22 */ curve25519_square(t0, a); - /* 2^5 - 2^0 = 31 */ curve25519_mul(b, t0, b); - /* 2^250 - 2^0 */ curve25519_pow_two5mtwo0_two250mtwo0(b); - /* 2^255 - 2^5 */ curve25519_square_times(b, b, 5); - /* 2^255 - 21 */ curve25519_mul(out, b, a); -} - diff --git a/curve25519-donna/curve25519-donna-portable-identify.h b/curve25519-donna/curve25519-donna-portable-identify.h deleted file mode 100644 index 26a264cf9..000000000 --- a/curve25519-donna/curve25519-donna-portable-identify.h +++ /dev/null @@ -1,103 +0,0 @@ -/* os */ -#if defined(_WIN32) || defined(_WIN64) || defined(__TOS_WIN__) || defined(__WINDOWS__) - #define OS_WINDOWS -#elif defined(sun) || defined(__sun) || defined(__SVR4) || defined(__svr4__) - #define OS_SOLARIS -#else - #include /* need this to define BSD */ - #define OS_NIX - #if defined(__linux__) - #define OS_LINUX - #elif defined(BSD) - #define OS_BSD - #if defined(MACOS_X) || (defined(__APPLE__) & defined(__MACH__)) - #define OS_OSX - #elif defined(macintosh) || defined(Macintosh) - #define OS_MAC - #elif defined(__OpenBSD__) - #define OS_OPENBSD - 
#endif - #endif -#endif - - -/* compiler */ -#if defined(_MSC_VER) - #define COMPILER_MSVC -#endif -#if defined(__ICC) - #define COMPILER_INTEL -#endif -#if defined(__GNUC__) - #if (__GNUC__ >= 3) - #define COMPILER_GCC ((__GNUC__ * 10000) + (__GNUC_MINOR__ * 100) + (__GNUC_PATCHLEVEL__)) - #else - #define COMPILER_GCC ((__GNUC__ * 10000) + (__GNUC_MINOR__ * 100) ) - #endif -#endif -#if defined(__PATHCC__) - #define COMPILER_PATHCC -#endif -#if defined(__clang__) - #define COMPILER_CLANG ((__clang_major__ * 10000) + (__clang_minor__ * 100) + (__clang_patchlevel__)) -#endif - - - -/* cpu */ -#if defined(__amd64__) || defined(__amd64) || defined(__x86_64__ ) || defined(_M_X64) - #define CPU_X86_64 -#elif defined(__i586__) || defined(__i686__) || (defined(_M_IX86) && (_M_IX86 >= 500)) - #define CPU_X86 500 -#elif defined(__i486__) || (defined(_M_IX86) && (_M_IX86 >= 400)) - #define CPU_X86 400 -#elif defined(__i386__) || (defined(_M_IX86) && (_M_IX86 >= 300)) || defined(__X86__) || defined(_X86_) || defined(__I86__) - #define CPU_X86 300 -#elif defined(__ia64__) || defined(_IA64) || defined(__IA64__) || defined(_M_IA64) || defined(__ia64) - #define CPU_IA64 -#endif - -#if defined(__sparc__) || defined(__sparc) || defined(__sparcv9) - #define CPU_SPARC - #if defined(__sparcv9) - #define CPU_SPARC64 - #endif -#endif - -#if defined(powerpc) || defined(__PPC__) || defined(__ppc__) || defined(_ARCH_PPC) || defined(__powerpc__) || defined(__powerpc) || defined(POWERPC) || defined(_M_PPC) - #define CPU_PPC - #if defined(_ARCH_PWR7) - #define CPU_POWER7 - #elif defined(__64BIT__) - #define CPU_PPC64 - #else - #define CPU_PPC32 - #endif -#endif - -#if defined(__hppa__) || defined(__hppa) - #define CPU_HPPA -#endif - -#if defined(__alpha__) || defined(__alpha) || defined(_M_ALPHA) - #define CPU_ALPHA -#endif - -/* 64 bit cpu */ -#if defined(CPU_X86_64) || defined(CPU_IA64) || defined(CPU_SPARC64) || defined(__64BIT__) || defined(__LP64__) || defined(_LP64) || (defined(_MIPS_SZLONG) && (_MIPS_SZLONG == 64)) - #define CPU_64BITS -#endif - -#if defined(COMPILER_MSVC) - typedef signed char int8_t; - typedef unsigned char uint8_t; - typedef signed short int16_t; - typedef unsigned short uint16_t; - typedef signed int int32_t; - typedef unsigned int uint32_t; - typedef signed __int64 int64_t; - typedef unsigned __int64 uint64_t; -#else - #include -#endif - diff --git a/curve25519-donna/curve25519-donna-portable.h b/curve25519-donna/curve25519-donna-portable.h deleted file mode 100644 index 0f428730e..000000000 --- a/curve25519-donna/curve25519-donna-portable.h +++ /dev/null @@ -1,94 +0,0 @@ -#include "curve25519-donna-portable-identify.h" - -#define mul32x32_64(a,b) (((uint64_t)(a))*(b)) - -/* platform */ -#if defined(COMPILER_MSVC) - #include - #if !defined(_DEBUG) - #undef mul32x32_64 - #define mul32x32_64(a,b) __emulu(a,b) - #endif - #undef inline - #define inline __forceinline - #define DONNA_INLINE __forceinline - #define DONNA_NOINLINE __declspec(noinline) - #undef ALIGN - #define ALIGN(x) __declspec(align(x)) - #define ROTL32(a,b) _rotl(a,b) - #define ROTR32(a,b) _rotr(a,b) -#else - #include - #define DONNA_INLINE inline __attribute__((always_inline)) - #define DONNA_NOINLINE __attribute__((noinline)) - #undef ALIGN - #define ALIGN(x) __attribute__((aligned(x))) - #define ROTL32(a,b) (((a) << (b)) | ((a) >> (32 - b))) - #define ROTR32(a,b) (((a) >> (b)) | ((a) << (32 - b))) -#endif - -/* uint128_t */ -#if defined(CPU_64BITS) && !defined(ED25519_FORCE_32BIT) - #if defined(COMPILER_CLANG) && 
(COMPILER_CLANG >= 30100) - #define HAVE_NATIVE_UINT128 - typedef unsigned __int128 uint128_t; - #elif defined(COMPILER_MSVC) - #define HAVE_UINT128 - typedef struct uint128_t { - uint64_t lo, hi; - } uint128_t; - #define mul64x64_128(out,a,b) out.lo = _umul128(a,b,&out.hi); - #define shr128_pair(out,hi,lo,shift) out = __shiftright128(lo, hi, shift); - #define shl128_pair(out,hi,lo,shift) out = __shiftleft128(lo, hi, shift); - #define shr128(out,in,shift) shr128_pair(out, in.hi, in.lo, shift) - #define shl128(out,in,shift) shl128_pair(out, in.hi, in.lo, shift) - #define add128(a,b) { uint64_t p = a.lo; a.lo += b.lo; a.hi += b.hi + (a.lo < p); } - #define add128_64(a,b) { uint64_t p = a.lo; a.lo += b; a.hi += (a.lo < p); } - #define lo128(a) (a.lo) - #define hi128(a) (a.hi) - #elif defined(COMPILER_GCC) && !defined(HAVE_NATIVE_UINT128) - #if defined(__SIZEOF_INT128__) - #define HAVE_NATIVE_UINT128 - typedef unsigned __int128 uint128_t; - #elif (COMPILER_GCC >= 40400) - #define HAVE_NATIVE_UINT128 - typedef unsigned uint128_t __attribute__((mode(TI))); - #elif defined(CPU_X86_64) - #define HAVE_UINT128 - typedef struct uint128_t { - uint64_t lo, hi; - } uint128_t; - #define mul64x64_128(out,a,b) __asm__ ("mulq %3" : "=a" (out.lo), "=d" (out.hi) : "a" (a), "rm" (b)); - #define shr128_pair(out,hi,lo,shift) __asm__ ("shrdq %2,%1,%0" : "+r" (lo) : "r" (hi), "J" (shift)); out = lo; - #define shl128_pair(out,hi,lo,shift) __asm__ ("shldq %2,%1,%0" : "+r" (hi) : "r" (lo), "J" (shift)); out = hi; - #define shr128(out,in,shift) shr128_pair(out,in.hi, in.lo, shift) - #define shl128(out,in,shift) shl128_pair(out,in.hi, in.lo, shift) - #define add128(a,b) __asm__ ("addq %4,%2; adcq %5,%3" : "=r" (a.hi), "=r" (a.lo) : "1" (a.lo), "0" (a.hi), "rm" (b.lo), "rm" (b.hi) : "cc"); - #define add128_64(a,b) __asm__ ("addq %4,%2; adcq $0,%3" : "=r" (a.hi), "=r" (a.lo) : "1" (a.lo), "0" (a.hi), "rm" (b) : "cc"); - #define lo128(a) (a.lo) - #define hi128(a) (a.hi) - #endif - #endif - - #if defined(HAVE_NATIVE_UINT128) - #define HAVE_UINT128 - #define mul64x64_128(out,a,b) out = (uint128_t)a * b; - #define shr128_pair(out,hi,lo,shift) out = (uint64_t)((((uint128_t)hi << 64) | lo) >> (shift)); - #define shl128_pair(out,hi,lo,shift) out = (uint64_t)(((((uint128_t)hi << 64) | lo) << (shift)) >> 64); - #define shr128(out,in,shift) out = (uint64_t)(in >> (shift)); - #define shl128(out,in,shift) out = (uint64_t)((in << shift) >> 64); - #define add128(a,b) a += b; - #define add128_64(a,b) a += (uint64_t)b; - #define lo128(a) ((uint64_t)a) - #define hi128(a) ((uint64_t)(a >> 64)) - #endif - - #if !defined(HAVE_UINT128) - #error Need a uint128_t implementation! 
- #endif -#endif - -#include -#include - - diff --git a/curve25519-donna/curve25519-donna-scalarmult-sse2.h b/curve25519-donna/curve25519-donna-scalarmult-sse2.h deleted file mode 100644 index e0ef14c11..000000000 --- a/curve25519-donna/curve25519-donna-scalarmult-sse2.h +++ /dev/null @@ -1,65 +0,0 @@ - -/* Calculates nQ where Q is the x-coordinate of a point on the curve - * - * mypublic: the packed little endian x coordinate of the resulting curve point - * n: a little endian, 32-byte number - * basepoint: a packed little endian point of the curve - */ -static void -curve25519_scalarmult_donna(curve25519_key mypublic, const curve25519_key n, const curve25519_key basepoint) { - bignum25519 ALIGN(16) nqx = {1}, nqpqz = {1}, nqz = {0}, nqpqx, zmone; - packed32bignum25519 qx, qz, pqz, pqx; - packed64bignum25519 nq, sq, sqscalar, prime, primex, primez, nqpq; - bignum25519mulprecomp preq; - size_t bit, lastbit, i; - - curve25519_expand(nqpqx, basepoint); - curve25519_mul_precompute(&preq, nqpqx); - - /* do bits 254..3 */ - for (i = 254, lastbit = 0; i >= 3; i--) { - bit = (n[i/8] >> (i & 7)) & 1; - curve25519_swap_conditional(nqx, nqpqx, bit ^ lastbit); - curve25519_swap_conditional(nqz, nqpqz, bit ^ lastbit); - lastbit = bit; - - curve25519_tangle32(qx, nqx, nqpqx); /* qx = [nqx,nqpqx] */ - curve25519_tangle32(qz, nqz, nqpqz); /* qz = [nqz,nqpqz] */ - - curve25519_add_packed32(pqx, qx, qz); /* pqx = [nqx+nqz,nqpqx+nqpqz] */ - curve25519_sub_packed32(pqz, qx, qz); /* pqz = [nqx-nqz,nqpqx-nqpqz] */ - - curve25519_make_nqpq(primex, primez, pqx, pqz); /* primex = [nqx+nqz,nqpqx+nqpqz], primez = [nqpqx-nqpqz,nqx-nqz] */ - curve25519_mul_packed64(prime, primex, primez); /* prime = [nqx+nqz,nqpqx+nqpqz] * [nqpqx-nqpqz,nqx-nqz] */ - curve25519_addsub_packed64(prime); /* prime = [prime.x+prime.z,prime.x-prime.z] */ - curve25519_square_packed64(nqpq, prime); /* nqpq = prime^2 */ - curve25519_untangle64(nqpqx, nqpqz, nqpq); - curve25519_mul_precomputed(nqpqz, nqpqz, &preq); /* nqpqz = nqpqz * q */ - - /* (((sq.x-sq.z)*121665)+sq.x) * (sq.x-sq.z) is equivalent to (sq.x*121666-sq.z*121665) * (sq.x-sq.z) */ - curve25519_make_nq(nq, pqx, pqz); /* nq = [nqx+nqz,nqx-nqz] */ - curve25519_square_packed64(sq, nq); /* sq = nq^2 */ - curve25519_121665_packed64(sqscalar, sq); /* sqscalar = sq * [121666,121665] */ - curve25519_final_nq(nq, sq, sqscalar); /* nq = [sq.x,sqscalar.x-sqscalar.z] * [sq.z,sq.x-sq.z] */ - curve25519_untangle64(nqx, nqz, nq); - }; - - /* it's possible to get rid of this swap with the swap in the above loop - at the bottom instead of the top, but compilers seem to optimize better this way */ - curve25519_swap_conditional(nqx, nqpqx, bit); - curve25519_swap_conditional(nqz, nqpqz, bit); - - /* do bits 2..0 */ - for (i = 0; i < 3; i++) { - curve25519_compute_nq(nq, nqx, nqz); - curve25519_square_packed64(sq, nq); /* sq = nq^2 */ - curve25519_121665_packed64(sqscalar, sq); /* sqscalar = sq * [121666,121665] */ - curve25519_final_nq(nq, sq, sqscalar); /* nq = [sq.x,sqscalar.x-sqscalar.z] * [sq.z,sq.x-sq.z] */ - curve25519_untangle64(nqx, nqz, nq); - } - - curve25519_recip(zmone, nqz); - curve25519_mul(nqz, nqx, zmone); - curve25519_contract(mypublic, nqz); -} - diff --git a/curve25519-donna/curve25519-donna-sse2.h b/curve25519-donna/curve25519-donna-sse2.h deleted file mode 100644 index ff2416209..000000000 --- a/curve25519-donna/curve25519-donna-sse2.h +++ /dev/null @@ -1,1009 +0,0 @@ -#include -typedef __m128i xmmi; - -typedef union packedelem8_t { - unsigned char u[16]; - xmmi v; -} 
packedelem8; - -typedef union packedelem32_t { - uint32_t u[4]; - xmmi v; -} packedelem32; - -typedef union packedelem64_t { - uint64_t u[2]; - xmmi v; -} packedelem64; - -/* 10 elements + an extra 2 to fit in 3 xmm registers */ -typedef uint32_t bignum25519[10+2]; -typedef packedelem32 packed32bignum25519[5]; -typedef packedelem64 packed64bignum25519[10]; - -static const uint32_t reduce_mask_26 = (1 << 26) - 1; -static const uint32_t reduce_mask_25 = (1 << 25) - 1; - -static const packedelem32 sse2_bot32bitmask = {{0xffffffff, 0x00000000, 0xffffffff, 0x00000000}}; -static const packedelem32 sse2_top32bitmask = {{0x00000000, 0xffffffff, 0x00000000, 0xffffffff}}; -static const packedelem32 sse2_top64bitmask = {{0x00000000, 0x00000000, 0xffffffff, 0xffffffff}}; -static const packedelem32 sse2_bot64bitmask = {{0xffffffff, 0xffffffff, 0x00000000, 0x00000000}}; - -/* reduction masks */ -static const packedelem64 packedmask26 = {{0x03ffffff, 0x03ffffff}}; -static const packedelem64 packedmask25 = {{0x01ffffff, 0x01ffffff}}; -static const packedelem32 packedmask2625 = {{0x3ffffff,0,0x1ffffff,0}}; -static const packedelem32 packedmask26262626 = {{0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff}}; -static const packedelem32 packedmask25252525 = {{0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff}}; - -/* multipliers */ -static const packedelem64 packednineteen = {{19, 19}}; -static const packedelem64 packednineteenone = {{19, 1}}; -static const packedelem64 packedthirtyeight = {{38, 38}}; -static const packedelem64 packed3819 = {{19*2,19}}; -static const packedelem64 packed9638 = {{19*4,19*2}}; - -/* 121666,121665 */ -static const packedelem64 packed121666121665 = {{121666, 121665}}; - -/* 2*(2^255 - 19) = 0 mod p */ -static const packedelem32 packed2p0 = {{0x7ffffda,0x3fffffe,0x7fffffe,0x3fffffe}}; -static const packedelem32 packed2p1 = {{0x7fffffe,0x3fffffe,0x7fffffe,0x3fffffe}}; -static const packedelem32 packed2p2 = {{0x7fffffe,0x3fffffe,0x0000000,0x0000000}}; - -static const packedelem32 packed32zeromodp0 = {{0x7ffffda,0x7ffffda,0x3fffffe,0x3fffffe}}; -static const packedelem32 packed32zeromodp1 = {{0x7fffffe,0x7fffffe,0x3fffffe,0x3fffffe}}; - -/* Copy a bignum to another: out = in */ -DONNA_INLINE static void -curve25519_copy(bignum25519 out, const bignum25519 in) { - xmmi x0,x1,x2; - x0 = _mm_load_si128((xmmi*)in + 0); - x1 = _mm_load_si128((xmmi*)in + 1); - x2 = _mm_load_si128((xmmi*)in + 2); - _mm_store_si128((xmmi*)out + 0, x0); - _mm_store_si128((xmmi*)out + 1, x1); - _mm_store_si128((xmmi*)out + 2, x2); -} - -/* Take a little-endian, 32-byte number and expand it into polynomial form */ -DONNA_INLINE static void -curve25519_expand(bignum25519 out, const unsigned char in[32]) { - uint32_t x0,x1,x2,x3,x4,x5,x6,x7; - - x0 = *(uint32_t *)(in + 0); - x1 = *(uint32_t *)(in + 4); - x2 = *(uint32_t *)(in + 8); - x3 = *(uint32_t *)(in + 12); - x4 = *(uint32_t *)(in + 16); - x5 = *(uint32_t *)(in + 20); - x6 = *(uint32_t *)(in + 24); - x7 = *(uint32_t *)(in + 28); - - out[0] = ( x0 ) & reduce_mask_26; - out[1] = ((((uint64_t)x1 << 32) | x0) >> 26) & reduce_mask_25; - out[2] = ((((uint64_t)x2 << 32) | x1) >> 19) & reduce_mask_26; - out[3] = ((((uint64_t)x3 << 32) | x2) >> 13) & reduce_mask_25; - out[4] = (( x3) >> 6) & reduce_mask_26; - out[5] = ( x4 ) & reduce_mask_25; - out[6] = ((((uint64_t)x5 << 32) | x4) >> 25) & reduce_mask_26; - out[7] = ((((uint64_t)x6 << 32) | x5) >> 19) & reduce_mask_25; - out[8] = ((((uint64_t)x7 << 32) | x6) >> 12) & reduce_mask_26; - out[9] = (( x7) >> 6) & reduce_mask_25; 
/* ignore the top bit */ - - out[10] = 0; - out[11] = 0; -} - -/* Take a fully reduced polynomial form number and contract it into a - * little-endian, 32-byte array - */ -DONNA_INLINE static void -curve25519_contract(unsigned char out[32], const bignum25519 in) { - bignum25519 ALIGN(16) f; - - curve25519_copy(f, in); - - #define carry_pass() \ - f[1] += f[0] >> 26; f[0] &= reduce_mask_26; \ - f[2] += f[1] >> 25; f[1] &= reduce_mask_25; \ - f[3] += f[2] >> 26; f[2] &= reduce_mask_26; \ - f[4] += f[3] >> 25; f[3] &= reduce_mask_25; \ - f[5] += f[4] >> 26; f[4] &= reduce_mask_26; \ - f[6] += f[5] >> 25; f[5] &= reduce_mask_25; \ - f[7] += f[6] >> 26; f[6] &= reduce_mask_26; \ - f[8] += f[7] >> 25; f[7] &= reduce_mask_25; \ - f[9] += f[8] >> 26; f[8] &= reduce_mask_26; - - #define carry_pass_full() \ - carry_pass() \ - f[0] += 19 * (f[9] >> 25); f[9] &= reduce_mask_25; - - #define carry_pass_final() \ - carry_pass() \ - f[9] &= reduce_mask_25; - - carry_pass_full() - carry_pass_full() - - /* now t is between 0 and 2^255-1, properly carried. */ - /* case 1: between 0 and 2^255-20. case 2: between 2^255-19 and 2^255-1. */ - f[0] += 19; - carry_pass_full() - - /* now between 19 and 2^255-1 in both cases, and offset by 19. */ - f[0] += (1 << 26) - 19; - f[1] += (1 << 25) - 1; - f[2] += (1 << 26) - 1; - f[3] += (1 << 25) - 1; - f[4] += (1 << 26) - 1; - f[5] += (1 << 25) - 1; - f[6] += (1 << 26) - 1; - f[7] += (1 << 25) - 1; - f[8] += (1 << 26) - 1; - f[9] += (1 << 25) - 1; - - /* now between 2^255 and 2^256-20, and offset by 2^255. */ - carry_pass_final() - - #undef carry_pass - #undef carry_full - #undef carry_final - - *(uint32_t *)(out + 0) = ((f[0] ) | (f[1] << 26)); - *(uint32_t *)(out + 4) = ((f[1] >> 6) | (f[2] << 19)); - *(uint32_t *)(out + 8) = ((f[2] >> 13) | (f[3] << 13)); - *(uint32_t *)(out + 12) = ((f[3] >> 19) | (f[4] << 6)); - *(uint32_t *)(out + 16) = ((f[5] ) | (f[6] << 25)); - *(uint32_t *)(out + 20) = ((f[6] >> 7) | (f[7] << 19)); - *(uint32_t *)(out + 24) = ((f[7] >> 13) | (f[8] << 12)); - *(uint32_t *)(out + 28) = ((f[8] >> 20) | (f[9] << 6)); -} - -/* - * Maybe swap the contents of two felem arrays (@a and @b), each 5 elements - * long. Perform the swap iff @swap is non-zero. 
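 * The mask computed from iswap is either all ones or all zeros, so the three
 * 128-bit XOR swaps below always execute; there is no secret-dependent branch.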
- */ -DONNA_INLINE static void -curve25519_swap_conditional(bignum25519 a, bignum25519 b, uint32_t iswap) { - const uint32_t swap = (uint32_t)(-(int32_t)iswap); - xmmi a0,a1,a2,b0,b1,b2,x0,x1,x2; - xmmi mask = _mm_cvtsi32_si128(swap); - mask = _mm_shuffle_epi32(mask, 0); - a0 = _mm_load_si128((xmmi *)a + 0); - a1 = _mm_load_si128((xmmi *)a + 1); - a2 = _mm_load_si128((xmmi *)a + 2); - b0 = _mm_load_si128((xmmi *)b + 0); - b1 = _mm_load_si128((xmmi *)b + 1); - b2 = _mm_load_si128((xmmi *)b + 2); - b0 = _mm_xor_si128(a0, b0); - b1 = _mm_xor_si128(a1, b1); - b2 = _mm_xor_si128(a2, b2); - x0 = _mm_and_si128(b0, mask); - x1 = _mm_and_si128(b1, mask); - x2 = _mm_and_si128(b2, mask); - x0 = _mm_xor_si128(x0, a0); - x1 = _mm_xor_si128(x1, a1); - x2 = _mm_xor_si128(x2, a2); - a0 = _mm_xor_si128(x0, b0); - a1 = _mm_xor_si128(x1, b1); - a2 = _mm_xor_si128(x2, b2); - _mm_store_si128((xmmi *)a + 0, x0); - _mm_store_si128((xmmi *)a + 1, x1); - _mm_store_si128((xmmi *)a + 2, x2); - _mm_store_si128((xmmi *)b + 0, a0); - _mm_store_si128((xmmi *)b + 1, a1); - _mm_store_si128((xmmi *)b + 2, a2); -} - -/* interleave two bignums */ -DONNA_INLINE static void -curve25519_tangle32(packedelem32 *out, const bignum25519 x, const bignum25519 z) { - xmmi x0,x1,x2,z0,z1,z2; - - x0 = _mm_load_si128((xmmi *)(x + 0)); - x1 = _mm_load_si128((xmmi *)(x + 4)); - x2 = _mm_load_si128((xmmi *)(x + 8)); - z0 = _mm_load_si128((xmmi *)(z + 0)); - z1 = _mm_load_si128((xmmi *)(z + 4)); - z2 = _mm_load_si128((xmmi *)(z + 8)); - - out[0].v = _mm_unpacklo_epi32(x0, z0); - out[1].v = _mm_unpackhi_epi32(x0, z0); - out[2].v = _mm_unpacklo_epi32(x1, z1); - out[3].v = _mm_unpackhi_epi32(x1, z1); - out[4].v = _mm_unpacklo_epi32(x2, z2); -} - -/* split a packed bignum in to it's two parts */ -DONNA_INLINE static void -curve25519_untangle64(bignum25519 x, bignum25519 z, const packedelem64 *in) { - _mm_store_si128((xmmi *)(x + 0), _mm_unpacklo_epi64(_mm_unpacklo_epi32(in[0].v, in[1].v), _mm_unpacklo_epi32(in[2].v, in[3].v))); - _mm_store_si128((xmmi *)(x + 4), _mm_unpacklo_epi64(_mm_unpacklo_epi32(in[4].v, in[5].v), _mm_unpacklo_epi32(in[6].v, in[7].v))); - _mm_store_si128((xmmi *)(x + 8), _mm_unpacklo_epi32(in[8].v, in[9].v) ); - _mm_store_si128((xmmi *)(z + 0), _mm_unpacklo_epi64(_mm_unpackhi_epi32(in[0].v, in[1].v), _mm_unpackhi_epi32(in[2].v, in[3].v))); - _mm_store_si128((xmmi *)(z + 4), _mm_unpacklo_epi64(_mm_unpackhi_epi32(in[4].v, in[5].v), _mm_unpackhi_epi32(in[6].v, in[7].v))); - _mm_store_si128((xmmi *)(z + 8), _mm_unpackhi_epi32(in[8].v, in[9].v) ); -} - -/* add two packed bignums */ -DONNA_INLINE static void -curve25519_add_packed32(packedelem32 *out, const packedelem32 *r, const packedelem32 *s) { - out[0].v = _mm_add_epi32(r[0].v, s[0].v); - out[1].v = _mm_add_epi32(r[1].v, s[1].v); - out[2].v = _mm_add_epi32(r[2].v, s[2].v); - out[3].v = _mm_add_epi32(r[3].v, s[3].v); - out[4].v = _mm_add_epi32(r[4].v, s[4].v); -} - -/* subtract two packed bignums */ -DONNA_INLINE static void -curve25519_sub_packed32(packedelem32 *out, const packedelem32 *r, const packedelem32 *s) { - xmmi r0,r1,r2,r3,r4; - xmmi s0,s1,s2,s3; - xmmi c1,c2; - - r0 = _mm_add_epi32(r[0].v, packed32zeromodp0.v); - r1 = _mm_add_epi32(r[1].v, packed32zeromodp1.v); - r2 = _mm_add_epi32(r[2].v, packed32zeromodp1.v); - r3 = _mm_add_epi32(r[3].v, packed32zeromodp1.v); - r4 = _mm_add_epi32(r[4].v, packed32zeromodp1.v); - r0 = _mm_sub_epi32(r0, s[0].v); /* 00 11 */ - r1 = _mm_sub_epi32(r1, s[1].v); /* 22 33 */ - r2 = _mm_sub_epi32(r2, s[2].v); /* 44 55 */ - r3 = 
_mm_sub_epi32(r3, s[3].v); /* 66 77 */ - r4 = _mm_sub_epi32(r4, s[4].v); /* 88 99 */ - - s0 = _mm_unpacklo_epi64(r0, r2); /* 00 44 */ - s1 = _mm_unpackhi_epi64(r0, r2); /* 11 55 */ - s2 = _mm_unpacklo_epi64(r1, r3); /* 22 66 */ - s3 = _mm_unpackhi_epi64(r1, r3); /* 33 77 */ - - c1 = _mm_srli_epi32(s0, 26); c2 = _mm_srli_epi32(s2, 26); s0 = _mm_and_si128(s0, packedmask26262626.v); s2 = _mm_and_si128(s2, packedmask26262626.v); s1 = _mm_add_epi32(s1, c1); s3 = _mm_add_epi32(s3, c2); - c1 = _mm_srli_epi32(s1, 25); c2 = _mm_srli_epi32(s3, 25); s1 = _mm_and_si128(s1, packedmask25252525.v); s3 = _mm_and_si128(s3, packedmask25252525.v); s2 = _mm_add_epi32(s2, c1); r4 = _mm_add_epi32(r4, _mm_srli_si128(c2, 8)); s0 = _mm_add_epi32(s0, _mm_slli_si128(c2, 8)); - - out[0].v = _mm_unpacklo_epi64(s0, s1); /* 00 11 */ - out[1].v = _mm_unpacklo_epi64(s2, s3); /* 22 33 */ - out[2].v = _mm_unpackhi_epi64(s0, s1); /* 44 55 */ - out[3].v = _mm_unpackhi_epi64(s2, s3); /* 66 77 */ - out[4].v = r4; /* 88 99 */ -} - -/* multiply two packed bignums */ -DONNA_INLINE static void -curve25519_mul_packed64(packedelem64 *out, const packedelem64 *r, const packedelem64 *s) { - xmmi r1,r2,r3,r4,r5,r6,r7,r8,r9; - xmmi r1_2,r3_2,r5_2,r7_2,r9_2; - xmmi c1,c2; - - out[0].v = _mm_mul_epu32(r[0].v, s[0].v); - out[1].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[1].v), _mm_mul_epu32(r[1].v, s[0].v)); - r1_2 = _mm_slli_epi32(r[1].v, 1); - out[2].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r1_2 , s[1].v), _mm_mul_epu32(r[2].v, s[0].v))); - out[3].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[3].v), _mm_add_epi64(_mm_mul_epu32(r[1].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[1].v), _mm_mul_epu32(r[3].v, s[0].v)))); - r3_2 = _mm_slli_epi32(r[3].v, 1); - out[4].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[4].v), _mm_add_epi64(_mm_mul_epu32(r1_2 , s[3].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r3_2 , s[1].v), _mm_mul_epu32(r[4].v, s[0].v))))); - out[5].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[5].v), _mm_add_epi64(_mm_mul_epu32(r[1].v, s[4].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[3].v), _mm_add_epi64(_mm_mul_epu32(r[3].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r[4].v, s[1].v), _mm_mul_epu32(r[5].v, s[0].v)))))); - r5_2 = _mm_slli_epi32(r[5].v, 1); - out[6].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[6].v), _mm_add_epi64(_mm_mul_epu32(r1_2 , s[5].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[4].v), _mm_add_epi64(_mm_mul_epu32(r3_2 , s[3].v), _mm_add_epi64(_mm_mul_epu32(r[4].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r5_2 , s[1].v), _mm_mul_epu32(r[6].v, s[0].v))))))); - out[7].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[7].v), _mm_add_epi64(_mm_mul_epu32(r[1].v, s[6].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[5].v), _mm_add_epi64(_mm_mul_epu32(r[3].v, s[4].v), _mm_add_epi64(_mm_mul_epu32(r[4].v, s[3].v), _mm_add_epi64(_mm_mul_epu32(r[5].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r[6].v, s[1].v), _mm_mul_epu32(r[7].v , s[0].v)))))))); - r7_2 = _mm_slli_epi32(r[7].v, 1); - out[8].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[8].v), _mm_add_epi64(_mm_mul_epu32(r1_2 , s[7].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[6].v), _mm_add_epi64(_mm_mul_epu32(r3_2 , s[5].v), _mm_add_epi64(_mm_mul_epu32(r[4].v, s[4].v), _mm_add_epi64(_mm_mul_epu32(r5_2 , s[3].v), _mm_add_epi64(_mm_mul_epu32(r[6].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r7_2 , s[1].v), _mm_mul_epu32(r[8].v, s[0].v))))))))); - out[9].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[9].v), _mm_add_epi64(_mm_mul_epu32(r[1].v, s[8].v), 
_mm_add_epi64(_mm_mul_epu32(r[2].v, s[7].v), _mm_add_epi64(_mm_mul_epu32(r[3].v, s[6].v), _mm_add_epi64(_mm_mul_epu32(r[4].v, s[5].v), _mm_add_epi64(_mm_mul_epu32(r[5].v, s[4].v), _mm_add_epi64(_mm_mul_epu32(r[6].v, s[3].v), _mm_add_epi64(_mm_mul_epu32(r[7].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r[8].v, s[1].v), _mm_mul_epu32(r[9].v, s[0].v)))))))))); - - r1 = _mm_mul_epu32(r[1].v, packednineteen.v); - r2 = _mm_mul_epu32(r[2].v, packednineteen.v); - r1_2 = _mm_slli_epi32(r1, 1); - r3 = _mm_mul_epu32(r[3].v, packednineteen.v); - r4 = _mm_mul_epu32(r[4].v, packednineteen.v); - r3_2 = _mm_slli_epi32(r3, 1); - r5 = _mm_mul_epu32(r[5].v, packednineteen.v); - r6 = _mm_mul_epu32(r[6].v, packednineteen.v); - r5_2 = _mm_slli_epi32(r5, 1); - r7 = _mm_mul_epu32(r[7].v, packednineteen.v); - r8 = _mm_mul_epu32(r[8].v, packednineteen.v); - r7_2 = _mm_slli_epi32(r7, 1); - r9 = _mm_mul_epu32(r[9].v, packednineteen.v); - r9_2 = _mm_slli_epi32(r9, 1); - - out[0].v = _mm_add_epi64(out[0].v, _mm_add_epi64(_mm_mul_epu32(r9_2, s[1].v), _mm_add_epi64(_mm_mul_epu32(r8, s[2].v), _mm_add_epi64(_mm_mul_epu32(r7_2, s[3].v), _mm_add_epi64(_mm_mul_epu32(r6, s[4].v), _mm_add_epi64(_mm_mul_epu32(r5_2, s[5].v), _mm_add_epi64(_mm_mul_epu32(r4, s[6].v), _mm_add_epi64(_mm_mul_epu32(r3_2, s[7].v), _mm_add_epi64(_mm_mul_epu32(r2, s[8].v), _mm_mul_epu32(r1_2, s[9].v)))))))))); - out[1].v = _mm_add_epi64(out[1].v, _mm_add_epi64(_mm_mul_epu32(r9 , s[2].v), _mm_add_epi64(_mm_mul_epu32(r8, s[3].v), _mm_add_epi64(_mm_mul_epu32(r7 , s[4].v), _mm_add_epi64(_mm_mul_epu32(r6, s[5].v), _mm_add_epi64(_mm_mul_epu32(r5 , s[6].v), _mm_add_epi64(_mm_mul_epu32(r4, s[7].v), _mm_add_epi64(_mm_mul_epu32(r3 , s[8].v), _mm_mul_epu32(r2, s[9].v))))))))); - out[2].v = _mm_add_epi64(out[2].v, _mm_add_epi64(_mm_mul_epu32(r9_2, s[3].v), _mm_add_epi64(_mm_mul_epu32(r8, s[4].v), _mm_add_epi64(_mm_mul_epu32(r7_2, s[5].v), _mm_add_epi64(_mm_mul_epu32(r6, s[6].v), _mm_add_epi64(_mm_mul_epu32(r5_2, s[7].v), _mm_add_epi64(_mm_mul_epu32(r4, s[8].v), _mm_mul_epu32(r3_2, s[9].v)))))))); - out[3].v = _mm_add_epi64(out[3].v, _mm_add_epi64(_mm_mul_epu32(r9 , s[4].v), _mm_add_epi64(_mm_mul_epu32(r8, s[5].v), _mm_add_epi64(_mm_mul_epu32(r7 , s[6].v), _mm_add_epi64(_mm_mul_epu32(r6, s[7].v), _mm_add_epi64(_mm_mul_epu32(r5 , s[8].v), _mm_mul_epu32(r4, s[9].v))))))); - out[4].v = _mm_add_epi64(out[4].v, _mm_add_epi64(_mm_mul_epu32(r9_2, s[5].v), _mm_add_epi64(_mm_mul_epu32(r8, s[6].v), _mm_add_epi64(_mm_mul_epu32(r7_2, s[7].v), _mm_add_epi64(_mm_mul_epu32(r6, s[8].v), _mm_mul_epu32(r5_2, s[9].v)))))); - out[5].v = _mm_add_epi64(out[5].v, _mm_add_epi64(_mm_mul_epu32(r9 , s[6].v), _mm_add_epi64(_mm_mul_epu32(r8, s[7].v), _mm_add_epi64(_mm_mul_epu32(r7 , s[8].v), _mm_mul_epu32(r6, s[9].v))))); - out[6].v = _mm_add_epi64(out[6].v, _mm_add_epi64(_mm_mul_epu32(r9_2, s[7].v), _mm_add_epi64(_mm_mul_epu32(r8, s[8].v), _mm_mul_epu32(r7_2, s[9].v)))); - out[7].v = _mm_add_epi64(out[7].v, _mm_add_epi64(_mm_mul_epu32(r9 , s[8].v), _mm_mul_epu32(r8, s[9].v))); - out[8].v = _mm_add_epi64(out[8].v, _mm_mul_epu32(r9_2, s[9].v)); - - c1 = _mm_srli_epi64(out[0].v, 26); c2 = _mm_srli_epi64(out[4].v, 26); out[0].v = _mm_and_si128(out[0].v, packedmask26.v); out[4].v = _mm_and_si128(out[4].v, packedmask26.v); out[1].v = _mm_add_epi64(out[1].v, c1); out[5].v = _mm_add_epi64(out[5].v, c2); - c1 = _mm_srli_epi64(out[1].v, 25); c2 = _mm_srli_epi64(out[5].v, 25); out[1].v = _mm_and_si128(out[1].v, packedmask25.v); out[5].v = _mm_and_si128(out[5].v, packedmask25.v); out[2].v = 
_mm_add_epi64(out[2].v, c1); out[6].v = _mm_add_epi64(out[6].v, c2); - c1 = _mm_srli_epi64(out[2].v, 26); c2 = _mm_srli_epi64(out[6].v, 26); out[2].v = _mm_and_si128(out[2].v, packedmask26.v); out[6].v = _mm_and_si128(out[6].v, packedmask26.v); out[3].v = _mm_add_epi64(out[3].v, c1); out[7].v = _mm_add_epi64(out[7].v, c2); - c1 = _mm_srli_epi64(out[3].v, 25); c2 = _mm_srli_epi64(out[7].v, 25); out[3].v = _mm_and_si128(out[3].v, packedmask25.v); out[7].v = _mm_and_si128(out[7].v, packedmask25.v); out[4].v = _mm_add_epi64(out[4].v, c1); out[8].v = _mm_add_epi64(out[8].v, c2); - c2 = _mm_srli_epi64(out[8].v, 26); out[8].v = _mm_and_si128(out[8].v, packedmask26.v); out[9].v = _mm_add_epi64(out[9].v, c2); - c2 = _mm_srli_epi64(out[9].v, 25); out[9].v = _mm_and_si128(out[9].v, packedmask25.v); out[0].v = _mm_add_epi64(out[0].v, _mm_mul_epu32(c2, packednineteen.v)); - c1 = _mm_srli_epi64(out[0].v, 26); c2 = _mm_srli_epi64(out[4].v, 26); out[0].v = _mm_and_si128(out[0].v, packedmask26.v); out[4].v = _mm_and_si128(out[4].v, packedmask26.v); out[1].v = _mm_add_epi64(out[1].v, c1); out[5].v = _mm_add_epi64(out[5].v, c2); -} - -/* multiply a bignum */ -static void -curve25519_mul(bignum25519 out, const bignum25519 r, const bignum25519 s) { - xmmi m01,m23,m45,m67,m89; - xmmi m0123,m4567; - xmmi s0123,s4567; - xmmi s01,s23,s45,s67,s89; - xmmi s12,s34,s56,s78,s9; - xmmi r0,r2,r4,r6,r8; - xmmi r1,r3,r5,r7,r9; - xmmi r119,r219,r319,r419,r519,r619,r719,r819,r919; - xmmi c1,c2,c3; - - s0123 = _mm_load_si128((xmmi*)s + 0); - s01 = _mm_shuffle_epi32(s0123,_MM_SHUFFLE(3,1,2,0)); - s12 = _mm_shuffle_epi32(s0123, _MM_SHUFFLE(2,2,1,1)); - s23 = _mm_shuffle_epi32(s0123,_MM_SHUFFLE(3,3,2,2)); - s4567 = _mm_load_si128((xmmi*)s + 1); - s34 = _mm_unpacklo_epi64(_mm_srli_si128(s0123,12),s4567); - s45 = _mm_shuffle_epi32(s4567,_MM_SHUFFLE(3,1,2,0)); - s56 = _mm_shuffle_epi32(s4567, _MM_SHUFFLE(2,2,1,1)); - s67 = _mm_shuffle_epi32(s4567,_MM_SHUFFLE(3,3,2,2)); - s89 = _mm_load_si128((xmmi*)s + 2); - s78 = _mm_unpacklo_epi64(_mm_srli_si128(s4567,12),s89); - s89 = _mm_shuffle_epi32(s89,_MM_SHUFFLE(3,1,2,0)); - s9 = _mm_shuffle_epi32(s89, _MM_SHUFFLE(3,3,2,2)); - - r0 = _mm_load_si128((xmmi*)r + 0); - r1 = _mm_shuffle_epi32(r0, _MM_SHUFFLE(1,1,1,1)); - r1 = _mm_add_epi64(r1, _mm_and_si128(r1, sse2_top64bitmask.v)); - r2 = _mm_shuffle_epi32(r0, _MM_SHUFFLE(2,2,2,2)); - r3 = _mm_shuffle_epi32(r0, _MM_SHUFFLE(3,3,3,3)); - r3 = _mm_add_epi64(r3, _mm_and_si128(r3, sse2_top64bitmask.v)); - r0 = _mm_shuffle_epi32(r0, _MM_SHUFFLE(0,0,0,0)); - r4 = _mm_load_si128((xmmi*)r + 1); - r5 = _mm_shuffle_epi32(r4, _MM_SHUFFLE(1,1,1,1)); - r5 = _mm_add_epi64(r5, _mm_and_si128(r5, sse2_top64bitmask.v)); - r6 = _mm_shuffle_epi32(r4, _MM_SHUFFLE(2,2,2,2)); - r7 = _mm_shuffle_epi32(r4, _MM_SHUFFLE(3,3,3,3)); - r7 = _mm_add_epi64(r7, _mm_and_si128(r7, sse2_top64bitmask.v)); - r4 = _mm_shuffle_epi32(r4, _MM_SHUFFLE(0,0,0,0)); - r8 = _mm_load_si128((xmmi*)r + 2); - r9 = _mm_shuffle_epi32(r8, _MM_SHUFFLE(3,1,3,1)); - r9 = _mm_add_epi64(r9, _mm_and_si128(r9, sse2_top64bitmask.v)); - r8 = _mm_shuffle_epi32(r8, _MM_SHUFFLE(3,0,3,0)); - - m01 = _mm_mul_epu32(r1,s01); - m23 = _mm_mul_epu32(r1,s23); - m45 = _mm_mul_epu32(r1,s45); - m67 = _mm_mul_epu32(r1,s67); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r3,s01)); - m45 = _mm_add_epi64(m45,_mm_mul_epu32(r3,s23)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r3,s45)); - m89 = _mm_mul_epu32(r1,s89); - m45 = _mm_add_epi64(m45,_mm_mul_epu32(r5,s01)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r5,s23)); - m89 = 
_mm_add_epi64(m89,_mm_mul_epu32(r3,s67)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r7,s01)); - m89 = _mm_add_epi64(m89,_mm_mul_epu32(r5,s45)); - m89 = _mm_add_epi64(m89,_mm_mul_epu32(r7,s23)); - m89 = _mm_add_epi64(m89,_mm_mul_epu32(r9,s01)); - - /* shift up */ - m89 = _mm_unpackhi_epi64(m67,_mm_slli_si128(m89,8)); - m67 = _mm_unpackhi_epi64(m45,_mm_slli_si128(m67,8)); - m45 = _mm_unpackhi_epi64(m23,_mm_slli_si128(m45,8)); - m23 = _mm_unpackhi_epi64(m01,_mm_slli_si128(m23,8)); - m01 = _mm_unpackhi_epi64(_mm_setzero_si128(),_mm_slli_si128(m01,8)); - - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r0,s01)); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r0,s23)); - m45 = _mm_add_epi64(m45,_mm_mul_epu32(r0,s45)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r0,s67)); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r2,s01)); - m45 = _mm_add_epi64(m45,_mm_mul_epu32(r2,s23)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r4,s23)); - m89 = _mm_add_epi64(m89,_mm_mul_epu32(r0,s89)); - m45 = _mm_add_epi64(m45,_mm_mul_epu32(r4,s01)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r2,s45)); - m89 = _mm_add_epi64(m89,_mm_mul_epu32(r2,s67)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r6,s01)); - m89 = _mm_add_epi64(m89,_mm_mul_epu32(r4,s45)); - m89 = _mm_add_epi64(m89,_mm_mul_epu32(r6,s23)); - m89 = _mm_add_epi64(m89,_mm_mul_epu32(r8,s01)); - - r219 = _mm_mul_epu32(r2, packednineteen.v); - r419 = _mm_mul_epu32(r4, packednineteen.v); - r619 = _mm_mul_epu32(r6, packednineteen.v); - r819 = _mm_mul_epu32(r8, packednineteen.v); - r119 = _mm_shuffle_epi32(r1,_MM_SHUFFLE(0,0,2,2)); r119 = _mm_mul_epu32(r119, packednineteen.v); - r319 = _mm_shuffle_epi32(r3,_MM_SHUFFLE(0,0,2,2)); r319 = _mm_mul_epu32(r319, packednineteen.v); - r519 = _mm_shuffle_epi32(r5,_MM_SHUFFLE(0,0,2,2)); r519 = _mm_mul_epu32(r519, packednineteen.v); - r719 = _mm_shuffle_epi32(r7,_MM_SHUFFLE(0,0,2,2)); r719 = _mm_mul_epu32(r719, packednineteen.v); - r919 = _mm_shuffle_epi32(r9,_MM_SHUFFLE(0,0,2,2)); r919 = _mm_mul_epu32(r919, packednineteen.v); - - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r919,s12)); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r919,s34)); - m45 = _mm_add_epi64(m45,_mm_mul_epu32(r919,s56)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r919,s78)); - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r719,s34)); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r719,s56)); - m45 = _mm_add_epi64(m45,_mm_mul_epu32(r719,s78)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r719,s9)); - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r519,s56)); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r519,s78)); - m45 = _mm_add_epi64(m45,_mm_mul_epu32(r519,s9)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r819,s89)); - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r319,s78)); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r319,s9)); - m45 = _mm_add_epi64(m45,_mm_mul_epu32(r619,s89)); - m89 = _mm_add_epi64(m89,_mm_mul_epu32(r919,s9)); - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r819,s23)); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r819,s45)); - m45 = _mm_add_epi64(m45,_mm_mul_epu32(r819,s67)); - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r619,s45)); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r619,s67)); - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r419,s67)); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r419,s89)); - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r219,s89)); - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r119,s9)); - - r0 = _mm_unpacklo_epi64(m01, m45); - r1 = _mm_unpackhi_epi64(m01, m45); - r2 = _mm_unpacklo_epi64(m23, m67); - r3 = _mm_unpackhi_epi64(m23, m67); - r4 = _mm_unpacklo_epi64(m89, m89); - r5 = _mm_unpackhi_epi64(m89, m89); - - c1 = _mm_srli_epi64(r0, 26); c2 = 
_mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2); - c1 = _mm_srli_epi64(r1, 25); c2 = _mm_srli_epi64(r3, 25); r1 = _mm_and_si128(r1, packedmask25.v); r3 = _mm_and_si128(r3, packedmask25.v); r2 = _mm_add_epi64(r2, c1); r4 = _mm_add_epi64(r4, c2); c3 = _mm_slli_si128(c2, 8); - c1 = _mm_srli_epi64(r4, 26); r4 = _mm_and_si128(r4, packedmask26.v); r5 = _mm_add_epi64(r5, c1); - c1 = _mm_srli_epi64(r5, 25); r5 = _mm_and_si128(r5, packedmask25.v); r0 = _mm_add_epi64(r0, _mm_unpackhi_epi64(_mm_mul_epu32(c1, packednineteen.v), c3)); - c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2); - - m0123 = _mm_unpacklo_epi32(r0, r1); - m4567 = _mm_unpackhi_epi32(r0, r1); - m0123 = _mm_unpacklo_epi64(m0123, _mm_unpacklo_epi32(r2, r3)); - m4567 = _mm_unpacklo_epi64(m4567, _mm_unpackhi_epi32(r2, r3)); - m89 = _mm_unpackhi_epi32(r4, r5); - - _mm_store_si128((xmmi*)out + 0, m0123); - _mm_store_si128((xmmi*)out + 1, m4567); - _mm_store_si128((xmmi*)out + 2, m89); -} - -typedef struct bignum25519mulprecomp_t { - xmmi r0,r2,r4,r6,r8; - xmmi r1,r3,r5,r7,r9; - xmmi r119,r219,r319,r419,r519,r619,r719,r819,r919; -} bignum25519mulprecomp; - -/* precompute a constant to multiply by */ -DONNA_INLINE static void -curve25519_mul_precompute(bignum25519mulprecomp *pre, const bignum25519 r) { - pre->r0 = _mm_load_si128((xmmi*)r + 0); - pre->r1 = _mm_shuffle_epi32(pre->r0, _MM_SHUFFLE(1,1,1,1)); - pre->r1 = _mm_add_epi64(pre->r1, _mm_and_si128(pre->r1, sse2_top64bitmask.v)); - pre->r2 = _mm_shuffle_epi32(pre->r0, _MM_SHUFFLE(2,2,2,2)); - pre->r3 = _mm_shuffle_epi32(pre->r0, _MM_SHUFFLE(3,3,3,3)); - pre->r3 = _mm_add_epi64(pre->r3, _mm_and_si128(pre->r3, sse2_top64bitmask.v)); - pre->r0 = _mm_shuffle_epi32(pre->r0, _MM_SHUFFLE(0,0,0,0)); - pre->r4 = _mm_load_si128((xmmi*)r + 1); - pre->r5 = _mm_shuffle_epi32(pre->r4, _MM_SHUFFLE(1,1,1,1)); - pre->r5 = _mm_add_epi64(pre->r5, _mm_and_si128(pre->r5, sse2_top64bitmask.v)); - pre->r6 = _mm_shuffle_epi32(pre->r4, _MM_SHUFFLE(2,2,2,2)); - pre->r7 = _mm_shuffle_epi32(pre->r4, _MM_SHUFFLE(3,3,3,3)); - pre->r7 = _mm_add_epi64(pre->r7, _mm_and_si128(pre->r7, sse2_top64bitmask.v)); - pre->r4 = _mm_shuffle_epi32(pre->r4, _MM_SHUFFLE(0,0,0,0)); - pre->r8 = _mm_load_si128((xmmi*)r + 2); - pre->r9 = _mm_shuffle_epi32(pre->r8, _MM_SHUFFLE(3,1,3,1)); - pre->r9 = _mm_add_epi64(pre->r9, _mm_and_si128(pre->r9, sse2_top64bitmask.v)); - pre->r8 = _mm_shuffle_epi32(pre->r8, _MM_SHUFFLE(3,0,3,0)); - - pre->r219 = _mm_mul_epu32(pre->r2, packednineteen.v); - pre->r419 = _mm_mul_epu32(pre->r4, packednineteen.v); - pre->r619 = _mm_mul_epu32(pre->r6, packednineteen.v); - pre->r819 = _mm_mul_epu32(pre->r8, packednineteen.v); - pre->r119 = _mm_shuffle_epi32(pre->r1,_MM_SHUFFLE(0,0,2,2)); pre->r119 = _mm_mul_epu32(pre->r119, packednineteen.v); - pre->r319 = _mm_shuffle_epi32(pre->r3,_MM_SHUFFLE(0,0,2,2)); pre->r319 = _mm_mul_epu32(pre->r319, packednineteen.v); - pre->r519 = _mm_shuffle_epi32(pre->r5,_MM_SHUFFLE(0,0,2,2)); pre->r519 = _mm_mul_epu32(pre->r519, packednineteen.v); - pre->r719 = _mm_shuffle_epi32(pre->r7,_MM_SHUFFLE(0,0,2,2)); pre->r719 = _mm_mul_epu32(pre->r719, packednineteen.v); - pre->r919 = _mm_shuffle_epi32(pre->r9,_MM_SHUFFLE(0,0,2,2)); pre->r919 = _mm_mul_epu32(pre->r919, packednineteen.v); -} - - -/* multiply a bignum by a pre-computed constant 
*/ -DONNA_INLINE static void -curve25519_mul_precomputed(bignum25519 out, const bignum25519 s, const bignum25519mulprecomp *r) { - xmmi m01,m23,m45,m67,m89; - xmmi m0123,m4567; - xmmi s0123,s4567; - xmmi s01,s23,s45,s67,s89; - xmmi s12,s34,s56,s78,s9; - xmmi r0,r1,r2,r3,r4,r5; - xmmi c1,c2,c3; - - s0123 = _mm_load_si128((xmmi*)s + 0); - s01 = _mm_shuffle_epi32(s0123,_MM_SHUFFLE(3,1,2,0)); - s12 = _mm_shuffle_epi32(s0123, _MM_SHUFFLE(2,2,1,1)); - s23 = _mm_shuffle_epi32(s0123,_MM_SHUFFLE(3,3,2,2)); - s4567 = _mm_load_si128((xmmi*)s + 1); - s34 = _mm_unpacklo_epi64(_mm_srli_si128(s0123,12),s4567); - s45 = _mm_shuffle_epi32(s4567,_MM_SHUFFLE(3,1,2,0)); - s56 = _mm_shuffle_epi32(s4567, _MM_SHUFFLE(2,2,1,1)); - s67 = _mm_shuffle_epi32(s4567,_MM_SHUFFLE(3,3,2,2)); - s89 = _mm_load_si128((xmmi*)s + 2); - s78 = _mm_unpacklo_epi64(_mm_srli_si128(s4567,12),s89); - s89 = _mm_shuffle_epi32(s89,_MM_SHUFFLE(3,1,2,0)); - s9 = _mm_shuffle_epi32(s89, _MM_SHUFFLE(3,3,2,2)); - - m01 = _mm_mul_epu32(r->r1,s01); - m23 = _mm_mul_epu32(r->r1,s23); - m45 = _mm_mul_epu32(r->r1,s45); - m67 = _mm_mul_epu32(r->r1,s67); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r3,s01)); - m45 = _mm_add_epi64(m45,_mm_mul_epu32(r->r3,s23)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r3,s45)); - m89 = _mm_mul_epu32(r->r1,s89); - m45 = _mm_add_epi64(m45,_mm_mul_epu32(r->r5,s01)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r5,s23)); - m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r3,s67)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r7,s01)); - m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r5,s45)); - m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r7,s23)); - m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r9,s01)); - - /* shift up */ - m89 = _mm_unpackhi_epi64(m67,_mm_slli_si128(m89,8)); - m67 = _mm_unpackhi_epi64(m45,_mm_slli_si128(m67,8)); - m45 = _mm_unpackhi_epi64(m23,_mm_slli_si128(m45,8)); - m23 = _mm_unpackhi_epi64(m01,_mm_slli_si128(m23,8)); - m01 = _mm_unpackhi_epi64(_mm_setzero_si128(),_mm_slli_si128(m01,8)); - - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r0,s01)); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r0,s23)); - m45 = _mm_add_epi64(m45,_mm_mul_epu32(r->r0,s45)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r0,s67)); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r2,s01)); - m45 = _mm_add_epi64(m45,_mm_mul_epu32(r->r2,s23)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r4,s23)); - m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r0,s89)); - m45 = _mm_add_epi64(m45,_mm_mul_epu32(r->r4,s01)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r2,s45)); - m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r2,s67)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r6,s01)); - m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r4,s45)); - m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r6,s23)); - m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r8,s01)); - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r919,s12)); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r919,s34)); - m45 = _mm_add_epi64(m45,_mm_mul_epu32(r->r919,s56)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r919,s78)); - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r719,s34)); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r719,s56)); - m45 = _mm_add_epi64(m45,_mm_mul_epu32(r->r719,s78)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r719,s9)); - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r519,s56)); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r519,s78)); - m45 = _mm_add_epi64(m45,_mm_mul_epu32(r->r519,s9)); - m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r819,s89)); - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r319,s78)); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r319,s9)); - m45 = 
_mm_add_epi64(m45,_mm_mul_epu32(r->r619,s89)); - m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r919,s9)); - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r819,s23)); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r819,s45)); - m45 = _mm_add_epi64(m45,_mm_mul_epu32(r->r819,s67)); - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r619,s45)); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r619,s67)); - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r419,s67)); - m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r419,s89)); - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r219,s89)); - m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r119,s9)); - - r0 = _mm_unpacklo_epi64(m01, m45); - r1 = _mm_unpackhi_epi64(m01, m45); - r2 = _mm_unpacklo_epi64(m23, m67); - r3 = _mm_unpackhi_epi64(m23, m67); - r4 = _mm_unpacklo_epi64(m89, m89); - r5 = _mm_unpackhi_epi64(m89, m89); - - c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2); - c1 = _mm_srli_epi64(r1, 25); c2 = _mm_srli_epi64(r3, 25); r1 = _mm_and_si128(r1, packedmask25.v); r3 = _mm_and_si128(r3, packedmask25.v); r2 = _mm_add_epi64(r2, c1); r4 = _mm_add_epi64(r4, c2); c3 = _mm_slli_si128(c2, 8); - c1 = _mm_srli_epi64(r4, 26); r4 = _mm_and_si128(r4, packedmask26.v); r5 = _mm_add_epi64(r5, c1); - c1 = _mm_srli_epi64(r5, 25); r5 = _mm_and_si128(r5, packedmask25.v); r0 = _mm_add_epi64(r0, _mm_unpackhi_epi64(_mm_mul_epu32(c1, packednineteen.v), c3)); - c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2); - - m0123 = _mm_unpacklo_epi32(r0, r1); - m4567 = _mm_unpackhi_epi32(r0, r1); - m0123 = _mm_unpacklo_epi64(m0123, _mm_unpacklo_epi32(r2, r3)); - m4567 = _mm_unpacklo_epi64(m4567, _mm_unpackhi_epi32(r2, r3)); - m89 = _mm_unpackhi_epi32(r4, r5); - - _mm_store_si128((xmmi*)out + 0, m0123); - _mm_store_si128((xmmi*)out + 1, m4567); - _mm_store_si128((xmmi*)out + 2, m89); -} - -/* square a bignum 'count' times */ -#define curve25519_square(r,x) curve25519_square_times(r,x,1) - -static void -curve25519_square_times(bignum25519 r, const bignum25519 in, int count) { - xmmi m01,m23,m45,m67,m89; - xmmi r0,r1,r2,r3,r4,r5,r6,r7,r8,r9; - xmmi r0a,r1a,r2a,r3a,r7a,r9a; - xmmi r0123,r4567; - xmmi r01,r23,r45,r67,r6x,r89,r8x; - xmmi r12,r34,r56,r78,r9x; - xmmi r5619; - xmmi c1,c2,c3; - - r0123 = _mm_load_si128((xmmi*)in + 0); - r01 = _mm_shuffle_epi32(r0123,_MM_SHUFFLE(3,1,2,0)); - r23 = _mm_shuffle_epi32(r0123,_MM_SHUFFLE(3,3,2,2)); - r4567 = _mm_load_si128((xmmi*)in + 1); - r45 = _mm_shuffle_epi32(r4567,_MM_SHUFFLE(3,1,2,0)); - r67 = _mm_shuffle_epi32(r4567,_MM_SHUFFLE(3,3,2,2)); - r89 = _mm_load_si128((xmmi*)in + 2); - r89 = _mm_shuffle_epi32(r89,_MM_SHUFFLE(3,1,2,0)); - - do { - r12 = _mm_unpackhi_epi64(r01, _mm_slli_si128(r23, 8)); - r0 = _mm_shuffle_epi32(r01, _MM_SHUFFLE(0,0,0,0)); - r0 = _mm_add_epi64(r0, _mm_and_si128(r0, sse2_top64bitmask.v)); - r0a = _mm_shuffle_epi32(r0,_MM_SHUFFLE(3,2,1,2)); - r1 = _mm_shuffle_epi32(r01, _MM_SHUFFLE(2,2,2,2)); - r2 = _mm_shuffle_epi32(r23, _MM_SHUFFLE(0,0,0,0)); - r2 = _mm_add_epi64(r2, _mm_and_si128(r2, sse2_top64bitmask.v)); - r2a = _mm_shuffle_epi32(r2,_MM_SHUFFLE(3,2,1,2)); - r3 = _mm_shuffle_epi32(r23, _MM_SHUFFLE(2,2,2,2)); - r34 = _mm_unpackhi_epi64(r23, _mm_slli_si128(r45, 8)); - r4 = _mm_shuffle_epi32(r45, _MM_SHUFFLE(0,0,0,0)); - r4 = _mm_add_epi64(r4, _mm_and_si128(r4, 
sse2_top64bitmask.v)); - r56 = _mm_unpackhi_epi64(r45, _mm_slli_si128(r67, 8)); - r5619 = _mm_mul_epu32(r56, packednineteen.v); - r5 = _mm_shuffle_epi32(r5619, _MM_SHUFFLE(1,1,1,0)); - r6 = _mm_shuffle_epi32(r5619, _MM_SHUFFLE(3,2,3,2)); - r78 = _mm_unpackhi_epi64(r67, _mm_slli_si128(r89, 8)); - r6x = _mm_unpacklo_epi64(r67, _mm_setzero_si128()); - r7 = _mm_shuffle_epi32(r67, _MM_SHUFFLE(2,2,2,2)); - r7 = _mm_mul_epu32(r7, packed3819.v); - r7a = _mm_shuffle_epi32(r7, _MM_SHUFFLE(3,3,3,2)); - r8x = _mm_unpacklo_epi64(r89, _mm_setzero_si128()); - r8 = _mm_shuffle_epi32(r89, _MM_SHUFFLE(0,0,0,0)); - r8 = _mm_mul_epu32(r8, packednineteen.v); - r9 = _mm_shuffle_epi32(r89, _MM_SHUFFLE(2,2,2,2)); - r9x = _mm_slli_epi32(_mm_shuffle_epi32(r89, _MM_SHUFFLE(3,3,3,2)), 1); - r9 = _mm_mul_epu32(r9, packed3819.v); - r9a = _mm_shuffle_epi32(r9, _MM_SHUFFLE(2,2,2,2)); - - m01 = _mm_mul_epu32(r01, r0); - m23 = _mm_mul_epu32(r23, r0a); - m45 = _mm_mul_epu32(r45, r0a); - m45 = _mm_add_epi64(m45, _mm_mul_epu32(r23, r2)); - r23 = _mm_slli_epi32(r23, 1); - m67 = _mm_mul_epu32(r67, r0a); - m67 = _mm_add_epi64(m67, _mm_mul_epu32(r45, r2a)); - m89 = _mm_mul_epu32(r89, r0a); - m89 = _mm_add_epi64(m89, _mm_mul_epu32(r67, r2a)); - r67 = _mm_slli_epi32(r67, 1); - m89 = _mm_add_epi64(m89, _mm_mul_epu32(r45, r4)); - r45 = _mm_slli_epi32(r45, 1); - - r1 = _mm_slli_epi32(r1, 1); - r3 = _mm_slli_epi32(r3, 1); - r1a = _mm_add_epi64(r1, _mm_and_si128(r1, sse2_bot64bitmask.v)); - r3a = _mm_add_epi64(r3, _mm_and_si128(r3, sse2_bot64bitmask.v)); - - m23 = _mm_add_epi64(m23, _mm_mul_epu32(r12, r1)); - m45 = _mm_add_epi64(m45, _mm_mul_epu32(r34, r1a)); - m67 = _mm_add_epi64(m67, _mm_mul_epu32(r56, r1a)); - m67 = _mm_add_epi64(m67, _mm_mul_epu32(r34, r3)); - r34 = _mm_slli_epi32(r34, 1); - m89 = _mm_add_epi64(m89, _mm_mul_epu32(r78, r1a)); - r78 = _mm_slli_epi32(r78, 1); - m89 = _mm_add_epi64(m89, _mm_mul_epu32(r56, r3a)); - r56 = _mm_slli_epi32(r56, 1); - - m01 = _mm_add_epi64(m01, _mm_mul_epu32(_mm_slli_epi32(r12, 1), r9)); - m01 = _mm_add_epi64(m01, _mm_mul_epu32(r34, r7)); - m23 = _mm_add_epi64(m23, _mm_mul_epu32(r34, r9)); - m01 = _mm_add_epi64(m01, _mm_mul_epu32(r56, r5)); - m23 = _mm_add_epi64(m23, _mm_mul_epu32(r56, r7)); - m45 = _mm_add_epi64(m45, _mm_mul_epu32(r56, r9)); - m01 = _mm_add_epi64(m01, _mm_mul_epu32(r23, r8)); - m01 = _mm_add_epi64(m01, _mm_mul_epu32(r45, r6)); - m23 = _mm_add_epi64(m23, _mm_mul_epu32(r45, r8)); - m23 = _mm_add_epi64(m23, _mm_mul_epu32(r6x, r6)); - m45 = _mm_add_epi64(m45, _mm_mul_epu32(r78, r7a)); - m67 = _mm_add_epi64(m67, _mm_mul_epu32(r78, r9)); - m45 = _mm_add_epi64(m45, _mm_mul_epu32(r67, r8)); - m67 = _mm_add_epi64(m67, _mm_mul_epu32(r8x, r8)); - m89 = _mm_add_epi64(m89, _mm_mul_epu32(r9x, r9a)); - - r0 = _mm_unpacklo_epi64(m01, m45); - r1 = _mm_unpackhi_epi64(m01, m45); - r2 = _mm_unpacklo_epi64(m23, m67); - r3 = _mm_unpackhi_epi64(m23, m67); - r4 = _mm_unpacklo_epi64(m89, m89); - r5 = _mm_unpackhi_epi64(m89, m89); - - c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2); - c1 = _mm_srli_epi64(r1, 25); c2 = _mm_srli_epi64(r3, 25); r1 = _mm_and_si128(r1, packedmask25.v); r3 = _mm_and_si128(r3, packedmask25.v); r2 = _mm_add_epi64(r2, c1); r4 = _mm_add_epi64(r4, c2); c3 = _mm_slli_si128(c2, 8); - c1 = _mm_srli_epi64(r4, 26); r4 = _mm_and_si128(r4, packedmask26.v); r5 = _mm_add_epi64(r5, c1); - c1 = _mm_srli_epi64(r5, 25); r5 = _mm_and_si128(r5, 
packedmask25.v); r0 = _mm_add_epi64(r0, _mm_unpackhi_epi64(_mm_mul_epu32(c1, packednineteen.v), c3)); - c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2); - - r01 = _mm_unpacklo_epi64(r0, r1); - r45 = _mm_unpackhi_epi64(r0, r1); - r23 = _mm_unpacklo_epi64(r2, r3); - r67 = _mm_unpackhi_epi64(r2, r3); - r89 = _mm_unpackhi_epi64(r4, r5); - } while (--count); - - r0123 = _mm_shuffle_epi32(r23, _MM_SHUFFLE(2,0,3,3)); - r4567 = _mm_shuffle_epi32(r67, _MM_SHUFFLE(2,0,3,3)); - r0123 = _mm_or_si128(r0123, _mm_shuffle_epi32(r01, _MM_SHUFFLE(3,3,2,0))); - r4567 = _mm_or_si128(r4567, _mm_shuffle_epi32(r45, _MM_SHUFFLE(3,3,2,0))); - r89 = _mm_shuffle_epi32(r89, _MM_SHUFFLE(3,3,2,0)); - - _mm_store_si128((xmmi*)r + 0, r0123); - _mm_store_si128((xmmi*)r + 1, r4567); - _mm_store_si128((xmmi*)r + 2, r89); -} - -/* square two packed bignums */ -DONNA_INLINE static void -curve25519_square_packed64(packedelem64 *out, const packedelem64 *r) { - xmmi r0,r1,r2,r3; - xmmi r1_2,r3_2,r4_2,r5_2,r6_2,r7_2; - xmmi d5,d6,d7,d8,d9; - xmmi c1,c2; - - r0 = r[0].v; - r1 = r[1].v; - r2 = r[2].v; - r3 = r[3].v; - - out[0].v = _mm_mul_epu32(r0, r0); - r0 = _mm_slli_epi32(r0, 1); - out[1].v = _mm_mul_epu32(r0, r1); - r1_2 = _mm_slli_epi32(r1, 1); - out[2].v = _mm_add_epi64(_mm_mul_epu32(r0, r2 ), _mm_mul_epu32(r1, r1_2)); - r1 = r1_2; - out[3].v = _mm_add_epi64(_mm_mul_epu32(r0, r3 ), _mm_mul_epu32(r1, r2 )); - r3_2 = _mm_slli_epi32(r3, 1); - out[4].v = _mm_add_epi64(_mm_mul_epu32(r0, r[4].v), _mm_add_epi64(_mm_mul_epu32(r1, r3_2 ), _mm_mul_epu32(r2, r2))); - r2 = _mm_slli_epi32(r2, 1); - out[5].v = _mm_add_epi64(_mm_mul_epu32(r0, r[5].v), _mm_add_epi64(_mm_mul_epu32(r1, r[4].v), _mm_mul_epu32(r2, r3))); - r5_2 = _mm_slli_epi32(r[5].v, 1); - out[6].v = _mm_add_epi64(_mm_mul_epu32(r0, r[6].v), _mm_add_epi64(_mm_mul_epu32(r1, r5_2 ), _mm_add_epi64(_mm_mul_epu32(r2, r[4].v), _mm_mul_epu32(r3, r3_2 )))); - r3 = r3_2; - out[7].v = _mm_add_epi64(_mm_mul_epu32(r0, r[7].v), _mm_add_epi64(_mm_mul_epu32(r1, r[6].v), _mm_add_epi64(_mm_mul_epu32(r2, r[5].v), _mm_mul_epu32(r3, r[4].v)))); - r7_2 = _mm_slli_epi32(r[7].v, 1); - out[8].v = _mm_add_epi64(_mm_mul_epu32(r0, r[8].v), _mm_add_epi64(_mm_mul_epu32(r1, r7_2 ), _mm_add_epi64(_mm_mul_epu32(r2, r[6].v), _mm_add_epi64(_mm_mul_epu32(r3, r5_2 ), _mm_mul_epu32(r[4].v, r[4].v))))); - out[9].v = _mm_add_epi64(_mm_mul_epu32(r0, r[9].v), _mm_add_epi64(_mm_mul_epu32(r1, r[8].v), _mm_add_epi64(_mm_mul_epu32(r2, r[7].v), _mm_add_epi64(_mm_mul_epu32(r3, r[6].v), _mm_mul_epu32(r[4].v, r5_2 ))))); - - d5 = _mm_mul_epu32(r[5].v, packedthirtyeight.v); - d6 = _mm_mul_epu32(r[6].v, packednineteen.v); - d7 = _mm_mul_epu32(r[7].v, packedthirtyeight.v); - d8 = _mm_mul_epu32(r[8].v, packednineteen.v); - d9 = _mm_mul_epu32(r[9].v, packedthirtyeight.v); - - r4_2 = _mm_slli_epi32(r[4].v, 1); - r6_2 = _mm_slli_epi32(r[6].v, 1); - out[0].v = _mm_add_epi64(out[0].v, _mm_add_epi64(_mm_mul_epu32(d9, r1 ), _mm_add_epi64(_mm_mul_epu32(d8, r2 ), _mm_add_epi64(_mm_mul_epu32(d7, r3 ), _mm_add_epi64(_mm_mul_epu32(d6, r4_2), _mm_mul_epu32(d5, r[5].v)))))); - out[1].v = _mm_add_epi64(out[1].v, _mm_add_epi64(_mm_mul_epu32(d9, _mm_srli_epi32(r2, 1)), _mm_add_epi64(_mm_mul_epu32(d8, r3 ), _mm_add_epi64(_mm_mul_epu32(d7, r[4].v), _mm_mul_epu32(d6, r5_2 ))))); - out[2].v = _mm_add_epi64(out[2].v, _mm_add_epi64(_mm_mul_epu32(d9, r3 ), _mm_add_epi64(_mm_mul_epu32(d8, r4_2), 
_mm_add_epi64(_mm_mul_epu32(d7, r5_2 ), _mm_mul_epu32(d6, r[6].v))))); - out[3].v = _mm_add_epi64(out[3].v, _mm_add_epi64(_mm_mul_epu32(d9, r[4].v ), _mm_add_epi64(_mm_mul_epu32(d8, r5_2), _mm_mul_epu32(d7, r[6].v)))); - out[4].v = _mm_add_epi64(out[4].v, _mm_add_epi64(_mm_mul_epu32(d9, r5_2 ), _mm_add_epi64(_mm_mul_epu32(d8, r6_2), _mm_mul_epu32(d7, r[7].v)))); - out[5].v = _mm_add_epi64(out[5].v, _mm_add_epi64(_mm_mul_epu32(d9, r[6].v ), _mm_mul_epu32(d8, r7_2 ))); - out[6].v = _mm_add_epi64(out[6].v, _mm_add_epi64(_mm_mul_epu32(d9, r7_2 ), _mm_mul_epu32(d8, r[8].v))); - out[7].v = _mm_add_epi64(out[7].v, _mm_mul_epu32(d9, r[8].v)); - out[8].v = _mm_add_epi64(out[8].v, _mm_mul_epu32(d9, r[9].v)); - - c1 = _mm_srli_epi64(out[0].v, 26); c2 = _mm_srli_epi64(out[4].v, 26); out[0].v = _mm_and_si128(out[0].v, packedmask26.v); out[4].v = _mm_and_si128(out[4].v, packedmask26.v); out[1].v = _mm_add_epi64(out[1].v, c1); out[5].v = _mm_add_epi64(out[5].v, c2); - c1 = _mm_srli_epi64(out[1].v, 25); c2 = _mm_srli_epi64(out[5].v, 25); out[1].v = _mm_and_si128(out[1].v, packedmask25.v); out[5].v = _mm_and_si128(out[5].v, packedmask25.v); out[2].v = _mm_add_epi64(out[2].v, c1); out[6].v = _mm_add_epi64(out[6].v, c2); - c1 = _mm_srli_epi64(out[2].v, 26); c2 = _mm_srli_epi64(out[6].v, 26); out[2].v = _mm_and_si128(out[2].v, packedmask26.v); out[6].v = _mm_and_si128(out[6].v, packedmask26.v); out[3].v = _mm_add_epi64(out[3].v, c1); out[7].v = _mm_add_epi64(out[7].v, c2); - c1 = _mm_srli_epi64(out[3].v, 25); c2 = _mm_srli_epi64(out[7].v, 25); out[3].v = _mm_and_si128(out[3].v, packedmask25.v); out[7].v = _mm_and_si128(out[7].v, packedmask25.v); out[4].v = _mm_add_epi64(out[4].v, c1); out[8].v = _mm_add_epi64(out[8].v, c2); - c2 = _mm_srli_epi64(out[8].v, 26); out[8].v = _mm_and_si128(out[8].v, packedmask26.v); out[9].v = _mm_add_epi64(out[9].v, c2); - c2 = _mm_srli_epi64(out[9].v, 25); out[9].v = _mm_and_si128(out[9].v, packedmask25.v); out[0].v = _mm_add_epi64(out[0].v, _mm_mul_epu32(c2, packednineteen.v)); - c1 = _mm_srli_epi64(out[0].v, 26); c2 = _mm_srli_epi64(out[4].v, 26); out[0].v = _mm_and_si128(out[0].v, packedmask26.v); out[4].v = _mm_and_si128(out[4].v, packedmask26.v); out[1].v = _mm_add_epi64(out[1].v, c1); out[5].v = _mm_add_epi64(out[5].v, c2); -} - -/* make [nqx+nqz,nqpqx+nqpqz], [nqpqx-nqpqz,nqx-nqz] from [nqx+nqz,nqpqx+nqpqz], [nqx-nqz,nqpqx-nqpqz] */ -DONNA_INLINE static void -curve25519_make_nqpq(packedelem64 *primex, packedelem64 *primez, const packedelem32 *pqx, const packedelem32 *pqz) { - primex[0].v = _mm_shuffle_epi32(pqx[0].v, _MM_SHUFFLE(1,1,0,0)); - primex[1].v = _mm_shuffle_epi32(pqx[0].v, _MM_SHUFFLE(3,3,2,2)); - primex[2].v = _mm_shuffle_epi32(pqx[1].v, _MM_SHUFFLE(1,1,0,0)); - primex[3].v = _mm_shuffle_epi32(pqx[1].v, _MM_SHUFFLE(3,3,2,2)); - primex[4].v = _mm_shuffle_epi32(pqx[2].v, _MM_SHUFFLE(1,1,0,0)); - primex[5].v = _mm_shuffle_epi32(pqx[2].v, _MM_SHUFFLE(3,3,2,2)); - primex[6].v = _mm_shuffle_epi32(pqx[3].v, _MM_SHUFFLE(1,1,0,0)); - primex[7].v = _mm_shuffle_epi32(pqx[3].v, _MM_SHUFFLE(3,3,2,2)); - primex[8].v = _mm_shuffle_epi32(pqx[4].v, _MM_SHUFFLE(1,1,0,0)); - primex[9].v = _mm_shuffle_epi32(pqx[4].v, _MM_SHUFFLE(3,3,2,2)); - primez[0].v = _mm_shuffle_epi32(pqz[0].v, _MM_SHUFFLE(0,0,1,1)); - primez[1].v = _mm_shuffle_epi32(pqz[0].v, _MM_SHUFFLE(2,2,3,3)); - primez[2].v = _mm_shuffle_epi32(pqz[1].v, _MM_SHUFFLE(0,0,1,1)); - primez[3].v = _mm_shuffle_epi32(pqz[1].v, _MM_SHUFFLE(2,2,3,3)); - primez[4].v = _mm_shuffle_epi32(pqz[2].v, _MM_SHUFFLE(0,0,1,1)); - 
primez[5].v = _mm_shuffle_epi32(pqz[2].v, _MM_SHUFFLE(2,2,3,3)); - primez[6].v = _mm_shuffle_epi32(pqz[3].v, _MM_SHUFFLE(0,0,1,1)); - primez[7].v = _mm_shuffle_epi32(pqz[3].v, _MM_SHUFFLE(2,2,3,3)); - primez[8].v = _mm_shuffle_epi32(pqz[4].v, _MM_SHUFFLE(0,0,1,1)); - primez[9].v = _mm_shuffle_epi32(pqz[4].v, _MM_SHUFFLE(2,2,3,3)); -} - -/* make [nqx+nqz,nqx-nqz] from [nqx+nqz,nqpqx+nqpqz], [nqx-nqz,nqpqx-nqpqz] */ -DONNA_INLINE static void -curve25519_make_nq(packedelem64 *nq, const packedelem32 *pqx, const packedelem32 *pqz) { - nq[0].v = _mm_unpacklo_epi64(pqx[0].v, pqz[0].v); - nq[1].v = _mm_unpackhi_epi64(pqx[0].v, pqz[0].v); - nq[2].v = _mm_unpacklo_epi64(pqx[1].v, pqz[1].v); - nq[3].v = _mm_unpackhi_epi64(pqx[1].v, pqz[1].v); - nq[4].v = _mm_unpacklo_epi64(pqx[2].v, pqz[2].v); - nq[5].v = _mm_unpackhi_epi64(pqx[2].v, pqz[2].v); - nq[6].v = _mm_unpacklo_epi64(pqx[3].v, pqz[3].v); - nq[7].v = _mm_unpackhi_epi64(pqx[3].v, pqz[3].v); - nq[8].v = _mm_unpacklo_epi64(pqx[4].v, pqz[4].v); - nq[9].v = _mm_unpackhi_epi64(pqx[4].v, pqz[4].v); -} - -/* compute [nqx+nqz,nqx-nqz] from nqx, nqz */ -DONNA_INLINE static void -curve25519_compute_nq(packedelem64 *nq, const bignum25519 nqx, const bignum25519 nqz) { - xmmi x0,x1,x2; - xmmi z0,z1,z2; - xmmi a0,a1,a2; - xmmi s0,s1,s2; - xmmi r0,r1; - xmmi c1,c2; - x0 = _mm_load_si128((xmmi*)nqx + 0); - x1 = _mm_load_si128((xmmi*)nqx + 1); - x2 = _mm_load_si128((xmmi*)nqx + 2); - z0 = _mm_load_si128((xmmi*)nqz + 0); - z1 = _mm_load_si128((xmmi*)nqz + 1); - z2 = _mm_load_si128((xmmi*)nqz + 2); - a0 = _mm_add_epi32(x0, z0); - a1 = _mm_add_epi32(x1, z1); - a2 = _mm_add_epi32(x2, z2); - s0 = _mm_add_epi32(x0, packed2p0.v); - s1 = _mm_add_epi32(x1, packed2p1.v); - s2 = _mm_add_epi32(x2, packed2p2.v); - s0 = _mm_sub_epi32(s0, z0); - s1 = _mm_sub_epi32(s1, z1); - s2 = _mm_sub_epi32(s2, z2); - r0 = _mm_and_si128(_mm_shuffle_epi32(s0, _MM_SHUFFLE(2,2,0,0)), sse2_bot32bitmask.v); - r1 = _mm_and_si128(_mm_shuffle_epi32(s0, _MM_SHUFFLE(3,3,1,1)), sse2_bot32bitmask.v); - c1 = _mm_srli_epi32(r0, 26); - c2 = _mm_srli_epi32(r1, 25); - r0 = _mm_and_si128(r0, packedmask26.v); - r1 = _mm_and_si128(r1, packedmask25.v); - r0 = _mm_add_epi32(r0, _mm_slli_si128(c2, 8)); - r1 = _mm_add_epi32(r1, c1); - s0 = _mm_unpacklo_epi64(_mm_unpacklo_epi32(r0, r1), _mm_unpackhi_epi32(r0, r1)); - s1 = _mm_add_epi32(s1, _mm_srli_si128(c2, 8)); - nq[0].v = _mm_unpacklo_epi64(a0, s0); - nq[2].v = _mm_unpackhi_epi64(a0, s0); - nq[4].v = _mm_unpacklo_epi64(a1, s1); - nq[6].v = _mm_unpackhi_epi64(a1, s1); - nq[8].v = _mm_unpacklo_epi64(a2, s2); - nq[1].v = _mm_shuffle_epi32(nq[0].v, _MM_SHUFFLE(3,3,1,1)); - nq[3].v = _mm_shuffle_epi32(nq[2].v, _MM_SHUFFLE(3,3,1,1)); - nq[5].v = _mm_shuffle_epi32(nq[4].v, _MM_SHUFFLE(3,3,1,1)); - nq[7].v = _mm_shuffle_epi32(nq[6].v, _MM_SHUFFLE(3,3,1,1)); - nq[9].v = _mm_shuffle_epi32(nq[8].v, _MM_SHUFFLE(3,3,1,1)); -} - - -/* compute [x+z,x-z] from [x,z] */ -DONNA_INLINE static void -curve25519_addsub_packed64(packedelem64 *r) { - packed32bignum25519 x,z,add,sub; - - x[0].v = _mm_unpacklo_epi64(r[0].v, r[1].v); - z[0].v = _mm_unpackhi_epi64(r[0].v, r[1].v); - x[1].v = _mm_unpacklo_epi64(r[2].v, r[3].v); - z[1].v = _mm_unpackhi_epi64(r[2].v, r[3].v); - x[2].v = _mm_unpacklo_epi64(r[4].v, r[5].v); - z[2].v = _mm_unpackhi_epi64(r[4].v, r[5].v); - x[3].v = _mm_unpacklo_epi64(r[6].v, r[7].v); - z[3].v = _mm_unpackhi_epi64(r[6].v, r[7].v); - x[4].v = _mm_unpacklo_epi64(r[8].v, r[9].v); - z[4].v = _mm_unpackhi_epi64(r[8].v, r[9].v); - - curve25519_add_packed32(add, x, z); 
- curve25519_sub_packed32(sub, x, z); - - r[0].v = _mm_unpacklo_epi64(add[0].v, sub[0].v); - r[1].v = _mm_unpackhi_epi64(add[0].v, sub[0].v); - r[2].v = _mm_unpacklo_epi64(add[1].v, sub[1].v); - r[3].v = _mm_unpackhi_epi64(add[1].v, sub[1].v); - r[4].v = _mm_unpacklo_epi64(add[2].v, sub[2].v); - r[5].v = _mm_unpackhi_epi64(add[2].v, sub[2].v); - r[6].v = _mm_unpacklo_epi64(add[3].v, sub[3].v); - r[7].v = _mm_unpackhi_epi64(add[3].v, sub[3].v); - r[8].v = _mm_unpacklo_epi64(add[4].v, sub[4].v); - r[9].v = _mm_unpackhi_epi64(add[4].v, sub[4].v); -} - -/* compute [x,z] * [121666,121665] */ -DONNA_INLINE static void -curve25519_121665_packed64(packedelem64 *out, const packedelem64 *in) { - xmmi c1,c2; - - out[0].v = _mm_mul_epu32(in[0].v, packed121666121665.v); - out[1].v = _mm_mul_epu32(in[1].v, packed121666121665.v); - out[2].v = _mm_mul_epu32(in[2].v, packed121666121665.v); - out[3].v = _mm_mul_epu32(in[3].v, packed121666121665.v); - out[4].v = _mm_mul_epu32(in[4].v, packed121666121665.v); - out[5].v = _mm_mul_epu32(in[5].v, packed121666121665.v); - out[6].v = _mm_mul_epu32(in[6].v, packed121666121665.v); - out[7].v = _mm_mul_epu32(in[7].v, packed121666121665.v); - out[8].v = _mm_mul_epu32(in[8].v, packed121666121665.v); - out[9].v = _mm_mul_epu32(in[9].v, packed121666121665.v); - - c1 = _mm_srli_epi64(out[0].v, 26); c2 = _mm_srli_epi64(out[4].v, 26); out[0].v = _mm_and_si128(out[0].v, packedmask26.v); out[4].v = _mm_and_si128(out[4].v, packedmask26.v); out[1].v = _mm_add_epi64(out[1].v, c1); out[5].v = _mm_add_epi64(out[5].v, c2); - c1 = _mm_srli_epi64(out[1].v, 25); c2 = _mm_srli_epi64(out[5].v, 25); out[1].v = _mm_and_si128(out[1].v, packedmask25.v); out[5].v = _mm_and_si128(out[5].v, packedmask25.v); out[2].v = _mm_add_epi64(out[2].v, c1); out[6].v = _mm_add_epi64(out[6].v, c2); - c1 = _mm_srli_epi64(out[2].v, 26); c2 = _mm_srli_epi64(out[6].v, 26); out[2].v = _mm_and_si128(out[2].v, packedmask26.v); out[6].v = _mm_and_si128(out[6].v, packedmask26.v); out[3].v = _mm_add_epi64(out[3].v, c1); out[7].v = _mm_add_epi64(out[7].v, c2); - c1 = _mm_srli_epi64(out[3].v, 25); c2 = _mm_srli_epi64(out[7].v, 25); out[3].v = _mm_and_si128(out[3].v, packedmask25.v); out[7].v = _mm_and_si128(out[7].v, packedmask25.v); out[4].v = _mm_add_epi64(out[4].v, c1); out[8].v = _mm_add_epi64(out[8].v, c2); - c2 = _mm_srli_epi64(out[8].v, 26); out[8].v = _mm_and_si128(out[8].v, packedmask26.v); out[9].v = _mm_add_epi64(out[9].v, c2); - c2 = _mm_srli_epi64(out[9].v, 25); out[9].v = _mm_and_si128(out[9].v, packedmask25.v); out[0].v = _mm_add_epi64(out[0].v, _mm_mul_epu32(c2, packednineteen.v)); - c1 = _mm_srli_epi64(out[0].v, 26); c2 = _mm_srli_epi64(out[4].v, 26); out[0].v = _mm_and_si128(out[0].v, packedmask26.v); out[4].v = _mm_and_si128(out[4].v, packedmask26.v); out[1].v = _mm_add_epi64(out[1].v, c1); out[5].v = _mm_add_epi64(out[5].v, c2); -} - -/* compute [sq.x,sqscalar.x-sqscalar.z] * [sq.z,sq.x-sq.z] */ -DONNA_INLINE static void -curve25519_final_nq(packedelem64 *nq, const packedelem64 *sq, const packedelem64 *sq121665) { - packed32bignum25519 x, z, sub; - packed64bignum25519 t, nqa, nqb; - - x[0].v = _mm_or_si128(_mm_unpacklo_epi64(sq[0].v, sq[1].v), _mm_slli_si128(_mm_unpacklo_epi64(sq121665[0].v, sq121665[1].v), 4)); - z[0].v = _mm_or_si128(_mm_unpackhi_epi64(sq[0].v, sq[1].v), _mm_slli_si128(_mm_unpackhi_epi64(sq121665[0].v, sq121665[1].v), 4)); - x[1].v = _mm_or_si128(_mm_unpacklo_epi64(sq[2].v, sq[3].v), _mm_slli_si128(_mm_unpacklo_epi64(sq121665[2].v, sq121665[3].v), 4)); - z[1].v = 
_mm_or_si128(_mm_unpackhi_epi64(sq[2].v, sq[3].v), _mm_slli_si128(_mm_unpackhi_epi64(sq121665[2].v, sq121665[3].v), 4)); - x[2].v = _mm_or_si128(_mm_unpacklo_epi64(sq[4].v, sq[5].v), _mm_slli_si128(_mm_unpacklo_epi64(sq121665[4].v, sq121665[5].v), 4)); - z[2].v = _mm_or_si128(_mm_unpackhi_epi64(sq[4].v, sq[5].v), _mm_slli_si128(_mm_unpackhi_epi64(sq121665[4].v, sq121665[5].v), 4)); - x[3].v = _mm_or_si128(_mm_unpacklo_epi64(sq[6].v, sq[7].v), _mm_slli_si128(_mm_unpacklo_epi64(sq121665[6].v, sq121665[7].v), 4)); - z[3].v = _mm_or_si128(_mm_unpackhi_epi64(sq[6].v, sq[7].v), _mm_slli_si128(_mm_unpackhi_epi64(sq121665[6].v, sq121665[7].v), 4)); - x[4].v = _mm_or_si128(_mm_unpacklo_epi64(sq[8].v, sq[9].v), _mm_slli_si128(_mm_unpacklo_epi64(sq121665[8].v, sq121665[9].v), 4)); - z[4].v = _mm_or_si128(_mm_unpackhi_epi64(sq[8].v, sq[9].v), _mm_slli_si128(_mm_unpackhi_epi64(sq121665[8].v, sq121665[9].v), 4)); - - curve25519_sub_packed32(sub, x, z); - - t[0].v = _mm_shuffle_epi32(sub[0].v, _MM_SHUFFLE(1,1,0,0)); - t[1].v = _mm_shuffle_epi32(sub[0].v, _MM_SHUFFLE(3,3,2,2)); - t[2].v = _mm_shuffle_epi32(sub[1].v, _MM_SHUFFLE(1,1,0,0)); - t[3].v = _mm_shuffle_epi32(sub[1].v, _MM_SHUFFLE(3,3,2,2)); - t[4].v = _mm_shuffle_epi32(sub[2].v, _MM_SHUFFLE(1,1,0,0)); - t[5].v = _mm_shuffle_epi32(sub[2].v, _MM_SHUFFLE(3,3,2,2)); - t[6].v = _mm_shuffle_epi32(sub[3].v, _MM_SHUFFLE(1,1,0,0)); - t[7].v = _mm_shuffle_epi32(sub[3].v, _MM_SHUFFLE(3,3,2,2)); - t[8].v = _mm_shuffle_epi32(sub[4].v, _MM_SHUFFLE(1,1,0,0)); - t[9].v = _mm_shuffle_epi32(sub[4].v, _MM_SHUFFLE(3,3,2,2)); - - nqa[0].v = _mm_unpacklo_epi64(sq[0].v, t[0].v); - nqb[0].v = _mm_unpackhi_epi64(sq[0].v, t[0].v); - nqa[1].v = _mm_unpacklo_epi64(sq[1].v, t[1].v); - nqb[1].v = _mm_unpackhi_epi64(sq[1].v, t[1].v); - nqa[2].v = _mm_unpacklo_epi64(sq[2].v, t[2].v); - nqb[2].v = _mm_unpackhi_epi64(sq[2].v, t[2].v); - nqa[3].v = _mm_unpacklo_epi64(sq[3].v, t[3].v); - nqb[3].v = _mm_unpackhi_epi64(sq[3].v, t[3].v); - nqa[4].v = _mm_unpacklo_epi64(sq[4].v, t[4].v); - nqb[4].v = _mm_unpackhi_epi64(sq[4].v, t[4].v); - nqa[5].v = _mm_unpacklo_epi64(sq[5].v, t[5].v); - nqb[5].v = _mm_unpackhi_epi64(sq[5].v, t[5].v); - nqa[6].v = _mm_unpacklo_epi64(sq[6].v, t[6].v); - nqb[6].v = _mm_unpackhi_epi64(sq[6].v, t[6].v); - nqa[7].v = _mm_unpacklo_epi64(sq[7].v, t[7].v); - nqb[7].v = _mm_unpackhi_epi64(sq[7].v, t[7].v); - nqa[8].v = _mm_unpacklo_epi64(sq[8].v, t[8].v); - nqb[8].v = _mm_unpackhi_epi64(sq[8].v, t[8].v); - nqa[9].v = _mm_unpacklo_epi64(sq[9].v, t[9].v); - nqb[9].v = _mm_unpackhi_epi64(sq[9].v, t[9].v); - - curve25519_mul_packed64(nq, nqa, nqb); -} - diff --git a/curve25519-donna/curve25519-donna.h b/curve25519-donna/curve25519-donna.h deleted file mode 100644 index e707e2293..000000000 --- a/curve25519-donna/curve25519-donna.h +++ /dev/null @@ -1,32 +0,0 @@ -#include "curve25519.h" -#include "curve25519-donna-portable.h" - -#if defined(CURVE25519_SSE2) -#else - #if defined(HAVE_UINT128) && !defined(CURVE25519_FORCE_32BIT) - #define CURVE25519_64BIT - #else - #define CURVE25519_32BIT - #endif -#endif - -#if !defined(CURVE25519_NO_INLINE_ASM) -#endif - - -#if defined(CURVE25519_SSE2) - #include "curve25519-donna-sse2.h" -#elif defined(CURVE25519_64BIT) - #include "curve25519-donna-64bit.h" -#else - #include "curve25519-donna-32bit.h" -#endif - -#include "curve25519-donna-common.h" - -#if defined(CURVE25519_SSE2) - #include "curve25519-donna-scalarmult-sse2.h" -#else - #include "curve25519-donna-scalarmult-base.h" -#endif - diff --git 
a/ed25519-donna/curve25519-donna-32bit.h b/ed25519-donna/curve25519-donna-32bit.h index b8fa37d6e..16606fcfe 100644 --- a/ed25519-donna/curve25519-donna-32bit.h +++ b/ed25519-donna/curve25519-donna-32bit.h @@ -6,12 +6,10 @@ */ typedef uint32_t bignum25519[10]; -typedef uint32_t bignum25519align16[12]; static const uint32_t reduce_mask_25 = (1 << 25) - 1; static const uint32_t reduce_mask_26 = (1 << 26) - 1; - /* out = in */ DONNA_INLINE static void curve25519_copy(bignum25519 out, const bignum25519 in) { @@ -98,6 +96,24 @@ curve25519_sub(bignum25519 out, const bignum25519 a, const bignum25519 b) { out[9] = twoP13579 + a[9] - b[9] ; } +/* out = in * scalar */ +DONNA_INLINE static void +curve25519_scalar_product(bignum25519 out, const bignum25519 in, const uint32_t scalar) { + uint64_t a; + uint32_t c; + a = mul32x32_64(in[0], scalar); out[0] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26); + a = mul32x32_64(in[1], scalar) + c; out[1] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25); + a = mul32x32_64(in[2], scalar) + c; out[2] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26); + a = mul32x32_64(in[3], scalar) + c; out[3] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25); + a = mul32x32_64(in[4], scalar) + c; out[4] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26); + a = mul32x32_64(in[5], scalar) + c; out[5] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25); + a = mul32x32_64(in[6], scalar) + c; out[6] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26); + a = mul32x32_64(in[7], scalar) + c; out[7] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25); + a = mul32x32_64(in[8], scalar) + c; out[8] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26); + a = mul32x32_64(in[9], scalar) + c; out[9] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25); + out[0] += c * 19; +} + /* out = a - b, where a is the result of a basic op (add,sub) */ DONNA_INLINE static void curve25519_sub_after_basic(bignum25519 out, const bignum25519 a, const bignum25519 b) { @@ -150,7 +166,7 @@ curve25519_neg(bignum25519 out, const bignum25519 a) { /* out = a * b */ #define curve25519_mul_noinline curve25519_mul -static void +DONNA_INLINE static void curve25519_mul(bignum25519 out, const bignum25519 a, const bignum25519 b) { uint32_t r0,r1,r2,r3,r4,r5,r6,r7,r8,r9; uint32_t s0,s1,s2,s3,s4,s5,s6,s7,s8,s9; @@ -247,8 +263,8 @@ curve25519_mul(bignum25519 out, const bignum25519 a, const bignum25519 b) { out[9] = r9; } -/* out = in*in */ -static void +/* out = in * in */ +DONNA_INLINE static void curve25519_square(bignum25519 out, const bignum25519 in) { uint32_t r0,r1,r2,r3,r4,r5,r6,r7,r8,r9; uint32_t d6,d7,d8,d9; @@ -321,7 +337,6 @@ curve25519_square(bignum25519 out, const bignum25519 in) { out[9] = r9; } - /* out = in ^ (2 * count) */ static void curve25519_square_times(bignum25519 out, const bignum25519 in, int count) { @@ -430,16 +445,16 @@ curve25519_expand(bignum25519 out, const unsigned char in[32]) { #undef F } - out[0] = ( x0 ) & 0x3ffffff; - out[1] = ((((uint64_t)x1 << 32) | x0) >> 26) & 0x1ffffff; - out[2] = ((((uint64_t)x2 << 32) | x1) >> 19) & 0x3ffffff; - out[3] = ((((uint64_t)x3 << 32) | x2) >> 13) & 0x1ffffff; - out[4] = (( x3) >> 6) & 0x3ffffff; - out[5] = ( x4 ) & 0x1ffffff; - out[6] = ((((uint64_t)x5 << 32) | x4) >> 25) & 0x3ffffff; - out[7] = ((((uint64_t)x6 << 32) | x5) >> 19) & 0x1ffffff; - out[8] = ((((uint64_t)x7 << 32) | x6) >> 12) & 0x3ffffff; - out[9] = (( x7) >> 6) & 0x1ffffff; + out[0] = ( x0 ) & reduce_mask_26; + out[1] = ((((uint64_t)x1 << 32) 
| x0) >> 26) & reduce_mask_25; + out[2] = ((((uint64_t)x2 << 32) | x1) >> 19) & reduce_mask_26; + out[3] = ((((uint64_t)x3 << 32) | x2) >> 13) & reduce_mask_25; + out[4] = (( x3) >> 6) & reduce_mask_26; + out[5] = ( x4 ) & reduce_mask_25; + out[6] = ((((uint64_t)x5 << 32) | x4) >> 25) & reduce_mask_26; + out[7] = ((((uint64_t)x6 << 32) | x5) >> 19) & reduce_mask_25; + out[8] = ((((uint64_t)x7 << 32) | x6) >> 12) & reduce_mask_26; + out[9] = (( x7) >> 6) & reduce_mask_25; /* ignore the top bit */ } /* Take a fully reduced polynomial form number and contract it into a @@ -526,7 +541,6 @@ curve25519_contract(unsigned char out[32], const bignum25519 in) { #undef F } - /* out = (flag) ? in : out */ DONNA_INLINE static void curve25519_move_conditional_bytes(uint8_t out[96], const uint8_t in[96], uint32_t flag) { diff --git a/curve25519-donna/curve25519-donna-scalarmult-base.h b/ed25519-donna/curve25519-donna-scalarmult-base.h similarity index 100% rename from curve25519-donna/curve25519-donna-scalarmult-base.h rename to ed25519-donna/curve25519-donna-scalarmult-base.h diff --git a/ed25519-donna/curve25519-donna.h b/ed25519-donna/curve25519-donna.h new file mode 100644 index 000000000..feba4c6c8 --- /dev/null +++ b/ed25519-donna/curve25519-donna.h @@ -0,0 +1,9 @@ +#include "curve25519.h" + +#include "ed25519-donna-portable.h" + +#include "curve25519-donna-32bit.h" + +#include "curve25519-donna-helpers.h" + +#include "curve25519-donna-scalarmult-base.h" diff --git a/curve25519-donna/curve25519.c b/ed25519-donna/curve25519.c similarity index 100% rename from curve25519-donna/curve25519.c rename to ed25519-donna/curve25519.c diff --git a/curve25519-donna/curve25519.h b/ed25519-donna/curve25519.h similarity index 100% rename from curve25519-donna/curve25519.h rename to ed25519-donna/curve25519.h
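Note on the deleted SSE2 path: `curve25519_swap_conditional` above is the vectorized form of the standard branch-free XOR-mask swap used by Montgomery-ladder implementations, where a secret scalar bit must select between two working point pairs without branching. The 0/1 `iswap` bit is widened into an all-zeros or all-ones mask, and the masked XOR of the two operands is folded back into both, so the executed instruction sequence is identical either way. A minimal scalar sketch of the same idiom (plain `uint32_t` limbs and an illustrative function name, not the library's API; the in-tree versions operate on aligned `bignum25519` arrays, four limbs per `xmmi` register):

    #include <stdint.h>

    /* Swap a and b in place when iswap is 1; leave both untouched when it
       is 0. The same loads, XORs, and stores execute in both cases, so the
       secret bit never influences control flow or memory access pattern. */
    static void
    swap_conditional_sketch(uint32_t a[10], uint32_t b[10], uint32_t iswap) {
        const uint32_t mask = (uint32_t)(-(int32_t)iswap); /* 0x0 or 0xffffffff */
        int i;
        for (i = 0; i < 10; i++) {
            const uint32_t x = mask & (a[i] ^ b[i]); /* a[i]^b[i] iff swapping */
            a[i] ^= x;
            b[i] ^= x;
        }
    }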
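Note on the new `curve25519_scalar_product` and the `reduce_mask_26`/`reduce_mask_25` cleanup: `bignum25519` stores a field element in ten limbs of alternating 26- and 25-bit width (radix 2^25.5), so the carry out of limb 9 has weight 2^255. Because 2^255 mod (2^255 - 19) = 19, that final carry re-enters limb 0 multiplied by 19, which is the `out[0] += c * 19;` step closing the carry chain. A small self-checking sketch of the weight schedule (assumes only the limb layout stated in the header):

    #include <assert.h>

    int main(void) {
        /* even-indexed limbs hold 26 bits, odd-indexed limbs hold 25 bits */
        int pos = 0, i;
        for (i = 0; i < 10; i++)
            pos += (i & 1) ? 25 : 26;
        /* the carry out of limb 9 therefore has weight 2^255, and since
           2^255 = (2^255 - 19) + 19 it reduces to 19 modulo the field prime */
        assert(pos == 255);
        return 0;
    }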