diff --git a/Makefile b/Makefile
index c62447b171..252a06f22f 100644
--- a/Makefile
+++ b/Makefile
@@ -46,7 +46,7 @@ SRCS += sha2.c
SRCS += sha3.c
SRCS += aescrypt.c aeskey.c aestab.c aes_modes.c
SRCS += ed25519-donna/ed25519.c
-SRCS += curve25519-donna/curve25519-donna.c
+SRCS += curve25519-donna/curve25519.c
OBJS = $(SRCS:.c=.o)
@@ -62,7 +62,7 @@ tests: tests.o $(OBJS)
$(CC) tests.o $(OBJS) $(TESTLIBS) -o tests
test_speed: test_speed.o $(OBJS)
- $(CC) test_speed.o $(OBJS) $(TESTLIBS) -o test_speed
+ $(CC) test_speed.o $(OBJS) -o test_speed
test-openssl: test-openssl.o $(OBJS)
$(CC) test-openssl.o $(OBJS) $(TESTSSLLIBS) -o test-openssl
diff --git a/bip32.c b/bip32.c
index a4d22e72b4..19d45402d5 100644
--- a/bip32.c
+++ b/bip32.c
@@ -37,7 +37,7 @@
#include "secp256k1.h"
#include "nist256p1.h"
#include "ed25519.h"
-#include "curve25519-donna.h"
+#include "curve25519.h"
#if USE_ETHEREUM
#include "sha3.h"
#endif
@@ -400,7 +400,7 @@ void hdnode_fill_public_key(HDNode *node)
ed25519_publickey(node->private_key, node->public_key + 1);
} else if (node->curve == &curve25519_info) {
node->public_key[0] = 1;
- curve25519_publickey(node->public_key + 1, node->private_key);
+ curve25519_donna_basepoint(node->public_key + 1, node->private_key);
} else {
ecdsa_get_public_key33(node->curve->params, node->private_key, node->public_key);
}
@@ -466,7 +466,7 @@ int hdnode_get_shared_key(const HDNode *node, const uint8_t *peer_public_key, ui
if (peer_public_key[0] != 0x40) {
return 1; // Curve25519 public key should start with 0x40 byte.
}
- curve25519_scalarmult(session_key + 1, node->private_key, peer_public_key + 1);
+ curve25519_donna(session_key + 1, node->private_key, peer_public_key + 1);
*result_size = 33;
return 0;
} else {
diff --git a/curve25519-donna/README.md b/curve25519-donna/README.md
new file mode 100644
index 0000000000..4d7dfb7af9
--- /dev/null
+++ b/curve25519-donna/README.md
@@ -0,0 +1,107 @@
+[curve25519](http://cr.yp.to/ecdh.html) is an elliptic curve, developed by
+[Dan Bernstein](http://cr.yp.to/djb.html), for fast
+[Diffie-Hellman](http://en.wikipedia.org/wiki/Diffie-Hellman) key agreement.
+DJB's [original implementation](http://cr.yp.to/ecdh.html) was written in a
+language of his own devising called [qhasm](http://cr.yp.to/qhasm.html).
+The original qhasm source isn't available, only the x86 32-bit assembly output.
+
+This project provides performant, portable 32-bit & 64-bit implementations.
+All implementations are of course constant time in regard to secret data.
+
+#### Performance
+
+Compiler versions are gcc 4.6.3, icc 13.1.1, and clang 3.4-1~exp1.
+
+Counts are in thousands of cycles.
+
+Note that SSE2 performance may be less impressive on AMD & older CPUs with slower SSE ops!
+
+##### E5200 @ 2.5 GHz, march=core2
+
+Version     | gcc   | icc  | clang |
+------------|-------|------|-------|
+64-bit SSE2 | 278k  | 265k | 302k  |
+64-bit      | 273k  | 271k | 377k  |
+32-bit SSE2 | 304k  | 289k | 317k  |
+32-bit      | 1417k | 845k | 981k  |
+
+
+
+##### E3-1270 @ 3.4 GHz, march=corei7-avx
+
+Version     | gcc   | icc  | clang |
+------------|-------|------|-------|
+64-bit      | 201k  | 192k | 233k  |
+64-bit SSE2 | 201k  | 201k | 261k  |
+32-bit SSE2 | 238k  | 225k | 250k  |
+32-bit      | 1293k | 822k | 848k  |
+
+
+
+#### Compilation
+
+No configuration is needed.
+
+##### 32-bit
+
+ gcc curve25519.c -m32 -O3 -c
+
+##### 64-bit
+
+ gcc curve25519.c -m64 -O3 -c
+
+##### SSE2
+
+ gcc curve25519.c -m32 -O3 -c -DCURVE25519_SSE2 -msse2
+ gcc curve25519.c -m64 -O3 -c -DCURVE25519_SSE2
+
+clang, icc, and MSVC are also supported.
+
+##### Named Versions
+
+Define `CURVE25519_SUFFIX` to append a suffix to the public functions, e.g.
+`-DCURVE25519_SUFFIX=_sse2` to create `curve25519_donna_sse2` and
+`curve25519_donna_basepoint_sse2`.
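+
+For example, a second object built with `-DCURVE25519_SUFFIX=_sse2` can be linked
+alongside the default build. An illustrative sketch of the resulting prototypes
+(the unsuffixed ones come from `curve25519.h`):
+
+    /* sketch: prototypes produced by CURVE25519_SUFFIX=_sse2 (illustrative) */
+    void curve25519_donna_sse2(curve25519_key mypublic, const curve25519_key secret, const curve25519_key basepoint);
+    void curve25519_donna_basepoint_sse2(curve25519_key mypublic, const curve25519_key secret);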
+
+#### Usage
+
+To use the code, link against `curve25519.o` and:
+
+ #include "curve25519.h"
+
+To create a private/secret key, generate 32 cryptographically random bytes:
+
+ curve25519_key sk;
+ randombytes(sk, sizeof(curve25519_key));
+
+Manual clamping is not needed; in fact, unclamped keys cannot be used, because the
+code takes advantage of the clamped bits internally.
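+
+For reference, the clamping the code relies on internally is the standard X25519 mask;
+it is shown here purely for illustration, you never need to apply it yourself:
+
+    /* illustration only: the library already accounts for these bits */
+    sk[0] &= 248;
+    sk[31] &= 127;
+    sk[31] |= 64;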
+
+To generate the public key from the private/secret key:
+
+ curve25519_key pk;
+ curve25519_donna_basepoint(pk, sk);
+
+To generate a shared key with your private/secret key and someone else's public key:
+
+ curve25519_key shared;
+ curve25519_donna(shared, mysk, yourpk);
+
+Hash `shared` with a cryptographic hash before using it, or, for example, pass `shared`
+through HSalsa20/HChaCha as NaCl does.
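+
+A minimal sketch of the hashing step, assuming the SHA-256 implementation already in
+this tree (`sha2.h`, `sha256_Raw`); any cryptographic hash or an HSalsa20/HChaCha-style
+extractor works as well:
+
+    #include "sha2.h"
+
+    uint8_t key[32];
+    curve25519_donna(shared, mysk, yourpk);
+    sha256_Raw(shared, sizeof(curve25519_key), key);  /* use key, never the raw shared secret */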
+
+#### Testing
+
+Fuzzing against a reference implementation is now available. See [fuzz/README](fuzz/README.md).
+
+Building `curve25519.c` and linking with `test.c` will run basic sanity tests and benchmark `curve25519_donna`.
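+
+For example, one possible invocation (output name is arbitrary):
+
+    gcc curve25519.c test.c -O3 -o curve25519-test
+    ./curve25519-test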
+
+#### Papers
+
+[djb's curve25519 paper](http://cr.yp.to/ecdh/curve25519-20060209.pdf)
+
+#### License
+
+Public Domain, or MIT
\ No newline at end of file
diff --git a/curve25519-donna/curve25519-donna-32bit.h b/curve25519-donna/curve25519-donna-32bit.h
new file mode 100644
index 0000000000..5ef91a2022
--- /dev/null
+++ b/curve25519-donna/curve25519-donna-32bit.h
@@ -0,0 +1,466 @@
+typedef uint32_t bignum25519[10];
+
+static const uint32_t reduce_mask_26 = (1 << 26) - 1;
+static const uint32_t reduce_mask_25 = (1 << 25) - 1;
+
+/* out = in */
+DONNA_INLINE static void
+curve25519_copy(bignum25519 out, const bignum25519 in) {
+ out[0] = in[0];
+ out[1] = in[1];
+ out[2] = in[2];
+ out[3] = in[3];
+ out[4] = in[4];
+ out[5] = in[5];
+ out[6] = in[6];
+ out[7] = in[7];
+ out[8] = in[8];
+ out[9] = in[9];
+}
+
+/* out = a + b */
+DONNA_INLINE static void
+curve25519_add(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+ out[0] = a[0] + b[0];
+ out[1] = a[1] + b[1];
+ out[2] = a[2] + b[2];
+ out[3] = a[3] + b[3];
+ out[4] = a[4] + b[4];
+ out[5] = a[5] + b[5];
+ out[6] = a[6] + b[6];
+ out[7] = a[7] + b[7];
+ out[8] = a[8] + b[8];
+ out[9] = a[9] + b[9];
+}
+
+/* out = a - b (a multiple of p is added first so the limbs never underflow) */
+DONNA_INLINE static void
+curve25519_sub(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+ uint32_t c;
+ out[0] = 0x7ffffda + a[0] - b[0] ; c = (out[0] >> 26); out[0] &= reduce_mask_26;
+ out[1] = 0x3fffffe + a[1] - b[1] + c; c = (out[1] >> 25); out[1] &= reduce_mask_25;
+ out[2] = 0x7fffffe + a[2] - b[2] + c; c = (out[2] >> 26); out[2] &= reduce_mask_26;
+ out[3] = 0x3fffffe + a[3] - b[3] + c; c = (out[3] >> 25); out[3] &= reduce_mask_25;
+ out[4] = 0x7fffffe + a[4] - b[4] + c; c = (out[4] >> 26); out[4] &= reduce_mask_26;
+ out[5] = 0x3fffffe + a[5] - b[5] + c; c = (out[5] >> 25); out[5] &= reduce_mask_25;
+ out[6] = 0x7fffffe + a[6] - b[6] + c; c = (out[6] >> 26); out[6] &= reduce_mask_26;
+ out[7] = 0x3fffffe + a[7] - b[7] + c; c = (out[7] >> 25); out[7] &= reduce_mask_25;
+ out[8] = 0x7fffffe + a[8] - b[8] + c; c = (out[8] >> 26); out[8] &= reduce_mask_26;
+ out[9] = 0x3fffffe + a[9] - b[9] + c; c = (out[9] >> 25); out[9] &= reduce_mask_25;
+ out[0] += 19 * c;
+}
+
+/* out = in * scalar */
+DONNA_INLINE static void
+curve25519_scalar_product(bignum25519 out, const bignum25519 in, const uint32_t scalar) {
+ uint64_t a;
+ uint32_t c;
+ a = mul32x32_64(in[0], scalar); out[0] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26);
+ a = mul32x32_64(in[1], scalar) + c; out[1] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25);
+ a = mul32x32_64(in[2], scalar) + c; out[2] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26);
+ a = mul32x32_64(in[3], scalar) + c; out[3] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25);
+ a = mul32x32_64(in[4], scalar) + c; out[4] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26);
+ a = mul32x32_64(in[5], scalar) + c; out[5] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25);
+ a = mul32x32_64(in[6], scalar) + c; out[6] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26);
+ a = mul32x32_64(in[7], scalar) + c; out[7] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25);
+ a = mul32x32_64(in[8], scalar) + c; out[8] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26);
+ a = mul32x32_64(in[9], scalar) + c; out[9] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25);
+ out[0] += c * 19;
+}
+
+/* out = a * b */
+DONNA_INLINE static void
+curve25519_mul(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+ uint32_t r0,r1,r2,r3,r4,r5,r6,r7,r8,r9;
+ uint32_t s0,s1,s2,s3,s4,s5,s6,s7,s8,s9;
+ uint64_t m0,m1,m2,m3,m4,m5,m6,m7,m8,m9,c;
+ uint32_t p;
+
+ r0 = b[0];
+ r1 = b[1];
+ r2 = b[2];
+ r3 = b[3];
+ r4 = b[4];
+ r5 = b[5];
+ r6 = b[6];
+ r7 = b[7];
+ r8 = b[8];
+ r9 = b[9];
+
+ s0 = a[0];
+ s1 = a[1];
+ s2 = a[2];
+ s3 = a[3];
+ s4 = a[4];
+ s5 = a[5];
+ s6 = a[6];
+ s7 = a[7];
+ s8 = a[8];
+ s9 = a[9];
+
+ m1 = mul32x32_64(r0, s1) + mul32x32_64(r1, s0);
+ m3 = mul32x32_64(r0, s3) + mul32x32_64(r1, s2) + mul32x32_64(r2, s1) + mul32x32_64(r3, s0);
+ m5 = mul32x32_64(r0, s5) + mul32x32_64(r1, s4) + mul32x32_64(r2, s3) + mul32x32_64(r3, s2) + mul32x32_64(r4, s1) + mul32x32_64(r5, s0);
+ m7 = mul32x32_64(r0, s7) + mul32x32_64(r1, s6) + mul32x32_64(r2, s5) + mul32x32_64(r3, s4) + mul32x32_64(r4, s3) + mul32x32_64(r5, s2) + mul32x32_64(r6, s1) + mul32x32_64(r7, s0);
+ m9 = mul32x32_64(r0, s9) + mul32x32_64(r1, s8) + mul32x32_64(r2, s7) + mul32x32_64(r3, s6) + mul32x32_64(r4, s5) + mul32x32_64(r5, s4) + mul32x32_64(r6, s3) + mul32x32_64(r7, s2) + mul32x32_64(r8, s1) + mul32x32_64(r9, s0);
+
+ r1 *= 2;
+ r3 *= 2;
+ r5 *= 2;
+ r7 *= 2;
+
+ m0 = mul32x32_64(r0, s0);
+ m2 = mul32x32_64(r0, s2) + mul32x32_64(r1, s1) + mul32x32_64(r2, s0);
+ m4 = mul32x32_64(r0, s4) + mul32x32_64(r1, s3) + mul32x32_64(r2, s2) + mul32x32_64(r3, s1) + mul32x32_64(r4, s0);
+ m6 = mul32x32_64(r0, s6) + mul32x32_64(r1, s5) + mul32x32_64(r2, s4) + mul32x32_64(r3, s3) + mul32x32_64(r4, s2) + mul32x32_64(r5, s1) + mul32x32_64(r6, s0);
+ m8 = mul32x32_64(r0, s8) + mul32x32_64(r1, s7) + mul32x32_64(r2, s6) + mul32x32_64(r3, s5) + mul32x32_64(r4, s4) + mul32x32_64(r5, s3) + mul32x32_64(r6, s2) + mul32x32_64(r7, s1) + mul32x32_64(r8, s0);
+
+ r1 *= 19;
+ r2 *= 19;
+ r3 = (r3 / 2) * 19;
+ r4 *= 19;
+ r5 = (r5 / 2) * 19;
+ r6 *= 19;
+ r7 = (r7 / 2) * 19;
+ r8 *= 19;
+ r9 *= 19;
+
+ m1 += (mul32x32_64(r9, s2) + mul32x32_64(r8, s3) + mul32x32_64(r7, s4) + mul32x32_64(r6, s5) + mul32x32_64(r5, s6) + mul32x32_64(r4, s7) + mul32x32_64(r3, s8) + mul32x32_64(r2, s9));
+ m3 += (mul32x32_64(r9, s4) + mul32x32_64(r8, s5) + mul32x32_64(r7, s6) + mul32x32_64(r6, s7) + mul32x32_64(r5, s8) + mul32x32_64(r4, s9));
+ m5 += (mul32x32_64(r9, s6) + mul32x32_64(r8, s7) + mul32x32_64(r7, s8) + mul32x32_64(r6, s9));
+ m7 += (mul32x32_64(r9, s8) + mul32x32_64(r8, s9));
+
+ r3 *= 2;
+ r5 *= 2;
+ r7 *= 2;
+ r9 *= 2;
+
+ m0 += (mul32x32_64(r9, s1) + mul32x32_64(r8, s2) + mul32x32_64(r7, s3) + mul32x32_64(r6, s4) + mul32x32_64(r5, s5) + mul32x32_64(r4, s6) + mul32x32_64(r3, s7) + mul32x32_64(r2, s8) + mul32x32_64(r1, s9));
+ m2 += (mul32x32_64(r9, s3) + mul32x32_64(r8, s4) + mul32x32_64(r7, s5) + mul32x32_64(r6, s6) + mul32x32_64(r5, s7) + mul32x32_64(r4, s8) + mul32x32_64(r3, s9));
+ m4 += (mul32x32_64(r9, s5) + mul32x32_64(r8, s6) + mul32x32_64(r7, s7) + mul32x32_64(r6, s8) + mul32x32_64(r5, s9));
+ m6 += (mul32x32_64(r9, s7) + mul32x32_64(r8, s8) + mul32x32_64(r7, s9));
+ m8 += (mul32x32_64(r9, s9));
+
+ r0 = (uint32_t)m0 & reduce_mask_26; c = (m0 >> 26);
+ m1 += c; r1 = (uint32_t)m1 & reduce_mask_25; c = (m1 >> 25);
+ m2 += c; r2 = (uint32_t)m2 & reduce_mask_26; c = (m2 >> 26);
+ m3 += c; r3 = (uint32_t)m3 & reduce_mask_25; c = (m3 >> 25);
+ m4 += c; r4 = (uint32_t)m4 & reduce_mask_26; c = (m4 >> 26);
+ m5 += c; r5 = (uint32_t)m5 & reduce_mask_25; c = (m5 >> 25);
+ m6 += c; r6 = (uint32_t)m6 & reduce_mask_26; c = (m6 >> 26);
+ m7 += c; r7 = (uint32_t)m7 & reduce_mask_25; c = (m7 >> 25);
+ m8 += c; r8 = (uint32_t)m8 & reduce_mask_26; c = (m8 >> 26);
+ m9 += c; r9 = (uint32_t)m9 & reduce_mask_25; p = (uint32_t)(m9 >> 25);
+ m0 = r0 + mul32x32_64(p,19); r0 = (uint32_t)m0 & reduce_mask_26; p = (uint32_t)(m0 >> 26);
+ r1 += p;
+
+ out[0] = r0;
+ out[1] = r1;
+ out[2] = r2;
+ out[3] = r3;
+ out[4] = r4;
+ out[5] = r5;
+ out[6] = r6;
+ out[7] = r7;
+ out[8] = r8;
+ out[9] = r9;
+}
+
+/* out = in * in */
+DONNA_INLINE static void
+curve25519_square(bignum25519 out, const bignum25519 in) {
+ uint32_t r0,r1,r2,r3,r4,r5,r6,r7,r8,r9;
+ uint32_t d6,d7,d8,d9;
+ uint64_t m0,m1,m2,m3,m4,m5,m6,m7,m8,m9,c;
+ uint32_t p;
+
+ r0 = in[0];
+ r1 = in[1];
+ r2 = in[2];
+ r3 = in[3];
+ r4 = in[4];
+ r5 = in[5];
+ r6 = in[6];
+ r7 = in[7];
+ r8 = in[8];
+ r9 = in[9];
+
+
+ m0 = mul32x32_64(r0, r0);
+ r0 *= 2;
+ m1 = mul32x32_64(r0, r1);
+ m2 = mul32x32_64(r0, r2) + mul32x32_64(r1, r1 * 2);
+ r1 *= 2;
+ m3 = mul32x32_64(r0, r3) + mul32x32_64(r1, r2 );
+ m4 = mul32x32_64(r0, r4) + mul32x32_64(r1, r3 * 2) + mul32x32_64(r2, r2);
+ r2 *= 2;
+ m5 = mul32x32_64(r0, r5) + mul32x32_64(r1, r4 ) + mul32x32_64(r2, r3);
+ m6 = mul32x32_64(r0, r6) + mul32x32_64(r1, r5 * 2) + mul32x32_64(r2, r4) + mul32x32_64(r3, r3 * 2);
+ r3 *= 2;
+ m7 = mul32x32_64(r0, r7) + mul32x32_64(r1, r6 ) + mul32x32_64(r2, r5) + mul32x32_64(r3, r4 );
+ m8 = mul32x32_64(r0, r8) + mul32x32_64(r1, r7 * 2) + mul32x32_64(r2, r6) + mul32x32_64(r3, r5 * 2) + mul32x32_64(r4, r4 );
+ m9 = mul32x32_64(r0, r9) + mul32x32_64(r1, r8 ) + mul32x32_64(r2, r7) + mul32x32_64(r3, r6 ) + mul32x32_64(r4, r5 * 2);
+
+ d6 = r6 * 19;
+ d7 = r7 * 2 * 19;
+ d8 = r8 * 19;
+ d9 = r9 * 2 * 19;
+
+ m0 += (mul32x32_64(d9, r1 ) + mul32x32_64(d8, r2 ) + mul32x32_64(d7, r3 ) + mul32x32_64(d6, r4 * 2) + mul32x32_64(r5, r5 * 2 * 19));
+ m1 += (mul32x32_64(d9, r2 / 2) + mul32x32_64(d8, r3 ) + mul32x32_64(d7, r4 ) + mul32x32_64(d6, r5 * 2));
+ m2 += (mul32x32_64(d9, r3 ) + mul32x32_64(d8, r4 * 2) + mul32x32_64(d7, r5 * 2) + mul32x32_64(d6, r6 ));
+ m3 += (mul32x32_64(d9, r4 ) + mul32x32_64(d8, r5 * 2) + mul32x32_64(d7, r6 ));
+ m4 += (mul32x32_64(d9, r5 * 2) + mul32x32_64(d8, r6 * 2) + mul32x32_64(d7, r7 ));
+ m5 += (mul32x32_64(d9, r6 ) + mul32x32_64(d8, r7 * 2));
+ m6 += (mul32x32_64(d9, r7 * 2) + mul32x32_64(d8, r8 ));
+ m7 += (mul32x32_64(d9, r8 ));
+ m8 += (mul32x32_64(d9, r9 ));
+
+ r0 = (uint32_t)m0 & reduce_mask_26; c = (m0 >> 26);
+ m1 += c; r1 = (uint32_t)m1 & reduce_mask_25; c = (m1 >> 25);
+ m2 += c; r2 = (uint32_t)m2 & reduce_mask_26; c = (m2 >> 26);
+ m3 += c; r3 = (uint32_t)m3 & reduce_mask_25; c = (m3 >> 25);
+ m4 += c; r4 = (uint32_t)m4 & reduce_mask_26; c = (m4 >> 26);
+ m5 += c; r5 = (uint32_t)m5 & reduce_mask_25; c = (m5 >> 25);
+ m6 += c; r6 = (uint32_t)m6 & reduce_mask_26; c = (m6 >> 26);
+ m7 += c; r7 = (uint32_t)m7 & reduce_mask_25; c = (m7 >> 25);
+ m8 += c; r8 = (uint32_t)m8 & reduce_mask_26; c = (m8 >> 26);
+ m9 += c; r9 = (uint32_t)m9 & reduce_mask_25; p = (uint32_t)(m9 >> 25);
+ m0 = r0 + mul32x32_64(p,19); r0 = (uint32_t)m0 & reduce_mask_26; p = (uint32_t)(m0 >> 26);
+ r1 += p;
+
+ out[0] = r0;
+ out[1] = r1;
+ out[2] = r2;
+ out[3] = r3;
+ out[4] = r4;
+ out[5] = r5;
+ out[6] = r6;
+ out[7] = r7;
+ out[8] = r8;
+ out[9] = r9;
+}
+
+/* out = in^(2^count), i.e. square repeatedly, count times */
+static void
+curve25519_square_times(bignum25519 out, const bignum25519 in, int count) {
+ uint32_t r0,r1,r2,r3,r4,r5,r6,r7,r8,r9;
+ uint32_t d6,d7,d8,d9;
+ uint64_t m0,m1,m2,m3,m4,m5,m6,m7,m8,m9,c;
+ uint32_t p;
+
+ r0 = in[0];
+ r1 = in[1];
+ r2 = in[2];
+ r3 = in[3];
+ r4 = in[4];
+ r5 = in[5];
+ r6 = in[6];
+ r7 = in[7];
+ r8 = in[8];
+ r9 = in[9];
+
+ do {
+ m0 = mul32x32_64(r0, r0);
+ r0 *= 2;
+ m1 = mul32x32_64(r0, r1);
+ m2 = mul32x32_64(r0, r2) + mul32x32_64(r1, r1 * 2);
+ r1 *= 2;
+ m3 = mul32x32_64(r0, r3) + mul32x32_64(r1, r2 );
+ m4 = mul32x32_64(r0, r4) + mul32x32_64(r1, r3 * 2) + mul32x32_64(r2, r2);
+ r2 *= 2;
+ m5 = mul32x32_64(r0, r5) + mul32x32_64(r1, r4 ) + mul32x32_64(r2, r3);
+ m6 = mul32x32_64(r0, r6) + mul32x32_64(r1, r5 * 2) + mul32x32_64(r2, r4) + mul32x32_64(r3, r3 * 2);
+ r3 *= 2;
+ m7 = mul32x32_64(r0, r7) + mul32x32_64(r1, r6 ) + mul32x32_64(r2, r5) + mul32x32_64(r3, r4 );
+ m8 = mul32x32_64(r0, r8) + mul32x32_64(r1, r7 * 2) + mul32x32_64(r2, r6) + mul32x32_64(r3, r5 * 2) + mul32x32_64(r4, r4 );
+ m9 = mul32x32_64(r0, r9) + mul32x32_64(r1, r8 ) + mul32x32_64(r2, r7) + mul32x32_64(r3, r6 ) + mul32x32_64(r4, r5 * 2);
+
+ d6 = r6 * 19;
+ d7 = r7 * 2 * 19;
+ d8 = r8 * 19;
+ d9 = r9 * 2 * 19;
+
+ m0 += (mul32x32_64(d9, r1 ) + mul32x32_64(d8, r2 ) + mul32x32_64(d7, r3 ) + mul32x32_64(d6, r4 * 2) + mul32x32_64(r5, r5 * 2 * 19));
+ m1 += (mul32x32_64(d9, r2 / 2) + mul32x32_64(d8, r3 ) + mul32x32_64(d7, r4 ) + mul32x32_64(d6, r5 * 2));
+ m2 += (mul32x32_64(d9, r3 ) + mul32x32_64(d8, r4 * 2) + mul32x32_64(d7, r5 * 2) + mul32x32_64(d6, r6 ));
+ m3 += (mul32x32_64(d9, r4 ) + mul32x32_64(d8, r5 * 2) + mul32x32_64(d7, r6 ));
+ m4 += (mul32x32_64(d9, r5 * 2) + mul32x32_64(d8, r6 * 2) + mul32x32_64(d7, r7 ));
+ m5 += (mul32x32_64(d9, r6 ) + mul32x32_64(d8, r7 * 2));
+ m6 += (mul32x32_64(d9, r7 * 2) + mul32x32_64(d8, r8 ));
+ m7 += (mul32x32_64(d9, r8 ));
+ m8 += (mul32x32_64(d9, r9 ));
+
+ r0 = (uint32_t)m0 & reduce_mask_26; c = (m0 >> 26);
+ m1 += c; r1 = (uint32_t)m1 & reduce_mask_25; c = (m1 >> 25);
+ m2 += c; r2 = (uint32_t)m2 & reduce_mask_26; c = (m2 >> 26);
+ m3 += c; r3 = (uint32_t)m3 & reduce_mask_25; c = (m3 >> 25);
+ m4 += c; r4 = (uint32_t)m4 & reduce_mask_26; c = (m4 >> 26);
+ m5 += c; r5 = (uint32_t)m5 & reduce_mask_25; c = (m5 >> 25);
+ m6 += c; r6 = (uint32_t)m6 & reduce_mask_26; c = (m6 >> 26);
+ m7 += c; r7 = (uint32_t)m7 & reduce_mask_25; c = (m7 >> 25);
+ m8 += c; r8 = (uint32_t)m8 & reduce_mask_26; c = (m8 >> 26);
+ m9 += c; r9 = (uint32_t)m9 & reduce_mask_25; p = (uint32_t)(m9 >> 25);
+ m0 = r0 + mul32x32_64(p,19); r0 = (uint32_t)m0 & reduce_mask_26; p = (uint32_t)(m0 >> 26);
+ r1 += p;
+ } while (--count);
+
+ out[0] = r0;
+ out[1] = r1;
+ out[2] = r2;
+ out[3] = r3;
+ out[4] = r4;
+ out[5] = r5;
+ out[6] = r6;
+ out[7] = r7;
+ out[8] = r8;
+ out[9] = r9;
+}
+
+
+/* Take a little-endian, 32-byte number and expand it into polynomial form */
+static void
+curve25519_expand(bignum25519 out, const unsigned char in[32]) {
+ static const union { uint8_t b[2]; uint16_t s; } endian_check = {{1,0}};
+ uint32_t x0,x1,x2,x3,x4,x5,x6,x7;
+
+ if (endian_check.s == 1) {
+ x0 = *(uint32_t *)(in + 0);
+ x1 = *(uint32_t *)(in + 4);
+ x2 = *(uint32_t *)(in + 8);
+ x3 = *(uint32_t *)(in + 12);
+ x4 = *(uint32_t *)(in + 16);
+ x5 = *(uint32_t *)(in + 20);
+ x6 = *(uint32_t *)(in + 24);
+ x7 = *(uint32_t *)(in + 28);
+ } else {
+ #define F(s) \
+ ((((uint32_t)in[s + 0]) ) | \
+ (((uint32_t)in[s + 1]) << 8) | \
+ (((uint32_t)in[s + 2]) << 16) | \
+ (((uint32_t)in[s + 3]) << 24))
+ x0 = F(0);
+ x1 = F(4);
+ x2 = F(8);
+ x3 = F(12);
+ x4 = F(16);
+ x5 = F(20);
+ x6 = F(24);
+ x7 = F(28);
+ #undef F
+ }
+
+ out[0] = ( x0 ) & reduce_mask_26;
+ out[1] = ((((uint64_t)x1 << 32) | x0) >> 26) & reduce_mask_25;
+ out[2] = ((((uint64_t)x2 << 32) | x1) >> 19) & reduce_mask_26;
+ out[3] = ((((uint64_t)x3 << 32) | x2) >> 13) & reduce_mask_25;
+ out[4] = (( x3) >> 6) & reduce_mask_26;
+ out[5] = ( x4 ) & reduce_mask_25;
+ out[6] = ((((uint64_t)x5 << 32) | x4) >> 25) & reduce_mask_26;
+ out[7] = ((((uint64_t)x6 << 32) | x5) >> 19) & reduce_mask_25;
+ out[8] = ((((uint64_t)x7 << 32) | x6) >> 12) & reduce_mask_26;
+ out[9] = (( x7) >> 6) & reduce_mask_25; /* ignore the top bit */
+}
+
+/* Take a fully reduced polynomial form number and contract it into a little-endian, 32-byte array */
+static void
+curve25519_contract(unsigned char out[32], const bignum25519 in) {
+ bignum25519 f;
+ curve25519_copy(f, in);
+
+ #define carry_pass() \
+ f[1] += f[0] >> 26; f[0] &= reduce_mask_26; \
+ f[2] += f[1] >> 25; f[1] &= reduce_mask_25; \
+ f[3] += f[2] >> 26; f[2] &= reduce_mask_26; \
+ f[4] += f[3] >> 25; f[3] &= reduce_mask_25; \
+ f[5] += f[4] >> 26; f[4] &= reduce_mask_26; \
+ f[6] += f[5] >> 25; f[5] &= reduce_mask_25; \
+ f[7] += f[6] >> 26; f[6] &= reduce_mask_26; \
+ f[8] += f[7] >> 25; f[7] &= reduce_mask_25; \
+ f[9] += f[8] >> 26; f[8] &= reduce_mask_26;
+
+ #define carry_pass_full() \
+ carry_pass() \
+ f[0] += 19 * (f[9] >> 25); f[9] &= reduce_mask_25;
+
+ #define carry_pass_final() \
+ carry_pass() \
+ f[9] &= reduce_mask_25;
+
+ carry_pass_full()
+ carry_pass_full()
+
+ /* now t is between 0 and 2^255-1, properly carried. */
+ /* case 1: between 0 and 2^255-20. case 2: between 2^255-19 and 2^255-1. */
+ f[0] += 19;
+ carry_pass_full()
+
+ /* now between 19 and 2^255-1 in both cases, and offset by 19. */
+ f[0] += (1 << 26) - 19;
+ f[1] += (1 << 25) - 1;
+ f[2] += (1 << 26) - 1;
+ f[3] += (1 << 25) - 1;
+ f[4] += (1 << 26) - 1;
+ f[5] += (1 << 25) - 1;
+ f[6] += (1 << 26) - 1;
+ f[7] += (1 << 25) - 1;
+ f[8] += (1 << 26) - 1;
+ f[9] += (1 << 25) - 1;
+
+ /* now between 2^255 and 2^256-20, and offset by 2^255. */
+ carry_pass_final()
+
+ #undef carry_pass
+ #undef carry_pass_full
+ #undef carry_pass_final
+
+ f[1] <<= 2;
+ f[2] <<= 3;
+ f[3] <<= 5;
+ f[4] <<= 6;
+ f[6] <<= 1;
+ f[7] <<= 3;
+ f[8] <<= 4;
+ f[9] <<= 6;
+
+ #define F(i, s) \
+ out[s+0] |= (unsigned char )(f[i] & 0xff); \
+ out[s+1] = (unsigned char )((f[i] >> 8) & 0xff); \
+ out[s+2] = (unsigned char )((f[i] >> 16) & 0xff); \
+ out[s+3] = (unsigned char )((f[i] >> 24) & 0xff);
+
+ out[0] = 0;
+ out[16] = 0;
+ F(0,0);
+ F(1,3);
+ F(2,6);
+ F(3,9);
+ F(4,12);
+ F(5,16);
+ F(6,19);
+ F(7,22);
+ F(8,25);
+ F(9,28);
+ #undef F
+}
+
+/*
+ * Swap the contents of [x] and [qpx] iff @iswap is non-zero
+ */
+DONNA_INLINE static void
+curve25519_swap_conditional(bignum25519 x, bignum25519 qpx, uint32_t iswap) {
+ const uint32_t swap = (uint32_t)(-(int32_t)iswap);
+ uint32_t x0,x1,x2,x3,x4,x5,x6,x7,x8,x9;
+
+ x0 = swap & (x[0] ^ qpx[0]); x[0] ^= x0; qpx[0] ^= x0;
+ x1 = swap & (x[1] ^ qpx[1]); x[1] ^= x1; qpx[1] ^= x1;
+ x2 = swap & (x[2] ^ qpx[2]); x[2] ^= x2; qpx[2] ^= x2;
+ x3 = swap & (x[3] ^ qpx[3]); x[3] ^= x3; qpx[3] ^= x3;
+ x4 = swap & (x[4] ^ qpx[4]); x[4] ^= x4; qpx[4] ^= x4;
+ x5 = swap & (x[5] ^ qpx[5]); x[5] ^= x5; qpx[5] ^= x5;
+ x6 = swap & (x[6] ^ qpx[6]); x[6] ^= x6; qpx[6] ^= x6;
+ x7 = swap & (x[7] ^ qpx[7]); x[7] ^= x7; qpx[7] ^= x7;
+ x8 = swap & (x[8] ^ qpx[8]); x[8] ^= x8; qpx[8] ^= x8;
+ x9 = swap & (x[9] ^ qpx[9]); x[9] ^= x9; qpx[9] ^= x9;
+}
+
diff --git a/curve25519-donna/curve25519-donna-64bit.h b/curve25519-donna/curve25519-donna-64bit.h
new file mode 100644
index 0000000000..ec4df526b4
--- /dev/null
+++ b/curve25519-donna/curve25519-donna-64bit.h
@@ -0,0 +1,345 @@
+typedef uint64_t bignum25519[5];
+
+static const uint64_t reduce_mask_51 = ((uint64_t)1 << 51) - 1;
+static const uint64_t reduce_mask_52 = ((uint64_t)1 << 52) - 1;
+
+/* out = in */
+DONNA_INLINE static void
+curve25519_copy(bignum25519 out, const bignum25519 in) {
+ out[0] = in[0];
+ out[1] = in[1];
+ out[2] = in[2];
+ out[3] = in[3];
+ out[4] = in[4];
+}
+
+/* out = a + b */
+DONNA_INLINE static void
+curve25519_add(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+ out[0] = a[0] + b[0];
+ out[1] = a[1] + b[1];
+ out[2] = a[2] + b[2];
+ out[3] = a[3] + b[3];
+ out[4] = a[4] + b[4];
+}
+
+static const uint64_t two54m152 = (((uint64_t)1) << 54) - 152;
+static const uint64_t two54m8 = (((uint64_t)1) << 54) - 8;
+
+/* out = a - b (a multiple of p is added first so the limbs never underflow) */
+DONNA_INLINE static void
+curve25519_sub(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+ out[0] = a[0] + two54m152 - b[0];
+ out[1] = a[1] + two54m8 - b[1];
+ out[2] = a[2] + two54m8 - b[2];
+ out[3] = a[3] + two54m8 - b[3];
+ out[4] = a[4] + two54m8 - b[4];
+}
+
+
+/* out = (in * scalar) */
+DONNA_INLINE static void
+curve25519_scalar_product(bignum25519 out, const bignum25519 in, const uint64_t scalar) {
+ uint128_t a;
+ uint64_t c;
+
+#if defined(HAVE_NATIVE_UINT128)
+ a = ((uint128_t) in[0]) * scalar; out[0] = (uint64_t)a & reduce_mask_51; c = (uint64_t)(a >> 51);
+ a = ((uint128_t) in[1]) * scalar + c; out[1] = (uint64_t)a & reduce_mask_51; c = (uint64_t)(a >> 51);
+ a = ((uint128_t) in[2]) * scalar + c; out[2] = (uint64_t)a & reduce_mask_51; c = (uint64_t)(a >> 51);
+ a = ((uint128_t) in[3]) * scalar + c; out[3] = (uint64_t)a & reduce_mask_51; c = (uint64_t)(a >> 51);
+ a = ((uint128_t) in[4]) * scalar + c; out[4] = (uint64_t)a & reduce_mask_51; c = (uint64_t)(a >> 51);
+ out[0] += c * 19;
+#else
+ mul64x64_128(a, in[0], scalar) out[0] = lo128(a) & reduce_mask_51; shr128(c, a, 51);
+ mul64x64_128(a, in[1], scalar) add128_64(a, c) out[1] = lo128(a) & reduce_mask_51; shr128(c, a, 51);
+ mul64x64_128(a, in[2], scalar) add128_64(a, c) out[2] = lo128(a) & reduce_mask_51; shr128(c, a, 51);
+ mul64x64_128(a, in[3], scalar) add128_64(a, c) out[3] = lo128(a) & reduce_mask_51; shr128(c, a, 51);
+ mul64x64_128(a, in[4], scalar) add128_64(a, c) out[4] = lo128(a) & reduce_mask_51; shr128(c, a, 51);
+ out[0] += c * 19;
+#endif
+}
+
+/* out = a * b */
+DONNA_INLINE static void
+curve25519_mul(bignum25519 out, const bignum25519 a, const bignum25519 b) {
+#if !defined(HAVE_NATIVE_UINT128)
+ uint128_t mul;
+#endif
+ uint128_t t[5];
+ uint64_t r0,r1,r2,r3,r4,s0,s1,s2,s3,s4,c;
+
+ r0 = b[0];
+ r1 = b[1];
+ r2 = b[2];
+ r3 = b[3];
+ r4 = b[4];
+
+ s0 = a[0];
+ s1 = a[1];
+ s2 = a[2];
+ s3 = a[3];
+ s4 = a[4];
+
+#if defined(HAVE_NATIVE_UINT128)
+ t[0] = ((uint128_t) r0) * s0;
+ t[1] = ((uint128_t) r0) * s1 + ((uint128_t) r1) * s0;
+ t[2] = ((uint128_t) r0) * s2 + ((uint128_t) r2) * s0 + ((uint128_t) r1) * s1;
+ t[3] = ((uint128_t) r0) * s3 + ((uint128_t) r3) * s0 + ((uint128_t) r1) * s2 + ((uint128_t) r2) * s1;
+ t[4] = ((uint128_t) r0) * s4 + ((uint128_t) r4) * s0 + ((uint128_t) r3) * s1 + ((uint128_t) r1) * s3 + ((uint128_t) r2) * s2;
+#else
+ mul64x64_128(t[0], r0, s0)
+ mul64x64_128(t[1], r0, s1) mul64x64_128(mul, r1, s0) add128(t[1], mul)
+ mul64x64_128(t[2], r0, s2) mul64x64_128(mul, r2, s0) add128(t[2], mul) mul64x64_128(mul, r1, s1) add128(t[2], mul)
+ mul64x64_128(t[3], r0, s3) mul64x64_128(mul, r3, s0) add128(t[3], mul) mul64x64_128(mul, r1, s2) add128(t[3], mul) mul64x64_128(mul, r2, s1) add128(t[3], mul)
+ mul64x64_128(t[4], r0, s4) mul64x64_128(mul, r4, s0) add128(t[4], mul) mul64x64_128(mul, r3, s1) add128(t[4], mul) mul64x64_128(mul, r1, s3) add128(t[4], mul) mul64x64_128(mul, r2, s2) add128(t[4], mul)
+#endif
+
+ r1 *= 19;
+ r2 *= 19;
+ r3 *= 19;
+ r4 *= 19;
+
+#if defined(HAVE_NATIVE_UINT128)
+ t[0] += ((uint128_t) r4) * s1 + ((uint128_t) r1) * s4 + ((uint128_t) r2) * s3 + ((uint128_t) r3) * s2;
+ t[1] += ((uint128_t) r4) * s2 + ((uint128_t) r2) * s4 + ((uint128_t) r3) * s3;
+ t[2] += ((uint128_t) r4) * s3 + ((uint128_t) r3) * s4;
+ t[3] += ((uint128_t) r4) * s4;
+#else
+ mul64x64_128(mul, r4, s1) add128(t[0], mul) mul64x64_128(mul, r1, s4) add128(t[0], mul) mul64x64_128(mul, r2, s3) add128(t[0], mul) mul64x64_128(mul, r3, s2) add128(t[0], mul)
+ mul64x64_128(mul, r4, s2) add128(t[1], mul) mul64x64_128(mul, r2, s4) add128(t[1], mul) mul64x64_128(mul, r3, s3) add128(t[1], mul)
+ mul64x64_128(mul, r4, s3) add128(t[2], mul) mul64x64_128(mul, r3, s4) add128(t[2], mul)
+ mul64x64_128(mul, r4, s4) add128(t[3], mul)
+#endif
+
+ r0 = lo128(t[0]) & reduce_mask_51; shr128(c, t[0], 51);
+ add128_64(t[1], c) r1 = lo128(t[1]) & reduce_mask_51; shr128(c, t[1], 51);
+ add128_64(t[2], c) r2 = lo128(t[2]) & reduce_mask_51; shr128(c, t[2], 51);
+ add128_64(t[3], c) r3 = lo128(t[3]) & reduce_mask_51; shr128(c, t[3], 51);
+ add128_64(t[4], c) r4 = lo128(t[4]) & reduce_mask_51; shr128(c, t[4], 51);
+ r0 += c * 19; c = r0 >> 51; r0 = r0 & reduce_mask_51;
+ r1 += c;
+
+ out[0] = r0;
+ out[1] = r1;
+ out[2] = r2;
+ out[3] = r3;
+ out[4] = r4;
+}
+
+/* out = in^(2^count), i.e. square repeatedly, count times */
+DONNA_INLINE static void
+curve25519_square_times(bignum25519 out, const bignum25519 in, uint64_t count) {
+#if !defined(HAVE_NATIVE_UINT128)
+ uint128_t mul;
+#endif
+ uint128_t t[5];
+ uint64_t r0,r1,r2,r3,r4,c;
+ uint64_t d0,d1,d2,d4,d419;
+
+ r0 = in[0];
+ r1 = in[1];
+ r2 = in[2];
+ r3 = in[3];
+ r4 = in[4];
+
+ do {
+ d0 = r0 * 2;
+ d1 = r1 * 2;
+ d2 = r2 * 2 * 19;
+ d419 = r4 * 19;
+ d4 = d419 * 2;
+
+#if defined(HAVE_NATIVE_UINT128)
+ t[0] = ((uint128_t) r0) * r0 + ((uint128_t) d4) * r1 + (((uint128_t) d2) * (r3 ));
+ t[1] = ((uint128_t) d0) * r1 + ((uint128_t) d4) * r2 + (((uint128_t) r3) * (r3 * 19));
+ t[2] = ((uint128_t) d0) * r2 + ((uint128_t) r1) * r1 + (((uint128_t) d4) * (r3 ));
+ t[3] = ((uint128_t) d0) * r3 + ((uint128_t) d1) * r2 + (((uint128_t) r4) * (d419 ));
+ t[4] = ((uint128_t) d0) * r4 + ((uint128_t) d1) * r3 + (((uint128_t) r2) * (r2 ));
+#else
+ mul64x64_128(t[0], r0, r0) mul64x64_128(mul, d4, r1) add128(t[0], mul) mul64x64_128(mul, d2, r3) add128(t[0], mul)
+ mul64x64_128(t[1], d0, r1) mul64x64_128(mul, d4, r2) add128(t[1], mul) mul64x64_128(mul, r3, r3 * 19) add128(t[1], mul)
+ mul64x64_128(t[2], d0, r2) mul64x64_128(mul, r1, r1) add128(t[2], mul) mul64x64_128(mul, d4, r3) add128(t[2], mul)
+ mul64x64_128(t[3], d0, r3) mul64x64_128(mul, d1, r2) add128(t[3], mul) mul64x64_128(mul, r4, d419) add128(t[3], mul)
+ mul64x64_128(t[4], d0, r4) mul64x64_128(mul, d1, r3) add128(t[4], mul) mul64x64_128(mul, r2, r2) add128(t[4], mul)
+#endif
+
+ r0 = lo128(t[0]) & reduce_mask_51; shr128(c, t[0], 51);
+ add128_64(t[1], c) r1 = lo128(t[1]) & reduce_mask_51; shr128(c, t[1], 51);
+ add128_64(t[2], c) r2 = lo128(t[2]) & reduce_mask_51; shr128(c, t[2], 51);
+ add128_64(t[3], c) r3 = lo128(t[3]) & reduce_mask_51; shr128(c, t[3], 51);
+ add128_64(t[4], c) r4 = lo128(t[4]) & reduce_mask_51; shr128(c, t[4], 51);
+ r0 += c * 19; c = r0 >> 51; r0 = r0 & reduce_mask_51;
+ r1 += c;
+ } while(--count);
+
+ out[0] = r0;
+ out[1] = r1;
+ out[2] = r2;
+ out[3] = r3;
+ out[4] = r4;
+}
+
+DONNA_INLINE static void
+curve25519_square(bignum25519 out, const bignum25519 in) {
+#if !defined(HAVE_NATIVE_UINT128)
+ uint128_t mul;
+#endif
+ uint128_t t[5];
+ uint64_t r0,r1,r2,r3,r4,c;
+ uint64_t d0,d1,d2,d4,d419;
+
+ r0 = in[0];
+ r1 = in[1];
+ r2 = in[2];
+ r3 = in[3];
+ r4 = in[4];
+
+ d0 = r0 * 2;
+ d1 = r1 * 2;
+ d2 = r2 * 2 * 19;
+ d419 = r4 * 19;
+ d4 = d419 * 2;
+
+#if defined(HAVE_NATIVE_UINT128)
+ t[0] = ((uint128_t) r0) * r0 + ((uint128_t) d4) * r1 + (((uint128_t) d2) * (r3 ));
+ t[1] = ((uint128_t) d0) * r1 + ((uint128_t) d4) * r2 + (((uint128_t) r3) * (r3 * 19));
+ t[2] = ((uint128_t) d0) * r2 + ((uint128_t) r1) * r1 + (((uint128_t) d4) * (r3 ));
+ t[3] = ((uint128_t) d0) * r3 + ((uint128_t) d1) * r2 + (((uint128_t) r4) * (d419 ));
+ t[4] = ((uint128_t) d0) * r4 + ((uint128_t) d1) * r3 + (((uint128_t) r2) * (r2 ));
+#else
+ mul64x64_128(t[0], r0, r0) mul64x64_128(mul, d4, r1) add128(t[0], mul) mul64x64_128(mul, d2, r3) add128(t[0], mul)
+ mul64x64_128(t[1], d0, r1) mul64x64_128(mul, d4, r2) add128(t[1], mul) mul64x64_128(mul, r3, r3 * 19) add128(t[1], mul)
+ mul64x64_128(t[2], d0, r2) mul64x64_128(mul, r1, r1) add128(t[2], mul) mul64x64_128(mul, d4, r3) add128(t[2], mul)
+ mul64x64_128(t[3], d0, r3) mul64x64_128(mul, d1, r2) add128(t[3], mul) mul64x64_128(mul, r4, d419) add128(t[3], mul)
+ mul64x64_128(t[4], d0, r4) mul64x64_128(mul, d1, r3) add128(t[4], mul) mul64x64_128(mul, r2, r2) add128(t[4], mul)
+#endif
+
+ r0 = lo128(t[0]) & reduce_mask_51; shr128(c, t[0], 51);
+ add128_64(t[1], c) r1 = lo128(t[1]) & reduce_mask_51; shr128(c, t[1], 51);
+ add128_64(t[2], c) r2 = lo128(t[2]) & reduce_mask_51; shr128(c, t[2], 51);
+ add128_64(t[3], c) r3 = lo128(t[3]) & reduce_mask_51; shr128(c, t[3], 51);
+ add128_64(t[4], c) r4 = lo128(t[4]) & reduce_mask_51; shr128(c, t[4], 51);
+ r0 += c * 19; c = r0 >> 51; r0 = r0 & reduce_mask_51;
+ r1 += c;
+
+ out[0] = r0;
+ out[1] = r1;
+ out[2] = r2;
+ out[3] = r3;
+ out[4] = r4;
+}
+
+
+/* Take a little-endian, 32-byte number and expand it into polynomial form */
+DONNA_INLINE static void
+curve25519_expand(bignum25519 out, const unsigned char *in) {
+ static const union { uint8_t b[2]; uint16_t s; } endian_check = {{1,0}};
+ uint64_t x0,x1,x2,x3;
+
+ if (endian_check.s == 1) {
+ x0 = *(uint64_t *)(in + 0);
+ x1 = *(uint64_t *)(in + 8);
+ x2 = *(uint64_t *)(in + 16);
+ x3 = *(uint64_t *)(in + 24);
+ } else {
+ #define F(s) \
+ ((((uint64_t)in[s + 0]) ) | \
+ (((uint64_t)in[s + 1]) << 8) | \
+ (((uint64_t)in[s + 2]) << 16) | \
+ (((uint64_t)in[s + 3]) << 24) | \
+ (((uint64_t)in[s + 4]) << 32) | \
+ (((uint64_t)in[s + 5]) << 40) | \
+ (((uint64_t)in[s + 6]) << 48) | \
+ (((uint64_t)in[s + 7]) << 56))
+
+ x0 = F(0);
+ x1 = F(8);
+ x2 = F(16);
+ x3 = F(24);
+ }
+
+ out[0] = x0 & reduce_mask_51; x0 = (x0 >> 51) | (x1 << 13);
+ out[1] = x0 & reduce_mask_51; x1 = (x1 >> 38) | (x2 << 26);
+ out[2] = x1 & reduce_mask_51; x2 = (x2 >> 25) | (x3 << 39);
+ out[3] = x2 & reduce_mask_51; x3 = (x3 >> 12);
+ out[4] = x3 & reduce_mask_51; /* ignore the top bit */
+}
+
+/* Take a fully reduced polynomial form number and contract it into a
+ * little-endian, 32-byte array
+ */
+DONNA_INLINE static void
+curve25519_contract(unsigned char *out, const bignum25519 input) {
+ uint64_t t[5];
+ uint64_t f, i;
+
+ t[0] = input[0];
+ t[1] = input[1];
+ t[2] = input[2];
+ t[3] = input[3];
+ t[4] = input[4];
+
+ #define curve25519_contract_carry() \
+ t[1] += t[0] >> 51; t[0] &= reduce_mask_51; \
+ t[2] += t[1] >> 51; t[1] &= reduce_mask_51; \
+ t[3] += t[2] >> 51; t[2] &= reduce_mask_51; \
+ t[4] += t[3] >> 51; t[3] &= reduce_mask_51;
+
+ #define curve25519_contract_carry_full() curve25519_contract_carry() \
+ t[0] += 19 * (t[4] >> 51); t[4] &= reduce_mask_51;
+
+ #define curve25519_contract_carry_final() curve25519_contract_carry() \
+ t[4] &= reduce_mask_51;
+
+ curve25519_contract_carry_full()
+ curve25519_contract_carry_full()
+
+ /* now t is between 0 and 2^255-1, properly carried. */
+ /* case 1: between 0 and 2^255-20. case 2: between 2^255-19 and 2^255-1. */
+ t[0] += 19;
+ curve25519_contract_carry_full()
+
+ /* now between 19 and 2^255-1 in both cases, and offset by 19. */
+ t[0] += 0x8000000000000 - 19;
+ t[1] += 0x8000000000000 - 1;
+ t[2] += 0x8000000000000 - 1;
+ t[3] += 0x8000000000000 - 1;
+ t[4] += 0x8000000000000 - 1;
+
+ /* now between 2^255 and 2^256-20, and offset by 2^255. */
+ curve25519_contract_carry_final()
+
+ #define write51full(n,shift) \
+ f = ((t[n] >> shift) | (t[n+1] << (51 - shift))); \
+ for (i = 0; i < 8; i++, f >>= 8) *out++ = (unsigned char)f;
+ #define write51(n) write51full(n,13*n)
+
+ write51(0)
+ write51(1)
+ write51(2)
+ write51(3)
+
+ #undef curve25519_contract_carry
+ #undef curve25519_contract_carry_full
+ #undef curve25519_contract_carry_final
+ #undef write51full
+ #undef write51
+}
+
+/*
+ * Swap the contents of [x] and [qpx] iff @iswap is non-zero
+ */
+DONNA_INLINE static void
+curve25519_swap_conditional(bignum25519 x, bignum25519 qpx, uint64_t iswap) {
+ const uint64_t swap = (uint64_t)(-(int64_t)iswap);
+ uint64_t x0,x1,x2,x3,x4;
+
+ x0 = swap & (x[0] ^ qpx[0]); x[0] ^= x0; qpx[0] ^= x0;
+ x1 = swap & (x[1] ^ qpx[1]); x[1] ^= x1; qpx[1] ^= x1;
+ x2 = swap & (x[2] ^ qpx[2]); x[2] ^= x2; qpx[2] ^= x2;
+ x3 = swap & (x[3] ^ qpx[3]); x[3] ^= x3; qpx[3] ^= x3;
+ x4 = swap & (x[4] ^ qpx[4]); x[4] ^= x4; qpx[4] ^= x4;
+
+}
+
diff --git a/curve25519-donna/curve25519-donna-common.h b/curve25519-donna/curve25519-donna-common.h
new file mode 100644
index 0000000000..6b3ed2ad65
--- /dev/null
+++ b/curve25519-donna/curve25519-donna-common.h
@@ -0,0 +1,43 @@
+/*
+ * In: b = 2^5 - 2^0
+ * Out: b = 2^250 - 2^0
+ */
+static void
+curve25519_pow_two5mtwo0_two250mtwo0(bignum25519 b) {
+ bignum25519 ALIGN(16) t0,c;
+
+ /* 2^5 - 2^0 */ /* b */
+ /* 2^10 - 2^5 */ curve25519_square_times(t0, b, 5);
+ /* 2^10 - 2^0 */ curve25519_mul(b, t0, b);
+ /* 2^20 - 2^10 */ curve25519_square_times(t0, b, 10);
+ /* 2^20 - 2^0 */ curve25519_mul(c, t0, b);
+ /* 2^40 - 2^20 */ curve25519_square_times(t0, c, 20);
+ /* 2^40 - 2^0 */ curve25519_mul(t0, t0, c);
+ /* 2^50 - 2^10 */ curve25519_square_times(t0, t0, 10);
+ /* 2^50 - 2^0 */ curve25519_mul(b, t0, b);
+ /* 2^100 - 2^50 */ curve25519_square_times(t0, b, 50);
+ /* 2^100 - 2^0 */ curve25519_mul(c, t0, b);
+ /* 2^200 - 2^100 */ curve25519_square_times(t0, c, 100);
+ /* 2^200 - 2^0 */ curve25519_mul(t0, t0, c);
+ /* 2^250 - 2^50 */ curve25519_square_times(t0, t0, 50);
+ /* 2^250 - 2^0 */ curve25519_mul(b, t0, b);
+}
+
+/*
+ * z^(p - 2) = z^(2^255 - 21)
+ */
+static void
+curve25519_recip(bignum25519 out, const bignum25519 z) {
+ bignum25519 ALIGN(16) a,t0,b;
+
+ /* 2 */ curve25519_square(a, z); /* a = 2 */
+ /* 8 */ curve25519_square_times(t0, a, 2);
+ /* 9 */ curve25519_mul(b, t0, z); /* b = 9 */
+ /* 11 */ curve25519_mul(a, b, a); /* a = 11 */
+ /* 22 */ curve25519_square(t0, a);
+ /* 2^5 - 2^0 = 31 */ curve25519_mul(b, t0, b);
+ /* 2^250 - 2^0 */ curve25519_pow_two5mtwo0_two250mtwo0(b);
+ /* 2^255 - 2^5 */ curve25519_square_times(b, b, 5);
+ /* 2^255 - 21 */ curve25519_mul(out, b, a);
+}
+
diff --git a/curve25519-donna/curve25519-donna-portable-identify.h b/curve25519-donna/curve25519-donna-portable-identify.h
new file mode 100644
index 0000000000..26a264cf9e
--- /dev/null
+++ b/curve25519-donna/curve25519-donna-portable-identify.h
@@ -0,0 +1,103 @@
+/* os */
+#if defined(_WIN32) || defined(_WIN64) || defined(__TOS_WIN__) || defined(__WINDOWS__)
+ #define OS_WINDOWS
+#elif defined(sun) || defined(__sun) || defined(__SVR4) || defined(__svr4__)
+ #define OS_SOLARIS
+#else
+ #include <sys/param.h> /* need this to define BSD */
+ #define OS_NIX
+ #if defined(__linux__)
+ #define OS_LINUX
+ #elif defined(BSD)
+ #define OS_BSD
+ #if defined(MACOS_X) || (defined(__APPLE__) & defined(__MACH__))
+ #define OS_OSX
+ #elif defined(macintosh) || defined(Macintosh)
+ #define OS_MAC
+ #elif defined(__OpenBSD__)
+ #define OS_OPENBSD
+ #endif
+ #endif
+#endif
+
+
+/* compiler */
+#if defined(_MSC_VER)
+ #define COMPILER_MSVC
+#endif
+#if defined(__ICC)
+ #define COMPILER_INTEL
+#endif
+#if defined(__GNUC__)
+ #if (__GNUC__ >= 3)
+ #define COMPILER_GCC ((__GNUC__ * 10000) + (__GNUC_MINOR__ * 100) + (__GNUC_PATCHLEVEL__))
+ #else
+ #define COMPILER_GCC ((__GNUC__ * 10000) + (__GNUC_MINOR__ * 100) )
+ #endif
+#endif
+#if defined(__PATHCC__)
+ #define COMPILER_PATHCC
+#endif
+#if defined(__clang__)
+ #define COMPILER_CLANG ((__clang_major__ * 10000) + (__clang_minor__ * 100) + (__clang_patchlevel__))
+#endif
+
+
+
+/* cpu */
+#if defined(__amd64__) || defined(__amd64) || defined(__x86_64__ ) || defined(_M_X64)
+ #define CPU_X86_64
+#elif defined(__i586__) || defined(__i686__) || (defined(_M_IX86) && (_M_IX86 >= 500))
+ #define CPU_X86 500
+#elif defined(__i486__) || (defined(_M_IX86) && (_M_IX86 >= 400))
+ #define CPU_X86 400
+#elif defined(__i386__) || (defined(_M_IX86) && (_M_IX86 >= 300)) || defined(__X86__) || defined(_X86_) || defined(__I86__)
+ #define CPU_X86 300
+#elif defined(__ia64__) || defined(_IA64) || defined(__IA64__) || defined(_M_IA64) || defined(__ia64)
+ #define CPU_IA64
+#endif
+
+#if defined(__sparc__) || defined(__sparc) || defined(__sparcv9)
+ #define CPU_SPARC
+ #if defined(__sparcv9)
+ #define CPU_SPARC64
+ #endif
+#endif
+
+#if defined(powerpc) || defined(__PPC__) || defined(__ppc__) || defined(_ARCH_PPC) || defined(__powerpc__) || defined(__powerpc) || defined(POWERPC) || defined(_M_PPC)
+ #define CPU_PPC
+ #if defined(_ARCH_PWR7)
+ #define CPU_POWER7
+ #elif defined(__64BIT__)
+ #define CPU_PPC64
+ #else
+ #define CPU_PPC32
+ #endif
+#endif
+
+#if defined(__hppa__) || defined(__hppa)
+ #define CPU_HPPA
+#endif
+
+#if defined(__alpha__) || defined(__alpha) || defined(_M_ALPHA)
+ #define CPU_ALPHA
+#endif
+
+/* 64 bit cpu */
+#if defined(CPU_X86_64) || defined(CPU_IA64) || defined(CPU_SPARC64) || defined(__64BIT__) || defined(__LP64__) || defined(_LP64) || (defined(_MIPS_SZLONG) && (_MIPS_SZLONG == 64))
+ #define CPU_64BITS
+#endif
+
+#if defined(COMPILER_MSVC)
+ typedef signed char int8_t;
+ typedef unsigned char uint8_t;
+ typedef signed short int16_t;
+ typedef unsigned short uint16_t;
+ typedef signed int int32_t;
+ typedef unsigned int uint32_t;
+ typedef signed __int64 int64_t;
+ typedef unsigned __int64 uint64_t;
+#else
+ #include <stdint.h>
+#endif
+
diff --git a/curve25519-donna/curve25519-donna-portable.h b/curve25519-donna/curve25519-donna-portable.h
new file mode 100644
index 0000000000..71da697d54
--- /dev/null
+++ b/curve25519-donna/curve25519-donna-portable.h
@@ -0,0 +1,92 @@
+#include "curve25519-donna-portable-identify.h"
+
+#define mul32x32_64(a,b) (((uint64_t)(a))*(b))
+
+/* platform */
+#if defined(COMPILER_MSVC)
+ #include <intrin.h>
+ #if !defined(_DEBUG)
+ #undef mul32x32_64
+ #define mul32x32_64(a,b) __emulu(a,b)
+ #endif
+ #undef inline
+ #define inline __forceinline
+ #define DONNA_INLINE __forceinline
+ #define DONNA_NOINLINE __declspec(noinline)
+ #define ALIGN(x) __declspec(align(x))
+ #define ROTL32(a,b) _rotl(a,b)
+ #define ROTR32(a,b) _rotr(a,b)
+#else
+ #include <sys/param.h>
+ #define DONNA_INLINE inline __attribute__((always_inline))
+ #define DONNA_NOINLINE __attribute__((noinline))
+ #define ALIGN(x) __attribute__((aligned(x)))
+ #define ROTL32(a,b) (((a) << (b)) | ((a) >> (32 - b)))
+ #define ROTR32(a,b) (((a) >> (b)) | ((a) << (32 - b)))
+#endif
+
+/* uint128_t */
+#if defined(CPU_64BITS) && !defined(ED25519_FORCE_32BIT)
+ #if defined(COMPILER_CLANG) && (COMPILER_CLANG >= 30100)
+ #define HAVE_NATIVE_UINT128
+ typedef unsigned __int128 uint128_t;
+ #elif defined(COMPILER_MSVC)
+ #define HAVE_UINT128
+ typedef struct uint128_t {
+ uint64_t lo, hi;
+ } uint128_t;
+ #define mul64x64_128(out,a,b) out.lo = _umul128(a,b,&out.hi);
+ #define shr128_pair(out,hi,lo,shift) out = __shiftright128(lo, hi, shift);
+ #define shl128_pair(out,hi,lo,shift) out = __shiftleft128(lo, hi, shift);
+ #define shr128(out,in,shift) shr128_pair(out, in.hi, in.lo, shift)
+ #define shl128(out,in,shift) shl128_pair(out, in.hi, in.lo, shift)
+ #define add128(a,b) { uint64_t p = a.lo; a.lo += b.lo; a.hi += b.hi + (a.lo < p); }
+ #define add128_64(a,b) { uint64_t p = a.lo; a.lo += b; a.hi += (a.lo < p); }
+ #define lo128(a) (a.lo)
+ #define hi128(a) (a.hi)
+ #elif defined(COMPILER_GCC) && !defined(HAVE_NATIVE_UINT128)
+ #if defined(__SIZEOF_INT128__)
+ #define HAVE_NATIVE_UINT128
+ typedef unsigned __int128 uint128_t;
+ #elif (COMPILER_GCC >= 40400)
+ #define HAVE_NATIVE_UINT128
+ typedef unsigned uint128_t __attribute__((mode(TI)));
+ #elif defined(CPU_X86_64)
+ #define HAVE_UINT128
+ typedef struct uint128_t {
+ uint64_t lo, hi;
+ } uint128_t;
+ #define mul64x64_128(out,a,b) __asm__ ("mulq %3" : "=a" (out.lo), "=d" (out.hi) : "a" (a), "rm" (b));
+ #define shr128_pair(out,hi,lo,shift) __asm__ ("shrdq %2,%1,%0" : "+r" (lo) : "r" (hi), "J" (shift)); out = lo;
+ #define shl128_pair(out,hi,lo,shift) __asm__ ("shldq %2,%1,%0" : "+r" (hi) : "r" (lo), "J" (shift)); out = hi;
+ #define shr128(out,in,shift) shr128_pair(out,in.hi, in.lo, shift)
+ #define shl128(out,in,shift) shl128_pair(out,in.hi, in.lo, shift)
+ #define add128(a,b) __asm__ ("addq %4,%2; adcq %5,%3" : "=r" (a.hi), "=r" (a.lo) : "1" (a.lo), "0" (a.hi), "rm" (b.lo), "rm" (b.hi) : "cc");
+ #define add128_64(a,b) __asm__ ("addq %4,%2; adcq $0,%3" : "=r" (a.hi), "=r" (a.lo) : "1" (a.lo), "0" (a.hi), "rm" (b) : "cc");
+ #define lo128(a) (a.lo)
+ #define hi128(a) (a.hi)
+ #endif
+ #endif
+
+ #if defined(HAVE_NATIVE_UINT128)
+ #define HAVE_UINT128
+ #define mul64x64_128(out,a,b) out = (uint128_t)a * b;
+ #define shr128_pair(out,hi,lo,shift) out = (uint64_t)((((uint128_t)hi << 64) | lo) >> (shift));
+ #define shl128_pair(out,hi,lo,shift) out = (uint64_t)(((((uint128_t)hi << 64) | lo) << (shift)) >> 64);
+ #define shr128(out,in,shift) out = (uint64_t)(in >> (shift));
+ #define shl128(out,in,shift) out = (uint64_t)((in << shift) >> 64);
+ #define add128(a,b) a += b;
+ #define add128_64(a,b) a += (uint64_t)b;
+ #define lo128(a) ((uint64_t)a)
+ #define hi128(a) ((uint64_t)(a >> 64))
+ #endif
+
+ #if !defined(HAVE_UINT128)
+ #error Need a uint128_t implementation!
+ #endif
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+
+
diff --git a/curve25519-donna/curve25519-donna-scalarmult-base.h b/curve25519-donna/curve25519-donna-scalarmult-base.h
new file mode 100644
index 0000000000..061759fa69
--- /dev/null
+++ b/curve25519-donna/curve25519-donna-scalarmult-base.h
@@ -0,0 +1,66 @@
+/* Calculates nQ where Q is the x-coordinate of a point on the curve
+ *
+ * mypublic: the packed little endian x coordinate of the resulting curve point
+ * n: a little endian, 32-byte number
+ * basepoint: a packed little endian point of the curve
+ */
+
+static void
+curve25519_scalarmult_donna(curve25519_key mypublic, const curve25519_key n, const curve25519_key basepoint) {
+ bignum25519 nqpqx = {1}, nqpqz = {0}, nqz = {1}, nqx;
+ bignum25519 q, qx, qpqx, qqx, zzz, zmone;
+ size_t bit, lastbit;
+ int32_t i;
+
+ curve25519_expand(q, basepoint);
+ curve25519_copy(nqx, q);
+
+ /* bit 255 is always 0, and bit 254 is always 1, so skip bit 255 and
+ start pre-swapped on bit 254 */
+ lastbit = 1;
+
+ /* we are doing bits 254..3 in the loop, but are swapping in bits 253..2 */
+ for (i = 253; i >= 2; i--) {
+ curve25519_add(qx, nqx, nqz);
+ curve25519_sub(nqz, nqx, nqz);
+ curve25519_add(qpqx, nqpqx, nqpqz);
+ curve25519_sub(nqpqz, nqpqx, nqpqz);
+ curve25519_mul(nqpqx, qpqx, nqz);
+ curve25519_mul(nqpqz, qx, nqpqz);
+ curve25519_add(qqx, nqpqx, nqpqz);
+ curve25519_sub(nqpqz, nqpqx, nqpqz);
+ curve25519_square(nqpqz, nqpqz);
+ curve25519_square(nqpqx, qqx);
+ curve25519_mul(nqpqz, nqpqz, q);
+ curve25519_square(qx, qx);
+ curve25519_square(nqz, nqz);
+ curve25519_mul(nqx, qx, nqz);
+ curve25519_sub(nqz, qx, nqz);
+ curve25519_scalar_product(zzz, nqz, 121665);
+ curve25519_add(zzz, zzz, qx);
+ curve25519_mul(nqz, nqz, zzz);
+
+ bit = (n[i/8] >> (i & 7)) & 1;
+ curve25519_swap_conditional(nqx, nqpqx, bit ^ lastbit);
+ curve25519_swap_conditional(nqz, nqpqz, bit ^ lastbit);
+ lastbit = bit;
+ }
+
+ /* the final 3 bits are always zero, so we only need to double */
+ for (i = 0; i < 3; i++) {
+ curve25519_add(qx, nqx, nqz);
+ curve25519_sub(nqz, nqx, nqz);
+ curve25519_square(qx, qx);
+ curve25519_square(nqz, nqz);
+ curve25519_mul(nqx, qx, nqz);
+ curve25519_sub(nqz, qx, nqz);
+ curve25519_scalar_product(zzz, nqz, 121665);
+ curve25519_add(zzz, zzz, qx);
+ curve25519_mul(nqz, nqz, zzz);
+ }
+
+ curve25519_recip(zmone, nqz);
+ curve25519_mul(nqz, nqx, zmone);
+ curve25519_contract(mypublic, nqz);
+}
+
diff --git a/curve25519-donna/curve25519-donna-scalarmult-sse2.h b/curve25519-donna/curve25519-donna-scalarmult-sse2.h
new file mode 100644
index 0000000000..e0ef14c118
--- /dev/null
+++ b/curve25519-donna/curve25519-donna-scalarmult-sse2.h
@@ -0,0 +1,65 @@
+
+/* Calculates nQ where Q is the x-coordinate of a point on the curve
+ *
+ * mypublic: the packed little endian x coordinate of the resulting curve point
+ * n: a little endian, 32-byte number
+ * basepoint: a packed little endian point of the curve
+ */
+static void
+curve25519_scalarmult_donna(curve25519_key mypublic, const curve25519_key n, const curve25519_key basepoint) {
+ bignum25519 ALIGN(16) nqx = {1}, nqpqz = {1}, nqz = {0}, nqpqx, zmone;
+ packed32bignum25519 qx, qz, pqz, pqx;
+ packed64bignum25519 nq, sq, sqscalar, prime, primex, primez, nqpq;
+ bignum25519mulprecomp preq;
+ size_t bit, lastbit, i;
+
+ curve25519_expand(nqpqx, basepoint);
+ curve25519_mul_precompute(&preq, nqpqx);
+
+ /* do bits 254..3 */
+ for (i = 254, lastbit = 0; i >= 3; i--) {
+ bit = (n[i/8] >> (i & 7)) & 1;
+ curve25519_swap_conditional(nqx, nqpqx, bit ^ lastbit);
+ curve25519_swap_conditional(nqz, nqpqz, bit ^ lastbit);
+ lastbit = bit;
+
+ curve25519_tangle32(qx, nqx, nqpqx); /* qx = [nqx,nqpqx] */
+ curve25519_tangle32(qz, nqz, nqpqz); /* qz = [nqz,nqpqz] */
+
+ curve25519_add_packed32(pqx, qx, qz); /* pqx = [nqx+nqz,nqpqx+nqpqz] */
+ curve25519_sub_packed32(pqz, qx, qz); /* pqz = [nqx-nqz,nqpqx-nqpqz] */
+
+ curve25519_make_nqpq(primex, primez, pqx, pqz); /* primex = [nqx+nqz,nqpqx+nqpqz], primez = [nqpqx-nqpqz,nqx-nqz] */
+ curve25519_mul_packed64(prime, primex, primez); /* prime = [nqx+nqz,nqpqx+nqpqz] * [nqpqx-nqpqz,nqx-nqz] */
+ curve25519_addsub_packed64(prime); /* prime = [prime.x+prime.z,prime.x-prime.z] */
+ curve25519_square_packed64(nqpq, prime); /* nqpq = prime^2 */
+ curve25519_untangle64(nqpqx, nqpqz, nqpq);
+ curve25519_mul_precomputed(nqpqz, nqpqz, &preq); /* nqpqz = nqpqz * q */
+
+ /* (((sq.x-sq.z)*121665)+sq.x) * (sq.x-sq.z) is equivalent to (sq.x*121666-sq.z*121665) * (sq.x-sq.z) */
+ curve25519_make_nq(nq, pqx, pqz); /* nq = [nqx+nqz,nqx-nqz] */
+ curve25519_square_packed64(sq, nq); /* sq = nq^2 */
+ curve25519_121665_packed64(sqscalar, sq); /* sqscalar = sq * [121666,121665] */
+ curve25519_final_nq(nq, sq, sqscalar); /* nq = [sq.x,sqscalar.x-sqscalar.z] * [sq.z,sq.x-sq.z] */
+ curve25519_untangle64(nqx, nqz, nq);
+ };
+
+ /* it is possible to get rid of this swap by doing the swap at the bottom of the loop
+    above instead of at the top, but compilers seem to optimize better this way */
+ curve25519_swap_conditional(nqx, nqpqx, bit);
+ curve25519_swap_conditional(nqz, nqpqz, bit);
+
+ /* do bits 2..0 */
+ for (i = 0; i < 3; i++) {
+ curve25519_compute_nq(nq, nqx, nqz);
+ curve25519_square_packed64(sq, nq); /* sq = nq^2 */
+ curve25519_121665_packed64(sqscalar, sq); /* sqscalar = sq * [121666,121665] */
+ curve25519_final_nq(nq, sq, sqscalar); /* nq = [sq.x,sqscalar.x-sqscalar.z] * [sq.z,sq.x-sq.z] */
+ curve25519_untangle64(nqx, nqz, nq);
+ }
+
+ curve25519_recip(zmone, nqz);
+ curve25519_mul(nqz, nqx, zmone);
+ curve25519_contract(mypublic, nqz);
+}
+
diff --git a/curve25519-donna/curve25519-donna-sse2.h b/curve25519-donna/curve25519-donna-sse2.h
new file mode 100644
index 0000000000..ff2416209a
--- /dev/null
+++ b/curve25519-donna/curve25519-donna-sse2.h
@@ -0,0 +1,1009 @@
+#include <emmintrin.h>
+typedef __m128i xmmi;
+
+typedef union packedelem8_t {
+ unsigned char u[16];
+ xmmi v;
+} packedelem8;
+
+typedef union packedelem32_t {
+ uint32_t u[4];
+ xmmi v;
+} packedelem32;
+
+typedef union packedelem64_t {
+ uint64_t u[2];
+ xmmi v;
+} packedelem64;
+
+/* 10 elements + an extra 2 to fit in 3 xmm registers */
+typedef uint32_t bignum25519[10+2];
+typedef packedelem32 packed32bignum25519[5];
+typedef packedelem64 packed64bignum25519[10];
+
+static const uint32_t reduce_mask_26 = (1 << 26) - 1;
+static const uint32_t reduce_mask_25 = (1 << 25) - 1;
+
+static const packedelem32 sse2_bot32bitmask = {{0xffffffff, 0x00000000, 0xffffffff, 0x00000000}};
+static const packedelem32 sse2_top32bitmask = {{0x00000000, 0xffffffff, 0x00000000, 0xffffffff}};
+static const packedelem32 sse2_top64bitmask = {{0x00000000, 0x00000000, 0xffffffff, 0xffffffff}};
+static const packedelem32 sse2_bot64bitmask = {{0xffffffff, 0xffffffff, 0x00000000, 0x00000000}};
+
+/* reduction masks */
+static const packedelem64 packedmask26 = {{0x03ffffff, 0x03ffffff}};
+static const packedelem64 packedmask25 = {{0x01ffffff, 0x01ffffff}};
+static const packedelem32 packedmask2625 = {{0x3ffffff,0,0x1ffffff,0}};
+static const packedelem32 packedmask26262626 = {{0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff}};
+static const packedelem32 packedmask25252525 = {{0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff}};
+
+/* multipliers */
+static const packedelem64 packednineteen = {{19, 19}};
+static const packedelem64 packednineteenone = {{19, 1}};
+static const packedelem64 packedthirtyeight = {{38, 38}};
+static const packedelem64 packed3819 = {{19*2,19}};
+static const packedelem64 packed9638 = {{19*4,19*2}};
+
+/* 121666,121665 */
+static const packedelem64 packed121666121665 = {{121666, 121665}};
+
+/* 2*(2^255 - 19) = 0 mod p */
+static const packedelem32 packed2p0 = {{0x7ffffda,0x3fffffe,0x7fffffe,0x3fffffe}};
+static const packedelem32 packed2p1 = {{0x7fffffe,0x3fffffe,0x7fffffe,0x3fffffe}};
+static const packedelem32 packed2p2 = {{0x7fffffe,0x3fffffe,0x0000000,0x0000000}};
+
+static const packedelem32 packed32zeromodp0 = {{0x7ffffda,0x7ffffda,0x3fffffe,0x3fffffe}};
+static const packedelem32 packed32zeromodp1 = {{0x7fffffe,0x7fffffe,0x3fffffe,0x3fffffe}};
+
+/* Copy a bignum to another: out = in */
+DONNA_INLINE static void
+curve25519_copy(bignum25519 out, const bignum25519 in) {
+ xmmi x0,x1,x2;
+ x0 = _mm_load_si128((xmmi*)in + 0);
+ x1 = _mm_load_si128((xmmi*)in + 1);
+ x2 = _mm_load_si128((xmmi*)in + 2);
+ _mm_store_si128((xmmi*)out + 0, x0);
+ _mm_store_si128((xmmi*)out + 1, x1);
+ _mm_store_si128((xmmi*)out + 2, x2);
+}
+
+/* Take a little-endian, 32-byte number and expand it into polynomial form */
+DONNA_INLINE static void
+curve25519_expand(bignum25519 out, const unsigned char in[32]) {
+ uint32_t x0,x1,x2,x3,x4,x5,x6,x7;
+
+ x0 = *(uint32_t *)(in + 0);
+ x1 = *(uint32_t *)(in + 4);
+ x2 = *(uint32_t *)(in + 8);
+ x3 = *(uint32_t *)(in + 12);
+ x4 = *(uint32_t *)(in + 16);
+ x5 = *(uint32_t *)(in + 20);
+ x6 = *(uint32_t *)(in + 24);
+ x7 = *(uint32_t *)(in + 28);
+
+ out[0] = ( x0 ) & reduce_mask_26;
+ out[1] = ((((uint64_t)x1 << 32) | x0) >> 26) & reduce_mask_25;
+ out[2] = ((((uint64_t)x2 << 32) | x1) >> 19) & reduce_mask_26;
+ out[3] = ((((uint64_t)x3 << 32) | x2) >> 13) & reduce_mask_25;
+ out[4] = (( x3) >> 6) & reduce_mask_26;
+ out[5] = ( x4 ) & reduce_mask_25;
+ out[6] = ((((uint64_t)x5 << 32) | x4) >> 25) & reduce_mask_26;
+ out[7] = ((((uint64_t)x6 << 32) | x5) >> 19) & reduce_mask_25;
+ out[8] = ((((uint64_t)x7 << 32) | x6) >> 12) & reduce_mask_26;
+ out[9] = (( x7) >> 6) & reduce_mask_25; /* ignore the top bit */
+
+ out[10] = 0;
+ out[11] = 0;
+}
+
+/* Take a fully reduced polynomial form number and contract it into a
+ * little-endian, 32-byte array
+ */
+DONNA_INLINE static void
+curve25519_contract(unsigned char out[32], const bignum25519 in) {
+ bignum25519 ALIGN(16) f;
+
+ curve25519_copy(f, in);
+
+ #define carry_pass() \
+ f[1] += f[0] >> 26; f[0] &= reduce_mask_26; \
+ f[2] += f[1] >> 25; f[1] &= reduce_mask_25; \
+ f[3] += f[2] >> 26; f[2] &= reduce_mask_26; \
+ f[4] += f[3] >> 25; f[3] &= reduce_mask_25; \
+ f[5] += f[4] >> 26; f[4] &= reduce_mask_26; \
+ f[6] += f[5] >> 25; f[5] &= reduce_mask_25; \
+ f[7] += f[6] >> 26; f[6] &= reduce_mask_26; \
+ f[8] += f[7] >> 25; f[7] &= reduce_mask_25; \
+ f[9] += f[8] >> 26; f[8] &= reduce_mask_26;
+
+ #define carry_pass_full() \
+ carry_pass() \
+ f[0] += 19 * (f[9] >> 25); f[9] &= reduce_mask_25;
+
+ #define carry_pass_final() \
+ carry_pass() \
+ f[9] &= reduce_mask_25;
+
+ carry_pass_full()
+ carry_pass_full()
+
+ /* now t is between 0 and 2^255-1, properly carried. */
+ /* case 1: between 0 and 2^255-20. case 2: between 2^255-19 and 2^255-1. */
+ f[0] += 19;
+ carry_pass_full()
+
+ /* now between 19 and 2^255-1 in both cases, and offset by 19. */
+ f[0] += (1 << 26) - 19;
+ f[1] += (1 << 25) - 1;
+ f[2] += (1 << 26) - 1;
+ f[3] += (1 << 25) - 1;
+ f[4] += (1 << 26) - 1;
+ f[5] += (1 << 25) - 1;
+ f[6] += (1 << 26) - 1;
+ f[7] += (1 << 25) - 1;
+ f[8] += (1 << 26) - 1;
+ f[9] += (1 << 25) - 1;
+
+ /* now between 2^255 and 2^256-20, and offset by 2^255. */
+ carry_pass_final()
+
+ #undef carry_pass
+ #undef carry_pass_full
+ #undef carry_pass_final
+
+ *(uint32_t *)(out + 0) = ((f[0] ) | (f[1] << 26));
+ *(uint32_t *)(out + 4) = ((f[1] >> 6) | (f[2] << 19));
+ *(uint32_t *)(out + 8) = ((f[2] >> 13) | (f[3] << 13));
+ *(uint32_t *)(out + 12) = ((f[3] >> 19) | (f[4] << 6));
+ *(uint32_t *)(out + 16) = ((f[5] ) | (f[6] << 25));
+ *(uint32_t *)(out + 20) = ((f[6] >> 7) | (f[7] << 19));
+ *(uint32_t *)(out + 24) = ((f[7] >> 13) | (f[8] << 12));
+ *(uint32_t *)(out + 28) = ((f[8] >> 20) | (f[9] << 6));
+}
+
+/*
+ * Maybe swap the contents of two bignum25519 arrays (@a and @b), 10+2 limbs each.
+ * Perform the swap iff @iswap is non-zero.
+ */
+DONNA_INLINE static void
+curve25519_swap_conditional(bignum25519 a, bignum25519 b, uint32_t iswap) {
+ const uint32_t swap = (uint32_t)(-(int32_t)iswap);
+ xmmi a0,a1,a2,b0,b1,b2,x0,x1,x2;
+ xmmi mask = _mm_cvtsi32_si128(swap);
+ mask = _mm_shuffle_epi32(mask, 0);
+ a0 = _mm_load_si128((xmmi *)a + 0);
+ a1 = _mm_load_si128((xmmi *)a + 1);
+ a2 = _mm_load_si128((xmmi *)a + 2);
+ b0 = _mm_load_si128((xmmi *)b + 0);
+ b1 = _mm_load_si128((xmmi *)b + 1);
+ b2 = _mm_load_si128((xmmi *)b + 2);
+ b0 = _mm_xor_si128(a0, b0);
+ b1 = _mm_xor_si128(a1, b1);
+ b2 = _mm_xor_si128(a2, b2);
+ x0 = _mm_and_si128(b0, mask);
+ x1 = _mm_and_si128(b1, mask);
+ x2 = _mm_and_si128(b2, mask);
+ x0 = _mm_xor_si128(x0, a0);
+ x1 = _mm_xor_si128(x1, a1);
+ x2 = _mm_xor_si128(x2, a2);
+ a0 = _mm_xor_si128(x0, b0);
+ a1 = _mm_xor_si128(x1, b1);
+ a2 = _mm_xor_si128(x2, b2);
+ _mm_store_si128((xmmi *)a + 0, x0);
+ _mm_store_si128((xmmi *)a + 1, x1);
+ _mm_store_si128((xmmi *)a + 2, x2);
+ _mm_store_si128((xmmi *)b + 0, a0);
+ _mm_store_si128((xmmi *)b + 1, a1);
+ _mm_store_si128((xmmi *)b + 2, a2);
+}
+
+/* interleave two bignums */
+DONNA_INLINE static void
+curve25519_tangle32(packedelem32 *out, const bignum25519 x, const bignum25519 z) {
+ xmmi x0,x1,x2,z0,z1,z2;
+
+ x0 = _mm_load_si128((xmmi *)(x + 0));
+ x1 = _mm_load_si128((xmmi *)(x + 4));
+ x2 = _mm_load_si128((xmmi *)(x + 8));
+ z0 = _mm_load_si128((xmmi *)(z + 0));
+ z1 = _mm_load_si128((xmmi *)(z + 4));
+ z2 = _mm_load_si128((xmmi *)(z + 8));
+
+ out[0].v = _mm_unpacklo_epi32(x0, z0);
+ out[1].v = _mm_unpackhi_epi32(x0, z0);
+ out[2].v = _mm_unpacklo_epi32(x1, z1);
+ out[3].v = _mm_unpackhi_epi32(x1, z1);
+ out[4].v = _mm_unpacklo_epi32(x2, z2);
+}
+
+/* split a packed bignum into its two parts */
+DONNA_INLINE static void
+curve25519_untangle64(bignum25519 x, bignum25519 z, const packedelem64 *in) {
+ _mm_store_si128((xmmi *)(x + 0), _mm_unpacklo_epi64(_mm_unpacklo_epi32(in[0].v, in[1].v), _mm_unpacklo_epi32(in[2].v, in[3].v)));
+ _mm_store_si128((xmmi *)(x + 4), _mm_unpacklo_epi64(_mm_unpacklo_epi32(in[4].v, in[5].v), _mm_unpacklo_epi32(in[6].v, in[7].v)));
+ _mm_store_si128((xmmi *)(x + 8), _mm_unpacklo_epi32(in[8].v, in[9].v) );
+ _mm_store_si128((xmmi *)(z + 0), _mm_unpacklo_epi64(_mm_unpackhi_epi32(in[0].v, in[1].v), _mm_unpackhi_epi32(in[2].v, in[3].v)));
+ _mm_store_si128((xmmi *)(z + 4), _mm_unpacklo_epi64(_mm_unpackhi_epi32(in[4].v, in[5].v), _mm_unpackhi_epi32(in[6].v, in[7].v)));
+ _mm_store_si128((xmmi *)(z + 8), _mm_unpackhi_epi32(in[8].v, in[9].v) );
+}
+
+/* add two packed bignums */
+DONNA_INLINE static void
+curve25519_add_packed32(packedelem32 *out, const packedelem32 *r, const packedelem32 *s) {
+ out[0].v = _mm_add_epi32(r[0].v, s[0].v);
+ out[1].v = _mm_add_epi32(r[1].v, s[1].v);
+ out[2].v = _mm_add_epi32(r[2].v, s[2].v);
+ out[3].v = _mm_add_epi32(r[3].v, s[3].v);
+ out[4].v = _mm_add_epi32(r[4].v, s[4].v);
+}
+
+/* subtract two packed bignums */
+DONNA_INLINE static void
+curve25519_sub_packed32(packedelem32 *out, const packedelem32 *r, const packedelem32 *s) {
+ xmmi r0,r1,r2,r3,r4;
+ xmmi s0,s1,s2,s3;
+ xmmi c1,c2;
+
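+ /* bias r by a multiple of p (the packed32zeromodp constants) before subtracting so no limb goes negative, then run one carry pass */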
+ r0 = _mm_add_epi32(r[0].v, packed32zeromodp0.v);
+ r1 = _mm_add_epi32(r[1].v, packed32zeromodp1.v);
+ r2 = _mm_add_epi32(r[2].v, packed32zeromodp1.v);
+ r3 = _mm_add_epi32(r[3].v, packed32zeromodp1.v);
+ r4 = _mm_add_epi32(r[4].v, packed32zeromodp1.v);
+ r0 = _mm_sub_epi32(r0, s[0].v); /* 00 11 */
+ r1 = _mm_sub_epi32(r1, s[1].v); /* 22 33 */
+ r2 = _mm_sub_epi32(r2, s[2].v); /* 44 55 */
+ r3 = _mm_sub_epi32(r3, s[3].v); /* 66 77 */
+ r4 = _mm_sub_epi32(r4, s[4].v); /* 88 99 */
+
+ s0 = _mm_unpacklo_epi64(r0, r2); /* 00 44 */
+ s1 = _mm_unpackhi_epi64(r0, r2); /* 11 55 */
+ s2 = _mm_unpacklo_epi64(r1, r3); /* 22 66 */
+ s3 = _mm_unpackhi_epi64(r1, r3); /* 33 77 */
+
+ c1 = _mm_srli_epi32(s0, 26); c2 = _mm_srli_epi32(s2, 26); s0 = _mm_and_si128(s0, packedmask26262626.v); s2 = _mm_and_si128(s2, packedmask26262626.v); s1 = _mm_add_epi32(s1, c1); s3 = _mm_add_epi32(s3, c2);
+ c1 = _mm_srli_epi32(s1, 25); c2 = _mm_srli_epi32(s3, 25); s1 = _mm_and_si128(s1, packedmask25252525.v); s3 = _mm_and_si128(s3, packedmask25252525.v); s2 = _mm_add_epi32(s2, c1); r4 = _mm_add_epi32(r4, _mm_srli_si128(c2, 8)); s0 = _mm_add_epi32(s0, _mm_slli_si128(c2, 8));
+
+ out[0].v = _mm_unpacklo_epi64(s0, s1); /* 00 11 */
+ out[1].v = _mm_unpacklo_epi64(s2, s3); /* 22 33 */
+ out[2].v = _mm_unpackhi_epi64(s0, s1); /* 44 55 */
+ out[3].v = _mm_unpackhi_epi64(s2, s3); /* 66 77 */
+ out[4].v = r4; /* 88 99 */
+}
+
+/* multiply two packed bignums */
+DONNA_INLINE static void
+curve25519_mul_packed64(packedelem64 *out, const packedelem64 *r, const packedelem64 *s) {
+ xmmi r1,r2,r3,r4,r5,r6,r7,r8,r9;
+ xmmi r1_2,r3_2,r5_2,r7_2,r9_2;
+ xmmi c1,c2;
+
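+ /* schoolbook multiply of the interleaved limbs; odd-index limbs of r are pre-doubled for the mixed 26/25-bit radix, and products past limb 9 wrap around below with a factor of 19 (2^255 = 19 mod p) */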
+ out[0].v = _mm_mul_epu32(r[0].v, s[0].v);
+ out[1].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[1].v), _mm_mul_epu32(r[1].v, s[0].v));
+ r1_2 = _mm_slli_epi32(r[1].v, 1);
+ out[2].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r1_2 , s[1].v), _mm_mul_epu32(r[2].v, s[0].v)));
+ out[3].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[3].v), _mm_add_epi64(_mm_mul_epu32(r[1].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[1].v), _mm_mul_epu32(r[3].v, s[0].v))));
+ r3_2 = _mm_slli_epi32(r[3].v, 1);
+ out[4].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[4].v), _mm_add_epi64(_mm_mul_epu32(r1_2 , s[3].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r3_2 , s[1].v), _mm_mul_epu32(r[4].v, s[0].v)))));
+ out[5].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[5].v), _mm_add_epi64(_mm_mul_epu32(r[1].v, s[4].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[3].v), _mm_add_epi64(_mm_mul_epu32(r[3].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r[4].v, s[1].v), _mm_mul_epu32(r[5].v, s[0].v))))));
+ r5_2 = _mm_slli_epi32(r[5].v, 1);
+ out[6].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[6].v), _mm_add_epi64(_mm_mul_epu32(r1_2 , s[5].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[4].v), _mm_add_epi64(_mm_mul_epu32(r3_2 , s[3].v), _mm_add_epi64(_mm_mul_epu32(r[4].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r5_2 , s[1].v), _mm_mul_epu32(r[6].v, s[0].v)))))));
+ out[7].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[7].v), _mm_add_epi64(_mm_mul_epu32(r[1].v, s[6].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[5].v), _mm_add_epi64(_mm_mul_epu32(r[3].v, s[4].v), _mm_add_epi64(_mm_mul_epu32(r[4].v, s[3].v), _mm_add_epi64(_mm_mul_epu32(r[5].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r[6].v, s[1].v), _mm_mul_epu32(r[7].v , s[0].v))))))));
+ r7_2 = _mm_slli_epi32(r[7].v, 1);
+ out[8].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[8].v), _mm_add_epi64(_mm_mul_epu32(r1_2 , s[7].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[6].v), _mm_add_epi64(_mm_mul_epu32(r3_2 , s[5].v), _mm_add_epi64(_mm_mul_epu32(r[4].v, s[4].v), _mm_add_epi64(_mm_mul_epu32(r5_2 , s[3].v), _mm_add_epi64(_mm_mul_epu32(r[6].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r7_2 , s[1].v), _mm_mul_epu32(r[8].v, s[0].v)))))))));
+ out[9].v = _mm_add_epi64(_mm_mul_epu32(r[0].v, s[9].v), _mm_add_epi64(_mm_mul_epu32(r[1].v, s[8].v), _mm_add_epi64(_mm_mul_epu32(r[2].v, s[7].v), _mm_add_epi64(_mm_mul_epu32(r[3].v, s[6].v), _mm_add_epi64(_mm_mul_epu32(r[4].v, s[5].v), _mm_add_epi64(_mm_mul_epu32(r[5].v, s[4].v), _mm_add_epi64(_mm_mul_epu32(r[6].v, s[3].v), _mm_add_epi64(_mm_mul_epu32(r[7].v, s[2].v), _mm_add_epi64(_mm_mul_epu32(r[8].v, s[1].v), _mm_mul_epu32(r[9].v, s[0].v))))))))));
+
+ r1 = _mm_mul_epu32(r[1].v, packednineteen.v);
+ r2 = _mm_mul_epu32(r[2].v, packednineteen.v);
+ r1_2 = _mm_slli_epi32(r1, 1);
+ r3 = _mm_mul_epu32(r[3].v, packednineteen.v);
+ r4 = _mm_mul_epu32(r[4].v, packednineteen.v);
+ r3_2 = _mm_slli_epi32(r3, 1);
+ r5 = _mm_mul_epu32(r[5].v, packednineteen.v);
+ r6 = _mm_mul_epu32(r[6].v, packednineteen.v);
+ r5_2 = _mm_slli_epi32(r5, 1);
+ r7 = _mm_mul_epu32(r[7].v, packednineteen.v);
+ r8 = _mm_mul_epu32(r[8].v, packednineteen.v);
+ r7_2 = _mm_slli_epi32(r7, 1);
+ r9 = _mm_mul_epu32(r[9].v, packednineteen.v);
+ r9_2 = _mm_slli_epi32(r9, 1);
+
+ out[0].v = _mm_add_epi64(out[0].v, _mm_add_epi64(_mm_mul_epu32(r9_2, s[1].v), _mm_add_epi64(_mm_mul_epu32(r8, s[2].v), _mm_add_epi64(_mm_mul_epu32(r7_2, s[3].v), _mm_add_epi64(_mm_mul_epu32(r6, s[4].v), _mm_add_epi64(_mm_mul_epu32(r5_2, s[5].v), _mm_add_epi64(_mm_mul_epu32(r4, s[6].v), _mm_add_epi64(_mm_mul_epu32(r3_2, s[7].v), _mm_add_epi64(_mm_mul_epu32(r2, s[8].v), _mm_mul_epu32(r1_2, s[9].v))))))))));
+ out[1].v = _mm_add_epi64(out[1].v, _mm_add_epi64(_mm_mul_epu32(r9 , s[2].v), _mm_add_epi64(_mm_mul_epu32(r8, s[3].v), _mm_add_epi64(_mm_mul_epu32(r7 , s[4].v), _mm_add_epi64(_mm_mul_epu32(r6, s[5].v), _mm_add_epi64(_mm_mul_epu32(r5 , s[6].v), _mm_add_epi64(_mm_mul_epu32(r4, s[7].v), _mm_add_epi64(_mm_mul_epu32(r3 , s[8].v), _mm_mul_epu32(r2, s[9].v)))))))));
+ out[2].v = _mm_add_epi64(out[2].v, _mm_add_epi64(_mm_mul_epu32(r9_2, s[3].v), _mm_add_epi64(_mm_mul_epu32(r8, s[4].v), _mm_add_epi64(_mm_mul_epu32(r7_2, s[5].v), _mm_add_epi64(_mm_mul_epu32(r6, s[6].v), _mm_add_epi64(_mm_mul_epu32(r5_2, s[7].v), _mm_add_epi64(_mm_mul_epu32(r4, s[8].v), _mm_mul_epu32(r3_2, s[9].v))))))));
+ out[3].v = _mm_add_epi64(out[3].v, _mm_add_epi64(_mm_mul_epu32(r9 , s[4].v), _mm_add_epi64(_mm_mul_epu32(r8, s[5].v), _mm_add_epi64(_mm_mul_epu32(r7 , s[6].v), _mm_add_epi64(_mm_mul_epu32(r6, s[7].v), _mm_add_epi64(_mm_mul_epu32(r5 , s[8].v), _mm_mul_epu32(r4, s[9].v)))))));
+ out[4].v = _mm_add_epi64(out[4].v, _mm_add_epi64(_mm_mul_epu32(r9_2, s[5].v), _mm_add_epi64(_mm_mul_epu32(r8, s[6].v), _mm_add_epi64(_mm_mul_epu32(r7_2, s[7].v), _mm_add_epi64(_mm_mul_epu32(r6, s[8].v), _mm_mul_epu32(r5_2, s[9].v))))));
+ out[5].v = _mm_add_epi64(out[5].v, _mm_add_epi64(_mm_mul_epu32(r9 , s[6].v), _mm_add_epi64(_mm_mul_epu32(r8, s[7].v), _mm_add_epi64(_mm_mul_epu32(r7 , s[8].v), _mm_mul_epu32(r6, s[9].v)))));
+ out[6].v = _mm_add_epi64(out[6].v, _mm_add_epi64(_mm_mul_epu32(r9_2, s[7].v), _mm_add_epi64(_mm_mul_epu32(r8, s[8].v), _mm_mul_epu32(r7_2, s[9].v))));
+ out[7].v = _mm_add_epi64(out[7].v, _mm_add_epi64(_mm_mul_epu32(r9 , s[8].v), _mm_mul_epu32(r8, s[9].v)));
+ out[8].v = _mm_add_epi64(out[8].v, _mm_mul_epu32(r9_2, s[9].v));
+
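+ /* carry chain: reduce every limb back to 26/25 bits, folding the carry out of limb 9 into limb 0 multiplied by 19 */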
+ c1 = _mm_srli_epi64(out[0].v, 26); c2 = _mm_srli_epi64(out[4].v, 26); out[0].v = _mm_and_si128(out[0].v, packedmask26.v); out[4].v = _mm_and_si128(out[4].v, packedmask26.v); out[1].v = _mm_add_epi64(out[1].v, c1); out[5].v = _mm_add_epi64(out[5].v, c2);
+ c1 = _mm_srli_epi64(out[1].v, 25); c2 = _mm_srli_epi64(out[5].v, 25); out[1].v = _mm_and_si128(out[1].v, packedmask25.v); out[5].v = _mm_and_si128(out[5].v, packedmask25.v); out[2].v = _mm_add_epi64(out[2].v, c1); out[6].v = _mm_add_epi64(out[6].v, c2);
+ c1 = _mm_srli_epi64(out[2].v, 26); c2 = _mm_srli_epi64(out[6].v, 26); out[2].v = _mm_and_si128(out[2].v, packedmask26.v); out[6].v = _mm_and_si128(out[6].v, packedmask26.v); out[3].v = _mm_add_epi64(out[3].v, c1); out[7].v = _mm_add_epi64(out[7].v, c2);
+ c1 = _mm_srli_epi64(out[3].v, 25); c2 = _mm_srli_epi64(out[7].v, 25); out[3].v = _mm_and_si128(out[3].v, packedmask25.v); out[7].v = _mm_and_si128(out[7].v, packedmask25.v); out[4].v = _mm_add_epi64(out[4].v, c1); out[8].v = _mm_add_epi64(out[8].v, c2);
+ c2 = _mm_srli_epi64(out[8].v, 26); out[8].v = _mm_and_si128(out[8].v, packedmask26.v); out[9].v = _mm_add_epi64(out[9].v, c2);
+ c2 = _mm_srli_epi64(out[9].v, 25); out[9].v = _mm_and_si128(out[9].v, packedmask25.v); out[0].v = _mm_add_epi64(out[0].v, _mm_mul_epu32(c2, packednineteen.v));
+ c1 = _mm_srli_epi64(out[0].v, 26); c2 = _mm_srli_epi64(out[4].v, 26); out[0].v = _mm_and_si128(out[0].v, packedmask26.v); out[4].v = _mm_and_si128(out[4].v, packedmask26.v); out[1].v = _mm_add_epi64(out[1].v, c1); out[5].v = _mm_add_epi64(out[5].v, c2);
+}
+
+/* multiply a bignum */
+static void
+curve25519_mul(bignum25519 out, const bignum25519 r, const bignum25519 s) {
+ xmmi m01,m23,m45,m67,m89;
+ xmmi m0123,m4567;
+ xmmi s0123,s4567;
+ xmmi s01,s23,s45,s67,s89;
+ xmmi s12,s34,s56,s78,s9;
+ xmmi r0,r2,r4,r6,r8;
+ xmmi r1,r3,r5,r7,r9;
+ xmmi r119,r219,r319,r419,r519,r619,r719,r819,r919;
+ xmmi c1,c2,c3;
+
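+ /* gather s into limb pairs (s01 = s[0],s[1], etc.) and broadcast each r limb across both 64-bit lanes so one pmuludq produces two partial products; odd r limbs are doubled in one lane to supply the mixed-radix cross terms */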
+ s0123 = _mm_load_si128((xmmi*)s + 0);
+ s01 = _mm_shuffle_epi32(s0123,_MM_SHUFFLE(3,1,2,0));
+ s12 = _mm_shuffle_epi32(s0123, _MM_SHUFFLE(2,2,1,1));
+ s23 = _mm_shuffle_epi32(s0123,_MM_SHUFFLE(3,3,2,2));
+ s4567 = _mm_load_si128((xmmi*)s + 1);
+ s34 = _mm_unpacklo_epi64(_mm_srli_si128(s0123,12),s4567);
+ s45 = _mm_shuffle_epi32(s4567,_MM_SHUFFLE(3,1,2,0));
+ s56 = _mm_shuffle_epi32(s4567, _MM_SHUFFLE(2,2,1,1));
+ s67 = _mm_shuffle_epi32(s4567,_MM_SHUFFLE(3,3,2,2));
+ s89 = _mm_load_si128((xmmi*)s + 2);
+ s78 = _mm_unpacklo_epi64(_mm_srli_si128(s4567,12),s89);
+ s89 = _mm_shuffle_epi32(s89,_MM_SHUFFLE(3,1,2,0));
+ s9 = _mm_shuffle_epi32(s89, _MM_SHUFFLE(3,3,2,2));
+
+ r0 = _mm_load_si128((xmmi*)r + 0);
+ r1 = _mm_shuffle_epi32(r0, _MM_SHUFFLE(1,1,1,1));
+ r1 = _mm_add_epi64(r1, _mm_and_si128(r1, sse2_top64bitmask.v));
+ r2 = _mm_shuffle_epi32(r0, _MM_SHUFFLE(2,2,2,2));
+ r3 = _mm_shuffle_epi32(r0, _MM_SHUFFLE(3,3,3,3));
+ r3 = _mm_add_epi64(r3, _mm_and_si128(r3, sse2_top64bitmask.v));
+ r0 = _mm_shuffle_epi32(r0, _MM_SHUFFLE(0,0,0,0));
+ r4 = _mm_load_si128((xmmi*)r + 1);
+ r5 = _mm_shuffle_epi32(r4, _MM_SHUFFLE(1,1,1,1));
+ r5 = _mm_add_epi64(r5, _mm_and_si128(r5, sse2_top64bitmask.v));
+ r6 = _mm_shuffle_epi32(r4, _MM_SHUFFLE(2,2,2,2));
+ r7 = _mm_shuffle_epi32(r4, _MM_SHUFFLE(3,3,3,3));
+ r7 = _mm_add_epi64(r7, _mm_and_si128(r7, sse2_top64bitmask.v));
+ r4 = _mm_shuffle_epi32(r4, _MM_SHUFFLE(0,0,0,0));
+ r8 = _mm_load_si128((xmmi*)r + 2);
+ r9 = _mm_shuffle_epi32(r8, _MM_SHUFFLE(3,1,3,1));
+ r9 = _mm_add_epi64(r9, _mm_and_si128(r9, sse2_top64bitmask.v));
+ r8 = _mm_shuffle_epi32(r8, _MM_SHUFFLE(3,0,3,0));
+
+ m01 = _mm_mul_epu32(r1,s01);
+ m23 = _mm_mul_epu32(r1,s23);
+ m45 = _mm_mul_epu32(r1,s45);
+ m67 = _mm_mul_epu32(r1,s67);
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r3,s01));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r3,s23));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r3,s45));
+ m89 = _mm_mul_epu32(r1,s89);
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r5,s01));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r5,s23));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r3,s67));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r7,s01));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r5,s45));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r7,s23));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r9,s01));
+
+ /* shift up */
+ m89 = _mm_unpackhi_epi64(m67,_mm_slli_si128(m89,8));
+ m67 = _mm_unpackhi_epi64(m45,_mm_slli_si128(m67,8));
+ m45 = _mm_unpackhi_epi64(m23,_mm_slli_si128(m45,8));
+ m23 = _mm_unpackhi_epi64(m01,_mm_slli_si128(m23,8));
+ m01 = _mm_unpackhi_epi64(_mm_setzero_si128(),_mm_slli_si128(m01,8));
+
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r0,s01));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r0,s23));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r0,s45));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r0,s67));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r2,s01));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r2,s23));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r4,s23));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r0,s89));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r4,s01));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r2,s45));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r2,s67));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r6,s01));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r4,s45));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r6,s23));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r8,s01));
+
+ r219 = _mm_mul_epu32(r2, packednineteen.v);
+ r419 = _mm_mul_epu32(r4, packednineteen.v);
+ r619 = _mm_mul_epu32(r6, packednineteen.v);
+ r819 = _mm_mul_epu32(r8, packednineteen.v);
+ r119 = _mm_shuffle_epi32(r1,_MM_SHUFFLE(0,0,2,2)); r119 = _mm_mul_epu32(r119, packednineteen.v);
+ r319 = _mm_shuffle_epi32(r3,_MM_SHUFFLE(0,0,2,2)); r319 = _mm_mul_epu32(r319, packednineteen.v);
+ r519 = _mm_shuffle_epi32(r5,_MM_SHUFFLE(0,0,2,2)); r519 = _mm_mul_epu32(r519, packednineteen.v);
+ r719 = _mm_shuffle_epi32(r7,_MM_SHUFFLE(0,0,2,2)); r719 = _mm_mul_epu32(r719, packednineteen.v);
+ r919 = _mm_shuffle_epi32(r9,_MM_SHUFFLE(0,0,2,2)); r919 = _mm_mul_epu32(r919, packednineteen.v);
+
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r919,s12));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r919,s34));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r919,s56));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r919,s78));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r719,s34));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r719,s56));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r719,s78));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r719,s9));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r519,s56));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r519,s78));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r519,s9));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r819,s89));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r319,s78));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r319,s9));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r619,s89));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r919,s9));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r819,s23));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r819,s45));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r819,s67));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r619,s45));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r619,s67));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r419,s67));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r419,s89));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r219,s89));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r119,s9));
+
+ r0 = _mm_unpacklo_epi64(m01, m45);
+ r1 = _mm_unpackhi_epi64(m01, m45);
+ r2 = _mm_unpacklo_epi64(m23, m67);
+ r3 = _mm_unpackhi_epi64(m23, m67);
+ r4 = _mm_unpacklo_epi64(m89, m89);
+ r5 = _mm_unpackhi_epi64(m89, m89);
+
+ c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2);
+ c1 = _mm_srli_epi64(r1, 25); c2 = _mm_srli_epi64(r3, 25); r1 = _mm_and_si128(r1, packedmask25.v); r3 = _mm_and_si128(r3, packedmask25.v); r2 = _mm_add_epi64(r2, c1); r4 = _mm_add_epi64(r4, c2); c3 = _mm_slli_si128(c2, 8);
+ c1 = _mm_srli_epi64(r4, 26); r4 = _mm_and_si128(r4, packedmask26.v); r5 = _mm_add_epi64(r5, c1);
+ c1 = _mm_srli_epi64(r5, 25); r5 = _mm_and_si128(r5, packedmask25.v); r0 = _mm_add_epi64(r0, _mm_unpackhi_epi64(_mm_mul_epu32(c1, packednineteen.v), c3));
+ c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2);
+
+ m0123 = _mm_unpacklo_epi32(r0, r1);
+ m4567 = _mm_unpackhi_epi32(r0, r1);
+ m0123 = _mm_unpacklo_epi64(m0123, _mm_unpacklo_epi32(r2, r3));
+ m4567 = _mm_unpacklo_epi64(m4567, _mm_unpackhi_epi32(r2, r3));
+ m89 = _mm_unpackhi_epi32(r4, r5);
+
+ _mm_store_si128((xmmi*)out + 0, m0123);
+ _mm_store_si128((xmmi*)out + 1, m4567);
+ _mm_store_si128((xmmi*)out + 2, m89);
+}
+
+typedef struct bignum25519mulprecomp_t {
+ xmmi r0,r2,r4,r6,r8;
+ xmmi r1,r3,r5,r7,r9;
+ xmmi r119,r219,r319,r419,r519,r619,r719,r819,r919;
+} bignum25519mulprecomp;
+
+/* precompute a constant to multiply by */
+DONNA_INLINE static void
+curve25519_mul_precompute(bignum25519mulprecomp *pre, const bignum25519 r) {
+ pre->r0 = _mm_load_si128((xmmi*)r + 0);
+ pre->r1 = _mm_shuffle_epi32(pre->r0, _MM_SHUFFLE(1,1,1,1));
+ pre->r1 = _mm_add_epi64(pre->r1, _mm_and_si128(pre->r1, sse2_top64bitmask.v));
+ pre->r2 = _mm_shuffle_epi32(pre->r0, _MM_SHUFFLE(2,2,2,2));
+ pre->r3 = _mm_shuffle_epi32(pre->r0, _MM_SHUFFLE(3,3,3,3));
+ pre->r3 = _mm_add_epi64(pre->r3, _mm_and_si128(pre->r3, sse2_top64bitmask.v));
+ pre->r0 = _mm_shuffle_epi32(pre->r0, _MM_SHUFFLE(0,0,0,0));
+ pre->r4 = _mm_load_si128((xmmi*)r + 1);
+ pre->r5 = _mm_shuffle_epi32(pre->r4, _MM_SHUFFLE(1,1,1,1));
+ pre->r5 = _mm_add_epi64(pre->r5, _mm_and_si128(pre->r5, sse2_top64bitmask.v));
+ pre->r6 = _mm_shuffle_epi32(pre->r4, _MM_SHUFFLE(2,2,2,2));
+ pre->r7 = _mm_shuffle_epi32(pre->r4, _MM_SHUFFLE(3,3,3,3));
+ pre->r7 = _mm_add_epi64(pre->r7, _mm_and_si128(pre->r7, sse2_top64bitmask.v));
+ pre->r4 = _mm_shuffle_epi32(pre->r4, _MM_SHUFFLE(0,0,0,0));
+ pre->r8 = _mm_load_si128((xmmi*)r + 2);
+ pre->r9 = _mm_shuffle_epi32(pre->r8, _MM_SHUFFLE(3,1,3,1));
+ pre->r9 = _mm_add_epi64(pre->r9, _mm_and_si128(pre->r9, sse2_top64bitmask.v));
+ pre->r8 = _mm_shuffle_epi32(pre->r8, _MM_SHUFFLE(3,0,3,0));
+
+ pre->r219 = _mm_mul_epu32(pre->r2, packednineteen.v);
+ pre->r419 = _mm_mul_epu32(pre->r4, packednineteen.v);
+ pre->r619 = _mm_mul_epu32(pre->r6, packednineteen.v);
+ pre->r819 = _mm_mul_epu32(pre->r8, packednineteen.v);
+ pre->r119 = _mm_shuffle_epi32(pre->r1,_MM_SHUFFLE(0,0,2,2)); pre->r119 = _mm_mul_epu32(pre->r119, packednineteen.v);
+ pre->r319 = _mm_shuffle_epi32(pre->r3,_MM_SHUFFLE(0,0,2,2)); pre->r319 = _mm_mul_epu32(pre->r319, packednineteen.v);
+ pre->r519 = _mm_shuffle_epi32(pre->r5,_MM_SHUFFLE(0,0,2,2)); pre->r519 = _mm_mul_epu32(pre->r519, packednineteen.v);
+ pre->r719 = _mm_shuffle_epi32(pre->r7,_MM_SHUFFLE(0,0,2,2)); pre->r719 = _mm_mul_epu32(pre->r719, packednineteen.v);
+ pre->r919 = _mm_shuffle_epi32(pre->r9,_MM_SHUFFLE(0,0,2,2)); pre->r919 = _mm_mul_epu32(pre->r919, packednineteen.v);
+}
+
+
+/* multiply a bignum by a pre-computed constant */
+DONNA_INLINE static void
+curve25519_mul_precomputed(bignum25519 out, const bignum25519 s, const bignum25519mulprecomp *r) {
+ xmmi m01,m23,m45,m67,m89;
+ xmmi m0123,m4567;
+ xmmi s0123,s4567;
+ xmmi s01,s23,s45,s67,s89;
+ xmmi s12,s34,s56,s78,s9;
+ xmmi r0,r1,r2,r3,r4,r5;
+ xmmi c1,c2,c3;
+
+ s0123 = _mm_load_si128((xmmi*)s + 0);
+ s01 = _mm_shuffle_epi32(s0123,_MM_SHUFFLE(3,1,2,0));
+ s12 = _mm_shuffle_epi32(s0123, _MM_SHUFFLE(2,2,1,1));
+ s23 = _mm_shuffle_epi32(s0123,_MM_SHUFFLE(3,3,2,2));
+ s4567 = _mm_load_si128((xmmi*)s + 1);
+ s34 = _mm_unpacklo_epi64(_mm_srli_si128(s0123,12),s4567);
+ s45 = _mm_shuffle_epi32(s4567,_MM_SHUFFLE(3,1,2,0));
+ s56 = _mm_shuffle_epi32(s4567, _MM_SHUFFLE(2,2,1,1));
+ s67 = _mm_shuffle_epi32(s4567,_MM_SHUFFLE(3,3,2,2));
+ s89 = _mm_load_si128((xmmi*)s + 2);
+ s78 = _mm_unpacklo_epi64(_mm_srli_si128(s4567,12),s89);
+ s89 = _mm_shuffle_epi32(s89,_MM_SHUFFLE(3,1,2,0));
+ s9 = _mm_shuffle_epi32(s89, _MM_SHUFFLE(3,3,2,2));
+
+ m01 = _mm_mul_epu32(r->r1,s01);
+ m23 = _mm_mul_epu32(r->r1,s23);
+ m45 = _mm_mul_epu32(r->r1,s45);
+ m67 = _mm_mul_epu32(r->r1,s67);
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r3,s01));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r->r3,s23));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r3,s45));
+ m89 = _mm_mul_epu32(r->r1,s89);
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r->r5,s01));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r5,s23));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r3,s67));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r7,s01));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r5,s45));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r7,s23));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r9,s01));
+
+ /* shift up */
+ m89 = _mm_unpackhi_epi64(m67,_mm_slli_si128(m89,8));
+ m67 = _mm_unpackhi_epi64(m45,_mm_slli_si128(m67,8));
+ m45 = _mm_unpackhi_epi64(m23,_mm_slli_si128(m45,8));
+ m23 = _mm_unpackhi_epi64(m01,_mm_slli_si128(m23,8));
+ m01 = _mm_unpackhi_epi64(_mm_setzero_si128(),_mm_slli_si128(m01,8));
+
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r0,s01));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r0,s23));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r->r0,s45));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r0,s67));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r2,s01));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r->r2,s23));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r4,s23));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r0,s89));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r->r4,s01));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r2,s45));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r2,s67));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r6,s01));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r4,s45));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r6,s23));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r8,s01));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r919,s12));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r919,s34));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r->r919,s56));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r919,s78));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r719,s34));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r719,s56));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r->r719,s78));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r719,s9));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r519,s56));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r519,s78));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r->r519,s9));
+ m67 = _mm_add_epi64(m67,_mm_mul_epu32(r->r819,s89));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r319,s78));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r319,s9));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r->r619,s89));
+ m89 = _mm_add_epi64(m89,_mm_mul_epu32(r->r919,s9));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r819,s23));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r819,s45));
+ m45 = _mm_add_epi64(m45,_mm_mul_epu32(r->r819,s67));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r619,s45));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r619,s67));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r419,s67));
+ m23 = _mm_add_epi64(m23,_mm_mul_epu32(r->r419,s89));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r219,s89));
+ m01 = _mm_add_epi64(m01,_mm_mul_epu32(r->r119,s9));
+
+ r0 = _mm_unpacklo_epi64(m01, m45);
+ r1 = _mm_unpackhi_epi64(m01, m45);
+ r2 = _mm_unpacklo_epi64(m23, m67);
+ r3 = _mm_unpackhi_epi64(m23, m67);
+ r4 = _mm_unpacklo_epi64(m89, m89);
+ r5 = _mm_unpackhi_epi64(m89, m89);
+
+ c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2);
+ c1 = _mm_srli_epi64(r1, 25); c2 = _mm_srli_epi64(r3, 25); r1 = _mm_and_si128(r1, packedmask25.v); r3 = _mm_and_si128(r3, packedmask25.v); r2 = _mm_add_epi64(r2, c1); r4 = _mm_add_epi64(r4, c2); c3 = _mm_slli_si128(c2, 8);
+ c1 = _mm_srli_epi64(r4, 26); r4 = _mm_and_si128(r4, packedmask26.v); r5 = _mm_add_epi64(r5, c1);
+ c1 = _mm_srli_epi64(r5, 25); r5 = _mm_and_si128(r5, packedmask25.v); r0 = _mm_add_epi64(r0, _mm_unpackhi_epi64(_mm_mul_epu32(c1, packednineteen.v), c3));
+ c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2);
+
+ m0123 = _mm_unpacklo_epi32(r0, r1);
+ m4567 = _mm_unpackhi_epi32(r0, r1);
+ m0123 = _mm_unpacklo_epi64(m0123, _mm_unpacklo_epi32(r2, r3));
+ m4567 = _mm_unpacklo_epi64(m4567, _mm_unpackhi_epi32(r2, r3));
+ m89 = _mm_unpackhi_epi32(r4, r5);
+
+ _mm_store_si128((xmmi*)out + 0, m0123);
+ _mm_store_si128((xmmi*)out + 1, m4567);
+ _mm_store_si128((xmmi*)out + 2, m89);
+}
+
+/* square a bignum 'count' times (curve25519_square squares once) */
+#define curve25519_square(r,x) curve25519_square_times(r,x,1)
+
+static void
+curve25519_square_times(bignum25519 r, const bignum25519 in, int count) {
+ xmmi m01,m23,m45,m67,m89;
+ xmmi r0,r1,r2,r3,r4,r5,r6,r7,r8,r9;
+ xmmi r0a,r1a,r2a,r3a,r7a,r9a;
+ xmmi r0123,r4567;
+ xmmi r01,r23,r45,r67,r6x,r89,r8x;
+ xmmi r12,r34,r56,r78,r9x;
+ xmmi r5619;
+ xmmi c1,c2,c3;
+
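+ /* repeated squaring in place: symmetric cross terms are doubled and the high limbs are pre-scaled by 19 or 38 for the wraparound, following the same reduction as curve25519_mul */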
+ r0123 = _mm_load_si128((xmmi*)in + 0);
+ r01 = _mm_shuffle_epi32(r0123,_MM_SHUFFLE(3,1,2,0));
+ r23 = _mm_shuffle_epi32(r0123,_MM_SHUFFLE(3,3,2,2));
+ r4567 = _mm_load_si128((xmmi*)in + 1);
+ r45 = _mm_shuffle_epi32(r4567,_MM_SHUFFLE(3,1,2,0));
+ r67 = _mm_shuffle_epi32(r4567,_MM_SHUFFLE(3,3,2,2));
+ r89 = _mm_load_si128((xmmi*)in + 2);
+ r89 = _mm_shuffle_epi32(r89,_MM_SHUFFLE(3,1,2,0));
+
+ do {
+ r12 = _mm_unpackhi_epi64(r01, _mm_slli_si128(r23, 8));
+ r0 = _mm_shuffle_epi32(r01, _MM_SHUFFLE(0,0,0,0));
+ r0 = _mm_add_epi64(r0, _mm_and_si128(r0, sse2_top64bitmask.v));
+ r0a = _mm_shuffle_epi32(r0,_MM_SHUFFLE(3,2,1,2));
+ r1 = _mm_shuffle_epi32(r01, _MM_SHUFFLE(2,2,2,2));
+ r2 = _mm_shuffle_epi32(r23, _MM_SHUFFLE(0,0,0,0));
+ r2 = _mm_add_epi64(r2, _mm_and_si128(r2, sse2_top64bitmask.v));
+ r2a = _mm_shuffle_epi32(r2,_MM_SHUFFLE(3,2,1,2));
+ r3 = _mm_shuffle_epi32(r23, _MM_SHUFFLE(2,2,2,2));
+ r34 = _mm_unpackhi_epi64(r23, _mm_slli_si128(r45, 8));
+ r4 = _mm_shuffle_epi32(r45, _MM_SHUFFLE(0,0,0,0));
+ r4 = _mm_add_epi64(r4, _mm_and_si128(r4, sse2_top64bitmask.v));
+ r56 = _mm_unpackhi_epi64(r45, _mm_slli_si128(r67, 8));
+ r5619 = _mm_mul_epu32(r56, packednineteen.v);
+ r5 = _mm_shuffle_epi32(r5619, _MM_SHUFFLE(1,1,1,0));
+ r6 = _mm_shuffle_epi32(r5619, _MM_SHUFFLE(3,2,3,2));
+ r78 = _mm_unpackhi_epi64(r67, _mm_slli_si128(r89, 8));
+ r6x = _mm_unpacklo_epi64(r67, _mm_setzero_si128());
+ r7 = _mm_shuffle_epi32(r67, _MM_SHUFFLE(2,2,2,2));
+ r7 = _mm_mul_epu32(r7, packed3819.v);
+ r7a = _mm_shuffle_epi32(r7, _MM_SHUFFLE(3,3,3,2));
+ r8x = _mm_unpacklo_epi64(r89, _mm_setzero_si128());
+ r8 = _mm_shuffle_epi32(r89, _MM_SHUFFLE(0,0,0,0));
+ r8 = _mm_mul_epu32(r8, packednineteen.v);
+ r9 = _mm_shuffle_epi32(r89, _MM_SHUFFLE(2,2,2,2));
+ r9x = _mm_slli_epi32(_mm_shuffle_epi32(r89, _MM_SHUFFLE(3,3,3,2)), 1);
+ r9 = _mm_mul_epu32(r9, packed3819.v);
+ r9a = _mm_shuffle_epi32(r9, _MM_SHUFFLE(2,2,2,2));
+
+ m01 = _mm_mul_epu32(r01, r0);
+ m23 = _mm_mul_epu32(r23, r0a);
+ m45 = _mm_mul_epu32(r45, r0a);
+ m45 = _mm_add_epi64(m45, _mm_mul_epu32(r23, r2));
+ r23 = _mm_slli_epi32(r23, 1);
+ m67 = _mm_mul_epu32(r67, r0a);
+ m67 = _mm_add_epi64(m67, _mm_mul_epu32(r45, r2a));
+ m89 = _mm_mul_epu32(r89, r0a);
+ m89 = _mm_add_epi64(m89, _mm_mul_epu32(r67, r2a));
+ r67 = _mm_slli_epi32(r67, 1);
+ m89 = _mm_add_epi64(m89, _mm_mul_epu32(r45, r4));
+ r45 = _mm_slli_epi32(r45, 1);
+
+ r1 = _mm_slli_epi32(r1, 1);
+ r3 = _mm_slli_epi32(r3, 1);
+ r1a = _mm_add_epi64(r1, _mm_and_si128(r1, sse2_bot64bitmask.v));
+ r3a = _mm_add_epi64(r3, _mm_and_si128(r3, sse2_bot64bitmask.v));
+
+ m23 = _mm_add_epi64(m23, _mm_mul_epu32(r12, r1));
+ m45 = _mm_add_epi64(m45, _mm_mul_epu32(r34, r1a));
+ m67 = _mm_add_epi64(m67, _mm_mul_epu32(r56, r1a));
+ m67 = _mm_add_epi64(m67, _mm_mul_epu32(r34, r3));
+ r34 = _mm_slli_epi32(r34, 1);
+ m89 = _mm_add_epi64(m89, _mm_mul_epu32(r78, r1a));
+ r78 = _mm_slli_epi32(r78, 1);
+ m89 = _mm_add_epi64(m89, _mm_mul_epu32(r56, r3a));
+ r56 = _mm_slli_epi32(r56, 1);
+
+ m01 = _mm_add_epi64(m01, _mm_mul_epu32(_mm_slli_epi32(r12, 1), r9));
+ m01 = _mm_add_epi64(m01, _mm_mul_epu32(r34, r7));
+ m23 = _mm_add_epi64(m23, _mm_mul_epu32(r34, r9));
+ m01 = _mm_add_epi64(m01, _mm_mul_epu32(r56, r5));
+ m23 = _mm_add_epi64(m23, _mm_mul_epu32(r56, r7));
+ m45 = _mm_add_epi64(m45, _mm_mul_epu32(r56, r9));
+ m01 = _mm_add_epi64(m01, _mm_mul_epu32(r23, r8));
+ m01 = _mm_add_epi64(m01, _mm_mul_epu32(r45, r6));
+ m23 = _mm_add_epi64(m23, _mm_mul_epu32(r45, r8));
+ m23 = _mm_add_epi64(m23, _mm_mul_epu32(r6x, r6));
+ m45 = _mm_add_epi64(m45, _mm_mul_epu32(r78, r7a));
+ m67 = _mm_add_epi64(m67, _mm_mul_epu32(r78, r9));
+ m45 = _mm_add_epi64(m45, _mm_mul_epu32(r67, r8));
+ m67 = _mm_add_epi64(m67, _mm_mul_epu32(r8x, r8));
+ m89 = _mm_add_epi64(m89, _mm_mul_epu32(r9x, r9a));
+
+ r0 = _mm_unpacklo_epi64(m01, m45);
+ r1 = _mm_unpackhi_epi64(m01, m45);
+ r2 = _mm_unpacklo_epi64(m23, m67);
+ r3 = _mm_unpackhi_epi64(m23, m67);
+ r4 = _mm_unpacklo_epi64(m89, m89);
+ r5 = _mm_unpackhi_epi64(m89, m89);
+
+ c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2);
+ c1 = _mm_srli_epi64(r1, 25); c2 = _mm_srli_epi64(r3, 25); r1 = _mm_and_si128(r1, packedmask25.v); r3 = _mm_and_si128(r3, packedmask25.v); r2 = _mm_add_epi64(r2, c1); r4 = _mm_add_epi64(r4, c2); c3 = _mm_slli_si128(c2, 8);
+ c1 = _mm_srli_epi64(r4, 26); r4 = _mm_and_si128(r4, packedmask26.v); r5 = _mm_add_epi64(r5, c1);
+ c1 = _mm_srli_epi64(r5, 25); r5 = _mm_and_si128(r5, packedmask25.v); r0 = _mm_add_epi64(r0, _mm_unpackhi_epi64(_mm_mul_epu32(c1, packednineteen.v), c3));
+ c1 = _mm_srli_epi64(r0, 26); c2 = _mm_srli_epi64(r2, 26); r0 = _mm_and_si128(r0, packedmask26.v); r2 = _mm_and_si128(r2, packedmask26.v); r1 = _mm_add_epi64(r1, c1); r3 = _mm_add_epi64(r3, c2);
+
+ r01 = _mm_unpacklo_epi64(r0, r1);
+ r45 = _mm_unpackhi_epi64(r0, r1);
+ r23 = _mm_unpacklo_epi64(r2, r3);
+ r67 = _mm_unpackhi_epi64(r2, r3);
+ r89 = _mm_unpackhi_epi64(r4, r5);
+ } while (--count);
+
+ r0123 = _mm_shuffle_epi32(r23, _MM_SHUFFLE(2,0,3,3));
+ r4567 = _mm_shuffle_epi32(r67, _MM_SHUFFLE(2,0,3,3));
+ r0123 = _mm_or_si128(r0123, _mm_shuffle_epi32(r01, _MM_SHUFFLE(3,3,2,0)));
+ r4567 = _mm_or_si128(r4567, _mm_shuffle_epi32(r45, _MM_SHUFFLE(3,3,2,0)));
+ r89 = _mm_shuffle_epi32(r89, _MM_SHUFFLE(3,3,2,0));
+
+ _mm_store_si128((xmmi*)r + 0, r0123);
+ _mm_store_si128((xmmi*)r + 1, r4567);
+ _mm_store_si128((xmmi*)r + 2, r89);
+}
+
+/* square two packed bignums */
+DONNA_INLINE static void
+curve25519_square_packed64(packedelem64 *out, const packedelem64 *r) {
+ xmmi r0,r1,r2,r3;
+ xmmi r1_2,r3_2,r4_2,r5_2,r6_2,r7_2;
+ xmmi d5,d6,d7,d8,d9;
+ xmmi c1,c2;
+
+ r0 = r[0].v;
+ r1 = r[1].v;
+ r2 = r[2].v;
+ r3 = r[3].v;
+
+ out[0].v = _mm_mul_epu32(r0, r0);
+ r0 = _mm_slli_epi32(r0, 1);
+ out[1].v = _mm_mul_epu32(r0, r1);
+ r1_2 = _mm_slli_epi32(r1, 1);
+ out[2].v = _mm_add_epi64(_mm_mul_epu32(r0, r2 ), _mm_mul_epu32(r1, r1_2));
+ r1 = r1_2;
+ out[3].v = _mm_add_epi64(_mm_mul_epu32(r0, r3 ), _mm_mul_epu32(r1, r2 ));
+ r3_2 = _mm_slli_epi32(r3, 1);
+ out[4].v = _mm_add_epi64(_mm_mul_epu32(r0, r[4].v), _mm_add_epi64(_mm_mul_epu32(r1, r3_2 ), _mm_mul_epu32(r2, r2)));
+ r2 = _mm_slli_epi32(r2, 1);
+ out[5].v = _mm_add_epi64(_mm_mul_epu32(r0, r[5].v), _mm_add_epi64(_mm_mul_epu32(r1, r[4].v), _mm_mul_epu32(r2, r3)));
+ r5_2 = _mm_slli_epi32(r[5].v, 1);
+ out[6].v = _mm_add_epi64(_mm_mul_epu32(r0, r[6].v), _mm_add_epi64(_mm_mul_epu32(r1, r5_2 ), _mm_add_epi64(_mm_mul_epu32(r2, r[4].v), _mm_mul_epu32(r3, r3_2 ))));
+ r3 = r3_2;
+ out[7].v = _mm_add_epi64(_mm_mul_epu32(r0, r[7].v), _mm_add_epi64(_mm_mul_epu32(r1, r[6].v), _mm_add_epi64(_mm_mul_epu32(r2, r[5].v), _mm_mul_epu32(r3, r[4].v))));
+ r7_2 = _mm_slli_epi32(r[7].v, 1);
+ out[8].v = _mm_add_epi64(_mm_mul_epu32(r0, r[8].v), _mm_add_epi64(_mm_mul_epu32(r1, r7_2 ), _mm_add_epi64(_mm_mul_epu32(r2, r[6].v), _mm_add_epi64(_mm_mul_epu32(r3, r5_2 ), _mm_mul_epu32(r[4].v, r[4].v)))));
+ out[9].v = _mm_add_epi64(_mm_mul_epu32(r0, r[9].v), _mm_add_epi64(_mm_mul_epu32(r1, r[8].v), _mm_add_epi64(_mm_mul_epu32(r2, r[7].v), _mm_add_epi64(_mm_mul_epu32(r3, r[6].v), _mm_mul_epu32(r[4].v, r5_2 )))));
+
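+ /* d5..d9 hold the high limbs pre-scaled for the wraparound: 38 = 2*19 for the odd positions, 19 for the even ones */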
+ d5 = _mm_mul_epu32(r[5].v, packedthirtyeight.v);
+ d6 = _mm_mul_epu32(r[6].v, packednineteen.v);
+ d7 = _mm_mul_epu32(r[7].v, packedthirtyeight.v);
+ d8 = _mm_mul_epu32(r[8].v, packednineteen.v);
+ d9 = _mm_mul_epu32(r[9].v, packedthirtyeight.v);
+
+ r4_2 = _mm_slli_epi32(r[4].v, 1);
+ r6_2 = _mm_slli_epi32(r[6].v, 1);
+ out[0].v = _mm_add_epi64(out[0].v, _mm_add_epi64(_mm_mul_epu32(d9, r1 ), _mm_add_epi64(_mm_mul_epu32(d8, r2 ), _mm_add_epi64(_mm_mul_epu32(d7, r3 ), _mm_add_epi64(_mm_mul_epu32(d6, r4_2), _mm_mul_epu32(d5, r[5].v))))));
+ out[1].v = _mm_add_epi64(out[1].v, _mm_add_epi64(_mm_mul_epu32(d9, _mm_srli_epi32(r2, 1)), _mm_add_epi64(_mm_mul_epu32(d8, r3 ), _mm_add_epi64(_mm_mul_epu32(d7, r[4].v), _mm_mul_epu32(d6, r5_2 )))));
+ out[2].v = _mm_add_epi64(out[2].v, _mm_add_epi64(_mm_mul_epu32(d9, r3 ), _mm_add_epi64(_mm_mul_epu32(d8, r4_2), _mm_add_epi64(_mm_mul_epu32(d7, r5_2 ), _mm_mul_epu32(d6, r[6].v)))));
+ out[3].v = _mm_add_epi64(out[3].v, _mm_add_epi64(_mm_mul_epu32(d9, r[4].v ), _mm_add_epi64(_mm_mul_epu32(d8, r5_2), _mm_mul_epu32(d7, r[6].v))));
+ out[4].v = _mm_add_epi64(out[4].v, _mm_add_epi64(_mm_mul_epu32(d9, r5_2 ), _mm_add_epi64(_mm_mul_epu32(d8, r6_2), _mm_mul_epu32(d7, r[7].v))));
+ out[5].v = _mm_add_epi64(out[5].v, _mm_add_epi64(_mm_mul_epu32(d9, r[6].v ), _mm_mul_epu32(d8, r7_2 )));
+ out[6].v = _mm_add_epi64(out[6].v, _mm_add_epi64(_mm_mul_epu32(d9, r7_2 ), _mm_mul_epu32(d8, r[8].v)));
+ out[7].v = _mm_add_epi64(out[7].v, _mm_mul_epu32(d9, r[8].v));
+ out[8].v = _mm_add_epi64(out[8].v, _mm_mul_epu32(d9, r[9].v));
+
+ c1 = _mm_srli_epi64(out[0].v, 26); c2 = _mm_srli_epi64(out[4].v, 26); out[0].v = _mm_and_si128(out[0].v, packedmask26.v); out[4].v = _mm_and_si128(out[4].v, packedmask26.v); out[1].v = _mm_add_epi64(out[1].v, c1); out[5].v = _mm_add_epi64(out[5].v, c2);
+ c1 = _mm_srli_epi64(out[1].v, 25); c2 = _mm_srli_epi64(out[5].v, 25); out[1].v = _mm_and_si128(out[1].v, packedmask25.v); out[5].v = _mm_and_si128(out[5].v, packedmask25.v); out[2].v = _mm_add_epi64(out[2].v, c1); out[6].v = _mm_add_epi64(out[6].v, c2);
+ c1 = _mm_srli_epi64(out[2].v, 26); c2 = _mm_srli_epi64(out[6].v, 26); out[2].v = _mm_and_si128(out[2].v, packedmask26.v); out[6].v = _mm_and_si128(out[6].v, packedmask26.v); out[3].v = _mm_add_epi64(out[3].v, c1); out[7].v = _mm_add_epi64(out[7].v, c2);
+ c1 = _mm_srli_epi64(out[3].v, 25); c2 = _mm_srli_epi64(out[7].v, 25); out[3].v = _mm_and_si128(out[3].v, packedmask25.v); out[7].v = _mm_and_si128(out[7].v, packedmask25.v); out[4].v = _mm_add_epi64(out[4].v, c1); out[8].v = _mm_add_epi64(out[8].v, c2);
+ c2 = _mm_srli_epi64(out[8].v, 26); out[8].v = _mm_and_si128(out[8].v, packedmask26.v); out[9].v = _mm_add_epi64(out[9].v, c2);
+ c2 = _mm_srli_epi64(out[9].v, 25); out[9].v = _mm_and_si128(out[9].v, packedmask25.v); out[0].v = _mm_add_epi64(out[0].v, _mm_mul_epu32(c2, packednineteen.v));
+ c1 = _mm_srli_epi64(out[0].v, 26); c2 = _mm_srli_epi64(out[4].v, 26); out[0].v = _mm_and_si128(out[0].v, packedmask26.v); out[4].v = _mm_and_si128(out[4].v, packedmask26.v); out[1].v = _mm_add_epi64(out[1].v, c1); out[5].v = _mm_add_epi64(out[5].v, c2);
+}
+
+/* make [nqx+nqz,nqpqx+nqpqz], [nqpqx-nqpqz,nqx-nqz] from [nqx+nqz,nqpqx+nqpqz], [nqx-nqz,nqpqx-nqpqz] */
+DONNA_INLINE static void
+curve25519_make_nqpq(packedelem64 *primex, packedelem64 *primez, const packedelem32 *pqx, const packedelem32 *pqz) {
+ primex[0].v = _mm_shuffle_epi32(pqx[0].v, _MM_SHUFFLE(1,1,0,0));
+ primex[1].v = _mm_shuffle_epi32(pqx[0].v, _MM_SHUFFLE(3,3,2,2));
+ primex[2].v = _mm_shuffle_epi32(pqx[1].v, _MM_SHUFFLE(1,1,0,0));
+ primex[3].v = _mm_shuffle_epi32(pqx[1].v, _MM_SHUFFLE(3,3,2,2));
+ primex[4].v = _mm_shuffle_epi32(pqx[2].v, _MM_SHUFFLE(1,1,0,0));
+ primex[5].v = _mm_shuffle_epi32(pqx[2].v, _MM_SHUFFLE(3,3,2,2));
+ primex[6].v = _mm_shuffle_epi32(pqx[3].v, _MM_SHUFFLE(1,1,0,0));
+ primex[7].v = _mm_shuffle_epi32(pqx[3].v, _MM_SHUFFLE(3,3,2,2));
+ primex[8].v = _mm_shuffle_epi32(pqx[4].v, _MM_SHUFFLE(1,1,0,0));
+ primex[9].v = _mm_shuffle_epi32(pqx[4].v, _MM_SHUFFLE(3,3,2,2));
+ primez[0].v = _mm_shuffle_epi32(pqz[0].v, _MM_SHUFFLE(0,0,1,1));
+ primez[1].v = _mm_shuffle_epi32(pqz[0].v, _MM_SHUFFLE(2,2,3,3));
+ primez[2].v = _mm_shuffle_epi32(pqz[1].v, _MM_SHUFFLE(0,0,1,1));
+ primez[3].v = _mm_shuffle_epi32(pqz[1].v, _MM_SHUFFLE(2,2,3,3));
+ primez[4].v = _mm_shuffle_epi32(pqz[2].v, _MM_SHUFFLE(0,0,1,1));
+ primez[5].v = _mm_shuffle_epi32(pqz[2].v, _MM_SHUFFLE(2,2,3,3));
+ primez[6].v = _mm_shuffle_epi32(pqz[3].v, _MM_SHUFFLE(0,0,1,1));
+ primez[7].v = _mm_shuffle_epi32(pqz[3].v, _MM_SHUFFLE(2,2,3,3));
+ primez[8].v = _mm_shuffle_epi32(pqz[4].v, _MM_SHUFFLE(0,0,1,1));
+ primez[9].v = _mm_shuffle_epi32(pqz[4].v, _MM_SHUFFLE(2,2,3,3));
+}
+
+/* make [nqx+nqz,nqx-nqz] from [nqx+nqz,nqpqx+nqpqz], [nqx-nqz,nqpqx-nqpqz] */
+DONNA_INLINE static void
+curve25519_make_nq(packedelem64 *nq, const packedelem32 *pqx, const packedelem32 *pqz) {
+ nq[0].v = _mm_unpacklo_epi64(pqx[0].v, pqz[0].v);
+ nq[1].v = _mm_unpackhi_epi64(pqx[0].v, pqz[0].v);
+ nq[2].v = _mm_unpacklo_epi64(pqx[1].v, pqz[1].v);
+ nq[3].v = _mm_unpackhi_epi64(pqx[1].v, pqz[1].v);
+ nq[4].v = _mm_unpacklo_epi64(pqx[2].v, pqz[2].v);
+ nq[5].v = _mm_unpackhi_epi64(pqx[2].v, pqz[2].v);
+ nq[6].v = _mm_unpacklo_epi64(pqx[3].v, pqz[3].v);
+ nq[7].v = _mm_unpackhi_epi64(pqx[3].v, pqz[3].v);
+ nq[8].v = _mm_unpacklo_epi64(pqx[4].v, pqz[4].v);
+ nq[9].v = _mm_unpackhi_epi64(pqx[4].v, pqz[4].v);
+}
+
+/* compute [nqx+nqz,nqx-nqz] from nqx, nqz */
+DONNA_INLINE static void
+curve25519_compute_nq(packedelem64 *nq, const bignum25519 nqx, const bignum25519 nqz) {
+ xmmi x0,x1,x2;
+ xmmi z0,z1,z2;
+ xmmi a0,a1,a2;
+ xmmi s0,s1,s2;
+ xmmi r0,r1;
+ xmmi c1,c2;
+ x0 = _mm_load_si128((xmmi*)nqx + 0);
+ x1 = _mm_load_si128((xmmi*)nqx + 1);
+ x2 = _mm_load_si128((xmmi*)nqx + 2);
+ z0 = _mm_load_si128((xmmi*)nqz + 0);
+ z1 = _mm_load_si128((xmmi*)nqz + 1);
+ z2 = _mm_load_si128((xmmi*)nqz + 2);
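+ /* nqx - nqz is formed as (nqx + 2p) - nqz, with the packed2p constants supplying the multiple of p, so the limbs stay non-negative */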
+ a0 = _mm_add_epi32(x0, z0);
+ a1 = _mm_add_epi32(x1, z1);
+ a2 = _mm_add_epi32(x2, z2);
+ s0 = _mm_add_epi32(x0, packed2p0.v);
+ s1 = _mm_add_epi32(x1, packed2p1.v);
+ s2 = _mm_add_epi32(x2, packed2p2.v);
+ s0 = _mm_sub_epi32(s0, z0);
+ s1 = _mm_sub_epi32(s1, z1);
+ s2 = _mm_sub_epi32(s2, z2);
+ r0 = _mm_and_si128(_mm_shuffle_epi32(s0, _MM_SHUFFLE(2,2,0,0)), sse2_bot32bitmask.v);
+ r1 = _mm_and_si128(_mm_shuffle_epi32(s0, _MM_SHUFFLE(3,3,1,1)), sse2_bot32bitmask.v);
+ c1 = _mm_srli_epi32(r0, 26);
+ c2 = _mm_srli_epi32(r1, 25);
+ r0 = _mm_and_si128(r0, packedmask26.v);
+ r1 = _mm_and_si128(r1, packedmask25.v);
+ r0 = _mm_add_epi32(r0, _mm_slli_si128(c2, 8));
+ r1 = _mm_add_epi32(r1, c1);
+ s0 = _mm_unpacklo_epi64(_mm_unpacklo_epi32(r0, r1), _mm_unpackhi_epi32(r0, r1));
+ s1 = _mm_add_epi32(s1, _mm_srli_si128(c2, 8));
+ nq[0].v = _mm_unpacklo_epi64(a0, s0);
+ nq[2].v = _mm_unpackhi_epi64(a0, s0);
+ nq[4].v = _mm_unpacklo_epi64(a1, s1);
+ nq[6].v = _mm_unpackhi_epi64(a1, s1);
+ nq[8].v = _mm_unpacklo_epi64(a2, s2);
+ nq[1].v = _mm_shuffle_epi32(nq[0].v, _MM_SHUFFLE(3,3,1,1));
+ nq[3].v = _mm_shuffle_epi32(nq[2].v, _MM_SHUFFLE(3,3,1,1));
+ nq[5].v = _mm_shuffle_epi32(nq[4].v, _MM_SHUFFLE(3,3,1,1));
+ nq[7].v = _mm_shuffle_epi32(nq[6].v, _MM_SHUFFLE(3,3,1,1));
+ nq[9].v = _mm_shuffle_epi32(nq[8].v, _MM_SHUFFLE(3,3,1,1));
+}
+
+
+/* compute [x+z,x-z] from [x,z] */
+DONNA_INLINE static void
+curve25519_addsub_packed64(packedelem64 *r) {
+ packed32bignum25519 x,z,add,sub;
+
+ x[0].v = _mm_unpacklo_epi64(r[0].v, r[1].v);
+ z[0].v = _mm_unpackhi_epi64(r[0].v, r[1].v);
+ x[1].v = _mm_unpacklo_epi64(r[2].v, r[3].v);
+ z[1].v = _mm_unpackhi_epi64(r[2].v, r[3].v);
+ x[2].v = _mm_unpacklo_epi64(r[4].v, r[5].v);
+ z[2].v = _mm_unpackhi_epi64(r[4].v, r[5].v);
+ x[3].v = _mm_unpacklo_epi64(r[6].v, r[7].v);
+ z[3].v = _mm_unpackhi_epi64(r[6].v, r[7].v);
+ x[4].v = _mm_unpacklo_epi64(r[8].v, r[9].v);
+ z[4].v = _mm_unpackhi_epi64(r[8].v, r[9].v);
+
+ curve25519_add_packed32(add, x, z);
+ curve25519_sub_packed32(sub, x, z);
+
+ r[0].v = _mm_unpacklo_epi64(add[0].v, sub[0].v);
+ r[1].v = _mm_unpackhi_epi64(add[0].v, sub[0].v);
+ r[2].v = _mm_unpacklo_epi64(add[1].v, sub[1].v);
+ r[3].v = _mm_unpackhi_epi64(add[1].v, sub[1].v);
+ r[4].v = _mm_unpacklo_epi64(add[2].v, sub[2].v);
+ r[5].v = _mm_unpackhi_epi64(add[2].v, sub[2].v);
+ r[6].v = _mm_unpacklo_epi64(add[3].v, sub[3].v);
+ r[7].v = _mm_unpackhi_epi64(add[3].v, sub[3].v);
+ r[8].v = _mm_unpacklo_epi64(add[4].v, sub[4].v);
+ r[9].v = _mm_unpackhi_epi64(add[4].v, sub[4].v);
+}
+
+/* compute [x,z] * [121666,121665] */
+DONNA_INLINE static void
+curve25519_121665_packed64(packedelem64 *out, const packedelem64 *in) {
+ xmmi c1,c2;
+
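+ /* scale the interleaved [x,z] limbs by the packed 121666/121665 ladder constants, then carry back down to 26/25 bits */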
+ out[0].v = _mm_mul_epu32(in[0].v, packed121666121665.v);
+ out[1].v = _mm_mul_epu32(in[1].v, packed121666121665.v);
+ out[2].v = _mm_mul_epu32(in[2].v, packed121666121665.v);
+ out[3].v = _mm_mul_epu32(in[3].v, packed121666121665.v);
+ out[4].v = _mm_mul_epu32(in[4].v, packed121666121665.v);
+ out[5].v = _mm_mul_epu32(in[5].v, packed121666121665.v);
+ out[6].v = _mm_mul_epu32(in[6].v, packed121666121665.v);
+ out[7].v = _mm_mul_epu32(in[7].v, packed121666121665.v);
+ out[8].v = _mm_mul_epu32(in[8].v, packed121666121665.v);
+ out[9].v = _mm_mul_epu32(in[9].v, packed121666121665.v);
+
+ c1 = _mm_srli_epi64(out[0].v, 26); c2 = _mm_srli_epi64(out[4].v, 26); out[0].v = _mm_and_si128(out[0].v, packedmask26.v); out[4].v = _mm_and_si128(out[4].v, packedmask26.v); out[1].v = _mm_add_epi64(out[1].v, c1); out[5].v = _mm_add_epi64(out[5].v, c2);
+ c1 = _mm_srli_epi64(out[1].v, 25); c2 = _mm_srli_epi64(out[5].v, 25); out[1].v = _mm_and_si128(out[1].v, packedmask25.v); out[5].v = _mm_and_si128(out[5].v, packedmask25.v); out[2].v = _mm_add_epi64(out[2].v, c1); out[6].v = _mm_add_epi64(out[6].v, c2);
+ c1 = _mm_srli_epi64(out[2].v, 26); c2 = _mm_srli_epi64(out[6].v, 26); out[2].v = _mm_and_si128(out[2].v, packedmask26.v); out[6].v = _mm_and_si128(out[6].v, packedmask26.v); out[3].v = _mm_add_epi64(out[3].v, c1); out[7].v = _mm_add_epi64(out[7].v, c2);
+ c1 = _mm_srli_epi64(out[3].v, 25); c2 = _mm_srli_epi64(out[7].v, 25); out[3].v = _mm_and_si128(out[3].v, packedmask25.v); out[7].v = _mm_and_si128(out[7].v, packedmask25.v); out[4].v = _mm_add_epi64(out[4].v, c1); out[8].v = _mm_add_epi64(out[8].v, c2);
+ c2 = _mm_srli_epi64(out[8].v, 26); out[8].v = _mm_and_si128(out[8].v, packedmask26.v); out[9].v = _mm_add_epi64(out[9].v, c2);
+ c2 = _mm_srli_epi64(out[9].v, 25); out[9].v = _mm_and_si128(out[9].v, packedmask25.v); out[0].v = _mm_add_epi64(out[0].v, _mm_mul_epu32(c2, packednineteen.v));
+ c1 = _mm_srli_epi64(out[0].v, 26); c2 = _mm_srli_epi64(out[4].v, 26); out[0].v = _mm_and_si128(out[0].v, packedmask26.v); out[4].v = _mm_and_si128(out[4].v, packedmask26.v); out[1].v = _mm_add_epi64(out[1].v, c1); out[5].v = _mm_add_epi64(out[5].v, c2);
+}
+
+/* compute [sq.x,sq121665.x-sq121665.z] * [sq.z,sq.x-sq.z] */
+DONNA_INLINE static void
+curve25519_final_nq(packedelem64 *nq, const packedelem64 *sq, const packedelem64 *sq121665) {
+ packed32bignum25519 x, z, sub;
+ packed64bignum25519 t, nqa, nqb;
+
+ x[0].v = _mm_or_si128(_mm_unpacklo_epi64(sq[0].v, sq[1].v), _mm_slli_si128(_mm_unpacklo_epi64(sq121665[0].v, sq121665[1].v), 4));
+ z[0].v = _mm_or_si128(_mm_unpackhi_epi64(sq[0].v, sq[1].v), _mm_slli_si128(_mm_unpackhi_epi64(sq121665[0].v, sq121665[1].v), 4));
+ x[1].v = _mm_or_si128(_mm_unpacklo_epi64(sq[2].v, sq[3].v), _mm_slli_si128(_mm_unpacklo_epi64(sq121665[2].v, sq121665[3].v), 4));
+ z[1].v = _mm_or_si128(_mm_unpackhi_epi64(sq[2].v, sq[3].v), _mm_slli_si128(_mm_unpackhi_epi64(sq121665[2].v, sq121665[3].v), 4));
+ x[2].v = _mm_or_si128(_mm_unpacklo_epi64(sq[4].v, sq[5].v), _mm_slli_si128(_mm_unpacklo_epi64(sq121665[4].v, sq121665[5].v), 4));
+ z[2].v = _mm_or_si128(_mm_unpackhi_epi64(sq[4].v, sq[5].v), _mm_slli_si128(_mm_unpackhi_epi64(sq121665[4].v, sq121665[5].v), 4));
+ x[3].v = _mm_or_si128(_mm_unpacklo_epi64(sq[6].v, sq[7].v), _mm_slli_si128(_mm_unpacklo_epi64(sq121665[6].v, sq121665[7].v), 4));
+ z[3].v = _mm_or_si128(_mm_unpackhi_epi64(sq[6].v, sq[7].v), _mm_slli_si128(_mm_unpackhi_epi64(sq121665[6].v, sq121665[7].v), 4));
+ x[4].v = _mm_or_si128(_mm_unpacklo_epi64(sq[8].v, sq[9].v), _mm_slli_si128(_mm_unpacklo_epi64(sq121665[8].v, sq121665[9].v), 4));
+ z[4].v = _mm_or_si128(_mm_unpackhi_epi64(sq[8].v, sq[9].v), _mm_slli_si128(_mm_unpackhi_epi64(sq121665[8].v, sq121665[9].v), 4));
+
+ curve25519_sub_packed32(sub, x, z);
+
+ t[0].v = _mm_shuffle_epi32(sub[0].v, _MM_SHUFFLE(1,1,0,0));
+ t[1].v = _mm_shuffle_epi32(sub[0].v, _MM_SHUFFLE(3,3,2,2));
+ t[2].v = _mm_shuffle_epi32(sub[1].v, _MM_SHUFFLE(1,1,0,0));
+ t[3].v = _mm_shuffle_epi32(sub[1].v, _MM_SHUFFLE(3,3,2,2));
+ t[4].v = _mm_shuffle_epi32(sub[2].v, _MM_SHUFFLE(1,1,0,0));
+ t[5].v = _mm_shuffle_epi32(sub[2].v, _MM_SHUFFLE(3,3,2,2));
+ t[6].v = _mm_shuffle_epi32(sub[3].v, _MM_SHUFFLE(1,1,0,0));
+ t[7].v = _mm_shuffle_epi32(sub[3].v, _MM_SHUFFLE(3,3,2,2));
+ t[8].v = _mm_shuffle_epi32(sub[4].v, _MM_SHUFFLE(1,1,0,0));
+ t[9].v = _mm_shuffle_epi32(sub[4].v, _MM_SHUFFLE(3,3,2,2));
+
+ nqa[0].v = _mm_unpacklo_epi64(sq[0].v, t[0].v);
+ nqb[0].v = _mm_unpackhi_epi64(sq[0].v, t[0].v);
+ nqa[1].v = _mm_unpacklo_epi64(sq[1].v, t[1].v);
+ nqb[1].v = _mm_unpackhi_epi64(sq[1].v, t[1].v);
+ nqa[2].v = _mm_unpacklo_epi64(sq[2].v, t[2].v);
+ nqb[2].v = _mm_unpackhi_epi64(sq[2].v, t[2].v);
+ nqa[3].v = _mm_unpacklo_epi64(sq[3].v, t[3].v);
+ nqb[3].v = _mm_unpackhi_epi64(sq[3].v, t[3].v);
+ nqa[4].v = _mm_unpacklo_epi64(sq[4].v, t[4].v);
+ nqb[4].v = _mm_unpackhi_epi64(sq[4].v, t[4].v);
+ nqa[5].v = _mm_unpacklo_epi64(sq[5].v, t[5].v);
+ nqb[5].v = _mm_unpackhi_epi64(sq[5].v, t[5].v);
+ nqa[6].v = _mm_unpacklo_epi64(sq[6].v, t[6].v);
+ nqb[6].v = _mm_unpackhi_epi64(sq[6].v, t[6].v);
+ nqa[7].v = _mm_unpacklo_epi64(sq[7].v, t[7].v);
+ nqb[7].v = _mm_unpackhi_epi64(sq[7].v, t[7].v);
+ nqa[8].v = _mm_unpacklo_epi64(sq[8].v, t[8].v);
+ nqb[8].v = _mm_unpackhi_epi64(sq[8].v, t[8].v);
+ nqa[9].v = _mm_unpacklo_epi64(sq[9].v, t[9].v);
+ nqb[9].v = _mm_unpackhi_epi64(sq[9].v, t[9].v);
+
+ curve25519_mul_packed64(nq, nqa, nqb);
+}
+
diff --git a/curve25519-donna/curve25519-donna.c b/curve25519-donna/curve25519-donna.c
deleted file mode 100644
index 936aa890b5..0000000000
--- a/curve25519-donna/curve25519-donna.c
+++ /dev/null
@@ -1,863 +0,0 @@
-/* Copyright 2008, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * curve25519-donna: Curve25519 elliptic curve, public key function
- *
- * http://code.google.com/p/curve25519-donna/
- *
- * Adam Langley
- *
- * Derived from public domain C code by Daniel J. Bernstein
- *
- * More information about curve25519 can be found here
- * http://cr.yp.to/ecdh.html
- *
- * djb's sample implementation of curve25519 is written in a special assembly
- * language called qhasm and uses the floating point registers.
- *
- * This is, almost, a clean room reimplementation from the curve25519 paper. It
- * uses many of the tricks described therein. Only the crecip function is taken
- * from the sample implementation. */
-
-#include <string.h>
-
-#include "curve25519-donna.h"
-
-#ifdef _MSC_VER
-#define inline __inline
-#endif
-
-typedef int32_t s32;
-typedef int64_t limb;
-
-/* Field element representation:
- *
- * Field elements are written as an array of signed, 64-bit limbs, least
- * significant first. The value of the field element is:
- *   x[0] + 2^26·x[1] + 2^51·x[2] + 2^77·x[3] + ...
- *
- * i.e. the limbs are 26, 25, 26, 25, ... bits wide. */
-
-/* Sum two numbers: output += in */
-static void fsum(limb *output, const limb *in) {
- unsigned i;
- for (i = 0; i < 10; i += 2) {
- output[0+i] = output[0+i] + in[0+i];
- output[1+i] = output[1+i] + in[1+i];
- }
-}
-
-/* Find the difference of two numbers: output = in - output
- * (note the order of the arguments!). */
-static void fdifference(limb *output, const limb *in) {
- unsigned i;
- for (i = 0; i < 10; ++i) {
- output[i] = in[i] - output[i];
- }
-}
-
-/* Multiply a number by a scalar: output = in * scalar */
-static void fscalar_product(limb *output, const limb *in, const limb scalar) {
- unsigned i;
- for (i = 0; i < 10; ++i) {
- output[i] = in[i] * scalar;
- }
-}
-
-/* Multiply two numbers: output = in2 * in
- *
- * output must be distinct to both inputs. The inputs are reduced coefficient
- * form, the output is not.
- *
- * output[x] <= 14 * the largest product of the input limbs. */
-static void fproduct(limb *output, const limb *in2, const limb *in) {
- output[0] = ((limb) ((s32) in2[0])) * ((s32) in[0]);
- output[1] = ((limb) ((s32) in2[0])) * ((s32) in[1]) +
- ((limb) ((s32) in2[1])) * ((s32) in[0]);
- output[2] = 2 * ((limb) ((s32) in2[1])) * ((s32) in[1]) +
- ((limb) ((s32) in2[0])) * ((s32) in[2]) +
- ((limb) ((s32) in2[2])) * ((s32) in[0]);
- output[3] = ((limb) ((s32) in2[1])) * ((s32) in[2]) +
- ((limb) ((s32) in2[2])) * ((s32) in[1]) +
- ((limb) ((s32) in2[0])) * ((s32) in[3]) +
- ((limb) ((s32) in2[3])) * ((s32) in[0]);
- output[4] = ((limb) ((s32) in2[2])) * ((s32) in[2]) +
- 2 * (((limb) ((s32) in2[1])) * ((s32) in[3]) +
- ((limb) ((s32) in2[3])) * ((s32) in[1])) +
- ((limb) ((s32) in2[0])) * ((s32) in[4]) +
- ((limb) ((s32) in2[4])) * ((s32) in[0]);
- output[5] = ((limb) ((s32) in2[2])) * ((s32) in[3]) +
- ((limb) ((s32) in2[3])) * ((s32) in[2]) +
- ((limb) ((s32) in2[1])) * ((s32) in[4]) +
- ((limb) ((s32) in2[4])) * ((s32) in[1]) +
- ((limb) ((s32) in2[0])) * ((s32) in[5]) +
- ((limb) ((s32) in2[5])) * ((s32) in[0]);
- output[6] = 2 * (((limb) ((s32) in2[3])) * ((s32) in[3]) +
- ((limb) ((s32) in2[1])) * ((s32) in[5]) +
- ((limb) ((s32) in2[5])) * ((s32) in[1])) +
- ((limb) ((s32) in2[2])) * ((s32) in[4]) +
- ((limb) ((s32) in2[4])) * ((s32) in[2]) +
- ((limb) ((s32) in2[0])) * ((s32) in[6]) +
- ((limb) ((s32) in2[6])) * ((s32) in[0]);
- output[7] = ((limb) ((s32) in2[3])) * ((s32) in[4]) +
- ((limb) ((s32) in2[4])) * ((s32) in[3]) +
- ((limb) ((s32) in2[2])) * ((s32) in[5]) +
- ((limb) ((s32) in2[5])) * ((s32) in[2]) +
- ((limb) ((s32) in2[1])) * ((s32) in[6]) +
- ((limb) ((s32) in2[6])) * ((s32) in[1]) +
- ((limb) ((s32) in2[0])) * ((s32) in[7]) +
- ((limb) ((s32) in2[7])) * ((s32) in[0]);
- output[8] = ((limb) ((s32) in2[4])) * ((s32) in[4]) +
- 2 * (((limb) ((s32) in2[3])) * ((s32) in[5]) +
- ((limb) ((s32) in2[5])) * ((s32) in[3]) +
- ((limb) ((s32) in2[1])) * ((s32) in[7]) +
- ((limb) ((s32) in2[7])) * ((s32) in[1])) +
- ((limb) ((s32) in2[2])) * ((s32) in[6]) +
- ((limb) ((s32) in2[6])) * ((s32) in[2]) +
- ((limb) ((s32) in2[0])) * ((s32) in[8]) +
- ((limb) ((s32) in2[8])) * ((s32) in[0]);
- output[9] = ((limb) ((s32) in2[4])) * ((s32) in[5]) +
- ((limb) ((s32) in2[5])) * ((s32) in[4]) +
- ((limb) ((s32) in2[3])) * ((s32) in[6]) +
- ((limb) ((s32) in2[6])) * ((s32) in[3]) +
- ((limb) ((s32) in2[2])) * ((s32) in[7]) +
- ((limb) ((s32) in2[7])) * ((s32) in[2]) +
- ((limb) ((s32) in2[1])) * ((s32) in[8]) +
- ((limb) ((s32) in2[8])) * ((s32) in[1]) +
- ((limb) ((s32) in2[0])) * ((s32) in[9]) +
- ((limb) ((s32) in2[9])) * ((s32) in[0]);
- output[10] = 2 * (((limb) ((s32) in2[5])) * ((s32) in[5]) +
- ((limb) ((s32) in2[3])) * ((s32) in[7]) +
- ((limb) ((s32) in2[7])) * ((s32) in[3]) +
- ((limb) ((s32) in2[1])) * ((s32) in[9]) +
- ((limb) ((s32) in2[9])) * ((s32) in[1])) +
- ((limb) ((s32) in2[4])) * ((s32) in[6]) +
- ((limb) ((s32) in2[6])) * ((s32) in[4]) +
- ((limb) ((s32) in2[2])) * ((s32) in[8]) +
- ((limb) ((s32) in2[8])) * ((s32) in[2]);
- output[11] = ((limb) ((s32) in2[5])) * ((s32) in[6]) +
- ((limb) ((s32) in2[6])) * ((s32) in[5]) +
- ((limb) ((s32) in2[4])) * ((s32) in[7]) +
- ((limb) ((s32) in2[7])) * ((s32) in[4]) +
- ((limb) ((s32) in2[3])) * ((s32) in[8]) +
- ((limb) ((s32) in2[8])) * ((s32) in[3]) +
- ((limb) ((s32) in2[2])) * ((s32) in[9]) +
- ((limb) ((s32) in2[9])) * ((s32) in[2]);
- output[12] = ((limb) ((s32) in2[6])) * ((s32) in[6]) +
- 2 * (((limb) ((s32) in2[5])) * ((s32) in[7]) +
- ((limb) ((s32) in2[7])) * ((s32) in[5]) +
- ((limb) ((s32) in2[3])) * ((s32) in[9]) +
- ((limb) ((s32) in2[9])) * ((s32) in[3])) +
- ((limb) ((s32) in2[4])) * ((s32) in[8]) +
- ((limb) ((s32) in2[8])) * ((s32) in[4]);
- output[13] = ((limb) ((s32) in2[6])) * ((s32) in[7]) +
- ((limb) ((s32) in2[7])) * ((s32) in[6]) +
- ((limb) ((s32) in2[5])) * ((s32) in[8]) +
- ((limb) ((s32) in2[8])) * ((s32) in[5]) +
- ((limb) ((s32) in2[4])) * ((s32) in[9]) +
- ((limb) ((s32) in2[9])) * ((s32) in[4]);
- output[14] = 2 * (((limb) ((s32) in2[7])) * ((s32) in[7]) +
- ((limb) ((s32) in2[5])) * ((s32) in[9]) +
- ((limb) ((s32) in2[9])) * ((s32) in[5])) +
- ((limb) ((s32) in2[6])) * ((s32) in[8]) +
- ((limb) ((s32) in2[8])) * ((s32) in[6]);
- output[15] = ((limb) ((s32) in2[7])) * ((s32) in[8]) +
- ((limb) ((s32) in2[8])) * ((s32) in[7]) +
- ((limb) ((s32) in2[6])) * ((s32) in[9]) +
- ((limb) ((s32) in2[9])) * ((s32) in[6]);
- output[16] = ((limb) ((s32) in2[8])) * ((s32) in[8]) +
- 2 * (((limb) ((s32) in2[7])) * ((s32) in[9]) +
- ((limb) ((s32) in2[9])) * ((s32) in[7]));
- output[17] = ((limb) ((s32) in2[8])) * ((s32) in[9]) +
- ((limb) ((s32) in2[9])) * ((s32) in[8]);
- output[18] = 2 * ((limb) ((s32) in2[9])) * ((s32) in[9]);
-}
-
-/* Reduce a long form to a short form by taking the input mod 2^255 - 19.
- *
- * On entry: |output[i]| < 14*2^54
- * On exit: |output[0..8]| < 280*2^54 */
-static void freduce_degree(limb *output) {
- /* Each of these shifts and adds ends up multiplying the value by 19.
- *
- * For output[0..8], the absolute entry value is < 14*2^54 and we add, at
- * most, 19*14*2^54 thus, on exit, |output[0..8]| < 280*2^54. */
- output[8] += output[18] << 4;
- output[8] += output[18] << 1;
- output[8] += output[18];
- output[7] += output[17] << 4;
- output[7] += output[17] << 1;
- output[7] += output[17];
- output[6] += output[16] << 4;
- output[6] += output[16] << 1;
- output[6] += output[16];
- output[5] += output[15] << 4;
- output[5] += output[15] << 1;
- output[5] += output[15];
- output[4] += output[14] << 4;
- output[4] += output[14] << 1;
- output[4] += output[14];
- output[3] += output[13] << 4;
- output[3] += output[13] << 1;
- output[3] += output[13];
- output[2] += output[12] << 4;
- output[2] += output[12] << 1;
- output[2] += output[12];
- output[1] += output[11] << 4;
- output[1] += output[11] << 1;
- output[1] += output[11];
- output[0] += output[10] << 4;
- output[0] += output[10] << 1;
- output[0] += output[10];
-}
-
-#if (-1 & 3) != 3
-#error "This code only works on a two's complement system"
-#endif
-
-/* return v / 2^26, using only shifts and adds.
- *
- * On entry: v can take any value. */
-static inline limb
-div_by_2_26(const limb v)
-{
- /* High word of v; no shift needed. */
- const uint32_t highword = (uint32_t) (((uint64_t) v) >> 32);
- /* Set to all 1s if v was negative; else set to 0s. */
- const int32_t sign = ((int32_t) highword) >> 31;
- /* Set to 0x3ffffff if v was negative; else set to 0. */
- const int32_t roundoff = ((uint32_t) sign) >> 6;
- /* Should return v / (1<<26) */
- return (v + roundoff) >> 26;
-}
-
-/* return v / (2^25), using only shifts and adds.
- *
- * On entry: v can take any value. */
-static inline limb
-div_by_2_25(const limb v)
-{
- /* High word of v; no shift needed. */
- const uint32_t highword = (uint32_t) (((uint64_t) v) >> 32);
- /* Set to all 1s if v was negative; else set to 0s. */
- const int32_t sign = ((int32_t) highword) >> 31;
- /* Set to 0x1ffffff if v was negative; else set to 0. */
- const int32_t roundoff = ((uint32_t) sign) >> 7;
- /* Should return v / (1<<25) */
- return (v + roundoff) >> 25;
-}
-
-/* Reduce all coefficients of the short form input so that |x| < 2^26.
- *
- * On entry: |output[i]| < 280*2^54 */
-static void freduce_coefficients(limb *output) {
- unsigned i;
-
- output[10] = 0;
-
- for (i = 0; i < 10; i += 2) {
- limb over = div_by_2_26(output[i]);
- /* The entry condition (that |output[i]| < 280*2^54) means that over is, at
- * most, 280*2^28 in the first iteration of this loop. This is added to the
- * next limb and we can approximate the resulting bound of that limb by
- * 281*2^54. */
- output[i] -= over << 26;
- output[i+1] += over;
-
- /* For the first iteration, |output[i+1]| < 281*2^54, thus |over| <
- * 281*2^29. When this is added to the next limb, the resulting bound can
- * be approximated as 281*2^54.
- *
- * For subsequent iterations of the loop, 281*2^54 remains a conservative
- * bound and no overflow occurs. */
- over = div_by_2_25(output[i+1]);
- output[i+1] -= over << 25;
- output[i+2] += over;
- }
- /* Now |output[10]| < 281*2^29 and all other coefficients are reduced. */
- output[0] += output[10] << 4;
- output[0] += output[10] << 1;
- output[0] += output[10];
-
- output[10] = 0;
-
- /* Now output[1..9] are reduced, and |output[0]| < 2^26 + 19*281*2^29
- * So |over| will be no more than 2^16. */
- {
- limb over = div_by_2_26(output[0]);
- output[0] -= over << 26;
- output[1] += over;
- }
-
- /* Now output[0,2..9] are reduced, and |output[1]| < 2^25 + 2^16 < 2^26. The
- * bound on |output[1]| is sufficient to meet our needs. */
-}
-
-/* A helpful wrapper around fproduct: output = in * in2.
- *
- * On entry: |in[i]| < 2^27 and |in2[i]| < 2^27.
- *
- * output must be distinct from both inputs. The output is reduced degree
- * (indeed, one need only provide storage for 10 limbs) and |output[i]| < 2^26. */
-static void
-fmul(limb *output, const limb *in, const limb *in2) {
- limb t[19];
- fproduct(t, in, in2);
- /* |t[i]| < 14*2^54 */
- freduce_degree(t);
- freduce_coefficients(t);
- /* |t[i]| < 2^26 */
- memcpy(output, t, sizeof(limb) * 10);
-}
-
-/* Square a number: output = in**2
- *
- * output must be distinct from the input. The inputs are reduced coefficient
- * form, the output is not.
- *
- * output[x] <= 14 * the largest product of the input limbs. */
-static void fsquare_inner(limb *output, const limb *in) {
- output[0] = ((limb) ((s32) in[0])) * ((s32) in[0]);
- output[1] = 2 * ((limb) ((s32) in[0])) * ((s32) in[1]);
- output[2] = 2 * (((limb) ((s32) in[1])) * ((s32) in[1]) +
- ((limb) ((s32) in[0])) * ((s32) in[2]));
- output[3] = 2 * (((limb) ((s32) in[1])) * ((s32) in[2]) +
- ((limb) ((s32) in[0])) * ((s32) in[3]));
- output[4] = ((limb) ((s32) in[2])) * ((s32) in[2]) +
- 4 * ((limb) ((s32) in[1])) * ((s32) in[3]) +
- 2 * ((limb) ((s32) in[0])) * ((s32) in[4]);
- output[5] = 2 * (((limb) ((s32) in[2])) * ((s32) in[3]) +
- ((limb) ((s32) in[1])) * ((s32) in[4]) +
- ((limb) ((s32) in[0])) * ((s32) in[5]));
- output[6] = 2 * (((limb) ((s32) in[3])) * ((s32) in[3]) +
- ((limb) ((s32) in[2])) * ((s32) in[4]) +
- ((limb) ((s32) in[0])) * ((s32) in[6]) +
- 2 * ((limb) ((s32) in[1])) * ((s32) in[5]));
- output[7] = 2 * (((limb) ((s32) in[3])) * ((s32) in[4]) +
- ((limb) ((s32) in[2])) * ((s32) in[5]) +
- ((limb) ((s32) in[1])) * ((s32) in[6]) +
- ((limb) ((s32) in[0])) * ((s32) in[7]));
- output[8] = ((limb) ((s32) in[4])) * ((s32) in[4]) +
- 2 * (((limb) ((s32) in[2])) * ((s32) in[6]) +
- ((limb) ((s32) in[0])) * ((s32) in[8]) +
- 2 * (((limb) ((s32) in[1])) * ((s32) in[7]) +
- ((limb) ((s32) in[3])) * ((s32) in[5])));
- output[9] = 2 * (((limb) ((s32) in[4])) * ((s32) in[5]) +
- ((limb) ((s32) in[3])) * ((s32) in[6]) +
- ((limb) ((s32) in[2])) * ((s32) in[7]) +
- ((limb) ((s32) in[1])) * ((s32) in[8]) +
- ((limb) ((s32) in[0])) * ((s32) in[9]));
- output[10] = 2 * (((limb) ((s32) in[5])) * ((s32) in[5]) +
- ((limb) ((s32) in[4])) * ((s32) in[6]) +
- ((limb) ((s32) in[2])) * ((s32) in[8]) +
- 2 * (((limb) ((s32) in[3])) * ((s32) in[7]) +
- ((limb) ((s32) in[1])) * ((s32) in[9])));
- output[11] = 2 * (((limb) ((s32) in[5])) * ((s32) in[6]) +
- ((limb) ((s32) in[4])) * ((s32) in[7]) +
- ((limb) ((s32) in[3])) * ((s32) in[8]) +
- ((limb) ((s32) in[2])) * ((s32) in[9]));
- output[12] = ((limb) ((s32) in[6])) * ((s32) in[6]) +
- 2 * (((limb) ((s32) in[4])) * ((s32) in[8]) +
- 2 * (((limb) ((s32) in[5])) * ((s32) in[7]) +
- ((limb) ((s32) in[3])) * ((s32) in[9])));
- output[13] = 2 * (((limb) ((s32) in[6])) * ((s32) in[7]) +
- ((limb) ((s32) in[5])) * ((s32) in[8]) +
- ((limb) ((s32) in[4])) * ((s32) in[9]));
- output[14] = 2 * (((limb) ((s32) in[7])) * ((s32) in[7]) +
- ((limb) ((s32) in[6])) * ((s32) in[8]) +
- 2 * ((limb) ((s32) in[5])) * ((s32) in[9]));
- output[15] = 2 * (((limb) ((s32) in[7])) * ((s32) in[8]) +
- ((limb) ((s32) in[6])) * ((s32) in[9]));
- output[16] = ((limb) ((s32) in[8])) * ((s32) in[8]) +
- 4 * ((limb) ((s32) in[7])) * ((s32) in[9]);
- output[17] = 2 * ((limb) ((s32) in[8])) * ((s32) in[9]);
- output[18] = 2 * ((limb) ((s32) in[9])) * ((s32) in[9]);
-}
-
-/* fsquare sets output = in^2.
- *
- * On entry: The |in| argument is in reduced coefficients form and |in[i]| <
- * 2^27.
- *
- * On exit: The |output| argument is in reduced coefficients form (indeed, one
- * need only provide storage for 10 limbs) and |out[i]| < 2^26. */
-static void
-fsquare(limb *output, const limb *in) {
- limb t[19];
- fsquare_inner(t, in);
- /* |t[i]| < 14*2^54 because the largest product of two limbs will be <
- * 2^(27+27) and fsquare_inner adds together, at most, 14 of those
- * products. */
- freduce_degree(t);
- freduce_coefficients(t);
- /* |t[i]| < 2^26 */
- memcpy(output, t, sizeof(limb) * 10);
-}
-
-/* Take a little-endian, 32-byte number and expand it into polynomial form */
-static void
-fexpand(limb *output, const u8 *input) {
-#define F(n,start,shift,mask) \
- output[n] = ((((limb) input[start + 0]) | \
- ((limb) input[start + 1]) << 8 | \
- ((limb) input[start + 2]) << 16 | \
- ((limb) input[start + 3]) << 24) >> shift) & mask;
- F(0, 0, 0, 0x3ffffff);
- F(1, 3, 2, 0x1ffffff);
- F(2, 6, 3, 0x3ffffff);
- F(3, 9, 5, 0x1ffffff);
- F(4, 12, 6, 0x3ffffff);
- F(5, 16, 0, 0x1ffffff);
- F(6, 19, 1, 0x3ffffff);
- F(7, 22, 3, 0x1ffffff);
- F(8, 25, 4, 0x3ffffff);
- F(9, 28, 6, 0x1ffffff);
-#undef F
-}
-
-#if (-32 >> 1) != -16
-#error "This code only works when >> does sign-extension on negative numbers"
-#endif
-
-/* s32_eq returns 0xffffffff iff a == b and zero otherwise. */
-static s32 s32_eq(s32 a, s32 b) {
- a = ~(a ^ b);
- a &= a << 16;
- a &= a << 8;
- a &= a << 4;
- a &= a << 2;
- a &= a << 1;
- return a >> 31;
-}
-
-/* s32_gte returns 0xffffffff if a >= b and zero otherwise, where a and b are
- * both non-negative. */
-static s32 s32_gte(s32 a, s32 b) {
- a -= b;
- /* a >= 0 iff a >= b. */
- return ~(a >> 31);
-}
-
-/* Take a fully reduced polynomial form number and contract it into a
- * little-endian, 32-byte array.
- *
- * On entry: |input_limbs[i]| < 2^26 */
-static void
-fcontract(u8 *output, limb *input_limbs) {
- int i;
- int j;
- s32 input[10];
-
- /* |input_limbs[i]| < 2^26, so it's valid to convert to an s32. */
- for (i = 0; i < 10; i++) {
- input[i] = input_limbs[i];
- }
-
- for (j = 0; j < 2; ++j) {
- for (i = 0; i < 9; ++i) {
- if ((i & 1) == 1) {
- /* This calculation is a time-invariant way to make input[i]
- * non-negative by borrowing from the next-larger limb. */
- const s32 mask = input[i] >> 31;
- const s32 carry = -((input[i] & mask) >> 25);
- input[i] = input[i] + (carry << 25);
- input[i+1] = input[i+1] - carry;
- } else {
- const s32 mask = input[i] >> 31;
- const s32 carry = -((input[i] & mask) >> 26);
- input[i] = input[i] + (carry << 26);
- input[i+1] = input[i+1] - carry;
- }
- }
-
- /* There's no greater limb for input[9] to borrow from, but we can multiply
- * by 19 and borrow from input[0], which is valid mod 2^255-19. */
- {
- const s32 mask = input[9] >> 31;
- const s32 carry = -((input[9] & mask) >> 25);
- input[9] = input[9] + (carry << 25);
- input[0] = input[0] - (carry * 19);
- }
-
- /* After the first iteration, input[1..9] are non-negative and fit within
- * 25 or 26 bits, depending on position. However, input[0] may be
- * negative. */
- }
-
- /* The first borrow-propagation pass above ended with every limb
- except (possibly) input[0] non-negative.
-
- If input[0] was negative after the first pass, then it was because of a
- carry from input[9]. On entry, input[9] < 2^26 so the carry was, at most,
- one, since (2^26 - 1) >> 25 = 1. Thus input[0] >= -19.
-
- In the second pass, each limb is decreased by at most one. Thus the second
- borrow-propagation pass could only have wrapped around to decrease
- input[0] again if the first pass left input[0] negative *and* input[1]
- through input[9] were all zero. In that case, input[1] is now 2^25 - 1,
- and this last borrow-propagation step will leave input[1] non-negative. */
- {
- const s32 mask = input[0] >> 31;
- const s32 carry = -((input[0] & mask) >> 26);
- input[0] = input[0] + (carry << 26);
- input[1] = input[1] - carry;
- }
-
- /* All input[i] are now non-negative. However, there might be values between
- * 2^25 and 2^26 in a limb which is, nominally, 25 bits wide. */
- for (j = 0; j < 2; j++) {
- for (i = 0; i < 9; i++) {
- if ((i & 1) == 1) {
- const s32 carry = input[i] >> 25;
- input[i] &= 0x1ffffff;
- input[i+1] += carry;
- } else {
- const s32 carry = input[i] >> 26;
- input[i] &= 0x3ffffff;
- input[i+1] += carry;
- }
- }
-
- {
- const s32 carry = input[9] >> 25;
- input[9] &= 0x1ffffff;
- input[0] += 19*carry;
- }
- }
-
- /* If the first carry-chain pass, just above, ended up with a carry from
- * input[9], and that caused input[0] to be out-of-bounds, then input[0] was
- * < 2^26 + 2*19, because the carry was, at most, two.
- *
- * If the second pass carried from input[9] again then input[0] is < 2*19 and
- * the input[9] -> input[0] carry didn't push input[0] out of bounds. */
-
- /* It still remains the case that input might be between 2^255-19 and 2^255.
- * In this case, input[1..9] must take their maximum value and input[0] must
- * be >= (2^255-19) & 0x3ffffff, which is 0x3ffffed. */
- s32 mask = s32_gte(input[0], 0x3ffffed);
- for (i = 1; i < 10; i++) {
- if ((i & 1) == 1) {
- mask &= s32_eq(input[i], 0x1ffffff);
- } else {
- mask &= s32_eq(input[i], 0x3ffffff);
- }
- }
-
- /* mask is 0xffffffff if input >= 2^255-19 and zero otherwise. Thus
- * this conditionally subtracts 2^255-19. */
- input[0] -= mask & 0x3ffffed;
-
- for (i = 1; i < 10; i++) {
- if ((i & 1) == 1) {
- input[i] -= mask & 0x1ffffff;
- } else {
- input[i] -= mask & 0x3ffffff;
- }
- }
-
- input[1] <<= 2;
- input[2] <<= 3;
- input[3] <<= 5;
- input[4] <<= 6;
- input[6] <<= 1;
- input[7] <<= 3;
- input[8] <<= 4;
- input[9] <<= 6;
-#define F(i, s) \
- output[s+0] |= input[i] & 0xff; \
- output[s+1] = (input[i] >> 8) & 0xff; \
- output[s+2] = (input[i] >> 16) & 0xff; \
- output[s+3] = (input[i] >> 24) & 0xff;
- output[0] = 0;
- output[16] = 0;
- F(0,0);
- F(1,3);
- F(2,6);
- F(3,9);
- F(4,12);
- F(5,16);
- F(6,19);
- F(7,22);
- F(8,25);
- F(9,28);
-#undef F
-}
-
-/* Input: Q, Q', Q-Q'
- * Output: 2Q, Q+Q'
- *
- * x2 z2: long form
- * x3 z3: long form
- * x z: short form, destroyed
- * xprime zprime: short form, destroyed
- * qmqp: short form, preserved
- *
- * On entry and exit, the absolute value of the limbs of all inputs and outputs
- * are < 2^26. */
-static void fmonty(limb *x2, limb *z2, /* output 2Q */
- limb *x3, limb *z3, /* output Q + Q' */
- limb *x, limb *z, /* input Q */
- limb *xprime, limb *zprime, /* input Q' */
- const limb *qmqp /* input Q - Q' */) {
- limb origx[10], origxprime[10], zzz[19], xx[19], zz[19], xxprime[19],
- zzprime[19], zzzprime[19], xxxprime[19];
-
- memcpy(origx, x, 10 * sizeof(limb));
- fsum(x, z);
- /* |x[i]| < 2^27 */
- fdifference(z, origx); /* does x - z */
- /* |z[i]| < 2^27 */
-
- memcpy(origxprime, xprime, sizeof(limb) * 10);
- fsum(xprime, zprime);
- /* |xprime[i]| < 2^27 */
- fdifference(zprime, origxprime);
- /* |zprime[i]| < 2^27 */
- fproduct(xxprime, xprime, z);
- /* |xxprime[i]| < 14*2^54: the largest product of two limbs will be <
- * 2^(27+27) and fproduct adds together, at most, 14 of those products.
- * (Approximating that to 2^58 doesn't work out.) */
- fproduct(zzprime, x, zprime);
- /* |zzprime[i]| < 14*2^54 */
- freduce_degree(xxprime);
- freduce_coefficients(xxprime);
- /* |xxprime[i]| < 2^26 */
- freduce_degree(zzprime);
- freduce_coefficients(zzprime);
- /* |zzprime[i]| < 2^26 */
- memcpy(origxprime, xxprime, sizeof(limb) * 10);
- fsum(xxprime, zzprime);
- /* |xxprime[i]| < 2^27 */
- fdifference(zzprime, origxprime);
- /* |zzprime[i]| < 2^27 */
- fsquare(xxxprime, xxprime);
- /* |xxxprime[i]| < 2^26 */
- fsquare(zzzprime, zzprime);
- /* |zzzprime[i]| < 2^26 */
- fproduct(zzprime, zzzprime, qmqp);
- /* |zzprime[i]| < 14*2^52 */
- freduce_degree(zzprime);
- freduce_coefficients(zzprime);
- /* |zzprime[i]| < 2^26 */
- memcpy(x3, xxxprime, sizeof(limb) * 10);
- memcpy(z3, zzprime, sizeof(limb) * 10);
-
- fsquare(xx, x);
- /* |xx[i]| < 2^26 */
- fsquare(zz, z);
- /* |zz[i]| < 2^26 */
- fproduct(x2, xx, zz);
- /* |x2[i]| < 14*2^52 */
- freduce_degree(x2);
- freduce_coefficients(x2);
- /* |x2[i]| < 2^26 */
- fdifference(zz, xx); // does zz = xx - zz
- /* |zz[i]| < 2^27 */
- memset(zzz + 10, 0, sizeof(limb) * 9);
- fscalar_product(zzz, zz, 121665);
- /* |zzz[i]| < 2^(27+17) */
- /* No need to call freduce_degree here:
- fscalar_product doesn't increase the degree of its input. */
- freduce_coefficients(zzz);
- /* |zzz[i]| < 2^26 */
- fsum(zzz, xx);
- /* |zzz[i]| < 2^27 */
- fproduct(z2, zz, zzz);
- /* |z2[i]| < 14*2^(26+27) */
- freduce_degree(z2);
- freduce_coefficients(z2);
- /* |z2[i]| < 2^26 */
-}
-
-/* Conditionally swap two reduced-form limb arrays if 'iswap' is 1, but leave
- * them unchanged if 'iswap' is 0. Runs in data-invariant time to avoid
- * side-channel attacks.
- *
- * NOTE that this function requires that 'iswap' be 1 or 0; other values give
- * wrong results. Also, the two limb arrays must be in reduced-coefficient,
- * reduced-degree form: the values in a[10..19] or b[10..19] aren't swapped,
- * and all values in a[0..9],b[0..9] must have magnitude less than
- * INT32_MAX. */
-static void
-swap_conditional(limb a[19], limb b[19], limb iswap) {
- unsigned i;
- const s32 swap = (s32) -iswap;
-
- for (i = 0; i < 10; ++i) {
- const s32 x = swap & ( ((s32)a[i]) ^ ((s32)b[i]) );
- a[i] = ((s32)a[i]) ^ x;
- b[i] = ((s32)b[i]) ^ x;
- }
-}
-
-/* Calculates nQ where Q is the x-coordinate of a point on the curve
- *
- * resultx/resultz: the x coordinate of the resulting curve point (short form)
- * n: a little endian, 32-byte number
- * q: a point of the curve (short form) */
-static void
-cmult(limb *resultx, limb *resultz, const u8 *n, const limb *q) {
- limb a[19] = {0}, b[19] = {1}, c[19] = {1}, d[19] = {0};
- limb *nqpqx = a, *nqpqz = b, *nqx = c, *nqz = d, *t;
- limb e[19] = {0}, f[19] = {1}, g[19] = {0}, h[19] = {1};
- limb *nqpqx2 = e, *nqpqz2 = f, *nqx2 = g, *nqz2 = h;
-
- unsigned i, j;
-
- memcpy(nqpqx, q, sizeof(limb) * 10);
-
- for (i = 0; i < 32; ++i) {
- u8 byte = n[31 - i];
- for (j = 0; j < 8; ++j) {
- const limb bit = byte >> 7;
-
- swap_conditional(nqx, nqpqx, bit);
- swap_conditional(nqz, nqpqz, bit);
- fmonty(nqx2, nqz2,
- nqpqx2, nqpqz2,
- nqx, nqz,
- nqpqx, nqpqz,
- q);
- swap_conditional(nqx2, nqpqx2, bit);
- swap_conditional(nqz2, nqpqz2, bit);
-
- t = nqx;
- nqx = nqx2;
- nqx2 = t;
- t = nqz;
- nqz = nqz2;
- nqz2 = t;
- t = nqpqx;
- nqpqx = nqpqx2;
- nqpqx2 = t;
- t = nqpqz;
- nqpqz = nqpqz2;
- nqpqz2 = t;
-
- byte <<= 1;
- }
- }
-
- memcpy(resultx, nqx, sizeof(limb) * 10);
- memcpy(resultz, nqz, sizeof(limb) * 10);
-}
-
-// -----------------------------------------------------------------------------
-// Shamelessly copied from djb's code
-// -----------------------------------------------------------------------------
-static void
-crecip(limb *out, const limb *z) {
- limb z2[10];
- limb z9[10];
- limb z11[10];
- limb z2_5_0[10];
- limb z2_10_0[10];
- limb z2_20_0[10];
- limb z2_50_0[10];
- limb z2_100_0[10];
- limb t0[10];
- limb t1[10];
- int i;
-
- /* 2 */ fsquare(z2,z);
- /* 4 */ fsquare(t1,z2);
- /* 8 */ fsquare(t0,t1);
- /* 9 */ fmul(z9,t0,z);
- /* 11 */ fmul(z11,z9,z2);
- /* 22 */ fsquare(t0,z11);
- /* 2^5 - 2^0 = 31 */ fmul(z2_5_0,t0,z9);
-
- /* 2^6 - 2^1 */ fsquare(t0,z2_5_0);
- /* 2^7 - 2^2 */ fsquare(t1,t0);
- /* 2^8 - 2^3 */ fsquare(t0,t1);
- /* 2^9 - 2^4 */ fsquare(t1,t0);
- /* 2^10 - 2^5 */ fsquare(t0,t1);
- /* 2^10 - 2^0 */ fmul(z2_10_0,t0,z2_5_0);
-
- /* 2^11 - 2^1 */ fsquare(t0,z2_10_0);
- /* 2^12 - 2^2 */ fsquare(t1,t0);
- /* 2^20 - 2^10 */ for (i = 2;i < 10;i += 2) { fsquare(t0,t1); fsquare(t1,t0); }
- /* 2^20 - 2^0 */ fmul(z2_20_0,t1,z2_10_0);
-
- /* 2^21 - 2^1 */ fsquare(t0,z2_20_0);
- /* 2^22 - 2^2 */ fsquare(t1,t0);
- /* 2^40 - 2^20 */ for (i = 2;i < 20;i += 2) { fsquare(t0,t1); fsquare(t1,t0); }
- /* 2^40 - 2^0 */ fmul(t0,t1,z2_20_0);
-
- /* 2^41 - 2^1 */ fsquare(t1,t0);
- /* 2^42 - 2^2 */ fsquare(t0,t1);
- /* 2^50 - 2^10 */ for (i = 2;i < 10;i += 2) { fsquare(t1,t0); fsquare(t0,t1); }
- /* 2^50 - 2^0 */ fmul(z2_50_0,t0,z2_10_0);
-
- /* 2^51 - 2^1 */ fsquare(t0,z2_50_0);
- /* 2^52 - 2^2 */ fsquare(t1,t0);
- /* 2^100 - 2^50 */ for (i = 2;i < 50;i += 2) { fsquare(t0,t1); fsquare(t1,t0); }
- /* 2^100 - 2^0 */ fmul(z2_100_0,t1,z2_50_0);
-
- /* 2^101 - 2^1 */ fsquare(t1,z2_100_0);
- /* 2^102 - 2^2 */ fsquare(t0,t1);
- /* 2^200 - 2^100 */ for (i = 2;i < 100;i += 2) { fsquare(t1,t0); fsquare(t0,t1); }
- /* 2^200 - 2^0 */ fmul(t1,t0,z2_100_0);
-
- /* 2^201 - 2^1 */ fsquare(t0,t1);
- /* 2^202 - 2^2 */ fsquare(t1,t0);
- /* 2^250 - 2^50 */ for (i = 2;i < 50;i += 2) { fsquare(t0,t1); fsquare(t1,t0); }
- /* 2^250 - 2^0 */ fmul(t0,t1,z2_50_0);
-
- /* 2^251 - 2^1 */ fsquare(t1,t0);
- /* 2^252 - 2^2 */ fsquare(t0,t1);
- /* 2^253 - 2^3 */ fsquare(t1,t0);
- /* 2^254 - 2^4 */ fsquare(t0,t1);
- /* 2^255 - 2^5 */ fsquare(t1,t0);
- /* 2^255 - 21 */ fmul(out,t1,z11);
-}
-
-static const u8 curve25519_basepoint[32] = {9};
-
-void curve25519_scalarmult(u8 *result, const u8 *secret, const u8 *basepoint) {
- limb bp[10], x[10], z[11], zmone[10];
- uint8_t e[32];
- int i;
-
- for (i = 0; i < 32; ++i) e[i] = secret[i];
- e[0] &= 248;
- e[31] &= 127;
- e[31] |= 64;
-
- fexpand(bp, basepoint);
- cmult(x, z, e, bp);
- crecip(zmone, z);
- fmul(z, x, zmone);
- fcontract(result, z);
-}
-
-void curve25519_publickey(u8 *public, const u8 *secret) {
- curve25519_scalarmult(public, secret, curve25519_basepoint);
-}
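
Note on the rename above: the removed curve25519_scalarmult/curve25519_publickey entry points are replaced one-for-one by curve25519_donna/curve25519_donna_basepoint from the new curve25519.h further down in this patch. A minimal, hypothetical caller-side sketch of the migration (the helper name is an assumption for illustration, not part of the patch):

    #include "curve25519.h"

    /* Hypothetical helper: derive a public key and a shared secret with the new names. */
    static void derive_keys(curve25519_key public_key, curve25519_key shared,
                            const curve25519_key secret, const curve25519_key peer_public) {
        /* was: curve25519_publickey(public_key, secret); */
        curve25519_donna_basepoint(public_key, secret);
        /* was: curve25519_scalarmult(shared, secret, peer_public); */
        curve25519_donna(shared, secret, peer_public);
    }
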
diff --git a/curve25519-donna/curve25519-donna.h b/curve25519-donna/curve25519-donna.h
index 2f6bb3d115..e707e22934 100644
--- a/curve25519-donna/curve25519-donna.h
+++ b/curve25519-donna/curve25519-donna.h
@@ -1,11 +1,32 @@
-#ifndef CURVE25519_H
-#define CURVE25519_H
+#include "curve25519.h"
+#include "curve25519-donna-portable.h"
-#include <stdint.h>
+#if defined(CURVE25519_SSE2)
+#else
+ #if defined(HAVE_UINT128) && !defined(CURVE25519_FORCE_32BIT)
+ #define CURVE25519_64BIT
+ #else
+ #define CURVE25519_32BIT
+ #endif
+#endif
-typedef uint8_t u8;
+#if !defined(CURVE25519_NO_INLINE_ASM)
+#endif
-void curve25519_scalarmult(u8 *result, const u8 *secret, const u8 *basepoint);
-void curve25519_publickey(u8 *public, const u8 *secret);
-#endif // CURVE25519_H
+#if defined(CURVE25519_SSE2)
+ #include "curve25519-donna-sse2.h"
+#elif defined(CURVE25519_64BIT)
+ #include "curve25519-donna-64bit.h"
+#else
+ #include "curve25519-donna-32bit.h"
+#endif
+
+#include "curve25519-donna-common.h"
+
+#if defined(CURVE25519_SSE2)
+ #include "curve25519-donna-scalarmult-sse2.h"
+#else
+ #include "curve25519-donna-scalarmult-base.h"
+#endif
+
diff --git a/curve25519-donna/curve25519.c b/curve25519-donna/curve25519.c
new file mode 100644
index 0000000000..bfd2f58ec3
--- /dev/null
+++ b/curve25519-donna/curve25519.c
@@ -0,0 +1,27 @@
+#include "curve25519-donna.h"
+
+#if !defined(CURVE25519_SUFFIX)
+#define CURVE25519_SUFFIX
+#endif
+
+#define CURVE25519_FN3(fn,suffix) fn##suffix
+#define CURVE25519_FN2(fn,suffix) CURVE25519_FN3(fn,suffix)
+#define CURVE25519_FN(fn) CURVE25519_FN2(fn,CURVE25519_SUFFIX)
+
+void
+CURVE25519_FN(curve25519_donna) (curve25519_key mypublic, const curve25519_key secret, const curve25519_key basepoint) {
+ curve25519_key e;
+ size_t i;
+
+ for (i = 0;i < 32;++i) e[i] = secret[i];
+ e[0] &= 0xf8;
+ e[31] &= 0x7f;
+ e[31] |= 0x40;
+ curve25519_scalarmult_donna(mypublic, e, basepoint);
+}
+
+void
+CURVE25519_FN(curve25519_donna_basepoint) (curve25519_key mypublic, const curve25519_key secret) {
+ static const curve25519_key basepoint = {9};
+ CURVE25519_FN(curve25519_donna)(mypublic, secret, basepoint);
+}
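
The CURVE25519_FN3/FN2/FN macros above exist so that CURVE25519_SUFFIX is macro-expanded before token pasting, letting this single translation unit be built several times under different exported names. A sketch of the declarations that would result if the file were compiled with CURVE25519_SUFFIX defined as _sse2 (the suffix value here is only an illustrative assumption):

    /* hypothetical build: cc -DCURVE25519_SSE2 -DCURVE25519_SUFFIX=_sse2 -msse2 -O3 -c curve25519.c */
    /* CURVE25519_FN(curve25519_donna) pastes the expanded suffix, producing: */
    void curve25519_donna_sse2(curve25519_key mypublic,
                               const curve25519_key secret,
                               const curve25519_key basepoint);
    void curve25519_donna_basepoint_sse2(curve25519_key mypublic,
                                         const curve25519_key secret);
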
diff --git a/curve25519-donna/curve25519.h b/curve25519-donna/curve25519.h
new file mode 100644
index 0000000000..51edd1e946
--- /dev/null
+++ b/curve25519-donna/curve25519.h
@@ -0,0 +1,10 @@
+#ifndef CURVE25519_H
+#define CURVE25519_H
+
+typedef unsigned char curve25519_key[32];
+
+void curve25519_donna(curve25519_key mypublic, const curve25519_key secret, const curve25519_key basepoint);
+void curve25519_donna_basepoint(curve25519_key mypublic, const curve25519_key secret);
+
+#endif /* CURVE25519_H */
+
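
These two declarations are the entire public surface of the new wrapper. A self-contained, hypothetical key-agreement sketch using them (the secrets are placeholders; clamping of the secret happens inside curve25519_donna, as shown in curve25519.c above):

    #include <string.h>
    #include "curve25519.h"

    /* Hypothetical demo: both sides should derive the same 32-byte shared secret. */
    int demo_key_agreement(void) {
        curve25519_key alice_secret = {1}, bob_secret = {2};  /* placeholder secrets */
        curve25519_key alice_public, bob_public, k_alice, k_bob;

        curve25519_donna_basepoint(alice_public, alice_secret);  /* A = a*G */
        curve25519_donna_basepoint(bob_public, bob_secret);      /* B = b*G */

        curve25519_donna(k_alice, alice_secret, bob_public);     /* a*B */
        curve25519_donna(k_bob, bob_secret, alice_public);       /* b*A */

        return memcmp(k_alice, k_bob, sizeof(curve25519_key)) == 0;  /* expect 1 */
    }
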
diff --git a/test_speed.c b/test_speed.c
index 3a9b08b4b1..d9010bb90d 100644
--- a/test_speed.c
+++ b/test_speed.c
@@ -8,7 +8,7 @@
#include "secp256k1.h"
#include "nist256p1.h"
#include "ed25519.h"
-#include "curve25519-donna.h"
+#include "curve25519.h"
static uint8_t msg[32];
@@ -92,7 +92,7 @@ void bench_curve25519(void)
clock_t t = clock();
for (int i = 0 ; i < 500; i++) {
- curve25519_scalarmult(result, secret, basepoint);
+ curve25519_donna(result, secret, basepoint);
}
printf("Curve25519 multiplying speed: %0.2f mul/s\n", 500.0f / ((float)(clock() - t) / CLOCKS_PER_SEC));
}