From e7c1b4490f2c559b050b8255df2d347dd0e4945f Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Fri, 5 May 2017 11:12:58 -0700 Subject: [PATCH] Squashed 'src/secp256k1/' changes from 8225239..84973d3 84973d3 Merge #454: Remove residual parts from the schnorr expirement. 5e95bf2 Remove residual parts from the schnorr expirement. cbc20b8 Merge #452: Minor optimizations to _scalar_inverse to save 4M 4cc8f52 Merge #437: Unroll secp256k1_fe_(get|set)_b32 to make them much faster. 465159c Further shorten the addition chain for scalar inversion. a2b6b19 Fix benchmark print_number infinite loop. 8b7680a Unroll secp256k1_fe_(get|set)_b32 for 10x26. aa84990 Unroll secp256k1_fe_(get|set)_b32 for 5x52. cf12fa1 Minor optimizations to _scalar_inverse to save 4M 1199492 Merge #408: Add `secp256k1_ec_pubkey_negate` and `secp256k1_ec_privkey_negate` 6af0871 Merge #441: secp256k1_context_randomize: document. ab31a52 Merge #444: test: Use checked_alloc eda5c1a Merge #449: Remove executable bit from secp256k1.c 51b77ae Remove executable bit from secp256k1.c 5eb030c test: Use checked_alloc 72d952c FIXUP: Missing "is" 70ff29b secp256k1_context_randomize: document. 9d560f9 Merge #428: Exhaustive recovery 8e48aa6 Add `secp256k1_ec_pubkey_negate` and `secp256k1_ec_privkey_negate` 2cee5fd exhaustive tests: add recovery module 678b0e5 exhaustive tests: remove erroneous comment from ecdsa_sig_sign 03ff8c2 group_impl.h: remove unused `secp256k1_ge_set_infinity` function a724d72 configure: add --enable-coverage to set options for coverage analysis b595163 recovery: add tests to cover API misusage 6f8ae2f ecdh: test NULL-checking of arguments 25e3cfb ecdsa_impl: replace scalar if-checks with VERIFY_CHECKs in ecdsa_sig_sign git-subtree-dir: src/secp256k1 git-subtree-split: 84973d393ac240a90b2e1a6538c5368202bc2224 --- Makefile.am | 10 +- configure.ac | 17 ++- include/secp256k1.h | 39 ++++++- src/bench.h | 2 +- src/bench_schnorr_verify.c | 73 ------------- src/ecdsa_impl.h | 16 +-- src/field_10x26_impl.h | 69 +++++++----- src/field_5x52_impl.h | 91 +++++++++++----- src/group_impl.h | 6 -- src/modules/ecdh/main_impl.h | 2 +- src/modules/ecdh/tests_impl.h | 30 ++++++ src/modules/recovery/main_impl.h | 2 +- src/modules/recovery/tests_impl.h | 143 +++++++++++++++++++++++++ src/scalar_impl.h | 171 ++++++++++++------------------ src/secp256k1.c | 31 +++++- src/tests.c | 51 +++++---- src/tests_exhaustive.c | 143 ++++++++++++++++++++++++- src/util.h | 5 +- 18 files changed, 623 insertions(+), 278 deletions(-) delete mode 100644 src/bench_schnorr_verify.c mode change 100755 => 100644 src/secp256k1.c diff --git a/Makefile.am b/Makefile.am index e5657f7f3..c071fbe27 100644 --- a/Makefile.am +++ b/Makefile.am @@ -93,7 +93,10 @@ TESTS = if USE_TESTS noinst_PROGRAMS += tests tests_SOURCES = src/tests.c -tests_CPPFLAGS = -DSECP256K1_BUILD -DVERIFY -I$(top_srcdir)/src -I$(top_srcdir)/include $(SECP_INCLUDES) $(SECP_TEST_INCLUDES) +tests_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/src -I$(top_srcdir)/include $(SECP_INCLUDES) $(SECP_TEST_INCLUDES) +if !ENABLE_COVERAGE +tests_CPPFLAGS += -DVERIFY +endif tests_LDADD = $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB) tests_LDFLAGS = -static TESTS += tests @@ -102,7 +105,10 @@ endif if USE_EXHAUSTIVE_TESTS noinst_PROGRAMS += exhaustive_tests exhaustive_tests_SOURCES = src/tests_exhaustive.c -exhaustive_tests_CPPFLAGS = -DSECP256K1_BUILD -DVERIFY -I$(top_srcdir)/src $(SECP_INCLUDES) +exhaustive_tests_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/src $(SECP_INCLUDES) +if !ENABLE_COVERAGE 
+exhaustive_tests_CPPFLAGS += -DVERIFY +endif exhaustive_tests_LDADD = $(SECP_LIBS) exhaustive_tests_LDFLAGS = -static TESTS += exhaustive_tests diff --git a/configure.ac b/configure.ac index ec50ffe3a..e5fcbcb4e 100644 --- a/configure.ac +++ b/configure.ac @@ -20,7 +20,7 @@ AC_PATH_TOOL(STRIP, strip) AX_PROG_CC_FOR_BUILD if test "x$CFLAGS" = "x"; then - CFLAGS="-O3 -g" + CFLAGS="-g" fi AM_PROG_CC_C_O @@ -89,6 +89,11 @@ AC_ARG_ENABLE(benchmark, [use_benchmark=$enableval], [use_benchmark=no]) +AC_ARG_ENABLE(coverage, + AS_HELP_STRING([--enable-coverage],[enable compiler flags to support kcov coverage analysis]), + [enable_coverage=$enableval], + [enable_coverage=no]) + AC_ARG_ENABLE(tests, AS_HELP_STRING([--enable-tests],[compile tests (default is yes)]), [use_tests=$enableval], @@ -154,6 +159,14 @@ AC_COMPILE_IFELSE([AC_LANG_SOURCE([[void myfunc() {__builtin_expect(0,0);}]])], [ AC_MSG_RESULT([no]) ]) +if test x"$enable_coverage" = x"yes"; then + AC_DEFINE(COVERAGE, 1, [Define this symbol to compile out all VERIFY code]) + CFLAGS="$CFLAGS -O0 --coverage" + LDFLAGS="--coverage" +else + CFLAGS="$CFLAGS -O3" +fi + if test x"$use_ecmult_static_precomputation" != x"no"; then save_cross_compiling=$cross_compiling cross_compiling=no @@ -434,6 +447,7 @@ AC_MSG_NOTICE([Using field implementation: $set_field]) AC_MSG_NOTICE([Using bignum implementation: $set_bignum]) AC_MSG_NOTICE([Using scalar implementation: $set_scalar]) AC_MSG_NOTICE([Using endomorphism optimizations: $use_endomorphism]) +AC_MSG_NOTICE([Building for coverage analysis: $enable_coverage]) AC_MSG_NOTICE([Building ECDH module: $enable_module_ecdh]) AC_MSG_NOTICE([Building ECDSA pubkey recovery module: $enable_module_recovery]) AC_MSG_NOTICE([Using jni: $use_jni]) @@ -460,6 +474,7 @@ AC_SUBST(SECP_INCLUDES) AC_SUBST(SECP_LIBS) AC_SUBST(SECP_TEST_LIBS) AC_SUBST(SECP_TEST_INCLUDES) +AM_CONDITIONAL([ENABLE_COVERAGE], [test x"$enable_coverage" = x"yes"]) AM_CONDITIONAL([USE_TESTS], [test x"$use_tests" != x"no"]) AM_CONDITIONAL([USE_EXHAUSTIVE_TESTS], [test x"$use_exhaustive_tests" != x"no"]) AM_CONDITIONAL([USE_BENCHMARK], [test x"$use_benchmark" = x"yes"]) diff --git a/include/secp256k1.h b/include/secp256k1.h index f268e309d..fc4c5cefb 100644 --- a/include/secp256k1.h +++ b/include/secp256k1.h @@ -163,6 +163,8 @@ typedef int (*secp256k1_nonce_function)( * * Returns: a newly created context object. * In: flags: which parts of the context to initialize. + * + * See also secp256k1_context_randomize. */ SECP256K1_API secp256k1_context* secp256k1_context_create( unsigned int flags @@ -485,6 +487,28 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_create( const unsigned char *seckey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); +/** Negates a private key in place. + * + * Returns: 1 always + * Args: ctx: pointer to a context object + * In/Out: pubkey: pointer to the public key to be negated (cannot be NULL) + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_negate( + const secp256k1_context* ctx, + unsigned char *seckey +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); + +/** Negates a public key in place. 
+ * + * Returns: 1 always + * Args: ctx: pointer to a context object + * In/Out: pubkey: pointer to the public key to be negated (cannot be NULL) + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_negate( + const secp256k1_context* ctx, + secp256k1_pubkey *pubkey +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); + /** Tweak a private key by adding tweak to it. * Returns: 0 if the tweak was out of range (chance of around 1 in 2^128 for * uniformly random 32-byte arrays, or if the resulting private key @@ -543,11 +567,24 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_tweak_mul( const unsigned char *tweak ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Updates the context randomization. +/** Updates the context randomization to protect against side-channel leakage. * Returns: 1: randomization successfully updated * 0: error * Args: ctx: pointer to a context object (cannot be NULL) * In: seed32: pointer to a 32-byte random seed (NULL resets to initial state) + * + * While secp256k1 code is written to be constant-time no matter what secret + * values are, it's possible that a future compiler may output code which isn't, + * and also that the CPU may not emit the same radio frequencies or draw the same + * amount power for all values. + * + * This function provides a seed which is combined into the blinding value: that + * blinding value is added before each multiplication (and removed afterwards) so + * that it does not affect function results, but shields against attacks which + * rely on any input-dependent behaviour. + * + * You should call this after secp256k1_context_create or + * secp256k1_context_clone, and may call this repeatedly afterwards. */ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_context_randomize( secp256k1_context* ctx, diff --git a/src/bench.h b/src/bench.h index 3a71b4aaf..d67f08a42 100644 --- a/src/bench.h +++ b/src/bench.h @@ -23,7 +23,7 @@ void print_number(double x) { if (y < 0.0) { y = -y; } - while (y < 100.0) { + while (y > 0 && y < 100.0) { y *= 10.0; c++; } diff --git a/src/bench_schnorr_verify.c b/src/bench_schnorr_verify.c deleted file mode 100644 index 5f137dda2..000000000 --- a/src/bench_schnorr_verify.c +++ /dev/null @@ -1,73 +0,0 @@ -/********************************************************************** - * Copyright (c) 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#include -#include - -#include "include/secp256k1.h" -#include "include/secp256k1_schnorr.h" -#include "util.h" -#include "bench.h" - -typedef struct { - unsigned char key[32]; - unsigned char sig[64]; - unsigned char pubkey[33]; - size_t pubkeylen; -} benchmark_schnorr_sig_t; - -typedef struct { - secp256k1_context *ctx; - unsigned char msg[32]; - benchmark_schnorr_sig_t sigs[64]; - int numsigs; -} benchmark_schnorr_verify_t; - -static void benchmark_schnorr_init(void* arg) { - int i, k; - benchmark_schnorr_verify_t* data = (benchmark_schnorr_verify_t*)arg; - - for (i = 0; i < 32; i++) { - data->msg[i] = 1 + i; - } - for (k = 0; k < data->numsigs; k++) { - secp256k1_pubkey pubkey; - for (i = 0; i < 32; i++) { - data->sigs[k].key[i] = 33 + i + k; - } - secp256k1_schnorr_sign(data->ctx, data->sigs[k].sig, data->msg, data->sigs[k].key, NULL, NULL); - data->sigs[k].pubkeylen = 33; - 
CHECK(secp256k1_ec_pubkey_create(data->ctx, &pubkey, data->sigs[k].key)); - CHECK(secp256k1_ec_pubkey_serialize(data->ctx, data->sigs[k].pubkey, &data->sigs[k].pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED)); - } -} - -static void benchmark_schnorr_verify(void* arg) { - int i; - benchmark_schnorr_verify_t* data = (benchmark_schnorr_verify_t*)arg; - - for (i = 0; i < 20000 / data->numsigs; i++) { - secp256k1_pubkey pubkey; - data->sigs[0].sig[(i >> 8) % 64] ^= (i & 0xFF); - CHECK(secp256k1_ec_pubkey_parse(data->ctx, &pubkey, data->sigs[0].pubkey, data->sigs[0].pubkeylen)); - CHECK(secp256k1_schnorr_verify(data->ctx, data->sigs[0].sig, data->msg, &pubkey) == ((i & 0xFF) == 0)); - data->sigs[0].sig[(i >> 8) % 64] ^= (i & 0xFF); - } -} - - - -int main(void) { - benchmark_schnorr_verify_t data; - - data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - - data.numsigs = 1; - run_benchmark("schnorr_verify", benchmark_schnorr_verify, benchmark_schnorr_init, NULL, &data, 10, 20000); - - secp256k1_context_destroy(data.ctx); - return 0; -} diff --git a/src/ecdsa_impl.h b/src/ecdsa_impl.h index 9a42e519b..453bb1188 100644 --- a/src/ecdsa_impl.h +++ b/src/ecdsa_impl.h @@ -225,14 +225,12 @@ static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const #if defined(EXHAUSTIVE_TEST_ORDER) { secp256k1_scalar computed_r; - int overflow = 0; secp256k1_ge pr_ge; secp256k1_ge_set_gej(&pr_ge, &pr); secp256k1_fe_normalize(&pr_ge.x); secp256k1_fe_get_b32(c, &pr_ge.x); - secp256k1_scalar_set_b32(&computed_r, c, &overflow); - /* we fully expect overflow */ + secp256k1_scalar_set_b32(&computed_r, c, NULL); return secp256k1_scalar_eq(sigr, &computed_r); } #else @@ -285,14 +283,10 @@ static int secp256k1_ecdsa_sig_sign(const secp256k1_ecmult_gen_context *ctx, sec secp256k1_fe_normalize(&r.y); secp256k1_fe_get_b32(b, &r.x); secp256k1_scalar_set_b32(sigr, b, &overflow); - if (secp256k1_scalar_is_zero(sigr)) { - /* P.x = order is on the curve, so technically sig->r could end up zero, which would be an invalid signature. - * This branch is cryptographically unreachable as hitting it requires finding the discrete log of P.x = N. - */ - secp256k1_gej_clear(&rp); - secp256k1_ge_clear(&r); - return 0; - } + /* These two conditions should be checked before calling */ + VERIFY_CHECK(!secp256k1_scalar_is_zero(sigr)); + VERIFY_CHECK(overflow == 0); + if (recid) { /* The overflow condition is cryptographically unreachable as hitting it requires finding the discrete log * of some P where P.x >= order, and only 1 in about 2^127 points meet this criteria. 
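The negate entry points and the expanded secp256k1_context_randomize documentation added above are caller-facing; a minimal caller-side sketch (not part of this patch) is shown below. fill_random and DEMO_CHECK are hypothetical helpers standing in for an OS CSPRNG and a test macro.

#include <stdlib.h>
#include <string.h>
#include <secp256k1.h>

#define DEMO_CHECK(cond) do { if (!(cond)) { abort(); } } while (0)

/* Hypothetical stand-in for an OS CSPRNG; not provided by libsecp256k1. */
extern int fill_random(unsigned char *buf, size_t len);

static void negate_and_randomize_sketch(void) {
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
    unsigned char seed[32], seckey[32];
    secp256k1_pubkey pub, pub_neg;

    /* Blind the context right after creation, as the new header comment recommends. */
    DEMO_CHECK(fill_random(seed, sizeof(seed)));
    DEMO_CHECK(secp256k1_context_randomize(ctx, seed));

    /* A real caller should retry on the (negligible) chance the key is invalid. */
    DEMO_CHECK(fill_random(seckey, sizeof(seckey)));
    DEMO_CHECK(secp256k1_ec_seckey_verify(ctx, seckey));
    DEMO_CHECK(secp256k1_ec_pubkey_create(ctx, &pub, seckey));

    /* Negating the secret key and re-deriving should equal negating the public key. */
    memcpy(&pub_neg, &pub, sizeof(pub_neg));
    DEMO_CHECK(secp256k1_ec_pubkey_negate(ctx, &pub_neg));
    DEMO_CHECK(secp256k1_ec_privkey_negate(ctx, seckey));
    DEMO_CHECK(secp256k1_ec_pubkey_create(ctx, &pub, seckey));
    DEMO_CHECK(memcmp(&pub, &pub_neg, sizeof(pub)) == 0);

    secp256k1_context_destroy(ctx);
}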
diff --git a/src/field_10x26_impl.h b/src/field_10x26_impl.h index 7b8c07960..234c13a64 100644 --- a/src/field_10x26_impl.h +++ b/src/field_10x26_impl.h @@ -38,10 +38,6 @@ static void secp256k1_fe_verify(const secp256k1_fe *a) { } VERIFY_CHECK(r == 1); } -#else -static void secp256k1_fe_verify(const secp256k1_fe *a) { - (void)a; -} #endif static void secp256k1_fe_normalize(secp256k1_fe *r) { @@ -325,17 +321,17 @@ static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) { } static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) { - int i; - r->n[0] = r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0; - r->n[5] = r->n[6] = r->n[7] = r->n[8] = r->n[9] = 0; - for (i=0; i<32; i++) { - int j; - for (j=0; j<4; j++) { - int limb = (8*i+2*j)/26; - int shift = (8*i+2*j)%26; - r->n[limb] |= (uint32_t)((a[31-i] >> (2*j)) & 0x3) << shift; - } - } + r->n[0] = (uint32_t)a[31] | ((uint32_t)a[30] << 8) | ((uint32_t)a[29] << 16) | ((uint32_t)(a[28] & 0x3) << 24); + r->n[1] = (uint32_t)((a[28] >> 2) & 0x3f) | ((uint32_t)a[27] << 6) | ((uint32_t)a[26] << 14) | ((uint32_t)(a[25] & 0xf) << 22); + r->n[2] = (uint32_t)((a[25] >> 4) & 0xf) | ((uint32_t)a[24] << 4) | ((uint32_t)a[23] << 12) | ((uint32_t)(a[22] & 0x3f) << 20); + r->n[3] = (uint32_t)((a[22] >> 6) & 0x3) | ((uint32_t)a[21] << 2) | ((uint32_t)a[20] << 10) | ((uint32_t)a[19] << 18); + r->n[4] = (uint32_t)a[18] | ((uint32_t)a[17] << 8) | ((uint32_t)a[16] << 16) | ((uint32_t)(a[15] & 0x3) << 24); + r->n[5] = (uint32_t)((a[15] >> 2) & 0x3f) | ((uint32_t)a[14] << 6) | ((uint32_t)a[13] << 14) | ((uint32_t)(a[12] & 0xf) << 22); + r->n[6] = (uint32_t)((a[12] >> 4) & 0xf) | ((uint32_t)a[11] << 4) | ((uint32_t)a[10] << 12) | ((uint32_t)(a[9] & 0x3f) << 20); + r->n[7] = (uint32_t)((a[9] >> 6) & 0x3) | ((uint32_t)a[8] << 2) | ((uint32_t)a[7] << 10) | ((uint32_t)a[6] << 18); + r->n[8] = (uint32_t)a[5] | ((uint32_t)a[4] << 8) | ((uint32_t)a[3] << 16) | ((uint32_t)(a[2] & 0x3) << 24); + r->n[9] = (uint32_t)((a[2] >> 2) & 0x3f) | ((uint32_t)a[1] << 6) | ((uint32_t)a[0] << 14); + if (r->n[9] == 0x3FFFFFUL && (r->n[8] & r->n[7] & r->n[6] & r->n[5] & r->n[4] & r->n[3] & r->n[2]) == 0x3FFFFFFUL && (r->n[1] + 0x40UL + ((r->n[0] + 0x3D1UL) >> 26)) > 0x3FFFFFFUL) { return 0; } @@ -349,21 +345,42 @@ static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) { /** Convert a field element to a 32-byte big endian value. 
Requires the input to be normalized */ static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) { - int i; #ifdef VERIFY VERIFY_CHECK(a->normalized); secp256k1_fe_verify(a); #endif - for (i=0; i<32; i++) { - int j; - int c = 0; - for (j=0; j<4; j++) { - int limb = (8*i+2*j)/26; - int shift = (8*i+2*j)%26; - c |= ((a->n[limb] >> shift) & 0x3) << (2 * j); - } - r[31-i] = c; - } + r[0] = (a->n[9] >> 14) & 0xff; + r[1] = (a->n[9] >> 6) & 0xff; + r[2] = ((a->n[9] & 0x3F) << 2) | ((a->n[8] >> 24) & 0x3); + r[3] = (a->n[8] >> 16) & 0xff; + r[4] = (a->n[8] >> 8) & 0xff; + r[5] = a->n[8] & 0xff; + r[6] = (a->n[7] >> 18) & 0xff; + r[7] = (a->n[7] >> 10) & 0xff; + r[8] = (a->n[7] >> 2) & 0xff; + r[9] = ((a->n[7] & 0x3) << 6) | ((a->n[6] >> 20) & 0x3f); + r[10] = (a->n[6] >> 12) & 0xff; + r[11] = (a->n[6] >> 4) & 0xff; + r[12] = ((a->n[6] & 0xf) << 4) | ((a->n[5] >> 22) & 0xf); + r[13] = (a->n[5] >> 14) & 0xff; + r[14] = (a->n[5] >> 6) & 0xff; + r[15] = ((a->n[5] & 0x3f) << 2) | ((a->n[4] >> 24) & 0x3); + r[16] = (a->n[4] >> 16) & 0xff; + r[17] = (a->n[4] >> 8) & 0xff; + r[18] = a->n[4] & 0xff; + r[19] = (a->n[3] >> 18) & 0xff; + r[20] = (a->n[3] >> 10) & 0xff; + r[21] = (a->n[3] >> 2) & 0xff; + r[22] = ((a->n[3] & 0x3) << 6) | ((a->n[2] >> 20) & 0x3f); + r[23] = (a->n[2] >> 12) & 0xff; + r[24] = (a->n[2] >> 4) & 0xff; + r[25] = ((a->n[2] & 0xf) << 4) | ((a->n[1] >> 22) & 0xf); + r[26] = (a->n[1] >> 14) & 0xff; + r[27] = (a->n[1] >> 6) & 0xff; + r[28] = ((a->n[1] & 0x3f) << 2) | ((a->n[0] >> 24) & 0x3); + r[29] = (a->n[0] >> 16) & 0xff; + r[30] = (a->n[0] >> 8) & 0xff; + r[31] = a->n[0] & 0xff; } SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m) { diff --git a/src/field_5x52_impl.h b/src/field_5x52_impl.h index 7a99eb21e..8e8b286ba 100644 --- a/src/field_5x52_impl.h +++ b/src/field_5x52_impl.h @@ -49,10 +49,6 @@ static void secp256k1_fe_verify(const secp256k1_fe *a) { } VERIFY_CHECK(r == 1); } -#else -static void secp256k1_fe_verify(const secp256k1_fe *a) { - (void)a; -} #endif static void secp256k1_fe_normalize(secp256k1_fe *r) { @@ -288,16 +284,40 @@ static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) { } static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) { - int i; - r->n[0] = r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0; - for (i=0; i<32; i++) { - int j; - for (j=0; j<2; j++) { - int limb = (8*i+4*j)/52; - int shift = (8*i+4*j)%52; - r->n[limb] |= (uint64_t)((a[31-i] >> (4*j)) & 0xF) << shift; - } - } + r->n[0] = (uint64_t)a[31] + | ((uint64_t)a[30] << 8) + | ((uint64_t)a[29] << 16) + | ((uint64_t)a[28] << 24) + | ((uint64_t)a[27] << 32) + | ((uint64_t)a[26] << 40) + | ((uint64_t)(a[25] & 0xF) << 48); + r->n[1] = (uint64_t)((a[25] >> 4) & 0xF) + | ((uint64_t)a[24] << 4) + | ((uint64_t)a[23] << 12) + | ((uint64_t)a[22] << 20) + | ((uint64_t)a[21] << 28) + | ((uint64_t)a[20] << 36) + | ((uint64_t)a[19] << 44); + r->n[2] = (uint64_t)a[18] + | ((uint64_t)a[17] << 8) + | ((uint64_t)a[16] << 16) + | ((uint64_t)a[15] << 24) + | ((uint64_t)a[14] << 32) + | ((uint64_t)a[13] << 40) + | ((uint64_t)(a[12] & 0xF) << 48); + r->n[3] = (uint64_t)((a[12] >> 4) & 0xF) + | ((uint64_t)a[11] << 4) + | ((uint64_t)a[10] << 12) + | ((uint64_t)a[9] << 20) + | ((uint64_t)a[8] << 28) + | ((uint64_t)a[7] << 36) + | ((uint64_t)a[6] << 44); + r->n[4] = (uint64_t)a[5] + | ((uint64_t)a[4] << 8) + | ((uint64_t)a[3] << 16) + | ((uint64_t)a[2] << 24) + | ((uint64_t)a[1] << 32) + | ((uint64_t)a[0] << 40); if (r->n[4] == 
0x0FFFFFFFFFFFFULL && (r->n[3] & r->n[2] & r->n[1]) == 0xFFFFFFFFFFFFFULL && r->n[0] >= 0xFFFFEFFFFFC2FULL) { return 0; } @@ -311,21 +331,42 @@ static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) { /** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */ static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) { - int i; #ifdef VERIFY VERIFY_CHECK(a->normalized); secp256k1_fe_verify(a); #endif - for (i=0; i<32; i++) { - int j; - int c = 0; - for (j=0; j<2; j++) { - int limb = (8*i+4*j)/52; - int shift = (8*i+4*j)%52; - c |= ((a->n[limb] >> shift) & 0xF) << (4 * j); - } - r[31-i] = c; - } + r[0] = (a->n[4] >> 40) & 0xFF; + r[1] = (a->n[4] >> 32) & 0xFF; + r[2] = (a->n[4] >> 24) & 0xFF; + r[3] = (a->n[4] >> 16) & 0xFF; + r[4] = (a->n[4] >> 8) & 0xFF; + r[5] = a->n[4] & 0xFF; + r[6] = (a->n[3] >> 44) & 0xFF; + r[7] = (a->n[3] >> 36) & 0xFF; + r[8] = (a->n[3] >> 28) & 0xFF; + r[9] = (a->n[3] >> 20) & 0xFF; + r[10] = (a->n[3] >> 12) & 0xFF; + r[11] = (a->n[3] >> 4) & 0xFF; + r[12] = ((a->n[2] >> 48) & 0xF) | ((a->n[3] & 0xF) << 4); + r[13] = (a->n[2] >> 40) & 0xFF; + r[14] = (a->n[2] >> 32) & 0xFF; + r[15] = (a->n[2] >> 24) & 0xFF; + r[16] = (a->n[2] >> 16) & 0xFF; + r[17] = (a->n[2] >> 8) & 0xFF; + r[18] = a->n[2] & 0xFF; + r[19] = (a->n[1] >> 44) & 0xFF; + r[20] = (a->n[1] >> 36) & 0xFF; + r[21] = (a->n[1] >> 28) & 0xFF; + r[22] = (a->n[1] >> 20) & 0xFF; + r[23] = (a->n[1] >> 12) & 0xFF; + r[24] = (a->n[1] >> 4) & 0xFF; + r[25] = ((a->n[0] >> 48) & 0xF) | ((a->n[1] & 0xF) << 4); + r[26] = (a->n[0] >> 40) & 0xFF; + r[27] = (a->n[0] >> 32) & 0xFF; + r[28] = (a->n[0] >> 24) & 0xFF; + r[29] = (a->n[0] >> 16) & 0xFF; + r[30] = (a->n[0] >> 8) & 0xFF; + r[31] = a->n[0] & 0xFF; } SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m) { diff --git a/src/group_impl.h b/src/group_impl.h index 2e192b62f..7d723532f 100644 --- a/src/group_impl.h +++ b/src/group_impl.h @@ -200,12 +200,6 @@ static void secp256k1_gej_set_infinity(secp256k1_gej *r) { secp256k1_fe_clear(&r->z); } -static void secp256k1_ge_set_infinity(secp256k1_ge *r) { - r->infinity = 1; - secp256k1_fe_clear(&r->x); - secp256k1_fe_clear(&r->y); -} - static void secp256k1_gej_clear(secp256k1_gej *r) { r->infinity = 0; secp256k1_fe_clear(&r->x); diff --git a/src/modules/ecdh/main_impl.h b/src/modules/ecdh/main_impl.h index c23e4f82f..9e30fb73d 100644 --- a/src/modules/ecdh/main_impl.h +++ b/src/modules/ecdh/main_impl.h @@ -16,10 +16,10 @@ int secp256k1_ecdh(const secp256k1_context* ctx, unsigned char *result, const se secp256k1_gej res; secp256k1_ge pt; secp256k1_scalar s; + VERIFY_CHECK(ctx != NULL); ARG_CHECK(result != NULL); ARG_CHECK(point != NULL); ARG_CHECK(scalar != NULL); - (void)ctx; secp256k1_pubkey_load(ctx, &pt, point); secp256k1_scalar_set_b32(&s, scalar, &overflow); diff --git a/src/modules/ecdh/tests_impl.h b/src/modules/ecdh/tests_impl.h index 7badc9033..85a5d0a9a 100644 --- a/src/modules/ecdh/tests_impl.h +++ b/src/modules/ecdh/tests_impl.h @@ -7,6 +7,35 @@ #ifndef _SECP256K1_MODULE_ECDH_TESTS_ #define _SECP256K1_MODULE_ECDH_TESTS_ +void test_ecdh_api(void) { + /* Setup context that just counts errors */ + secp256k1_context *tctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN); + secp256k1_pubkey point; + unsigned char res[32]; + unsigned char s_one[32] = { 0 }; + int32_t ecount = 0; + s_one[31] = 1; + + secp256k1_context_set_error_callback(tctx, counting_illegal_callback_fn, &ecount); + 
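/* Standalone sanity sketch, not part of the patch: the unrolled
 * secp256k1_fe_set_b32 above replaces a nibble-at-a-time loop with fixed
 * shift/mask expressions for the 5x52 representation.  The helpers below
 * (pack_loop, pack_unrolled, the test pattern) are hypothetical and exist
 * only to cross-check that the two forms produce identical limbs. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void pack_loop(uint64_t n[5], const unsigned char a[32]) {
    int i, j;
    memset(n, 0, 5 * sizeof(uint64_t));
    for (i = 0; i < 32; i++) {
        for (j = 0; j < 2; j++) {
            int limb = (8 * i + 4 * j) / 52;
            int shift = (8 * i + 4 * j) % 52;
            n[limb] |= (uint64_t)((a[31 - i] >> (4 * j)) & 0xF) << shift;
        }
    }
}

static void pack_unrolled(uint64_t n[5], const unsigned char a[32]) {
    n[0] = (uint64_t)a[31] | ((uint64_t)a[30] << 8) | ((uint64_t)a[29] << 16)
         | ((uint64_t)a[28] << 24) | ((uint64_t)a[27] << 32) | ((uint64_t)a[26] << 40)
         | ((uint64_t)(a[25] & 0xF) << 48);
    n[1] = (uint64_t)((a[25] >> 4) & 0xF) | ((uint64_t)a[24] << 4) | ((uint64_t)a[23] << 12)
         | ((uint64_t)a[22] << 20) | ((uint64_t)a[21] << 28) | ((uint64_t)a[20] << 36)
         | ((uint64_t)a[19] << 44);
    n[2] = (uint64_t)a[18] | ((uint64_t)a[17] << 8) | ((uint64_t)a[16] << 16)
         | ((uint64_t)a[15] << 24) | ((uint64_t)a[14] << 32) | ((uint64_t)a[13] << 40)
         | ((uint64_t)(a[12] & 0xF) << 48);
    n[3] = (uint64_t)((a[12] >> 4) & 0xF) | ((uint64_t)a[11] << 4) | ((uint64_t)a[10] << 12)
         | ((uint64_t)a[9] << 20) | ((uint64_t)a[8] << 28) | ((uint64_t)a[7] << 36)
         | ((uint64_t)a[6] << 44);
    n[4] = (uint64_t)a[5] | ((uint64_t)a[4] << 8) | ((uint64_t)a[3] << 16)
         | ((uint64_t)a[2] << 24) | ((uint64_t)a[1] << 32) | ((uint64_t)a[0] << 40);
}

int main(void) {
    unsigned char a[32];
    uint64_t n1[5], n2[5];
    int i;
    for (i = 0; i < 32; i++) {
        a[i] = (unsigned char)(i * 7 + 3); /* arbitrary test pattern */
    }
    pack_loop(n1, a);
    pack_unrolled(n2, a);
    printf("limbs %s\n", memcmp(n1, n2, sizeof(n1)) == 0 ? "match" : "differ");
    return 0;
}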
secp256k1_context_set_illegal_callback(tctx, counting_illegal_callback_fn, &ecount); + CHECK(secp256k1_ec_pubkey_create(tctx, &point, s_one) == 1); + + /* Check all NULLs are detected */ + CHECK(secp256k1_ecdh(tctx, res, &point, s_one) == 1); + CHECK(ecount == 0); + CHECK(secp256k1_ecdh(tctx, NULL, &point, s_one) == 0); + CHECK(ecount == 1); + CHECK(secp256k1_ecdh(tctx, res, NULL, s_one) == 0); + CHECK(ecount == 2); + CHECK(secp256k1_ecdh(tctx, res, &point, NULL) == 0); + CHECK(ecount == 3); + CHECK(secp256k1_ecdh(tctx, res, &point, s_one) == 1); + CHECK(ecount == 3); + + /* Cleanup */ + secp256k1_context_destroy(tctx); +} + void test_ecdh_generator_basepoint(void) { unsigned char s_one[32] = { 0 }; secp256k1_pubkey point[2]; @@ -68,6 +97,7 @@ void test_bad_scalar(void) { } void run_ecdh_tests(void) { + test_ecdh_api(); test_ecdh_generator_basepoint(); test_bad_scalar(); } diff --git a/src/modules/recovery/main_impl.h b/src/modules/recovery/main_impl.h index 86f2f0cb2..c6fbe2398 100755 --- a/src/modules/recovery/main_impl.h +++ b/src/modules/recovery/main_impl.h @@ -179,7 +179,7 @@ int secp256k1_ecdsa_recover(const secp256k1_context* ctx, secp256k1_pubkey *pubk ARG_CHECK(pubkey != NULL); secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, signature); - ARG_CHECK(recid >= 0 && recid < 4); + VERIFY_CHECK(recid >= 0 && recid < 4); /* should have been caught in parse_compact */ secp256k1_scalar_set_b32(&m, msg32, NULL); if (secp256k1_ecdsa_sig_recover(&ctx->ecmult_ctx, &r, &s, &q, &m, recid)) { secp256k1_pubkey_save(pubkey, &q); diff --git a/src/modules/recovery/tests_impl.h b/src/modules/recovery/tests_impl.h index 8932d5f0a..765c7dd81 100644 --- a/src/modules/recovery/tests_impl.h +++ b/src/modules/recovery/tests_impl.h @@ -7,6 +7,146 @@ #ifndef _SECP256K1_MODULE_RECOVERY_TESTS_ #define _SECP256K1_MODULE_RECOVERY_TESTS_ +static int recovery_test_nonce_function(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) { + (void) msg32; + (void) key32; + (void) algo16; + (void) data; + + /* On the first run, return 0 to force a second run */ + if (counter == 0) { + memset(nonce32, 0, 32); + return 1; + } + /* On the second run, return an overflow to force a third run */ + if (counter == 1) { + memset(nonce32, 0xff, 32); + return 1; + } + /* On the next run, return a valid nonce, but flip a coin as to whether or not to fail signing. 
*/ + memset(nonce32, 1, 32); + return secp256k1_rand_bits(1); +} + +void test_ecdsa_recovery_api(void) { + /* Setup contexts that just count errors */ + secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE); + secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN); + secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY); + secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + secp256k1_pubkey pubkey; + secp256k1_pubkey recpubkey; + secp256k1_ecdsa_signature normal_sig; + secp256k1_ecdsa_recoverable_signature recsig; + unsigned char privkey[32] = { 1 }; + unsigned char message[32] = { 2 }; + int32_t ecount = 0; + int recid = 0; + unsigned char sig[74]; + unsigned char zero_privkey[32] = { 0 }; + unsigned char over_privkey[32] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + + secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_error_callback(both, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount); + + /* Construct and verify corresponding public key. */ + CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1); + CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1); + + /* Check bad contexts and NULLs for signing */ + ecount = 0; + CHECK(secp256k1_ecdsa_sign_recoverable(none, &recsig, message, privkey, NULL, NULL) == 0); + CHECK(ecount == 1); + CHECK(secp256k1_ecdsa_sign_recoverable(sign, &recsig, message, privkey, NULL, NULL) == 1); + CHECK(ecount == 1); + CHECK(secp256k1_ecdsa_sign_recoverable(vrfy, &recsig, message, privkey, NULL, NULL) == 0); + CHECK(ecount == 2); + CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1); + CHECK(ecount == 2); + CHECK(secp256k1_ecdsa_sign_recoverable(both, NULL, message, privkey, NULL, NULL) == 0); + CHECK(ecount == 3); + CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, NULL, privkey, NULL, NULL) == 0); + CHECK(ecount == 4); + CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, NULL, NULL, NULL) == 0); + CHECK(ecount == 5); + /* This will fail or succeed randomly, and in either case will not ARG_CHECK failure */ + secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, recovery_test_nonce_function, NULL); + CHECK(ecount == 5); + /* These will all fail, but not in ARG_CHECK way */ + CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, zero_privkey, NULL, NULL) == 0); + CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, over_privkey, NULL, NULL) == 0); + /* This one will succeed. 
*/ + CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1); + CHECK(ecount == 5); + + /* Check signing with a goofy nonce function */ + + /* Check bad contexts and NULLs for recovery */ + ecount = 0; + CHECK(secp256k1_ecdsa_recover(none, &recpubkey, &recsig, message) == 0); + CHECK(ecount == 1); + CHECK(secp256k1_ecdsa_recover(sign, &recpubkey, &recsig, message) == 0); + CHECK(ecount == 2); + CHECK(secp256k1_ecdsa_recover(vrfy, &recpubkey, &recsig, message) == 1); + CHECK(ecount == 2); + CHECK(secp256k1_ecdsa_recover(both, &recpubkey, &recsig, message) == 1); + CHECK(ecount == 2); + CHECK(secp256k1_ecdsa_recover(both, NULL, &recsig, message) == 0); + CHECK(ecount == 3); + CHECK(secp256k1_ecdsa_recover(both, &recpubkey, NULL, message) == 0); + CHECK(ecount == 4); + CHECK(secp256k1_ecdsa_recover(both, &recpubkey, &recsig, NULL) == 0); + CHECK(ecount == 5); + + /* Check NULLs for conversion */ + CHECK(secp256k1_ecdsa_sign(both, &normal_sig, message, privkey, NULL, NULL) == 1); + ecount = 0; + CHECK(secp256k1_ecdsa_recoverable_signature_convert(both, NULL, &recsig) == 0); + CHECK(ecount == 1); + CHECK(secp256k1_ecdsa_recoverable_signature_convert(both, &normal_sig, NULL) == 0); + CHECK(ecount == 2); + CHECK(secp256k1_ecdsa_recoverable_signature_convert(both, &normal_sig, &recsig) == 1); + + /* Check NULLs for de/serialization */ + CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1); + ecount = 0; + CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, NULL, &recid, &recsig) == 0); + CHECK(ecount == 1); + CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, sig, NULL, &recsig) == 0); + CHECK(ecount == 2); + CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, NULL) == 0); + CHECK(ecount == 3); + CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, &recsig) == 1); + + CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, NULL, sig, recid) == 0); + CHECK(ecount == 4); + CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, NULL, recid) == 0); + CHECK(ecount == 5); + CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, -1) == 0); + CHECK(ecount == 6); + CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, 5) == 0); + CHECK(ecount == 7); + /* overflow in signature will fail but not affect ecount */ + memcpy(sig, over_privkey, 32); + CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, recid) == 0); + CHECK(ecount == 7); + + /* cleanup */ + secp256k1_context_destroy(none); + secp256k1_context_destroy(sign); + secp256k1_context_destroy(vrfy); + secp256k1_context_destroy(both); +} + void test_ecdsa_recovery_end_to_end(void) { unsigned char extra[32] = {0x00}; unsigned char privkey[32]; @@ -241,6 +381,9 @@ void test_ecdsa_recovery_edge_cases(void) { void run_recovery_tests(void) { int i; + for (i = 0; i < count; i++) { + test_ecdsa_recovery_api(); + } for (i = 0; i < 64*count; i++) { test_ecdsa_recovery_end_to_end(); } diff --git a/src/scalar_impl.h b/src/scalar_impl.h index f5b237640..2690d8655 100644 --- a/src/scalar_impl.h +++ b/src/scalar_impl.h @@ -66,88 +66,79 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar #else secp256k1_scalar *t; int i; - /* First compute x ^ (2^N - 1) for some values of N. 
*/ - secp256k1_scalar x2, x3, x4, x6, x7, x8, x15, x30, x60, x120, x127; + /* First compute xN as x ^ (2^N - 1) for some values of N, + * and uM as x ^ M for some values of M. */ + secp256k1_scalar x2, x3, x6, x8, x14, x28, x56, x112, x126; + secp256k1_scalar u2, u5, u9, u11, u13; - secp256k1_scalar_sqr(&x2, x); - secp256k1_scalar_mul(&x2, &x2, x); + secp256k1_scalar_sqr(&u2, x); + secp256k1_scalar_mul(&x2, &u2, x); + secp256k1_scalar_mul(&u5, &u2, &x2); + secp256k1_scalar_mul(&x3, &u5, &u2); + secp256k1_scalar_mul(&u9, &x3, &u2); + secp256k1_scalar_mul(&u11, &u9, &u2); + secp256k1_scalar_mul(&u13, &u11, &u2); - secp256k1_scalar_sqr(&x3, &x2); - secp256k1_scalar_mul(&x3, &x3, x); - - secp256k1_scalar_sqr(&x4, &x3); - secp256k1_scalar_mul(&x4, &x4, x); - - secp256k1_scalar_sqr(&x6, &x4); + secp256k1_scalar_sqr(&x6, &u13); secp256k1_scalar_sqr(&x6, &x6); - secp256k1_scalar_mul(&x6, &x6, &x2); - - secp256k1_scalar_sqr(&x7, &x6); - secp256k1_scalar_mul(&x7, &x7, x); + secp256k1_scalar_mul(&x6, &x6, &u11); - secp256k1_scalar_sqr(&x8, &x7); - secp256k1_scalar_mul(&x8, &x8, x); + secp256k1_scalar_sqr(&x8, &x6); + secp256k1_scalar_sqr(&x8, &x8); + secp256k1_scalar_mul(&x8, &x8, &x2); - secp256k1_scalar_sqr(&x15, &x8); - for (i = 0; i < 6; i++) { - secp256k1_scalar_sqr(&x15, &x15); + secp256k1_scalar_sqr(&x14, &x8); + for (i = 0; i < 5; i++) { + secp256k1_scalar_sqr(&x14, &x14); } - secp256k1_scalar_mul(&x15, &x15, &x7); + secp256k1_scalar_mul(&x14, &x14, &x6); - secp256k1_scalar_sqr(&x30, &x15); - for (i = 0; i < 14; i++) { - secp256k1_scalar_sqr(&x30, &x30); + secp256k1_scalar_sqr(&x28, &x14); + for (i = 0; i < 13; i++) { + secp256k1_scalar_sqr(&x28, &x28); } - secp256k1_scalar_mul(&x30, &x30, &x15); + secp256k1_scalar_mul(&x28, &x28, &x14); - secp256k1_scalar_sqr(&x60, &x30); - for (i = 0; i < 29; i++) { - secp256k1_scalar_sqr(&x60, &x60); + secp256k1_scalar_sqr(&x56, &x28); + for (i = 0; i < 27; i++) { + secp256k1_scalar_sqr(&x56, &x56); } - secp256k1_scalar_mul(&x60, &x60, &x30); + secp256k1_scalar_mul(&x56, &x56, &x28); - secp256k1_scalar_sqr(&x120, &x60); - for (i = 0; i < 59; i++) { - secp256k1_scalar_sqr(&x120, &x120); + secp256k1_scalar_sqr(&x112, &x56); + for (i = 0; i < 55; i++) { + secp256k1_scalar_sqr(&x112, &x112); } - secp256k1_scalar_mul(&x120, &x120, &x60); + secp256k1_scalar_mul(&x112, &x112, &x56); - secp256k1_scalar_sqr(&x127, &x120); - for (i = 0; i < 6; i++) { - secp256k1_scalar_sqr(&x127, &x127); + secp256k1_scalar_sqr(&x126, &x112); + for (i = 0; i < 13; i++) { + secp256k1_scalar_sqr(&x126, &x126); } - secp256k1_scalar_mul(&x127, &x127, &x7); + secp256k1_scalar_mul(&x126, &x126, &x14); - /* Then accumulate the final result (t starts at x127). */ - t = &x127; - for (i = 0; i < 2; i++) { /* 0 */ + /* Then accumulate the final result (t starts at x126). 
*/ + t = &x126; + for (i = 0; i < 3; i++) { secp256k1_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, x); /* 1 */ + secp256k1_scalar_mul(t, t, &u5); /* 101 */ for (i = 0; i < 4; i++) { /* 0 */ secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 2; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 2; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 2; i++) { /* 0 */ + for (i = 0; i < 4; i++) { /* 0 */ secp256k1_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 4; i++) { /* 0 */ + secp256k1_scalar_mul(t, t, &u5); /* 101 */ + for (i = 0; i < 5; i++) { /* 0 */ secp256k1_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 3; i++) { /* 0 */ + secp256k1_scalar_mul(t, t, &u11); /* 1011 */ + for (i = 0; i < 4; i++) { secp256k1_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &x2); /* 11 */ + secp256k1_scalar_mul(t, t, &u11); /* 1011 */ for (i = 0; i < 4; i++) { /* 0 */ secp256k1_scalar_sqr(t, t); } @@ -156,38 +147,26 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 4; i++) { /* 00 */ + for (i = 0; i < 6; i++) { /* 00 */ secp256k1_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &x2); /* 11 */ - for (i = 0; i < 2; i++) { /* 0 */ + secp256k1_scalar_mul(t, t, &u13); /* 1101 */ + for (i = 0; i < 4; i++) { /* 0 */ secp256k1_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 2; i++) { /* 0 */ + secp256k1_scalar_mul(t, t, &u5); /* 101 */ + for (i = 0; i < 3; i++) { secp256k1_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, x); /* 1 */ + secp256k1_scalar_mul(t, t, &x3); /* 111 */ for (i = 0; i < 5; i++) { /* 0 */ secp256k1_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &x4); /* 1111 */ - for (i = 0; i < 2; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 3; i++) { /* 00 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 4; i++) { /* 000 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 2; i++) { /* 0 */ + secp256k1_scalar_mul(t, t, &u9); /* 1001 */ + for (i = 0; i < 6; i++) { /* 000 */ secp256k1_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, x); /* 1 */ + secp256k1_scalar_mul(t, t, &u5); /* 101 */ for (i = 0; i < 10; i++) { /* 0000000 */ secp256k1_scalar_sqr(t, t); } @@ -200,50 +179,34 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &x8); /* 11111111 */ - for (i = 0; i < 2; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 3; i++) { /* 00 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 3; i++) { /* 00 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ for (i = 0; i < 5; i++) { /* 0 */ secp256k1_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &x4); /* 1111 */ - for (i = 0; i < 2; i++) { /* 0 */ + secp256k1_scalar_mul(t, t, &u9); /* 1001 */ + for (i = 0; i < 6; i++) { /* 00 */ secp256k1_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 5; i++) { /* 000 */ + secp256k1_scalar_mul(t, t, &u11); /* 1011 */ + for (i = 0; i < 4; i++) { 
secp256k1_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &x2); /* 11 */ - for (i = 0; i < 4; i++) { /* 00 */ + secp256k1_scalar_mul(t, t, &u13); /* 1101 */ + for (i = 0; i < 5; i++) { secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &x2); /* 11 */ - for (i = 0; i < 2; i++) { /* 0 */ + for (i = 0; i < 6; i++) { /* 00 */ secp256k1_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 8; i++) { /* 000000 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, &x2); /* 11 */ - for (i = 0; i < 3; i++) { /* 0 */ + secp256k1_scalar_mul(t, t, &u13); /* 1101 */ + for (i = 0; i < 10; i++) { /* 000000 */ secp256k1_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, &x2); /* 11 */ - for (i = 0; i < 3; i++) { /* 00 */ + secp256k1_scalar_mul(t, t, &u13); /* 1101 */ + for (i = 0; i < 4; i++) { secp256k1_scalar_sqr(t, t); } - secp256k1_scalar_mul(t, t, x); /* 1 */ + secp256k1_scalar_mul(t, t, &u9); /* 1001 */ for (i = 0; i < 6; i++) { /* 00000 */ secp256k1_scalar_sqr(t, t); } diff --git a/src/secp256k1.c b/src/secp256k1.c old mode 100755 new mode 100644 index fb8b882fa..4f8c01655 --- a/src/secp256k1.c +++ b/src/secp256k1.c @@ -424,6 +424,33 @@ int secp256k1_ec_pubkey_create(const secp256k1_context* ctx, secp256k1_pubkey *p return ret; } +int secp256k1_ec_privkey_negate(const secp256k1_context* ctx, unsigned char *seckey) { + secp256k1_scalar sec; + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(seckey != NULL); + + secp256k1_scalar_set_b32(&sec, seckey, NULL); + secp256k1_scalar_negate(&sec, &sec); + secp256k1_scalar_get_b32(seckey, &sec); + + return 1; +} + +int secp256k1_ec_pubkey_negate(const secp256k1_context* ctx, secp256k1_pubkey *pubkey) { + int ret = 0; + secp256k1_ge p; + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(pubkey != NULL); + + ret = secp256k1_pubkey_load(ctx, &p, pubkey); + memset(pubkey, 0, sizeof(*pubkey)); + if (ret) { + secp256k1_ge_neg(&p, &p); + secp256k1_pubkey_save(pubkey, &p); + } + return ret; +} + int secp256k1_ec_privkey_tweak_add(const secp256k1_context* ctx, unsigned char *seckey, const unsigned char *tweak) { secp256k1_scalar term; secp256k1_scalar sec; @@ -552,10 +579,6 @@ int secp256k1_ec_pubkey_combine(const secp256k1_context* ctx, secp256k1_pubkey * # include "modules/ecdh/main_impl.h" #endif -#ifdef ENABLE_MODULE_SCHNORR -# include "modules/schnorr/main_impl.h" -#endif - #ifdef ENABLE_MODULE_RECOVERY # include "modules/recovery/main_impl.h" #endif diff --git a/src/tests.c b/src/tests.c index 9ae7d3028..3d9bd5ebb 100644 --- a/src/tests.c +++ b/src/tests.c @@ -10,6 +10,7 @@ #include #include +#include #include @@ -135,6 +136,7 @@ void random_scalar_order(secp256k1_scalar *num) { void run_context_tests(void) { secp256k1_pubkey pubkey; + secp256k1_pubkey zero_pubkey; secp256k1_ecdsa_signature sig; unsigned char ctmp[32]; int32_t ecount; @@ -149,6 +151,8 @@ void run_context_tests(void) { secp256k1_scalar msg, key, nonce; secp256k1_scalar sigr, sigs; + memset(&zero_pubkey, 0, sizeof(zero_pubkey)); + ecount = 0; ecount2 = 10; secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); @@ -201,12 +205,20 @@ void run_context_tests(void) { CHECK(ecount == 2); CHECK(secp256k1_ec_pubkey_tweak_mul(sign, &pubkey, ctmp) == 0); CHECK(ecount2 == 13); - CHECK(secp256k1_ec_pubkey_tweak_mul(vrfy, &pubkey, ctmp) == 1); + CHECK(secp256k1_ec_pubkey_negate(vrfy, &pubkey) == 1); CHECK(ecount == 2); - CHECK(secp256k1_context_randomize(vrfy, ctmp) == 0); + CHECK(secp256k1_ec_pubkey_negate(sign, &pubkey) == 1); + CHECK(ecount == 2); + 
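/* Toy sketch, not library code: secp256k1_scalar_inverse above computes
 * x^(n-2) mod n via Fermat's little theorem, and this patch only reorders
 * the addition chain so fewer multiplications are needed ("save 4M").
 * The small prime p and the helpers below are illustrative stand-ins for
 * the 256-bit group order; they demonstrate the underlying identity with
 * plain square-and-multiply. */
#include <stdint.h>
#include <stdio.h>

static uint64_t mulmod(uint64_t a, uint64_t b, uint64_t m) {
    return (a * b) % m; /* safe here because m < 2^32 */
}

static uint64_t powmod(uint64_t base, uint64_t exp, uint64_t m) {
    uint64_t result = 1;
    base %= m;
    while (exp > 0) {
        if (exp & 1) {
            result = mulmod(result, base, m);
        }
        base = mulmod(base, base, m);
        exp >>= 1;
    }
    return result;
}

int main(void) {
    const uint64_t p = 1000003;         /* small prime standing in for the group order */
    const uint64_t x = 123456 % p;
    uint64_t inv = powmod(x, p - 2, p); /* x^(p-2) == x^-1 (mod p) */
    printf("x * x^-1 mod p = %llu\n", (unsigned long long)mulmod(x, inv, p));
    return 0;
}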
CHECK(secp256k1_ec_pubkey_negate(sign, NULL) == 0); + CHECK(ecount2 == 14); + CHECK(secp256k1_ec_pubkey_negate(vrfy, &zero_pubkey) == 0); CHECK(ecount == 3); + CHECK(secp256k1_ec_pubkey_tweak_mul(vrfy, &pubkey, ctmp) == 1); + CHECK(ecount == 3); + CHECK(secp256k1_context_randomize(vrfy, ctmp) == 0); + CHECK(ecount == 4); CHECK(secp256k1_context_randomize(sign, NULL) == 1); - CHECK(ecount2 == 13); + CHECK(ecount2 == 14); secp256k1_context_set_illegal_callback(vrfy, NULL, NULL); secp256k1_context_set_illegal_callback(sign, NULL, NULL); @@ -1879,9 +1891,9 @@ void test_ge(void) { * * When the endomorphism code is compiled in, p5 = lambda*p1 and p6 = lambda^2*p1 are added as well. */ - secp256k1_ge *ge = (secp256k1_ge *)malloc(sizeof(secp256k1_ge) * (1 + 4 * runs)); - secp256k1_gej *gej = (secp256k1_gej *)malloc(sizeof(secp256k1_gej) * (1 + 4 * runs)); - secp256k1_fe *zinv = (secp256k1_fe *)malloc(sizeof(secp256k1_fe) * (1 + 4 * runs)); + secp256k1_ge *ge = (secp256k1_ge *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_ge) * (1 + 4 * runs)); + secp256k1_gej *gej = (secp256k1_gej *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_gej) * (1 + 4 * runs)); + secp256k1_fe *zinv = (secp256k1_fe *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_fe) * (1 + 4 * runs)); secp256k1_fe zf; secp256k1_fe zfi2, zfi3; @@ -1919,7 +1931,7 @@ void test_ge(void) { /* Compute z inverses. */ { - secp256k1_fe *zs = malloc(sizeof(secp256k1_fe) * (1 + 4 * runs)); + secp256k1_fe *zs = checked_malloc(&ctx->error_callback, sizeof(secp256k1_fe) * (1 + 4 * runs)); for (i = 0; i < 4 * runs + 1; i++) { if (i == 0) { /* The point at infinity does not have a meaningful z inverse. Any should do. */ @@ -2020,7 +2032,7 @@ void test_ge(void) { /* Test adding all points together in random order equals infinity. */ { secp256k1_gej sum = SECP256K1_GEJ_CONST_INFINITY; - secp256k1_gej *gej_shuffled = (secp256k1_gej *)malloc((4 * runs + 1) * sizeof(secp256k1_gej)); + secp256k1_gej *gej_shuffled = (secp256k1_gej *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_gej)); for (i = 0; i < 4 * runs + 1; i++) { gej_shuffled[i] = gej[i]; } @@ -2041,9 +2053,9 @@ void test_ge(void) { /* Test batch gej -> ge conversion with and without known z ratios. */ { - secp256k1_fe *zr = (secp256k1_fe *)malloc((4 * runs + 1) * sizeof(secp256k1_fe)); - secp256k1_ge *ge_set_table = (secp256k1_ge *)malloc((4 * runs + 1) * sizeof(secp256k1_ge)); - secp256k1_ge *ge_set_all = (secp256k1_ge *)malloc((4 * runs + 1) * sizeof(secp256k1_ge)); + secp256k1_fe *zr = (secp256k1_fe *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_fe)); + secp256k1_ge *ge_set_table = (secp256k1_ge *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_ge)); + secp256k1_ge *ge_set_all = (secp256k1_ge *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_ge)); for (i = 0; i < 4 * runs + 1; i++) { /* Compute gej[i + 1].z / gez[i].z (with gej[n].z taken to be 1). 
*/ if (i < 4 * runs) { @@ -3436,6 +3448,7 @@ void test_ecdsa_end_to_end(void) { unsigned char pubkeyc[65]; size_t pubkeyclen = 65; secp256k1_pubkey pubkey; + secp256k1_pubkey pubkey_tmp; unsigned char seckey[300]; size_t seckeylen = 300; @@ -3457,6 +3470,13 @@ void test_ecdsa_end_to_end(void) { memset(&pubkey, 0, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1); + /* Verify negation changes the key and changes it back */ + memcpy(&pubkey_tmp, &pubkey, sizeof(pubkey)); + CHECK(secp256k1_ec_pubkey_negate(ctx, &pubkey_tmp) == 1); + CHECK(memcmp(&pubkey_tmp, &pubkey, sizeof(pubkey)) != 0); + CHECK(secp256k1_ec_pubkey_negate(ctx, &pubkey_tmp) == 1); + CHECK(memcmp(&pubkey_tmp, &pubkey, sizeof(pubkey)) == 0); + /* Verify private key import and export. */ CHECK(ec_privkey_export_der(ctx, seckey, &seckeylen, privkey, secp256k1_rand_bits(1) == 1)); CHECK(ec_privkey_import_der(ctx, privkey2, seckey, seckeylen) == 1); @@ -4383,10 +4403,6 @@ void run_ecdsa_openssl(void) { # include "modules/ecdh/tests_impl.h" #endif -#ifdef ENABLE_MODULE_SCHNORR -# include "modules/schnorr/tests_impl.h" -#endif - #ifdef ENABLE_MODULE_RECOVERY # include "modules/recovery/tests_impl.h" #endif @@ -4504,11 +4520,6 @@ int main(int argc, char **argv) { run_ecdsa_openssl(); #endif -#ifdef ENABLE_MODULE_SCHNORR - /* Schnorr tests */ - run_schnorr_tests(); -#endif - #ifdef ENABLE_MODULE_RECOVERY /* ECDSA pubkey recovery tests */ run_recovery_tests(); diff --git a/src/tests_exhaustive.c b/src/tests_exhaustive.c index bda6ee475..b040bb073 100644 --- a/src/tests_exhaustive.c +++ b/src/tests_exhaustive.c @@ -26,6 +26,11 @@ #include "secp256k1.c" #include "testrand_impl.h" +#ifdef ENABLE_MODULE_RECOVERY +#include "src/modules/recovery/main_impl.h" +#include "include/secp256k1_recovery.h" +#endif + /** stolen from tests.c */ void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) { CHECK(a->infinity == b->infinity); @@ -77,7 +82,7 @@ int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned cha * function with an increased `attempt`. So if attempt > 0 this means we * need to change the nonce to avoid an infinite loop. 
*/ if (attempt > 0) { - (*idata)++; + *idata = (*idata + 1) % EXHAUSTIVE_TEST_ORDER; } secp256k1_scalar_set_int(&s, *idata); secp256k1_scalar_get_b32(nonce32, &s); @@ -244,6 +249,7 @@ void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *grou for (i = 1; i < order; i++) { /* message */ for (j = 1; j < order; j++) { /* key */ for (k = 1; k < order; k++) { /* nonce */ + const int starting_k = k; secp256k1_ecdsa_signature sig; secp256k1_scalar sk, msg, r, s, expected_r; unsigned char sk32[32], msg32[32]; @@ -262,6 +268,11 @@ void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *grou CHECK(r == expected_r); CHECK((k * s) % order == (i + r * j) % order || (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order); + + /* Overflow means we've tried every possible nonce */ + if (k < starting_k) { + break; + } } } } @@ -276,6 +287,130 @@ void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *grou */ } +#ifdef ENABLE_MODULE_RECOVERY +void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) { + int i, j, k; + + /* Loop */ + for (i = 1; i < order; i++) { /* message */ + for (j = 1; j < order; j++) { /* key */ + for (k = 1; k < order; k++) { /* nonce */ + const int starting_k = k; + secp256k1_fe r_dot_y_normalized; + secp256k1_ecdsa_recoverable_signature rsig; + secp256k1_ecdsa_signature sig; + secp256k1_scalar sk, msg, r, s, expected_r; + unsigned char sk32[32], msg32[32]; + int expected_recid; + int recid; + secp256k1_scalar_set_int(&msg, i); + secp256k1_scalar_set_int(&sk, j); + secp256k1_scalar_get_b32(sk32, &sk); + secp256k1_scalar_get_b32(msg32, &msg); + + secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, secp256k1_nonce_function_smallint, &k); + + /* Check directly */ + secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig); + r_from_k(&expected_r, group, k); + CHECK(r == expected_r); + CHECK((k * s) % order == (i + r * j) % order || + (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order); + /* In computing the recid, there is an overflow condition that is disabled in + * scalar_low_impl.h `secp256k1_scalar_set_b32` because almost every r.y value + * will exceed the group order, and our signing code always holds out for r + * values that don't overflow, so with a proper overflow check the tests would + * loop indefinitely. */ + r_dot_y_normalized = group[k].y; + secp256k1_fe_normalize(&r_dot_y_normalized); + /* Also the recovery id is flipped depending if we hit the low-s branch */ + if ((k * s) % order == (i + r * j) % order) { + expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 1 : 0; + } else { + expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 0 : 1; + } + CHECK(recid == expected_recid); + + /* Convert to a standard sig then check */ + secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig); + secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig); + /* Note that we compute expected_r *after* signing -- this is important + * because our nonce-computing function function might change k during + * signing. 
*/ + r_from_k(&expected_r, group, k); + CHECK(r == expected_r); + CHECK((k * s) % order == (i + r * j) % order || + (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order); + + /* Overflow means we've tried every possible nonce */ + if (k < starting_k) { + break; + } + } + } + } +} + +void test_exhaustive_recovery_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) { + /* This is essentially a copy of test_exhaustive_verify, with recovery added */ + int s, r, msg, key; + for (s = 1; s < order; s++) { + for (r = 1; r < order; r++) { + for (msg = 1; msg < order; msg++) { + for (key = 1; key < order; key++) { + secp256k1_ge nonconst_ge; + secp256k1_ecdsa_recoverable_signature rsig; + secp256k1_ecdsa_signature sig; + secp256k1_pubkey pk; + secp256k1_scalar sk_s, msg_s, r_s, s_s; + secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s; + int recid = 0; + int k, should_verify; + unsigned char msg32[32]; + + secp256k1_scalar_set_int(&s_s, s); + secp256k1_scalar_set_int(&r_s, r); + secp256k1_scalar_set_int(&msg_s, msg); + secp256k1_scalar_set_int(&sk_s, key); + secp256k1_scalar_get_b32(msg32, &msg_s); + + /* Verify by hand */ + /* Run through every k value that gives us this r and check that *one* works. + * Note there could be none, there could be multiple, ECDSA is weird. */ + should_verify = 0; + for (k = 0; k < order; k++) { + secp256k1_scalar check_x_s; + r_from_k(&check_x_s, group, k); + if (r_s == check_x_s) { + secp256k1_scalar_set_int(&s_times_k_s, k); + secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); + secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); + secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); + should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); + } + } + /* nb we have a "high s" rule */ + should_verify &= !secp256k1_scalar_is_high(&s_s); + + /* We would like to try recovering the pubkey and checking that it matches, + * but pubkey recovery is impossible in the exhaustive tests (the reason + * being that there are 12 nonzero r values, 12 nonzero points, and no + * overlap between the sets, so there are no valid signatures). */ + + /* Verify by converting to a standard signature and calling verify */ + secp256k1_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid); + secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig); + memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge)); + secp256k1_pubkey_save(&pk, &nonconst_ge); + CHECK(should_verify == + secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk)); + } + } + } + } +} +#endif + int main(void) { int i; secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER]; @@ -324,6 +459,12 @@ int main(void) { test_exhaustive_sign(ctx, group, EXHAUSTIVE_TEST_ORDER); test_exhaustive_verify(ctx, group, EXHAUSTIVE_TEST_ORDER); +#ifdef ENABLE_MODULE_RECOVERY + test_exhaustive_recovery_sign(ctx, group, EXHAUSTIVE_TEST_ORDER); + test_exhaustive_recovery_verify(ctx, group, EXHAUSTIVE_TEST_ORDER); +#endif + + secp256k1_context_destroy(ctx); return 0; } diff --git a/src/util.h b/src/util.h index 4eef4ded4..4092a86c9 100644 --- a/src/util.h +++ b/src/util.h @@ -57,7 +57,10 @@ static SECP256K1_INLINE void secp256k1_callback_call(const secp256k1_callback * #endif /* Like assert(), but when VERIFY is defined, and side-effect safe. */ -#ifdef VERIFY +#if defined(COVERAGE) +#define VERIFY_CHECK(check) +#define VERIFY_SETUP(stmt) +#elif defined(VERIFY) #define VERIFY_CHECK CHECK #define VERIFY_SETUP(stmt) do { stmt; } while(0) #else
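For reference, the recovery-module flow that the new API and exhaustive tests exercise internally can be driven from the public API roughly as in the sketch below. This is a caller-side illustration, not part of the patch; it assumes the library was configured with --enable-module-recovery, and the fixed key and message bytes are placeholders only.

#include <string.h>
#include <stdio.h>
#include <secp256k1.h>
#include <secp256k1_recovery.h>

int main(void) {
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
    unsigned char seckey[32] = { 1 };   /* toy key, never use a fixed key in production */
    unsigned char msg32[32] = { 2 };    /* normally a 32-byte message hash */
    unsigned char sig64[64];
    int recid = -1;
    secp256k1_ecdsa_recoverable_signature rsig, rsig2;
    secp256k1_pubkey expected, recovered;

    if (!secp256k1_ec_pubkey_create(ctx, &expected, seckey) ||
        !secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, seckey, NULL, NULL) ||
        !secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig64, &recid, &rsig) ||
        !secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig2, sig64, recid) ||
        !secp256k1_ecdsa_recover(ctx, &recovered, &rsig2, msg32)) {
        fprintf(stderr, "recovery round-trip failed\n");
        secp256k1_context_destroy(ctx);
        return 1;
    }
    /* The recovery id (0..3) plus the 64-byte compact signature suffice to get the key back. */
    printf("recid = %d, pubkey %s\n", recid,
           memcmp(&expected, &recovered, sizeof(expected)) == 0 ? "recovered" : "MISMATCH");
    secp256k1_context_destroy(ctx);
    return 0;
}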