diff --git a/depends/packages/gmp.mk b/depends/packages/gmp.mk
deleted file mode 100644
index bcbf50cea..000000000
--- a/depends/packages/gmp.mk
+++ /dev/null
@@ -1,30 +0,0 @@
-package=gmp
-$(package)_version=6.0.0a
-$(package)_download_path=https://gmplib.org/download/gmp
-$(package)_file_name=$(package)-$($(package)_version).tar.bz2
-$(package)_sha256_hash=7f8e9a804b9c6d07164cf754207be838ece1219425d64e28cfa3e70d5c759aaf
-$(package)_patches=arm_gmp_build_fix.patch darwin_gmp_build_fix.patch
-
-define $(package)_preprocess_cmds
-  patch -p1 < $($(package)_patch_dir)/arm_gmp_build_fix.patch && \
-  patch -p1 < $($(package)_patch_dir)/darwin_gmp_build_fix.patch
-endef
-
-define $(package)_set_vars
-$(package)_config_opts=--disable-shared CC_FOR_BUILD=$(build_CC)
-$(package)_config_opts_x86_64_darwin=--with-pic
-$(package)_config_opts_x86_64_linux=--with-pic
-$(package)_config_opts_arm_linux=--with-pic
-endef
-
-define $(package)_config_cmds
-  $($(package)_autoconf)
-endef
-
-define $(package)_build_cmds
-  $(MAKE)
-endef
-
-define $(package)_stage_cmds
-  $(MAKE) DESTDIR=$($(package)_staging_dir) install
-endef
diff --git a/depends/packages/packages.mk b/depends/packages/packages.mk
index 305d21cb2..bbf53cc2d 100644
--- a/depends/packages/packages.mk
+++ b/depends/packages/packages.mk
@@ -1,4 +1,4 @@
-packages:=boost openssl gmp
+packages:=boost openssl
 native_packages := native_ccache native_comparisontool
 qt_native_packages = native_protobuf
diff --git a/depends/patches/gmp/arm_gmp_build_fix.patch b/depends/patches/gmp/arm_gmp_build_fix.patch
deleted file mode 100644
index 666cf58cf..000000000
--- a/depends/patches/gmp/arm_gmp_build_fix.patch
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# HG changeset patch
-# User Torbjorn Granlund
-# Date 1396602422 -7200
-# Node ID 676e2d0f0e4dd301a7066079d2c9326c25c34a40
-# Parent  0194a75b56b21a9196626430af86c5bd9110c42d
-Conditionalise ARM asm on !__thumb__.
-
-diff -r 0194a75b56b2 -r 676e2d0f0e4d mpn/generic/div_qr_1n_pi1.c
---- a/mpn/generic/div_qr_1n_pi1.c	Thu Apr 03 23:58:51 2014 +0200
-+++ b/mpn/generic/div_qr_1n_pi1.c	Fri Apr 04 11:07:02 2014 +0200
-@@ -130,7 +130,7 @@
- 	     "%2" ((UDItype)(a0)), "r" ((UDItype)(b0)) __CLOBBER_CC)
- #endif
-
--#if defined (__arm__) && W_TYPE_SIZE == 32
-+#if defined (__arm__) && !defined (__thumb__) && W_TYPE_SIZE == 32
- #define add_mssaaaa(m, sh, sl, ah, al, bh, bl)	\
-   __asm__ (  "adds  %2, %5, %6\n\t"		\
- 	     "adcs  %1, %3, %4\n\t"		\
diff --git a/depends/patches/gmp/darwin_gmp_build_fix.patch b/depends/patches/gmp/darwin_gmp_build_fix.patch
deleted file mode 100644
index b9cfd80e7..000000000
--- a/depends/patches/gmp/darwin_gmp_build_fix.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-
-# HG changeset patch
-# User Torbjorn Granlund
-# Date 1396470504 -7200
-# Node ID 1fab0adc5ff7d9ecddcbda96f407da58347bb49c
-# Parent  db645603dcdb41afcf78b19b551ecd5a01c3841c
-Workaround for Darwin assembler quirk.
-
-diff -r db645603dcdb -r 1fab0adc5ff7 mpn/x86_64/k8/redc_1.asm
---- a/mpn/x86_64/k8/redc_1.asm	Mon Mar 31 23:04:32 2014 +0200
-+++ b/mpn/x86_64/k8/redc_1.asm	Wed Apr 02 22:28:24 2014 +0200
-@@ -114,7 +114,7 @@
-
- 	JUMPTABSECT
- 	ALIGN(8)
--L(tab):	JMPENT(	L(0m4), L(tab))
-+L(tab):	JMPENT(	L(0), L(tab))
- 	JMPENT(	L(1), L(tab))
- 	JMPENT(	L(2), L(tab))
- 	JMPENT(	L(3), L(tab))
-@@ -397,6 +397,7 @@
-
-
- 	ALIGN(16)
-+L(0):
- L(0m4):
- L(lo0):	mov	(mp,nneg,8), %rax
- 	mov	nneg, i
-
diff --git a/doc/build-osx.md b/doc/build-osx.md
index 491c5c468..c41820f2b 100644
--- a/doc/build-osx.md
+++ b/doc/build-osx.md
@@ -38,7 +38,7 @@ Instructions: Homebrew
 
 #### Install dependencies using Homebrew
 
-        brew install autoconf automake libtool boost miniupnpc openssl pkg-config protobuf qt gmp
+        brew install autoconf automake libtool boost miniupnpc openssl pkg-config protobuf qt
 
 #### Installing berkeley-db4 using Homebrew
 
diff --git a/doc/build-unix.md b/doc/build-unix.md
index 9bb1a53ab..8ddee3b75 100644
--- a/doc/build-unix.md
+++ b/doc/build-unix.md
@@ -33,7 +33,6 @@ These dependencies are required:
 ------------|------------------|----------------------
 libssl      | SSL Support      | Secure communications
 libboost    | Boost            | C++ Library
-libgmp      | secp256k1        | Arbitrary-precision arithmetic (version >= 3.1)
 
 Optional dependencies:
 
@@ -58,7 +57,7 @@ Dependency Build Instructions: Ubuntu & Debian
 ----------------------------------------------
 Build requirements:
 
-    sudo apt-get install build-essential libtool autotools-dev autoconf pkg-config libssl-dev libgmp-dev
+    sudo apt-get install build-essential libtool autotools-dev autoconf pkg-config libssl-dev
 
 for Ubuntu 12.04 and later or Debian 7 and later libboost-all-dev has to be installed:
 
diff --git a/src/secp256k1/.travis.yml b/src/secp256k1/.travis.yml
index 24a86b561..3a85e8cba 100644
--- a/src/secp256k1/.travis.yml
+++ b/src/secp256k1/.travis.yml
@@ -18,6 +18,8 @@ env:
   - FIELD=64bit ENDOMORPHISM=yes
   - FIELD=32bit
   - FIELD=32bit ENDOMORPHISM=yes
+  - BIGNUM=none
+  - BIGNUM=none ENDOMORPHISM=yes
   - BUILD=distcheck
   - EXTRAFLAGS=CFLAGS=-DDETERMINISTIC
 before_script: ./autogen.sh
diff --git a/src/secp256k1/Makefile.am b/src/secp256k1/Makefile.am
index d527da6b7..dbf1790f3 100644
--- a/src/secp256k1/Makefile.am
+++ b/src/secp256k1/Makefile.am
@@ -68,12 +68,13 @@ bench_sign_LDFLAGS = -static
 bench_inv_SOURCES = src/bench_inv.c
 bench_inv_LDADD = $(COMMON_LIB) $(SECP_LIBS)
 bench_inv_LDFLAGS = -static
+bench_inv_CPPFLAGS = $(SECP_INCLUDES)
 endif
 
 if USE_TESTS
 noinst_PROGRAMS += tests
 tests_SOURCES = src/tests.c
-tests_CPPFLAGS = -DVERIFY $(SECP_TEST_INCLUDES)
+tests_CPPFLAGS = -DVERIFY $(SECP_INCLUDES) $(SECP_TEST_INCLUDES)
 tests_LDADD = $(COMMON_LIB) $(SECP_LIBS) $(SECP_TEST_LIBS)
 tests_LDFLAGS = -static
 TESTS = tests
diff --git a/src/secp256k1/build-aux/m4/bitcoin_secp.m4 b/src/secp256k1/build-aux/m4/bitcoin_secp.m4
index e6f3470ed..4ca28f99c 100644
--- a/src/secp256k1/build-aux/m4/bitcoin_secp.m4
+++ b/src/secp256k1/build-aux/m4/bitcoin_secp.m4
@@ -78,7 +78,13 @@ fi
 dnl
 AC_DEFUN([SECP_GMP_CHECK],[
 if test x"$has_gmp" != x"yes"; then
-  AC_CHECK_HEADER(gmp.h,[AC_CHECK_LIB(gmp, __gmpz_init,[has_gmp=yes; GMP_LIBS=-lgmp; AC_DEFINE(HAVE_LIBGMP,1,[Define this symbol if libgmp is installed])])])
+  CPPFLAGS_TEMP="$CPPFLAGS"
+  CPPFLAGS="$GMP_CPPFLAGS $CPPFLAGS"
+  LIBS_TEMP="$LIBS"
+  LIBS="$GMP_LIBS $LIBS"
+  AC_CHECK_HEADER(gmp.h,[AC_CHECK_LIB(gmp, __gmpz_init,[has_gmp=yes; GMP_LIBS="$GMP_LIBS -lgmp"; AC_DEFINE(HAVE_LIBGMP,1,[Define this symbol if libgmp is installed])])])
CPPFLAGS="$CPPFLAGS_TEMP" + LIBS="$LIBS_TEMP" fi if test x"$set_field" = x"gmp" && test x"$has_gmp" != x"yes"; then AC_MSG_ERROR([$set_field field support explicitly requested but libgmp was not found]) diff --git a/src/secp256k1/configure.ac b/src/secp256k1/configure.ac index 2da570983..6e6fccd7f 100644 --- a/src/secp256k1/configure.ac +++ b/src/secp256k1/configure.ac @@ -33,10 +33,35 @@ case $host in esac case $host_os in - darwin*) - CPPFLAGS="$CPPFLAGS -I/opt/local/include" - LDFLAGS="$LDFLAGS -L/opt/local/lib" - ;; + *darwin*) + if test x$cross_compiling != xyes; then + AC_PATH_PROG([BREW],brew,) + if test x$BREW != x; then + dnl These Homebrew packages may be keg-only, meaning that they won't be found + dnl in expected paths because they may conflict with system files. Ask + dnl Homebrew where each one is located, then adjust paths accordingly. + + openssl_prefix=`$BREW --prefix openssl 2>/dev/null` + gmp_prefix=`$BREW --prefix gmp 2>/dev/null` + if test x$openssl_prefix != x; then + PKG_CONFIG_PATH="$openssl_prefix/lib/pkgconfig:$PKG_CONFIG_PATH" + export PKG_CONFIG_PATH + fi + if test x$gmp_prefix != x; then + GMP_CPPFLAGS="-I$gmp_prefix/include" + GMP_LIBS="-L$gmp_prefix/lib" + fi + else + AC_PATH_PROG([PORT],port,) + dnl if homebrew isn't installed and macports is, add the macports default paths + dnl as a last resort. + if test x$PORT != x; then + CPPFLAGS="$CPPFLAGS -isystem /opt/local/include" + LDFLAGS="$LDFLAGS -L/opt/local/lib" + fi + fi + fi + ;; esac CFLAGS="$CFLAGS -W" @@ -70,7 +95,7 @@ AC_ARG_ENABLE(endomorphism, AC_ARG_WITH([field], [AS_HELP_STRING([--with-field=gmp|64bit|64bit_asm|32bit|auto], [Specify Field Implementation. Default is auto])],[req_field=$withval], [req_field=auto]) -AC_ARG_WITH([bignum], [AS_HELP_STRING([--with-bignum=gmp|auto], +AC_ARG_WITH([bignum], [AS_HELP_STRING([--with-bignum=gmp|none|auto], [Specify Bignum Implementation. 
 [Specify Bignum Implementation. Default is auto])],[req_bignum=$withval], [req_bignum=auto])
 
 AC_ARG_WITH([scalar], [AS_HELP_STRING([--with-scalar=64bit|32bit|auto],
@@ -154,7 +179,7 @@ if test x"$req_bignum" = x"auto"; then
   fi
 
   if test x"$set_bignum" = x; then
-    AC_MSG_ERROR([no working bignum implementation found])
+    set_bignum=none
   fi
 else
   set_bignum=$req_bignum
@@ -162,8 +187,7 @@ else
   gmp)
     SECP_GMP_CHECK
     ;;
-  openssl)
-    SECP_OPENSSL_CHECK
+  none)
     ;;
   *)
     AC_MSG_ERROR([invalid bignum implementation selection])
@@ -196,9 +220,15 @@ esac
 # select bignum implementation
 case $set_bignum in
 gmp)
-  AC_DEFINE(HAVE_LIBGMP,1,[Define this symbol if libgmp is installed])
-  AC_DEFINE(USE_NUM_GMP, 1, [Define this symbol to use the gmp implementation])
-  AC_DEFINE(USE_FIELD_INV_NUM, 1, [Define this symbol to use the USE_FIELD_INV_NUM implementation])
+  AC_DEFINE(HAVE_LIBGMP, 1, [Define this symbol if libgmp is installed])
+  AC_DEFINE(USE_NUM_GMP, 1, [Define this symbol to use the gmp implementation for num])
+  AC_DEFINE(USE_FIELD_INV_NUM, 1, [Define this symbol to use the num-based field inverse implementation])
+  AC_DEFINE(USE_SCALAR_INV_NUM, 1, [Define this symbol to use the num-based scalar inverse implementation])
+  ;;
+none)
+  AC_DEFINE(USE_NUM_NONE, 1, [Define this symbol to use no num implementation])
+  AC_DEFINE(USE_FIELD_INV_BUILTIN, 1, [Define this symbol to use the native field inverse implementation])
+  AC_DEFINE(USE_SCALAR_INV_BUILTIN, 1, [Define this symbol to use the native scalar inverse implementation])
   ;;
 *)
   AC_MSG_ERROR([invalid bignum implementation])
@@ -236,10 +266,11 @@ fi
 
 if test x"$set_field" = x"gmp" || test x"$set_bignum" = x"gmp"; then
   SECP_LIBS="$SECP_LIBS $GMP_LIBS"
+  SECP_INCLUDES="$SECP_INCLUDES $GMP_CPPFLAGS"
 fi
 
 if test x"$use_endomorphism" = x"yes"; then
-  AC_DEFINE(USE_ENDOMORPHISM, 1, [Define this symbol to use endomorphism])
+  AC_DEFINE(USE_ENDOMORPHISM, 1, [Define this symbol to use endomorphism optimization])
 fi
 
 AC_MSG_NOTICE([Using field implementation: $set_field])
@@ -256,4 +287,10 @@ AC_SUBST(YASM_BINFMT)
 AM_CONDITIONAL([USE_ASM], [test x"$set_field" == x"64bit_asm"])
 AM_CONDITIONAL([USE_TESTS], [test x"$use_tests" != x"no"])
 AM_CONDITIONAL([USE_BENCHMARK], [test x"$use_benchmark" != x"no"])
+
+dnl make sure nothing new is exported so that we don't break the cache
+PKGCONFIG_PATH_TEMP="$PKG_CONFIG_PATH"
+unset PKG_CONFIG_PATH
+PKG_CONFIG_PATH="$PKGCONFIG_PATH_TEMP"
+
 AC_OUTPUT
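
With the `none` bignum option wired into configure above, the library can now be built and tested without GMP at all. An illustrative invocation (commands assumed; `./autogen.sh` is the same step .travis.yml already runs):

    cd src/secp256k1
    ./autogen.sh
    ./configure --with-bignum=none
    make check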
diff --git a/src/secp256k1/include/secp256k1.h b/src/secp256k1/include/secp256k1.h
index 932bf0279..94a6ef483 100644
--- a/src/secp256k1/include/secp256k1.h
+++ b/src/secp256k1/include/secp256k1.h
@@ -14,18 +14,6 @@ extern "C" {
 #  endif
 # endif
 
-# if (!defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) )
-#  if SECP256K1_GNUC_PREREQ(3,0)
-#   define SECP256K1_RESTRICT __restrict__
-#  elif (defined(_MSC_VER) && _MSC_VER >= 1400)
-#   define SECP256K1_RESTRICT __restrict
-#  else
-#   define SECP256K1_RESTRICT
-#  endif
-# else
-#  define SECP256K1_RESTRICT restrict
-# endif
-
 # if (!defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) )
 #  if SECP256K1_GNUC_PREREQ(2,7)
 #   define SECP256K1_INLINE __inline__
diff --git a/src/secp256k1/src/ecdsa.h b/src/secp256k1/src/ecdsa.h
index 3b1e0484e..5fc5230c3 100644
--- a/src/secp256k1/src/ecdsa.h
+++ b/src/secp256k1/src/ecdsa.h
@@ -7,17 +7,21 @@
 #ifndef _SECP256K1_ECDSA_
 #define _SECP256K1_ECDSA_
 
-#include "num.h"
+#include "scalar.h"
+#include "group.h"
+
+static void secp256k1_ecdsa_start(void);
+static void secp256k1_ecdsa_stop(void);
 
 typedef struct {
-    secp256k1_num_t r, s;
+    secp256k1_scalar_t r, s;
 } secp256k1_ecdsa_sig_t;
 
 static int secp256k1_ecdsa_sig_parse(secp256k1_ecdsa_sig_t *r, const unsigned char *sig, int size);
 static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, int *size, const secp256k1_ecdsa_sig_t *a);
-static int secp256k1_ecdsa_sig_verify(const secp256k1_ecdsa_sig_t *sig, const secp256k1_ge_t *pubkey, const secp256k1_num_t *message);
+static int secp256k1_ecdsa_sig_verify(const secp256k1_ecdsa_sig_t *sig, const secp256k1_ge_t *pubkey, const secp256k1_scalar_t *message);
 static int secp256k1_ecdsa_sig_sign(secp256k1_ecdsa_sig_t *sig, const secp256k1_scalar_t *seckey, const secp256k1_scalar_t *message, const secp256k1_scalar_t *nonce, int *recid);
-static int secp256k1_ecdsa_sig_recover(const secp256k1_ecdsa_sig_t *sig, secp256k1_ge_t *pubkey, const secp256k1_num_t *message, int recid);
-static void secp256k1_ecdsa_sig_set_rs(secp256k1_ecdsa_sig_t *sig, const secp256k1_num_t *r, const secp256k1_num_t *s);
+static int secp256k1_ecdsa_sig_recover(const secp256k1_ecdsa_sig_t *sig, secp256k1_ge_t *pubkey, const secp256k1_scalar_t *message, int recid);
+static void secp256k1_ecdsa_sig_set_rs(secp256k1_ecdsa_sig_t *sig, const secp256k1_scalar_t *r, const secp256k1_scalar_t *s);
 
 #endif
diff --git a/src/secp256k1/src/ecdsa_impl.h b/src/secp256k1/src/ecdsa_impl.h
index 4c05ec39f..a951d0b4a 100644
--- a/src/secp256k1/src/ecdsa_impl.h
+++ b/src/secp256k1/src/ecdsa_impl.h
@@ -8,13 +8,51 @@
 #ifndef _SECP256K1_ECDSA_IMPL_H_
 #define _SECP256K1_ECDSA_IMPL_H_
 
-#include "num.h"
+#include "scalar.h"
 #include "field.h"
 #include "group.h"
 #include "ecmult.h"
 #include "ecmult_gen.h"
 #include "ecdsa.h"
 
+typedef struct {
+    secp256k1_fe_t order_as_fe;
+    secp256k1_fe_t p_minus_order;
+} secp256k1_ecdsa_consts_t;
+
+static const secp256k1_ecdsa_consts_t *secp256k1_ecdsa_consts = NULL;
+
+static void secp256k1_ecdsa_start(void) {
+    if (secp256k1_ecdsa_consts != NULL)
+        return;
+
+    /* Allocate. */
+    secp256k1_ecdsa_consts_t *ret = (secp256k1_ecdsa_consts_t*)malloc(sizeof(secp256k1_ecdsa_consts_t));
+
+    static const unsigned char order[] = {
+        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
+        0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,
+        0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41
+    };
+
+    secp256k1_fe_set_b32(&ret->order_as_fe, order);
+    secp256k1_fe_negate(&ret->p_minus_order, &ret->order_as_fe, 1);
+    secp256k1_fe_normalize(&ret->p_minus_order);
+
+    /* Set the global pointer. */
+    secp256k1_ecdsa_consts = ret;
+}
+
+static void secp256k1_ecdsa_stop(void) {
+    if (secp256k1_ecdsa_consts == NULL)
+        return;
+
+    secp256k1_ecdsa_consts_t *c = (secp256k1_ecdsa_consts_t*)secp256k1_ecdsa_consts;
+    secp256k1_ecdsa_consts = NULL;
+    free(c);
+}
+
 static int secp256k1_ecdsa_sig_parse(secp256k1_ecdsa_sig_t *r, const unsigned char *sig, int size) {
     if (sig[0] != 0x30) return 0;
     int lenr = sig[3];
@@ -26,18 +64,37 @@ static int secp256k1_ecdsa_sig_parse(secp256k1_ecdsa_sig_t *r, const unsigned char *sig, int size) {
     if (lenr == 0) return 0;
     if (sig[lenr+4] != 0x02) return 0;
     if (lens == 0) return 0;
-    secp256k1_num_set_bin(&r->r, sig+4, lenr);
-    secp256k1_num_set_bin(&r->s, sig+6+lenr, lens);
+    const unsigned char *sp = sig + 6 + lenr;
+    while (lens > 0 && sp[0] == 0) {
+        lens--;
+        sp++;
+    }
+    if (lens > 32) return 0;
+    const unsigned char *rp = sig + 4;
+    while (lenr > 0 && rp[0] == 0) {
+        lenr--;
+        rp++;
+    }
+    if (lenr > 32) return 0;
+    unsigned char ra[32] = {0}, sa[32] = {0};
+    memcpy(ra + 32 - lenr, rp, lenr);
+    memcpy(sa + 32 - lens, sp, lens);
+    int overflow = 0;
+    secp256k1_scalar_set_b32(&r->r, ra, &overflow);
+    if (overflow) return 0;
+    secp256k1_scalar_set_b32(&r->s, sa, &overflow);
+    if (overflow) return 0;
     return 1;
 }
 
 static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, int *size, const secp256k1_ecdsa_sig_t *a) {
-    int lenR = (secp256k1_num_bits(&a->r) + 7)/8;
-    if (lenR == 0 || secp256k1_num_get_bit(&a->r, lenR*8-1))
-        lenR++;
-    int lenS = (secp256k1_num_bits(&a->s) + 7)/8;
-    if (lenS == 0 || secp256k1_num_get_bit(&a->s, lenS*8-1))
-        lenS++;
+    unsigned char r[33] = {0}, s[33] = {0};
+    secp256k1_scalar_get_b32(&r[1], &a->r);
+    secp256k1_scalar_get_b32(&s[1], &a->s);
+    unsigned char *rp = r, *sp = s;
+    int lenR = 33, lenS = 33;
+    while (lenR > 1 && rp[0] == 0 && rp[1] < 0x80) { lenR--; rp++; }
+    while (lenS > 1 && sp[0] == 0 && sp[1] < 0x80) { lenS--; sp++; }
     if (*size < 6+lenS+lenR)
         return 0;
     *size = 6 + lenS + lenR;
@@ -45,98 +102,67 @@ static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, int *size, const secp256k1_ecdsa_sig_t *a) {
     sig[1] = 4 + lenS + lenR;
     sig[2] = 0x02;
     sig[3] = lenR;
-    secp256k1_num_get_bin(sig+4, lenR, &a->r);
+    memcpy(sig+4, rp, lenR);
     sig[4+lenR] = 0x02;
     sig[5+lenR] = lenS;
-    secp256k1_num_get_bin(sig+lenR+6, lenS, &a->s);
+    memcpy(sig+lenR+6, sp, lenS);
     return 1;
 }
 
-static int secp256k1_ecdsa_sig_recompute(secp256k1_num_t *r2, const secp256k1_ecdsa_sig_t *sig, const secp256k1_ge_t *pubkey, const secp256k1_num_t *message) {
-    const secp256k1_ge_consts_t *c = secp256k1_ge_consts;
-
-    if (secp256k1_num_is_neg(&sig->r) || secp256k1_num_is_neg(&sig->s))
-        return 0;
-    if (secp256k1_num_is_zero(&sig->r) || secp256k1_num_is_zero(&sig->s))
-        return 0;
-    if (secp256k1_num_cmp(&sig->r, &c->order) >= 0 || secp256k1_num_cmp(&sig->s, &c->order) >= 0)
+static int secp256k1_ecdsa_sig_recompute(secp256k1_scalar_t *r2, const secp256k1_ecdsa_sig_t *sig, const secp256k1_ge_t *pubkey, const secp256k1_scalar_t *message) {
+    if (secp256k1_scalar_is_zero(&sig->r) || secp256k1_scalar_is_zero(&sig->s))
        return 0;
 
     int ret = 0;
-    secp256k1_num_t sn, u1, u2;
-    secp256k1_num_init(&sn);
-    secp256k1_num_init(&u1);
-    secp256k1_num_init(&u2);
-    secp256k1_num_mod_inverse(&sn, &sig->s, &c->order);
-    secp256k1_num_mod_mul(&u1, &sn, message, &c->order);
-    secp256k1_num_mod_mul(&u2, &sn, &sig->r, &c->order);
+    secp256k1_scalar_t sn, u1, u2;
+    secp256k1_scalar_inverse_var(&sn, &sig->s);
+    secp256k1_scalar_mul(&u1, &sn, message);
+    secp256k1_scalar_mul(&u2, &sn, &sig->r);
     secp256k1_gej_t pubkeyj;
     secp256k1_gej_set_ge(&pubkeyj, pubkey);
     secp256k1_gej_t pr;
     secp256k1_ecmult(&pr, &pubkeyj, &u2, &u1);
     if (!secp256k1_gej_is_infinity(&pr)) {
         secp256k1_fe_t xr;
         secp256k1_gej_get_x_var(&xr, &pr);
         secp256k1_fe_normalize(&xr);
         unsigned char xrb[32];
         secp256k1_fe_get_b32(xrb, &xr);
-        secp256k1_num_set_bin(r2, xrb, 32);
-        secp256k1_num_mod(r2, &c->order);
+        secp256k1_scalar_set_b32(r2, xrb, NULL);
         ret = 1;
     }
-    secp256k1_num_free(&sn);
-    secp256k1_num_free(&u1);
-    secp256k1_num_free(&u2);
     return ret;
 }
 
-static int secp256k1_ecdsa_sig_recover(const secp256k1_ecdsa_sig_t *sig, secp256k1_ge_t *pubkey, const secp256k1_num_t *message, int recid) {
-    const secp256k1_ge_consts_t *c = secp256k1_ge_consts;
-
-    if (secp256k1_num_is_neg(&sig->r) || secp256k1_num_is_neg(&sig->s))
-        return 0;
-    if (secp256k1_num_is_zero(&sig->r) || secp256k1_num_is_zero(&sig->s))
-        return 0;
-    if (secp256k1_num_cmp(&sig->r, &c->order) >= 0 || secp256k1_num_cmp(&sig->s, &c->order) >= 0)
+static int secp256k1_ecdsa_sig_recover(const secp256k1_ecdsa_sig_t *sig, secp256k1_ge_t *pubkey, const secp256k1_scalar_t *message, int recid) {
+    if (secp256k1_scalar_is_zero(&sig->r) || secp256k1_scalar_is_zero(&sig->s))
         return 0;
 
-    secp256k1_num_t rx;
-    secp256k1_num_init(&rx);
-    secp256k1_num_copy(&rx, &sig->r);
+    unsigned char brx[32];
+    secp256k1_scalar_get_b32(brx, &sig->r);
+    secp256k1_fe_t fx;
+    VERIFY_CHECK(secp256k1_fe_set_b32(&fx, brx)); /* brx comes from a scalar, so is less than the order; certainly less than p */
     if (recid & 2) {
-        secp256k1_num_add(&rx, &rx, &c->order);
-        if (secp256k1_num_cmp(&rx, &secp256k1_fe_consts->p) >= 0)
+        if (secp256k1_fe_cmp_var(&fx, &secp256k1_ecdsa_consts->p_minus_order) >= 0)
             return 0;
+        secp256k1_fe_add(&fx, &secp256k1_ecdsa_consts->order_as_fe);
     }
-    unsigned char brx[32];
-    secp256k1_num_get_bin(brx, 32, &rx);
-    secp256k1_num_free(&rx);
-    secp256k1_fe_t fx;
-    secp256k1_fe_set_b32(&fx, brx);
     secp256k1_ge_t x;
     if (!secp256k1_ge_set_xo(&x, &fx, recid & 1))
         return 0;
     secp256k1_gej_t xj;
     secp256k1_gej_set_ge(&xj, &x);
-    secp256k1_num_t rn, u1, u2;
-    secp256k1_num_init(&rn);
-    secp256k1_num_init(&u1);
-    secp256k1_num_init(&u2);
-    secp256k1_num_mod_inverse(&rn, &sig->r, &c->order);
-    secp256k1_num_mod_mul(&u1, &rn, message, &c->order);
-    secp256k1_num_sub(&u1, &c->order, &u1);
-    secp256k1_num_mod_mul(&u2, &rn, &sig->s, &c->order);
+    secp256k1_scalar_t rn, u1, u2;
+    secp256k1_scalar_inverse_var(&rn, &sig->r);
+    secp256k1_scalar_mul(&u1, &rn, message);
+    secp256k1_scalar_negate(&u1, &u1);
+    secp256k1_scalar_mul(&u2, &rn, &sig->s);
     secp256k1_gej_t qj;
     secp256k1_ecmult(&qj, &xj, &u2, &u1);
     secp256k1_ge_set_gej_var(pubkey, &qj);
-    secp256k1_num_free(&rn);
-    secp256k1_num_free(&u1);
-    secp256k1_num_free(&u2);
     return !secp256k1_gej_is_infinity(&qj);
 }
 
-static int secp256k1_ecdsa_sig_verify(const secp256k1_ecdsa_sig_t *sig, const secp256k1_ge_t *pubkey, const secp256k1_num_t *message) {
-    secp256k1_num_t r2;
-    secp256k1_num_init(&r2);
+static int secp256k1_ecdsa_sig_verify(const secp256k1_ecdsa_sig_t *sig, const secp256k1_ge_t *pubkey, const secp256k1_scalar_t *message) {
+    secp256k1_scalar_t r2;
     int ret = 0;
-    ret = secp256k1_ecdsa_sig_recompute(&r2, sig, pubkey, message) && secp256k1_num_cmp(&sig->r, &r2) == 0;
-    secp256k1_num_free(&r2);
+    ret = secp256k1_ecdsa_sig_recompute(&r2, sig, pubkey, message) && secp256k1_scalar_eq(&sig->r, &r2);
     return ret;
 }
 
@@ -150,34 +176,30 @@ static int secp256k1_ecdsa_sig_sign(secp256k1_ecdsa_sig_t *sig, const secp256k1_scalar_t *seckey, const secp256k1_scalar_t *message, const secp256k1_scalar_t *nonce, int *recid) {
     secp256k1_fe_normalize(&r.y);
     secp256k1_fe_get_b32(b, &r.x);
     int overflow = 0;
-    secp256k1_scalar_t sigr;
-    secp256k1_scalar_set_b32(&sigr, b, &overflow);
+    secp256k1_scalar_set_b32(&sig->r, b, &overflow);
     if (recid)
         *recid = (overflow ? 2 : 0) | (secp256k1_fe_is_odd(&r.y) ? 1 : 0);
     secp256k1_scalar_t n;
-    secp256k1_scalar_mul(&n, &sigr, seckey);
+    secp256k1_scalar_mul(&n, &sig->r, seckey);
     secp256k1_scalar_add(&n, &n, message);
-    secp256k1_scalar_t sigs;
-    secp256k1_scalar_inverse(&sigs, nonce);
-    secp256k1_scalar_mul(&sigs, &sigs, &n);
+    secp256k1_scalar_inverse(&sig->s, nonce);
+    secp256k1_scalar_mul(&sig->s, &sig->s, &n);
     secp256k1_scalar_clear(&n);
     secp256k1_gej_clear(&rp);
     secp256k1_ge_clear(&r);
-    if (secp256k1_scalar_is_zero(&sigs))
+    if (secp256k1_scalar_is_zero(&sig->s))
         return 0;
-    if (secp256k1_scalar_is_high(&sigs)) {
-        secp256k1_scalar_negate(&sigs, &sigs);
+    if (secp256k1_scalar_is_high(&sig->s)) {
+        secp256k1_scalar_negate(&sig->s, &sig->s);
         if (recid)
             *recid ^= 1;
     }
-    secp256k1_scalar_get_num(&sig->s, &sigs);
-    secp256k1_scalar_get_num(&sig->r, &sigr);
     return 1;
 }
 
-static void secp256k1_ecdsa_sig_set_rs(secp256k1_ecdsa_sig_t *sig, const secp256k1_num_t *r, const secp256k1_num_t *s) {
-    secp256k1_num_copy(&sig->r, r);
-    secp256k1_num_copy(&sig->s, s);
+static void secp256k1_ecdsa_sig_set_rs(secp256k1_ecdsa_sig_t *sig, const secp256k1_scalar_t *r, const secp256k1_scalar_t *s) {
+    sig->r = *r;
+    sig->s = *s;
 }
 
 #endif
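
The parse/serialize pair above replaces arbitrary-precision DER handling with fixed 32-byte buffers plus leading-zero trimming. A minimal standalone sketch of the trimming rule (der_int_len is a hypothetical helper mirroring the lenR/lenS loops, not a library routine): a zero byte is kept only when the next byte has its top bit set, so the DER integer stays positive and minimal.

    #include <stdio.h>

    /* Length of the minimal DER integer encoding of a 32-byte big-endian
     * value stored right-aligned in a 33-byte buffer (b33[0] is a spare
     * pad byte), as in the serializer above. */
    static int der_int_len(const unsigned char *b33) {
        int len = 33;
        while (len > 1 && b33[0] == 0 && b33[1] < 0x80) { len--; b33++; }
        return len;
    }

    int main(void) {
        unsigned char r[33] = {0};
        r[1] = 0x80;                    /* top bit of the value set: pad byte kept */
        printf("%d\n", der_int_len(r)); /* prints 33 */
        r[1] = 0x7f;
        printf("%d\n", der_int_len(r)); /* prints 32: pad byte dropped */
        return 0;
    }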
diff --git a/src/secp256k1/src/eckey.h b/src/secp256k1/src/eckey.h
index 024c8b821..6de5dc0a5 100644
--- a/src/secp256k1/src/eckey.h
+++ b/src/secp256k1/src/eckey.h
@@ -9,7 +9,6 @@
 
 #include "group.h"
 #include "scalar.h"
-#include "num.h"
 
 static int secp256k1_eckey_pubkey_parse(secp256k1_ge_t *elem, const unsigned char *pub, int size);
 static int secp256k1_eckey_pubkey_serialize(secp256k1_ge_t *elem, unsigned char *pub, int *size, int compressed);
@@ -18,8 +17,8 @@ static int secp256k1_eckey_privkey_parse(secp256k1_scalar_t *key, const unsigned char *privkey, int privkeylen);
 static int secp256k1_eckey_privkey_serialize(unsigned char *privkey, int *privkeylen, const secp256k1_scalar_t *key, int compressed);
 
 static int secp256k1_eckey_privkey_tweak_add(secp256k1_scalar_t *key, const secp256k1_scalar_t *tweak);
-static int secp256k1_eckey_pubkey_tweak_add(secp256k1_ge_t *key, const secp256k1_num_t *tweak);
+static int secp256k1_eckey_pubkey_tweak_add(secp256k1_ge_t *key, const secp256k1_scalar_t *tweak);
 static int secp256k1_eckey_privkey_tweak_mul(secp256k1_scalar_t *key, const secp256k1_scalar_t *tweak);
-static int secp256k1_eckey_pubkey_tweak_mul(secp256k1_ge_t *key, const secp256k1_num_t *tweak);
+static int secp256k1_eckey_pubkey_tweak_mul(secp256k1_ge_t *key, const secp256k1_scalar_t *tweak);
 
 #endif
diff --git a/src/secp256k1/src/eckey_impl.h b/src/secp256k1/src/eckey_impl.h
index 290b1f090..0f218ced9 100644
--- a/src/secp256k1/src/eckey_impl.h
+++ b/src/secp256k1/src/eckey_impl.h
@@ -9,7 +9,7 @@
 
 #include "eckey.h"
 
-#include "num.h"
+#include "scalar.h"
 #include "field.h"
 #include "group.h"
 #include "ecmult_gen.h"
@@ -17,12 +17,12 @@ static int secp256k1_eckey_pubkey_parse(secp256k1_ge_t *elem, const unsigned char *pub, int size) {
     if (size == 33 && (pub[0] == 0x02 || pub[0] == 0x03)) {
         secp256k1_fe_t x;
-        secp256k1_fe_set_b32(&x, pub+1);
-        return secp256k1_ge_set_xo(elem, &x, pub[0] == 0x03);
+        return secp256k1_fe_set_b32(&x, pub+1) && secp256k1_ge_set_xo(elem, &x, pub[0] == 0x03);
     } else if (size == 65 && (pub[0] == 0x04 || pub[0] == 0x06 || pub[0] == 0x07)) {
         secp256k1_fe_t x, y;
-        secp256k1_fe_set_b32(&x, pub+1);
-        secp256k1_fe_set_b32(&y, pub+33);
+        if (!secp256k1_fe_set_b32(&x, pub+1) || !secp256k1_fe_set_b32(&y, pub+33)) {
+            return 0;
+        }
         secp256k1_ge_set_xy(elem, &x, &y);
         if ((pub[0] == 0x06 || pub[0] == 0x07) && secp256k1_fe_is_odd(&y) != (pub[0] == 0x07))
             return 0;
@@ -154,17 +154,12 @@ static int secp256k1_eckey_privkey_tweak_add(secp256k1_scalar_t *key, const secp256k1_scalar_t *tweak) {
     return 1;
 }
 
-static int secp256k1_eckey_pubkey_tweak_add(secp256k1_ge_t *key, const secp256k1_num_t *tweak) {
-    if (secp256k1_num_cmp(tweak, &secp256k1_ge_consts->order) >= 0)
-        return 0;
-
+static int secp256k1_eckey_pubkey_tweak_add(secp256k1_ge_t *key, const secp256k1_scalar_t *tweak) {
     secp256k1_gej_t pt;
     secp256k1_gej_set_ge(&pt, key);
-    secp256k1_num_t one;
-    secp256k1_num_init(&one);
-    secp256k1_num_set_int(&one, 1);
+    secp256k1_scalar_t one;
+    secp256k1_scalar_set_int(&one, 1);
     secp256k1_ecmult(&pt, &pt, &one, tweak);
-    secp256k1_num_free(&one);
 
     if (secp256k1_gej_is_infinity(&pt))
         return 0;
@@ -180,19 +175,15 @@ static int secp256k1_eckey_privkey_tweak_mul(secp256k1_scalar_t *key, const secp256k1_scalar_t *tweak) {
     return 1;
 }
 
-static int secp256k1_eckey_pubkey_tweak_mul(secp256k1_ge_t *key, const secp256k1_num_t *tweak) {
-    if (secp256k1_num_is_zero(tweak))
-        return 0;
-    if (secp256k1_num_cmp(tweak, &secp256k1_ge_consts->order) >= 0)
+static int secp256k1_eckey_pubkey_tweak_mul(secp256k1_ge_t *key, const secp256k1_scalar_t *tweak) {
+    if (secp256k1_scalar_is_zero(tweak))
         return 0;
 
-    secp256k1_num_t zero;
-    secp256k1_num_init(&zero);
-    secp256k1_num_set_int(&zero, 0);
+    secp256k1_scalar_t zero;
+    secp256k1_scalar_set_int(&zero, 0);
     secp256k1_gej_t pt;
     secp256k1_gej_set_ge(&pt, key);
     secp256k1_ecmult(&pt, &pt, tweak, &zero);
-    secp256k1_num_free(&zero);
 
     secp256k1_ge_set_gej(key, &pt);
     return 1;
 }
diff --git a/src/secp256k1/src/ecmult.h b/src/secp256k1/src/ecmult.h
index e3cf18b68..15a7100a4 100644
--- a/src/secp256k1/src/ecmult.h
+++ b/src/secp256k1/src/ecmult.h
@@ -14,6 +14,6 @@ static void secp256k1_ecmult_start(void);
 static void secp256k1_ecmult_stop(void);
 
 /** Double multiply: R = na*A + ng*G */
-static void secp256k1_ecmult(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_num_t *na, const secp256k1_num_t *ng);
+static void secp256k1_ecmult(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_scalar_t *na, const secp256k1_scalar_t *ng);
 
 #endif
diff --git a/src/secp256k1/src/ecmult_gen_impl.h b/src/secp256k1/src/ecmult_gen_impl.h
index 07859ab04..af0ead522 100644
--- a/src/secp256k1/src/ecmult_gen_impl.h
+++ b/src/secp256k1/src/ecmult_gen_impl.h
@@ -23,8 +23,8 @@ typedef struct {
  *  precomputed (call it prec(i, n_i)). The formula now becomes sum(prec(i, n_i), i=0..63).
  *  None of the resulting prec group elements have a known scalar, and neither do any of
  *  the intermediate sums while computing a*G.
- *  To make memory access uniform, the bytes of prec(i, n_i) are sliced per value of n_i. */
-    unsigned char prec[64][sizeof(secp256k1_ge_t)][16]; /* prec[j][k][i] = k'th byte of (16^j * i * G + U_i) */
+ */
+    secp256k1_fe_t prec[64][16][2]; /* prec[j][i] = (16^j * i * G + U_i).{x,y} */
 } secp256k1_ecmult_gen_consts_t;
 
 static const secp256k1_ecmult_gen_consts_t *secp256k1_ecmult_gen_consts = NULL;
@@ -45,7 +45,7 @@ static void secp256k1_ecmult_gen_start(void) {
     {
         static const unsigned char nums_b32[32] = "The scalar for this x is unknown";
         secp256k1_fe_t nums_x;
-        secp256k1_fe_set_b32(&nums_x, nums_b32);
+        VERIFY_CHECK(secp256k1_fe_set_b32(&nums_x, nums_b32));
         secp256k1_ge_t nums_ge;
         VERIFY_CHECK(secp256k1_ge_set_xo(&nums_ge, &nums_x, 0));
         secp256k1_gej_set_ge(&nums_gej, &nums_ge);
@@ -81,9 +81,9 @@ static void secp256k1_ecmult_gen_start(void) {
     }
     for (int j=0; j<64; j++) {
         for (int i=0; i<16; i++) {
-            const unsigned char* raw = (const unsigned char*)(&prec[j*16 + i]);
-            for (size_t k=0; k<sizeof(secp256k1_ge_t); k++)
-                ret->prec[j][k][i] = raw[k];
+            VERIFY_CHECK(!secp256k1_ge_is_infinity(&prec[j*16 + i]));
+            ret->prec[j][i][0] = prec[j*16 + i].x;
+            ret->prec[j][i][1] = prec[j*16 + i].y;
         }
     }
 
@@ -104,11 +104,14 @@ static void secp256k1_ecmult_gen(secp256k1_gej_t *r, const secp256k1_scalar_t *gn) {
     const secp256k1_ecmult_gen_consts_t *c = secp256k1_ecmult_gen_consts;
     secp256k1_gej_set_infinity(r);
     secp256k1_ge_t add;
+    add.infinity = 0;
     int bits;
     for (int j=0; j<64; j++) {
         bits = secp256k1_scalar_get_bits(gn, j * 4, 4);
-        for (size_t k=0; k<sizeof(secp256k1_ge_t); k++)
-            ((unsigned char*)(&add))[k] = c->prec[j][k][bits];
+        for (int i=0; i<16; i++) {
+            secp256k1_fe_cmov(&add.x, &c->prec[j][i][0], i == bits);
+            secp256k1_fe_cmov(&add.y, &c->prec[j][i][1], i == bits);
+        }
         secp256k1_gej_add_ge(r, r, &add);
     }
     bits = 0;
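
The rewritten secp256k1_ecmult_gen above is the constant-time half of this change: instead of indexing the table with the secret nibble `bits`, it scans all 16 entries and keeps exactly one via secp256k1_fe_cmov. A minimal sketch of the same pattern on plain integers (hypothetical helper names, same masking idea):

    #include <stdint.h>

    /* flag is 0 or 1; flag + ~0 equals flag - 1, i.e. all-ones when flag == 0. */
    static void cmov_u32(uint32_t *r, uint32_t a, int flag) {
        uint32_t mask0 = (uint32_t)flag + ~((uint32_t)0);
        uint32_t mask1 = ~mask0;
        *r = (*r & mask0) | (a & mask1);
    }

    /* Touches every table entry regardless of 'index', so the memory access
     * pattern does not depend on the secret selector. */
    static uint32_t table_lookup_ct(const uint32_t table[16], int index) {
        uint32_t result = 0;
        for (int i = 0; i < 16; i++)
            cmov_u32(&result, table[i], i == index);
        return result;
    }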
diff --git a/src/secp256k1/src/ecmult_impl.h b/src/secp256k1/src/ecmult_impl.h
index 508902564..445b81593 100644
--- a/src/secp256k1/src/ecmult_impl.h
+++ b/src/secp256k1/src/ecmult_impl.h
@@ -7,8 +7,8 @@
 #ifndef _SECP256K1_ECMULT_IMPL_H_
 #define _SECP256K1_ECMULT_IMPL_H_
 
-#include "num.h"
 #include "group.h"
+#include "scalar.h"
 #include "ecmult.h"
 
 /* optimal for 128-bit and 256-bit exponents. */
@@ -16,7 +16,11 @@
 /** larger numbers may result in slightly better performance, at the cost of
     exponentially larger precomputed tables. WINDOW_G == 14 results in 640 KiB. */
+#ifdef USE_ENDOMORPHISM
 #define WINDOW_G 14
+#else
+#define WINDOW_G 15
+#endif
 
 /** Fill a table 'pre' with precomputed odd multiples of a. W determines the size of the table.
  *  pre will contains the values [1*a,3*a,5*a,...,(2^(w-1)-1)*a], so it needs place for
@@ -69,7 +73,9 @@ static void secp256k1_ecmult_table_precomp_ge_var(secp256k1_ge_t *pre, const secp256k1_gej_t *a, int w) {
 
 typedef struct {
     /* For accelerating the computation of a*P + b*G: */
     secp256k1_ge_t pre_g[ECMULT_TABLE_SIZE(WINDOW_G)];      /* odd multiples of the generator */
+#ifdef USE_ENDOMORPHISM
     secp256k1_ge_t pre_g_128[ECMULT_TABLE_SIZE(WINDOW_G)];  /* odd multiples of 2^128*generator */
+#endif
 } secp256k1_ecmult_consts_t;
 
 static const secp256k1_ecmult_consts_t *secp256k1_ecmult_consts = NULL;
 
@@ -85,14 +91,18 @@ static void secp256k1_ecmult_start(void) {
     const secp256k1_ge_t *g = &secp256k1_ge_consts->g;
     secp256k1_gej_t gj;
     secp256k1_gej_set_ge(&gj, g);
 
+#ifdef USE_ENDOMORPHISM
     /* calculate 2^128*generator */
     secp256k1_gej_t g_128j = gj;
     for (int i=0; i<128; i++)
         secp256k1_gej_double_var(&g_128j, &g_128j);
+#endif
 
     /* precompute the tables with odd multiples */
     secp256k1_ecmult_table_precomp_ge_var(ret->pre_g, &gj, WINDOW_G);
+#ifdef USE_ENDOMORPHISM
     secp256k1_ecmult_table_precomp_ge_var(ret->pre_g_128, &g_128j, WINDOW_G);
+#endif
 
     /* Set the global pointer to the precomputation table. */
     secp256k1_ecmult_consts = ret;
@@ -111,56 +121,62 @@ static void secp256k1_ecmult_stop(void) {
  *  with the following guarantees:
  *  - each wnaf[i] is either 0, or an odd integer between -(1<<(w-1) - 1) and (1<<(w-1) - 1)
  *  - two non-zero entries in wnaf are separated by at least w-1 zeroes.
- *  - the index of the highest non-zero entry in wnaf (=return value-1) is at most bits, where
- *    bits is the number of bits necessary to represent the absolute value of the input.
+ *  - the number of set values in wnaf is returned. This number is at most 256, and at most one more
+ *    than the number of bits in the (absolute value) of the input.
  */
-static int secp256k1_ecmult_wnaf(int *wnaf, const secp256k1_num_t *a, int w) {
-    int ret = 0;
-    int zeroes = 0;
-    secp256k1_num_t x;
-    secp256k1_num_copy(&x, a);
+static int secp256k1_ecmult_wnaf(int *wnaf, const secp256k1_scalar_t *a, int w) {
+    secp256k1_scalar_t s = *a;
+
     int sign = 1;
-    if (secp256k1_num_is_neg(&x)) {
+    if (secp256k1_scalar_get_bits(&s, 255, 1)) {
+        secp256k1_scalar_negate(&s, &s);
         sign = -1;
-        secp256k1_num_negate(&x);
     }
-    while (!secp256k1_num_is_zero(&x)) {
-        while (!secp256k1_num_is_odd(&x)) {
-            zeroes++;
-            secp256k1_num_shift(&x, 1);
+
+    int set_bits = 0;
+    int bit = 0;
+    while (bit < 256) {
+        if (secp256k1_scalar_get_bits(&s, bit, 1) == 0) {
+            bit++;
+            continue;
+        }
+        while (set_bits < bit) {
+            wnaf[set_bits++] = 0;
         }
-        int word = secp256k1_num_shift(&x, w);
-        while (zeroes) {
-            wnaf[ret++] = 0;
-            zeroes--;
+        int now = w;
+        if (bit + now > 256) {
+            now = 256 - bit;
        }
+        int word = secp256k1_scalar_get_bits_var(&s, bit, now);
         if (word & (1 << (w-1))) {
-            secp256k1_num_inc(&x);
-            wnaf[ret++] = sign * (word - (1 << w));
+            secp256k1_scalar_add_bit(&s, bit + w);
+            wnaf[set_bits++] = sign * (word - (1 << w));
         } else {
-            wnaf[ret++] = sign * word;
+            wnaf[set_bits++] = sign * word;
         }
-        zeroes = w-1;
+        bit += now;
     }
-    return ret;
+    return set_bits;
 }
 
-static void secp256k1_ecmult(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_num_t *na, const secp256k1_num_t *ng) {
+static void secp256k1_ecmult(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_scalar_t *na, const secp256k1_scalar_t *ng) {
     const secp256k1_ecmult_consts_t *c = secp256k1_ecmult_consts;
 
 #ifdef USE_ENDOMORPHISM
-    secp256k1_num_t na_1, na_lam;
+    secp256k1_scalar_t na_1, na_lam;
     /* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */
-    secp256k1_gej_split_exp_var(&na_1, &na_lam, na);
+    secp256k1_scalar_split_lambda_var(&na_1, &na_lam, na);
 
     /* build wnaf representation for na_1 and na_lam. */
-    int wnaf_na_1[129];   int bits_na_1   = secp256k1_ecmult_wnaf(wnaf_na_1,   &na_1,   WINDOW_A);
-    int wnaf_na_lam[129]; int bits_na_lam = secp256k1_ecmult_wnaf(wnaf_na_lam, &na_lam, WINDOW_A);
+    int wnaf_na_1[130];   int bits_na_1   = secp256k1_ecmult_wnaf(wnaf_na_1,   &na_1,   WINDOW_A);
+    int wnaf_na_lam[130]; int bits_na_lam = secp256k1_ecmult_wnaf(wnaf_na_lam, &na_lam, WINDOW_A);
+    VERIFY_CHECK(bits_na_1 <= 130);
+    VERIFY_CHECK(bits_na_lam <= 130);
     int bits = bits_na_1;
     if (bits_na_lam > bits) bits = bits_na_lam;
 #else
     /* build wnaf representation for na. */
-    int wnaf_na[257]; int bits_na = secp256k1_ecmult_wnaf(wnaf_na, na, WINDOW_A);
+    int wnaf_na[256]; int bits_na = secp256k1_ecmult_wnaf(wnaf_na, na, WINDOW_A);
     int bits = bits_na;
 #endif
 
@@ -172,19 +188,22 @@ static void secp256k1_ecmult(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_scalar_t *na, const secp256k1_scalar_t *ng) {
     secp256k1_gej_t pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
     for (int i=0; i<ECMULT_TABLE_SIZE(WINDOW_A); i++)
         secp256k1_gej_mul_lambda(&pre_a_lam[i], &pre_a[i]);
-#endif
 
     /* Splitted G factors. */
-    secp256k1_num_t ng_1, ng_128;
+    secp256k1_scalar_t ng_1, ng_128;
 
     /* split ng into ng_1 and ng_128 (where gn = gn_1 + gn_128*2^128, and gn_1 and gn_128 are ~128 bit) */
-    secp256k1_num_split(&ng_1, &ng_128, ng, 128);
+    secp256k1_scalar_split_128(&ng_1, &ng_128, ng);
 
     /* Build wnaf representation for ng_1 and ng_128 */
     int wnaf_ng_1[129];   int bits_ng_1   = secp256k1_ecmult_wnaf(wnaf_ng_1,   &ng_1,   WINDOW_G);
     int wnaf_ng_128[129]; int bits_ng_128 = secp256k1_ecmult_wnaf(wnaf_ng_128, &ng_128, WINDOW_G);
     if (bits_ng_1 > bits) bits = bits_ng_1;
     if (bits_ng_128 > bits) bits = bits_ng_128;
+#else
+    int wnaf_ng[257]; int bits_ng = secp256k1_ecmult_wnaf(wnaf_ng, ng, WINDOW_G);
+    if (bits_ng > bits) bits = bits_ng;
+#endif
 
     secp256k1_gej_set_infinity(r);
     secp256k1_gej_t tmpj;
@@ -202,12 +221,6 @@ static void secp256k1_ecmult(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_scalar_t *na, const secp256k1_scalar_t *ng) {
             ECMULT_TABLE_GET_GEJ(&tmpj, pre_a_lam, n, WINDOW_A);
             secp256k1_gej_add_var(r, r, &tmpj);
         }
-#else
-        if (i < bits_na && (n = wnaf_na[i])) {
-            ECMULT_TABLE_GET_GEJ(&tmpj, pre_a, n, WINDOW_A);
-            secp256k1_gej_add_var(r, r, &tmpj);
-        }
-#endif
         if (i < bits_ng_1 && (n = wnaf_ng_1[i])) {
             ECMULT_TABLE_GET_GE(&tmpa, c->pre_g, n, WINDOW_G);
             secp256k1_gej_add_ge_var(r, r, &tmpa);
         }
@@ -216,6 +229,16 @@ static void secp256k1_ecmult(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_scalar_t *na, const secp256k1_scalar_t *ng) {
         if (i < bits_ng_128 && (n = wnaf_ng_128[i])) {
             ECMULT_TABLE_GET_GE(&tmpa, c->pre_g_128, n, WINDOW_G);
             secp256k1_gej_add_ge_var(r, r, &tmpa);
        }
+#else
+        if (i < bits_na && (n = wnaf_na[i])) {
+            ECMULT_TABLE_GET_GEJ(&tmpj, pre_a, n, WINDOW_A);
+            secp256k1_gej_add_var(r, r, &tmpj);
+        }
+        if (i < bits_ng && (n = wnaf_ng[i])) {
+            ECMULT_TABLE_GET_GE(&tmpa, c->pre_g, n, WINDOW_G);
+            secp256k1_gej_add_ge_var(r, r, &tmpa);
+        }
+#endif
     }
 }
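
For intuition about the wNAF guarantees documented above, here is a toy encoder for small unsigned values (a hypothetical helper; the library version works on 256-bit scalars and handles the sign by negating the scalar first):

    #include <stdio.h>

    static int wnaf_small(int *out, unsigned int x, int w) {
        int n = 0;
        while (x) {
            int digit = 0;
            if (x & 1) {
                digit = (int)(x & ((1u << w) - 1));
                if (digit & (1 << (w - 1)))
                    digit -= 1 << w;        /* pick the odd negative residue */
                x -= (unsigned int)digit;   /* x is now divisible by 2^w */
            }
            out[n++] = digit;
            x >>= 1;
        }
        return n;
    }

    int main(void) {
        int d[40];
        int n = wnaf_small(d, 255, 5);      /* 255 = 2^8 - 1 */
        for (int i = 0; i < n; i++)
            printf("%d ", d[i]);            /* prints: -1 0 0 0 0 0 0 0 1 */
        printf("\n");
        return 0;
    }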
diff --git a/src/secp256k1/src/field.h b/src/secp256k1/src/field.h
index c7feead90..0cdf0fb47 100644
--- a/src/secp256k1/src/field.h
+++ b/src/secp256k1/src/field.h
@@ -33,7 +33,10 @@
 #endif
 
 typedef struct {
+#ifndef USE_NUM_NONE
     secp256k1_num_t p;
+#endif
+    secp256k1_fe_t order;
 } secp256k1_fe_consts_t;
 
 static const secp256k1_fe_consts_t *secp256k1_fe_consts = NULL;
@@ -59,8 +62,11 @@ static int secp256k1_fe_is_odd(const secp256k1_fe_t *a);
 /** Compare two field elements. Requires both inputs to be normalized */
 static int secp256k1_fe_equal(const secp256k1_fe_t *a, const secp256k1_fe_t *b);
 
-/** Set a field element equal to 32-byte big endian value. Resulting field element is normalized. */
-static void secp256k1_fe_set_b32(secp256k1_fe_t *r, const unsigned char *a);
+/** Compare two field elements. Requires both inputs to be normalized */
+static int secp256k1_fe_cmp_var(const secp256k1_fe_t *a, const secp256k1_fe_t *b);
+
+/** Set a field element equal to 32-byte big endian value. If successful, the resulting field element is normalized. */
+static int secp256k1_fe_set_b32(secp256k1_fe_t *r, const unsigned char *a);
 
 /** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
 static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe_t *a);
@@ -78,7 +84,7 @@ static void secp256k1_fe_add(secp256k1_fe_t *r, const secp256k1_fe_t *a);
 
 /** Sets a field element to be the product of two others. Requires the inputs' magnitudes to be at most 8.
  *  The output magnitude is 1 (but not guaranteed to be normalized). */
-static void secp256k1_fe_mul(secp256k1_fe_t *r, const secp256k1_fe_t *a, const secp256k1_fe_t *b);
+static void secp256k1_fe_mul(secp256k1_fe_t *r, const secp256k1_fe_t *a, const secp256k1_fe_t * SECP256K1_RESTRICT b);
 
 /** Sets a field element to be the square of another. Requires the input's magnitude to be at most 8.
  *  The output magnitude is 1 (but not guaranteed to be normalized). */
@@ -104,11 +110,13 @@ static void secp256k1_fe_inv_all(size_t len, secp256k1_fe_t r[len], const secp256k1_fe_t a[len]);
 /** Potentially faster version of secp256k1_fe_inv_all, without constant-time guarantee. */
 static void secp256k1_fe_inv_all_var(size_t len, secp256k1_fe_t r[len], const secp256k1_fe_t a[len]);
 
-
 /** Convert a field element to a hexadecimal string. */
 static void secp256k1_fe_get_hex(char *r, int *rlen, const secp256k1_fe_t *a);
 
 /** Convert a hexadecimal string to a field element. */
-static void secp256k1_fe_set_hex(secp256k1_fe_t *r, const char *a, int alen);
+static int secp256k1_fe_set_hex(secp256k1_fe_t *r, const char *a, int alen);
+
+/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */
+static void secp256k1_fe_cmov(secp256k1_fe_t *r, const secp256k1_fe_t *a, int flag);
 
 #endif
diff --git a/src/secp256k1/src/field_10x26_impl.h b/src/secp256k1/src/field_10x26_impl.h
index c0f1be0b2..c4403fba2 100644
--- a/src/secp256k1/src/field_10x26_impl.h
+++ b/src/secp256k1/src/field_10x26_impl.h
@@ -152,7 +152,21 @@ SECP256K1_INLINE static int secp256k1_fe_equal(const secp256k1_fe_t *a, const secp256k1_fe_t *b) {
          | (t[5]^u[5]) | (t[6]^u[6]) | (t[7]^u[7]) | (t[8]^u[8]) | (t[9]^u[9])) == 0;
 }
 
-static void secp256k1_fe_set_b32(secp256k1_fe_t *r, const unsigned char *a) {
+static int secp256k1_fe_cmp_var(const secp256k1_fe_t *a, const secp256k1_fe_t *b) {
+#ifdef VERIFY
+    VERIFY_CHECK(a->normalized);
+    VERIFY_CHECK(b->normalized);
+    secp256k1_fe_verify(a);
+    secp256k1_fe_verify(b);
+#endif
+    for (int i = 9; i >= 0; i--) {
+        if (a->n[i] > b->n[i]) return 1;
+        if (a->n[i] < b->n[i]) return -1;
+    }
+    return 0;
+}
+
+static int secp256k1_fe_set_b32(secp256k1_fe_t *r, const unsigned char *a) {
     r->n[0] = r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0;
     r->n[5] = r->n[6] = r->n[7] = r->n[8] = r->n[9] = 0;
     for (int i=0; i<32; i++) {
@@ -162,11 +176,15 @@ static int secp256k1_fe_set_b32(secp256k1_fe_t *r, const unsigned char *a) {
             r->n[limb] |= (uint32_t)((a[31-i] >> (2*j)) & 0x3) << shift;
         }
     }
+    if (r->n[9] == 0x3FFFFFUL && (r->n[8] & r->n[7] & r->n[6] & r->n[5] & r->n[4] & r->n[3] & r->n[2]) == 0x3FFFFFFUL && (r->n[1] + 0x40UL + ((r->n[0] + 0x3D1UL) >> 26)) > 0x3FFFFFFUL) {
+        return 0;
+    }
 #ifdef VERIFY
     r->magnitude = 1;
     r->normalized = 1;
     secp256k1_fe_verify(r);
 #endif
+    return 1;
 }
 
 /** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
@@ -253,7 +271,7 @@ SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
 #define VERIFY_BITS(x, n) do { } while(0)
 #endif
 
-SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint32_t *a, const uint32_t *b, uint32_t *r) {
+SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b, uint32_t *r) {
     VERIFY_BITS(a[0], 30);
     VERIFY_BITS(a[1], 30);
     VERIFY_BITS(a[2], 30);
@@ -853,12 +871,13 @@ SECP256K1_INLINE static void secp256k1_fe_sqr_inner(const uint32_t *a, uint32_t *r) {
 }
 
-static void secp256k1_fe_mul(secp256k1_fe_t *r, const secp256k1_fe_t *a, const secp256k1_fe_t *b) {
+static void secp256k1_fe_mul(secp256k1_fe_t *r, const secp256k1_fe_t *a, const secp256k1_fe_t * SECP256K1_RESTRICT b) {
 #ifdef VERIFY
     VERIFY_CHECK(a->magnitude <= 8);
     VERIFY_CHECK(b->magnitude <= 8);
     secp256k1_fe_verify(a);
     secp256k1_fe_verify(b);
+    VERIFY_CHECK(r != b);
 #endif
     secp256k1_fe_mul_inner(a->n, b->n, r->n);
 #ifdef VERIFY
@@ -881,4 +900,24 @@ static void secp256k1_fe_sqr(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
 #endif
 }
 
+static void secp256k1_fe_cmov(secp256k1_fe_t *r, const secp256k1_fe_t *a, int flag) {
+    uint32_t mask0 = flag + ~((uint32_t)0), mask1 = ~mask0;
+    r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
+    r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
+    r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
+    r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
+    r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
+    r->n[5] = (r->n[5] & mask0) | (a->n[5] & mask1);
+    r->n[6] = (r->n[6] & mask0) | (a->n[6] & mask1);
+    r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1);
+    r->n[8] = (r->n[8] & mask0) | (a->n[8] & mask1);
+    r->n[9] = (r->n[9] & mask0) | (a->n[9] & mask1);
+#ifdef VERIFY
+    if (flag) {
+        r->magnitude = a->magnitude;
+        r->normalized = a->normalized;
+    }
+#endif
+}
+
 #endif
diff --git a/src/secp256k1/src/field_5x52_impl.h b/src/secp256k1/src/field_5x52_impl.h
index d1b06d05a..75b210eaf 100644
--- a/src/secp256k1/src/field_5x52_impl.h
+++ b/src/secp256k1/src/field_5x52_impl.h
@@ -150,7 +150,21 @@ SECP256K1_INLINE static int secp256k1_fe_equal(const secp256k1_fe_t *a, const secp256k1_fe_t *b) {
     return ((t[0]^u[0]) | (t[1]^u[1]) | (t[2]^u[2]) | (t[3]^u[3]) | (t[4]^u[4])) == 0;
 }
 
-static void secp256k1_fe_set_b32(secp256k1_fe_t *r, const unsigned char *a) {
+static int secp256k1_fe_cmp_var(const secp256k1_fe_t *a, const secp256k1_fe_t *b) {
+#ifdef VERIFY
+    VERIFY_CHECK(a->normalized);
+    VERIFY_CHECK(b->normalized);
+    secp256k1_fe_verify(a);
+    secp256k1_fe_verify(b);
+#endif
+    for (int i = 4; i >= 0; i--) {
+        if (a->n[i] > b->n[i]) return 1;
+        if (a->n[i] < b->n[i]) return -1;
+    }
+    return 0;
+}
+
+static int secp256k1_fe_set_b32(secp256k1_fe_t *r, const unsigned char *a) {
     r->n[0] = r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0;
     for (int i=0; i<32; i++) {
         for (int j=0; j<2; j++) {
@@ -159,11 +173,15 @@ static int secp256k1_fe_set_b32(secp256k1_fe_t *r, const unsigned char *a) {
             r->n[limb] |= (uint64_t)((a[31-i] >> (4*j)) & 0xF) << shift;
         }
     }
+    if (r->n[4] == 0x0FFFFFFFFFFFFULL && (r->n[3] & r->n[2] & r->n[1]) == 0xFFFFFFFFFFFFFULL && r->n[0] >= 0xFFFFEFFFFFC2FULL) {
+        return 0;
+    }
 #ifdef VERIFY
     r->magnitude = 1;
     r->normalized = 1;
     secp256k1_fe_verify(r);
 #endif
+    return 1;
 }
 
 /** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
@@ -229,12 +247,13 @@ SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
 #endif
 }
 
-static void secp256k1_fe_mul(secp256k1_fe_t *r, const secp256k1_fe_t *a, const secp256k1_fe_t *b) {
+static void secp256k1_fe_mul(secp256k1_fe_t *r, const secp256k1_fe_t *a, const secp256k1_fe_t * SECP256K1_RESTRICT b) {
 #ifdef VERIFY
     VERIFY_CHECK(a->magnitude <= 8);
     VERIFY_CHECK(b->magnitude <= 8);
     secp256k1_fe_verify(a);
     secp256k1_fe_verify(b);
+    VERIFY_CHECK(r != b);
 #endif
     secp256k1_fe_mul_inner(a->n, b->n, r->n);
 #ifdef VERIFY
@@ -257,4 +276,19 @@ static void secp256k1_fe_sqr(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
 #endif
 }
 
+static void secp256k1_fe_cmov(secp256k1_fe_t *r, const secp256k1_fe_t *a, int flag) {
+    uint64_t mask0 = flag + ~((uint64_t)0), mask1 = ~mask0;
+    r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
+    r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
+    r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
+    r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
+    r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
+#ifdef VERIFY
+    if (flag) {
+        r->magnitude = a->magnitude;
+        r->normalized = a->normalized;
+    }
+#endif
+}
+
 #endif
diff --git a/src/secp256k1/src/field_5x52_int128_impl.h b/src/secp256k1/src/field_5x52_int128_impl.h
index c47642867..e552fb431 100644
--- a/src/secp256k1/src/field_5x52_int128_impl.h
+++ b/src/secp256k1/src/field_5x52_int128_impl.h
@@ -15,7 +15,7 @@
 #define VERIFY_BITS(x, n) do { } while(0)
 #endif
 
-SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint64_t *a, const uint64_t *b, uint64_t *r) {
+SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b, uint64_t *r) {
     VERIFY_BITS(a[0], 56);
     VERIFY_BITS(a[1], 56);
     VERIFY_BITS(a[2], 56);
@@ -26,6 +26,7 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b, uint64_t *r) {
     VERIFY_BITS(b[2], 56);
     VERIFY_BITS(b[3], 56);
     VERIFY_BITS(b[4], 52);
+    VERIFY_CHECK(r != b);
 
     const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL;
 
     /* [... a b c] is a shorthand for ... + a<<104 + b<<52 + c<<0 mod n.
@@ -33,15 +34,17 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b, uint64_t *r) {
     *  Note that [x 0 0 0 0 0] = [x*R].
     */
 
+    uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4];
+
     __int128 c, d;
 
-    d = (__int128)a[0] * b[3]
-      + (__int128)a[1] * b[2]
-      + (__int128)a[2] * b[1]
-      + (__int128)a[3] * b[0];
+    d = (__int128)a0 * b[3]
+      + (__int128)a1 * b[2]
+      + (__int128)a2 * b[1]
+      + (__int128)a3 * b[0];
     VERIFY_BITS(d, 114);
     /* [d 0 0 0] = [p3 0 0 0] */
-    c = (__int128)a[4] * b[4];
+    c = (__int128)a4 * b[4];
     VERIFY_BITS(c, 112);
     /* [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
     d += (c & M) * R; c >>= 52;
@@ -53,11 +56,11 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b, uint64_t *r) {
     VERIFY_BITS(d, 63);
     /* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
 
-    d += (__int128)a[0] * b[4]
-       + (__int128)a[1] * b[3]
-       + (__int128)a[2] * b[2]
-       + (__int128)a[3] * b[1]
-       + (__int128)a[4] * b[0];
+    d += (__int128)a0 * b[4]
+       + (__int128)a1 * b[3]
+       + (__int128)a2 * b[2]
+       + (__int128)a3 * b[1]
+       + (__int128)a4 * b[0];
     VERIFY_BITS(d, 115);
     /* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
     d += c * R;
@@ -72,13 +75,13 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b, uint64_t *r) {
     VERIFY_BITS(t4, 48);
     /* [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
 
-    c = (__int128)a[0] * b[0];
+    c = (__int128)a0 * b[0];
     VERIFY_BITS(c, 112);
     /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] */
-    d += (__int128)a[1] * b[4]
-       + (__int128)a[2] * b[3]
-       + (__int128)a[3] * b[2]
-       + (__int128)a[4] * b[1];
+    d += (__int128)a1 * b[4]
+       + (__int128)a2 * b[3]
+       + (__int128)a3 * b[2]
+       + (__int128)a4 * b[1];
     VERIFY_BITS(d, 115);
     /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
     uint64_t u0 = d & M; d >>= 52;
@@ -92,48 +95,43 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b, uint64_t *r) {
     c += (__int128)u0 * (R >> 4);
     VERIFY_BITS(c, 115);
     /* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
-    uint64_t t0 = c & M; c >>= 52;
-    VERIFY_BITS(t0, 52);
+    r[0] = c & M; c >>= 52;
+    VERIFY_BITS(r[0], 52);
     VERIFY_BITS(c, 61);
-    /* [d 0 t4 t3 0 c t0] = [p8 0 0 p5 p4 p3 0 0 p0] */
+    /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 0 p0] */
 
-    c += (__int128)a[0] * b[1]
-       + (__int128)a[1] * b[0];
+    c += (__int128)a0 * b[1]
+       + (__int128)a1 * b[0];
     VERIFY_BITS(c, 114);
-    /* [d 0 t4 t3 0 c t0] = [p8 0 0 p5 p4 p3 0 p1 p0] */
-    d += (__int128)a[2] * b[4]
-       + (__int128)a[3] * b[3]
-       + (__int128)a[4] * b[2];
+    /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 p1 p0] */
+    d += (__int128)a2 * b[4]
+       + (__int128)a3 * b[3]
+       + (__int128)a4 * b[2];
     VERIFY_BITS(d, 114);
-    /* [d 0 t4 t3 0 c t0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
+    /* [d 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
     c += (d & M) * R; d >>= 52;
     VERIFY_BITS(c, 115);
     VERIFY_BITS(d, 62);
-    /* [d 0 0 t4 t3 0 c t0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
-    uint64_t t1 = c & M; c >>= 52;
-    VERIFY_BITS(t1, 52);
+    /* [d 0 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
+    r[1] = c & M; c >>= 52;
+    VERIFY_BITS(r[1], 52);
     VERIFY_BITS(c, 63);
-    /* [d 0 0 t4 t3 c t1 t0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
+    /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
 
-    c += (__int128)a[0] * b[2]
-       + (__int128)a[1] * b[1]
-       + (__int128)a[2] * b[0];
+    c += (__int128)a0 * b[2]
+       + (__int128)a1 * b[1]
+       + (__int128)a2 * b[0];
     VERIFY_BITS(c, 114);
-    /* [d 0 0 t4 t3 c t1 t0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */
-    d += (__int128)a[3] * b[4]
-       + (__int128)a[4] * b[3];
+    /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */
+    d += (__int128)a3 * b[4]
+       + (__int128)a4 * b[3];
     VERIFY_BITS(d, 114);
-    /* [d 0 0 t4 t3 c t1 t0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+    /* [d 0 0 t4 t3 c t1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
     c += (d & M) * R; d >>= 52;
     VERIFY_BITS(c, 115);
     VERIFY_BITS(d, 62);
-    /* [d 0 0 0 t4 t3 c t1 t0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
+    /* [d 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
 
-    r[0] = t0;
-    VERIFY_BITS(r[0], 52);
-    /* [d 0 0 0 t4 t3 c t1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
-    r[1] = t1;
-    VERIFY_BITS(r[1], 52);
     /* [d 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
     r[2] = c & M; c >>= 52;
     VERIFY_BITS(r[2], 52);
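
The VERIFY_CHECK(r != b) and SECP256K1_RESTRICT annotations threaded through these field files exist because the rewritten multiplication caches a[] in locals (so r may alias a) but writes r[0] and r[1] before it has finished reading b. A toy two-limb model of the hazard (hypothetical code, only to show the aliasing rule):

    #include <stdint.h>
    #include <stdio.h>

    static void mul2(uint32_t *r, const uint32_t *a, const uint32_t *b) {
        uint32_t a0 = a[0], a1 = a[1];   /* a is cached, so r == a is fine   */
        r[0] = a0 * b[0];                /* if r == b, this clobbers b[0]... */
        r[1] = a0 * b[1] + a1 * b[0];    /* ...which is still read here      */
    }

    int main(void) {
        uint32_t a[2] = {2, 3}, b[2] = {5, 7};
        mul2(b, a, b);                   /* output aliases b */
        printf("%u %u\n", b[0], b[1]);   /* prints 10 44, not the correct 10 29 */
        return 0;
    }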
diff --git a/src/secp256k1/src/field_gmp_impl.h b/src/secp256k1/src/field_gmp_impl.h
index af4728e5b..8af7dd68f 100644
--- a/src/secp256k1/src/field_gmp_impl.h
+++ b/src/secp256k1/src/field_gmp_impl.h
@@ -75,7 +75,15 @@ SECP256K1_INLINE static int secp256k1_fe_equal(const secp256k1_fe_t *a, const secp256k1_fe_t *b) {
     return ret;
 }
 
-static void secp256k1_fe_set_b32(secp256k1_fe_t *r, const unsigned char *a) {
+SECP256K1_INLINE static int secp256k1_fe_cmp_var(const secp256k1_fe_t *a, const secp256k1_fe_t *b) {
+    for (int i=FIELD_LIMBS; i>=0; i--) {
+        if (a->n[i] > b->n[i]) return 1;
+        if (a->n[i] < b->n[i]) return -1;
+    }
+    return 0;
+}
+
+static int secp256k1_fe_set_b32(secp256k1_fe_t *r, const unsigned char *a) {
     for (int i=0; i<FIELD_LIMBS+1; i++)
         r->n[i] = 0;
     for (int i=0; i<256; i++) {
@@ -83,6 +91,7 @@ static int secp256k1_fe_set_b32(secp256k1_fe_t *r, const unsigned char *a) {
         int shift = i%GMP_NUMB_BITS;
         r->n[limb] |= (mp_limb_t)((a[31-i/8] >> (i%8)) & 0x1) << shift;
     }
+    return (mpn_cmp(r->n, secp256k1_field_p, FIELD_LIMBS) < 0);
 }
 
 /** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
@@ -142,7 +151,8 @@ static void secp256k1_fe_reduce(secp256k1_fe_t *r, mp_limb_t *tmp) {
     r->n[FIELD_LIMBS] = mpn_add(r->n, tmp, FIELD_LIMBS, q, 1+(33+GMP_NUMB_BITS-1)/GMP_NUMB_BITS);
 }
 
-static void secp256k1_fe_mul(secp256k1_fe_t *r, const secp256k1_fe_t *a, const secp256k1_fe_t *b) {
+static void secp256k1_fe_mul(secp256k1_fe_t *r, const secp256k1_fe_t *a, const secp256k1_fe_t * SECP256K1_RESTRICT b) {
+    VERIFY_CHECK(r != b);
     secp256k1_fe_t ac = *a;
     secp256k1_fe_t bc = *b;
     secp256k1_fe_normalize(&ac);
@@ -160,4 +170,11 @@ static void secp256k1_fe_sqr(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
     secp256k1_fe_reduce(r, tmp);
 }
 
+static void secp256k1_fe_cmov(secp256k1_fe_t *r, const secp256k1_fe_t *a, int flag) {
+    mp_limb_t mask0 = flag + ~((mp_limb_t)0), mask1 = ~mask0;
+    for (int i = 0; i <= FIELD_LIMBS; i++) {
+        r->n[i] = (r->n[i] & mask0) | (a->n[i] & mask1);
+    }
+}
+
 #endif
diff --git a/src/secp256k1/src/field_impl.h b/src/secp256k1/src/field_impl.h
index 3a31e1844..4d25e5371 100644
--- a/src/secp256k1/src/field_impl.h
+++ b/src/secp256k1/src/field_impl.h
@@ -41,7 +41,7 @@ static void secp256k1_fe_get_hex(char *r, int *rlen, const secp256k1_fe_t *a) {
     r[64] = 0x00;
 }
 
-static void secp256k1_fe_set_hex(secp256k1_fe_t *r, const char *a, int alen) {
+static int secp256k1_fe_set_hex(secp256k1_fe_t *r, const char *a, int alen) {
     unsigned char tmp[32] = {};
     static const int cvt[256] = {0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0,
                                  0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0,
@@ -63,7 +63,7 @@ static int secp256k1_fe_set_hex(secp256k1_fe_t *r, const char *a, int alen) {
         if (alen > i*2)
             tmp[32 - alen/2 + i] = (cvt[(unsigned char)a[2*i]] << 4) + cvt[(unsigned char)a[2*i+1]];
     }
-    secp256k1_fe_set_b32(r, tmp);
+    return secp256k1_fe_set_b32(r, tmp);
 }
 
 static int secp256k1_fe_sqrt(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
@@ -197,7 +197,7 @@ static void secp256k1_fe_inv(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
     for (int j=0; j<3; j++) secp256k1_fe_sqr(&t1, &t1);
     secp256k1_fe_mul(&t1, &t1, &x2);
     for (int j=0; j<2; j++) secp256k1_fe_sqr(&t1, &t1);
-    secp256k1_fe_mul(r, &t1, a);
+    secp256k1_fe_mul(r, a, &t1);
 }
 
 static void secp256k1_fe_inv_var(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
@@ -212,7 +212,7 @@ static void secp256k1_fe_inv_var(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
     secp256k1_num_set_bin(&n, b, 32);
     secp256k1_num_mod_inverse(&n, &n, &secp256k1_fe_consts->p);
     secp256k1_num_get_bin(b, 32, &n);
-    secp256k1_fe_set_b32(r, b);
+    VERIFY_CHECK(secp256k1_fe_set_b32(r, b));
 #else
 #error "Please select field inverse implementation"
 #endif
@@ -267,16 +267,20 @@ static void secp256k1_fe_inv_all_var(size_t len, secp256k1_fe_t r[len], const secp256k1_fe_t a[len]) {
 }
 
 static void secp256k1_fe_start(void) {
+#ifndef USE_NUM_NONE
     static const unsigned char secp256k1_fe_consts_p[] = {
         0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
         0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
         0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
         0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
     };
+#endif
     if (secp256k1_fe_consts == NULL) {
         secp256k1_fe_inner_start();
         secp256k1_fe_consts_t *ret = (secp256k1_fe_consts_t*)malloc(sizeof(secp256k1_fe_consts_t));
+#ifndef USE_NUM_NONE
         secp256k1_num_set_bin(&ret->p, secp256k1_fe_consts_p, sizeof(secp256k1_fe_consts_p));
+#endif
         secp256k1_fe_consts = ret;
     }
 }
diff --git a/src/secp256k1/src/group.h b/src/secp256k1/src/group.h
index ba0254982..0f14bd25f 100644
--- a/src/secp256k1/src/group.h
+++ b/src/secp256k1/src/group.h
@@ -27,14 +27,11 @@ typedef struct {
 
 /** Global constants related to the group */
 typedef struct {
-    secp256k1_num_t order; /* the order of the curve (= order of its generator) */
-    secp256k1_num_t half_order; /* half the order of the curve (= order of its generator) */
     secp256k1_ge_t g; /* the generator point */
 
 #ifdef USE_ENDOMORPHISM
     /* constants related to secp256k1's efficiently computable endomorphism */
     secp256k1_fe_t beta;
-    secp256k1_num_t lambda, a1b2, b1, a2;
 #endif
 } secp256k1_ge_consts_t;
 
@@ -112,10 +109,6 @@ static void secp256k1_gej_get_hex(char *r, int *rlen, const secp256k1_gej_t *a);
 #ifdef USE_ENDOMORPHISM
 /** Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast. */
 static void secp256k1_gej_mul_lambda(secp256k1_gej_t *r, const secp256k1_gej_t *a);
-
-/** Find r1 and r2 such that r1+r2*lambda = a, and r1 and r2 are maximum 128 bits long (given that a is
-    not more than 256 bits). */
-static void secp256k1_gej_split_exp_var(secp256k1_num_t *r1, secp256k1_num_t *r2, const secp256k1_num_t *a);
 #endif
 
 /** Clear a secp256k1_gej_t to prevent leaking sensitive information. */
@@ -124,5 +117,4 @@ static void secp256k1_gej_clear(secp256k1_gej_t *r);
 
 /** Clear a secp256k1_ge_t to prevent leaking sensitive information. */
 static void secp256k1_ge_clear(secp256k1_ge_t *r);
-
 #endif
diff --git a/src/secp256k1/src/group_impl.h b/src/secp256k1/src/group_impl.h
index 1edbc6e09..cbd0d8c4f 100644
--- a/src/secp256k1/src/group_impl.h
+++ b/src/secp256k1/src/group_impl.h
@@ -208,29 +208,25 @@ static int secp256k1_ge_is_valid(const secp256k1_ge_t *a) {
 }
 
 static void secp256k1_gej_double_var(secp256k1_gej_t *r, const secp256k1_gej_t *a) {
-    if (a->infinity) {
-        r->infinity = 1;
-        return;
-    }
-
-    secp256k1_fe_t t5 = a->y;
-    secp256k1_fe_normalize(&t5);
-    if (secp256k1_fe_is_zero(&t5)) {
-        r->infinity = 1;
+    // For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity,
+    // Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have
+    // y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p.
+    r->infinity = a->infinity;
+    if (r->infinity) {
         return;
     }
 
     secp256k1_fe_t t1,t2,t3,t4;
-    secp256k1_fe_mul(&r->z, &t5, &a->z);
+    secp256k1_fe_mul(&r->z, &a->z, &a->y);
     secp256k1_fe_mul_int(&r->z, 2);       /* Z' = 2*Y*Z (2) */
     secp256k1_fe_sqr(&t1, &a->x);
     secp256k1_fe_mul_int(&t1, 3);         /* T1 = 3*X^2 (3) */
     secp256k1_fe_sqr(&t2, &t1);           /* T2 = 9*X^4 (1) */
-    secp256k1_fe_sqr(&t3, &t5);
+    secp256k1_fe_sqr(&t3, &a->y);
     secp256k1_fe_mul_int(&t3, 2);         /* T3 = 2*Y^2 (2) */
     secp256k1_fe_sqr(&t4, &t3);
     secp256k1_fe_mul_int(&t4, 2);         /* T4 = 8*Y^4 (2) */
-    secp256k1_fe_mul(&t3, &a->x, &t3);    /* T3 = 2*X*Y^2 (1) */
+    secp256k1_fe_mul(&t3, &t3, &a->x);    /* T3 = 2*X*Y^2 (1) */
     r->x = t3;
     secp256k1_fe_mul_int(&r->x, 4);       /* X' = 8*X*Y^2 (4) */
     secp256k1_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */
@@ -241,7 +237,6 @@ static void secp256k1_gej_double_var(secp256k1_gej_t *r, const secp256k1_gej_t *a) {
     secp256k1_fe_mul(&r->y, &t1, &t3);    /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */
     secp256k1_fe_negate(&t2, &t4, 2);     /* T2 = -8*Y^4 (3) */
     secp256k1_fe_add(&r->y, &t2);         /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */
-    r->infinity = 0;
 }
 
 static void secp256k1_gej_add_var(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_gej_t *b) {
@@ -342,7 +337,7 @@ static void secp256k1_gej_add_ge(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_ge_t *b) {
  *
  *  Substituting x_i = Xi / Zi^2 and yi = Yi / Zi^3, for i=1,2,3, gives:
  *    U1 = X1*Z2^2, U2 = X2*Z1^2
- *    S1 = X1*Z2^3, S2 = X2*Z2^3
+ *    S1 = Y1*Z2^3, S2 = Y2*Z1^3
  *    Z = Z1*Z2
  *    T = U1+U2
 *    M = S1+S2
@@ -414,40 +409,9 @@ static void secp256k1_gej_mul_lambda(secp256k1_gej_t *r, const secp256k1_gej_t *a) {
     *r = *a;
     secp256k1_fe_mul(&r->x, &r->x, beta);
 }
-
-static void secp256k1_gej_split_exp_var(secp256k1_num_t *r1, secp256k1_num_t *r2, const secp256k1_num_t *a) {
-    const secp256k1_ge_consts_t *c = secp256k1_ge_consts;
-    secp256k1_num_t bnc1, bnc2, bnt1, bnt2, bnn2;
-
-    secp256k1_num_copy(&bnn2, &c->order);
-    secp256k1_num_shift(&bnn2, 1);
-
-    secp256k1_num_mul(&bnc1, a, &c->a1b2);
-    secp256k1_num_add(&bnc1, &bnc1, &bnn2);
-    secp256k1_num_div(&bnc1, &bnc1, &c->order);
-
-    secp256k1_num_mul(&bnc2, a, &c->b1);
-    secp256k1_num_add(&bnc2, &bnc2, &bnn2);
-    secp256k1_num_div(&bnc2, &bnc2, &c->order);
-
-    secp256k1_num_mul(&bnt1, &bnc1, &c->a1b2);
-    secp256k1_num_mul(&bnt2, &bnc2, &c->a2);
-    secp256k1_num_add(&bnt1, &bnt1, &bnt2);
-    secp256k1_num_sub(r1, a, &bnt1);
-    secp256k1_num_mul(&bnt1, &bnc1, &c->b1);
-    secp256k1_num_mul(&bnt2, &bnc2, &c->a1b2);
-    secp256k1_num_sub(r2, &bnt1, &bnt2);
-}
 #endif
 
-
 static void secp256k1_ge_start(void) {
-    static const unsigned char secp256k1_ge_consts_order[] = {
-        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
-        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
-        0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,
-        0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41
-    };
     static const unsigned char secp256k1_ge_consts_g_x[] = {
         0x79,0xBE,0x66,0x7E,0xF9,0xDC,0xBB,0xAC,
         0x55,0xA0,0x62,0x95,0xCE,0x87,0x0B,0x07,
         0x02,0x9B,0xFC,0xDB,0x2D,0xCE,0x28,0xD9,
         0x59,0xF2,0x81,0x5B,0x16,0xF8,0x17,0x98
     };
@@ -462,47 +426,21 @@ static void secp256k1_ge_start(void) {
     };
 #ifdef USE_ENDOMORPHISM
     /* properties of secp256k1's efficiently computable endomorphism */
-    static const unsigned char secp256k1_ge_consts_lambda[] = {
-        0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0,
-        0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a,
-        0x12,0x2e,0x22,0xea,0x20,0x81,0x66,0x78,
-        0xdf,0x02,0x96,0x7c,0x1b,0x23,0xbd,0x72
-    };
     static const unsigned char secp256k1_ge_consts_beta[] = {
         0x7a,0xe9,0x6a,0x2b,0x65,0x7c,0x07,0x10,
0x6e,0x64,0x47,0x9e,0xac,0x34,0x34,0xe9, 0x9c,0xf0,0x49,0x75,0x12,0xf5,0x89,0x95, 0xc1,0x39,0x6c,0x28,0x71,0x95,0x01,0xee }; - static const unsigned char secp256k1_ge_consts_a1b2[] = { - 0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd, - 0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15 - }; - static const unsigned char secp256k1_ge_consts_b1[] = { - 0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28, - 0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3 - }; - static const unsigned char secp256k1_ge_consts_a2[] = { - 0x01, - 0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6, - 0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8 - }; #endif if (secp256k1_ge_consts == NULL) { secp256k1_ge_consts_t *ret = (secp256k1_ge_consts_t*)malloc(sizeof(secp256k1_ge_consts_t)); - secp256k1_num_set_bin(&ret->order, secp256k1_ge_consts_order, sizeof(secp256k1_ge_consts_order)); - secp256k1_num_copy(&ret->half_order, &ret->order); - secp256k1_num_shift(&ret->half_order, 1); #ifdef USE_ENDOMORPHISM - secp256k1_num_set_bin(&ret->lambda, secp256k1_ge_consts_lambda, sizeof(secp256k1_ge_consts_lambda)); - secp256k1_num_set_bin(&ret->a1b2, secp256k1_ge_consts_a1b2, sizeof(secp256k1_ge_consts_a1b2)); - secp256k1_num_set_bin(&ret->a2, secp256k1_ge_consts_a2, sizeof(secp256k1_ge_consts_a2)); - secp256k1_num_set_bin(&ret->b1, secp256k1_ge_consts_b1, sizeof(secp256k1_ge_consts_b1)); - secp256k1_fe_set_b32(&ret->beta, secp256k1_ge_consts_beta); + VERIFY_CHECK(secp256k1_fe_set_b32(&ret->beta, secp256k1_ge_consts_beta)); #endif secp256k1_fe_t g_x, g_y; - secp256k1_fe_set_b32(&g_x, secp256k1_ge_consts_g_x); - secp256k1_fe_set_b32(&g_y, secp256k1_ge_consts_g_y); + VERIFY_CHECK(secp256k1_fe_set_b32(&g_x, secp256k1_ge_consts_g_x)); + VERIFY_CHECK(secp256k1_fe_set_b32(&g_y, secp256k1_ge_consts_g_y)); secp256k1_ge_set_xy(&ret->g, &g_x, &g_y); secp256k1_ge_consts = ret; } diff --git a/src/secp256k1/src/num.h b/src/secp256k1/src/num.h index c86f84785..339b6bb6e 100644 --- a/src/secp256k1/src/num.h +++ b/src/secp256k1/src/num.h @@ -7,6 +7,8 @@ #ifndef _SECP256K1_NUM_ #define _SECP256K1_NUM_ +#ifndef USE_NUM_NONE + #if defined HAVE_CONFIG_H #include "libsecp256k1-config.h" #endif @@ -17,9 +19,6 @@ #error "Please select num implementation" #endif -/** Clear a number to prevent the leak of sensitive data. */ -static void secp256k1_num_clear(secp256k1_num_t *r); - /** Copy a number. */ static void secp256k1_num_copy(secp256k1_num_t *r, const secp256k1_num_t *a); @@ -30,15 +29,9 @@ static void secp256k1_num_get_bin(unsigned char *r, unsigned int rlen, const sec /** Set a number to the value of a binary big-endian string. */ static void secp256k1_num_set_bin(secp256k1_num_t *r, const unsigned char *a, unsigned int alen); -/** Set a number equal to a (signed) integer. */ -static void secp256k1_num_set_int(secp256k1_num_t *r, int a); - /** Compute a modular inverse. The input must be less than the modulus. */ static void secp256k1_num_mod_inverse(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *m); -/** Multiply two numbers modulo another. */ -static void secp256k1_num_mod_mul(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b, const secp256k1_num_t *m); - /** Compare the absolute value of two numbers. */ static int secp256k1_num_cmp(const secp256k1_num_t *a, const secp256k1_num_t *b); @@ -54,47 +47,22 @@ static void secp256k1_num_sub(secp256k1_num_t *r, const secp256k1_num_t *a, cons /** Multiply two (signed) numbers. */ static void secp256k1_num_mul(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b); -/** Divide two (signed) numbers. 
*/ -static void secp256k1_num_div(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b); - /** Replace a number by its remainder modulo m. M's sign is ignored. The result is a number between 0 and m-1, even if r was negative. */ static void secp256k1_num_mod(secp256k1_num_t *r, const secp256k1_num_t *m); -/** Calculate the number of bits in (the absolute value of) a number. */ -static int secp256k1_num_bits(const secp256k1_num_t *a); - -/** Right-shift the passed number by bits bits, and return those bits. */ -static int secp256k1_num_shift(secp256k1_num_t *r, int bits); +/** Right-shift the passed number by bits bits. */ +static void secp256k1_num_shift(secp256k1_num_t *r, int bits); /** Check whether a number is zero. */ static int secp256k1_num_is_zero(const secp256k1_num_t *a); -/** Check whether a number is odd. */ -static int secp256k1_num_is_odd(const secp256k1_num_t *a); - /** Check whether a number is strictly negative. */ static int secp256k1_num_is_neg(const secp256k1_num_t *a); -/** Check whether a particular bit is set in a number. */ -static int secp256k1_num_get_bit(const secp256k1_num_t *a, int pos); - -/** Increase a number by 1. */ -static void secp256k1_num_inc(secp256k1_num_t *r); - -/** Set a number equal to the value of a hex string (unsigned). */ -static void secp256k1_num_set_hex(secp256k1_num_t *r, const char *a, int alen); - -/** Convert (the absolute value of) a number to a hexadecimal string. */ -static void secp256k1_num_get_hex(char *r, int rlen, const secp256k1_num_t *a); - -/** Split a number into a low and high part. */ -static void secp256k1_num_split(secp256k1_num_t *rl, secp256k1_num_t *rh, const secp256k1_num_t *a, int bits); - /** Change a number's sign. */ static void secp256k1_num_negate(secp256k1_num_t *r); -/** Get a bunch of bits from a number. */ -static int secp256k1_num_get_bits(const secp256k1_num_t *a, int offset, int count); +#endif #endif diff --git a/src/secp256k1/src/num_gmp_impl.h b/src/secp256k1/src/num_gmp_impl.h index e45a59e0c..19d474e59 100644 --- a/src/secp256k1/src/num_gmp_impl.h +++ b/src/secp256k1/src/num_gmp_impl.h @@ -22,35 +22,10 @@ static void secp256k1_num_sanity(const secp256k1_num_t *a) { #define secp256k1_num_sanity(a) do { } while(0) #endif -static void secp256k1_num_init(secp256k1_num_t *r) { - r->neg = 0; - r->limbs = 1; - r->data[0] = 0; -} - -static void secp256k1_num_clear(secp256k1_num_t *r) { - memset(r, 0, sizeof(*r)); -} - -static void secp256k1_num_free(secp256k1_num_t *r) { - (void)r; -} - static void secp256k1_num_copy(secp256k1_num_t *r, const secp256k1_num_t *a) { *r = *a; } -static int secp256k1_num_bits(const secp256k1_num_t *a) { - int ret=(a->limbs-1)*GMP_NUMB_BITS; - mp_limb_t x=a->data[a->limbs-1]; - while (x) { - x >>= 1; - ret++; - } - return ret; -} - - static void secp256k1_num_get_bin(unsigned char *r, unsigned int rlen, const secp256k1_num_t *a) { unsigned char tmp[65]; int len = 0; @@ -71,18 +46,16 @@ static void secp256k1_num_set_bin(secp256k1_num_t *r, const unsigned char *a, un VERIFY_CHECK(alen > 0); VERIFY_CHECK(alen <= 64); int len = mpn_set_str(r->data, a, alen, 256); + if (len == 0) { + r->data[0] = 0; + len = 1; + } VERIFY_CHECK(len <= NUM_LIMBS*2); r->limbs = len; r->neg = 0; while (r->limbs > 1 && r->data[r->limbs-1]==0) r->limbs--; } -static void secp256k1_num_set_int(secp256k1_num_t *r, int a) { - r->limbs = 1; - r->neg = (a < 0); - r->data[0] = (a < 0) ? 
-a : a; -} - static void secp256k1_num_add_abs(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b) { mp_limb_t c = mpn_add(r->data, a->data, a->limbs, b->data, b->limbs); r->limbs = a->limbs; if (c != 0) { VERIFY_CHECK(r->limbs < 2*NUM_LIMBS); r->data[r->limbs++] = c; } @@ -161,10 +134,6 @@ static int secp256k1_num_is_zero(const secp256k1_num_t *a) { return (a->limbs == 1 && a->data[0] == 0); } -static int secp256k1_num_is_odd(const secp256k1_num_t *a) { - return a->data[0] & 1; -} - static int secp256k1_num_is_neg(const secp256k1_num_t *a) { return (a->limbs > 1 || a->data[0] != 0) && a->neg; }
@@ -237,140 +206,27 @@ static void secp256k1_num_mul(secp256k1_num_t *r, const secp256k1_num_t *a, cons memset(tmp, 0, sizeof(tmp)); } -static void secp256k1_num_div(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b) { - secp256k1_num_sanity(a); - secp256k1_num_sanity(b); - if (b->limbs > a->limbs) { - r->limbs = 1; - r->data[0] = 0; - r->neg = 0; - return; - } - - mp_limb_t quo[2*NUM_LIMBS+1]; - mp_limb_t rem[2*NUM_LIMBS+1]; - mpn_tdiv_qr(quo, rem, 0, a->data, a->limbs, b->data, b->limbs); - mpn_copyi(r->data, quo, a->limbs - b->limbs + 1); - r->limbs = a->limbs - b->limbs + 1; - while (r->limbs > 1 && r->data[r->limbs - 1]==0) r->limbs--; - r->neg = a->neg ^ b->neg; -} - -static void secp256k1_num_mod_mul(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b, const secp256k1_num_t *m) { - secp256k1_num_mul(r, a, b); - secp256k1_num_mod(r, m); -} - - -static int secp256k1_num_shift(secp256k1_num_t *r, int bits) { - VERIFY_CHECK(bits <= GMP_NUMB_BITS); - mp_limb_t ret = mpn_rshift(r->data, r->data, r->limbs, bits); - if (r->limbs>1 && r->data[r->limbs-1]==0) r->limbs--; - ret >>= (GMP_NUMB_BITS - bits); - return ret; -} - -static int secp256k1_num_get_bit(const secp256k1_num_t *a, int pos) { - return (a->limbs*GMP_NUMB_BITS > pos) && ((a->data[pos/GMP_NUMB_BITS] >> (pos % GMP_NUMB_BITS)) & 1); -} - -static void secp256k1_num_inc(secp256k1_num_t *r) { - mp_limb_t ret = mpn_add_1(r->data, r->data, r->limbs, (mp_limb_t)1); - if (ret) { - VERIFY_CHECK(r->limbs < 2*NUM_LIMBS); - r->data[r->limbs++] = ret; - } -}
-static void secp256k1_num_set_hex(secp256k1_num_t *r, const char *a, int alen) { - static const unsigned char cvt[256] = { - 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0, - 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0, - 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0, - 0, 1, 2, 3, 4, 5, 6,7,8,9,0,0,0,0,0,0, - 0,10,11,12,13,14,15,0,0,0,0,0,0,0,0,0, - 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0, - 0,10,11,12,13,14,15,0,0,0,0,0,0,0,0,0, - 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0, - 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0, - 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0, - 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0, - 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0, - 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0, - 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0, - 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0, - 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0 - }; - unsigned char num[257] = {}; - for (int i=0; i<alen; i++) { - num[i] = cvt[(unsigned char)a[i]]; - } - r->limbs = mpn_set_str(r->data, num, alen, 16); - r->neg = 0; - while (r->limbs > 1 && r->data[r->limbs-1] == 0) r->limbs--; -}
-static void secp256k1_num_get_hex(char *r, int rlen, const secp256k1_num_t *a) { - static const unsigned char cvt[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'}; - unsigned char *tmp = malloc(257); - mp_size_t len = mpn_get_str(tmp, 16, (mp_limb_t*)a->data, a->limbs); - VERIFY_CHECK(len <= rlen); - for (int i=0; i<len; i++) { - VERIFY_CHECK(rlen-len+i >= 0); - VERIFY_CHECK(rlen-len+i < rlen); - VERIFY_CHECK(tmp[i] < 16); - r[rlen-len+i] = cvt[tmp[i]]; - } - for (int i=0; i<rlen-len; i++) { - VERIFY_CHECK(i >= 0); - VERIFY_CHECK(i < rlen); - r[i] = cvt[0]; - } - free(tmp); -}
-static void secp256k1_num_split(secp256k1_num_t *rl, secp256k1_num_t *rh, const secp256k1_num_t *a, int bits) { - VERIFY_CHECK(bits > 0); - rh->neg = a->neg; - if (bits >= a->limbs * GMP_NUMB_BITS) { - *rl = *a; - rh->limbs = 1; - rh->data[0] = 0; - return; - } - rl->limbs = 0; - rl->neg = a->neg; - int left = bits; - while (left >= GMP_NUMB_BITS) { - rl->data[rl->limbs] = a->data[rl->limbs]; - rl->limbs++; - left -= GMP_NUMB_BITS; - } - if (left == 0) { - mpn_copyi(rh->data, a->data + rl->limbs, a->limbs - rl->limbs); - rh->limbs = a->limbs - rl->limbs; - } - else { - mpn_rshift(rh->data, a->data + rl->limbs, a->limbs - rl->limbs, left); - rh->limbs = a->limbs - rl->limbs; - while (rh->limbs>1 && rh->data[rh->limbs-1]==0) rh->limbs--; - } - if (left > 0) { - rl->data[rl->limbs] = a->data[rl->limbs] & ((((mp_limb_t)1) << left) - 1); - rl->limbs++; +static void secp256k1_num_shift(secp256k1_num_t *r, int bits) { + if (bits % GMP_NUMB_BITS) { + // Shift within limbs. + mpn_rshift(r->data, r->data, r->limbs, bits % GMP_NUMB_BITS); + } + if (bits >= GMP_NUMB_BITS) { + // Shift full limbs. + for (int i = 0; i < r->limbs; i++) { + int index = i + (bits / GMP_NUMB_BITS); + if (index < r->limbs && index < 2*NUM_LIMBS) { + r->data[i] = r->data[index]; + } else { + r->data[i] = 0; + } + } } - while (rl->limbs>1 && rl->data[rl->limbs-1]==0) rl->limbs--; + while (r->limbs>1 && r->data[r->limbs-1]==0) r->limbs--; } static void secp256k1_num_negate(secp256k1_num_t *r) { r->neg ^= 1; } -static int secp256k1_num_get_bits(const secp256k1_num_t *a, int offset, int count) { - int ret = 0; - for (int i = 0; i < count; i++) { - ret |= ((a->data[(offset + i) / GMP_NUMB_BITS] >> ((offset + i) % GMP_NUMB_BITS)) & 1) << i; - } - return ret; -} - #endif
diff --git a/src/secp256k1/src/num_impl.h b/src/secp256k1/src/num_impl.h index f73d3ceea..0b0e3a072 100644 --- a/src/secp256k1/src/num_impl.h +++ b/src/secp256k1/src/num_impl.h @@ -15,6 +15,8 @@ #if defined(USE_NUM_GMP) #include "num_gmp_impl.h" +#elif defined(USE_NUM_NONE) +/* Nothing. */ #else #error "Please select num implementation" #endif
diff --git a/src/secp256k1/src/scalar.h b/src/secp256k1/src/scalar.h index 3baacb372..2f5ba0d44 100644 --- a/src/secp256k1/src/scalar.h +++ b/src/secp256k1/src/scalar.h @@ -21,20 +21,32 @@ #error "Please select scalar implementation" #endif +static void secp256k1_scalar_start(void); +static void secp256k1_scalar_stop(void); + /** Clear a scalar to prevent the leak of sensitive data. */ static void secp256k1_scalar_clear(secp256k1_scalar_t *r); -/** Access bits from a scalar. */ -static int secp256k1_scalar_get_bits(const secp256k1_scalar_t *a, int offset, int count); +/** Access bits from a scalar. All requested bits must belong to the same 32-bit limb. */ +static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar_t *a, unsigned int offset, unsigned int count); + +/** Access bits from a scalar. Not constant time. */ +static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar_t *a, unsigned int offset, unsigned int count); /** Set a scalar from a big endian byte array. */ static void secp256k1_scalar_set_b32(secp256k1_scalar_t *r, const unsigned char *bin, int *overflow); +/** Set a scalar to an unsigned integer. */ +static void secp256k1_scalar_set_int(secp256k1_scalar_t *r, unsigned int v); + /** Convert a scalar to a byte array.
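The output is always 32 bytes, big endian; for values below the group order it is the inverse of secp256k1_scalar_set_b32.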
*/ static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar_t* a); -/** Add two scalars together (modulo the group order). */ -static void secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b); +/** Add two scalars together (modulo the group order). Returns whether it overflowed. */ +static int secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b); + +/** Add a power of two to a scalar. The result is not allowed to overflow. */ +static void secp256k1_scalar_add_bit(secp256k1_scalar_t *r, unsigned int bit); /** Multiply two scalars (modulo the group order). */ static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b); @@ -45,6 +57,9 @@ static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t /** Compute the inverse of a scalar (modulo the group order). */ static void secp256k1_scalar_inverse(secp256k1_scalar_t *r, const secp256k1_scalar_t *a); +/** Compute the inverse of a scalar (modulo the group order), without constant-time guarantee. */ +static void secp256k1_scalar_inverse_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a); + /** Compute the complement of a scalar (modulo the group order). */ static void secp256k1_scalar_negate(secp256k1_scalar_t *r, const secp256k1_scalar_t *a); @@ -57,7 +72,25 @@ static int secp256k1_scalar_is_one(const secp256k1_scalar_t *a); /** Check whether a scalar is higher than the group order divided by 2. */ static int secp256k1_scalar_is_high(const secp256k1_scalar_t *a); +#ifndef USE_NUM_NONE /** Convert a scalar to a number. */ static void secp256k1_scalar_get_num(secp256k1_num_t *r, const secp256k1_scalar_t *a); +/** Get the order of the group as a number. */ +static void secp256k1_scalar_order_get_num(secp256k1_num_t *r); +#endif + +/** Compare two scalars. */ +static int secp256k1_scalar_eq(const secp256k1_scalar_t *a, const secp256k1_scalar_t *b); + +static void secp256k1_scalar_split_128(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a); + +#ifdef USE_ENDOMORPHISM +/** Find r1 and r2 such that r1+r2*lambda = a, and r1 and r2 are maximum 128 bits long (see secp256k1_gej_mul_lambda). */ +static void secp256k1_scalar_split_lambda_var(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a); +#endif + +/** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. 
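The implementations compute the full 512-bit product, shift it right, and round to nearest by adding in the highest discarded bit, so that e.g. secp256k1_scalar_mul_shift_var(&c1, &k, &g1, 272) yields c1 = round(k*g1 / 2^272), which is exactly the primitive the lambda decomposition in scalar_impl.h needs.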
*/ +static void secp256k1_scalar_mul_shift_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b, unsigned int shift); + #endif diff --git a/src/secp256k1/src/scalar_4x64_impl.h b/src/secp256k1/src/scalar_4x64_impl.h index f78718234..d14477522 100644 --- a/src/secp256k1/src/scalar_4x64_impl.h +++ b/src/secp256k1/src/scalar_4x64_impl.h @@ -33,9 +33,27 @@ SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar_t *r) { r->d[3] = 0; } -SECP256K1_INLINE static int secp256k1_scalar_get_bits(const secp256k1_scalar_t *a, int offset, int count) { - VERIFY_CHECK((offset + count - 1) / 64 == offset / 64); - return (a->d[offset / 64] >> (offset % 64)) & ((((uint64_t)1) << count) - 1); +SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar_t *r, unsigned int v) { + r->d[0] = v; + r->d[1] = 0; + r->d[2] = 0; + r->d[3] = 0; +} + +SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar_t *a, unsigned int offset, unsigned int count) { + VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6); + return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1); +} + +SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar_t *a, unsigned int offset, unsigned int count) { + VERIFY_CHECK(count < 32); + VERIFY_CHECK(offset + count <= 256); + if ((offset + count - 1) >> 6 == offset >> 6) { + return secp256k1_scalar_get_bits(a, offset, count); + } else { + VERIFY_CHECK((offset >> 6) + 1 < 4); + return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1); + } } SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar_t *a) { @@ -63,7 +81,7 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar_t *r, unsig return overflow; } -static void secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { +static int secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { uint128_t t = (uint128_t)a->d[0] + b->d[0]; r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; t += (uint128_t)a->d[1] + b->d[1]; @@ -72,7 +90,26 @@ static void secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; t += (uint128_t)a->d[3] + b->d[3]; r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - secp256k1_scalar_reduce(r, t + secp256k1_scalar_check_overflow(r)); + int overflow = t + secp256k1_scalar_check_overflow(r); + VERIFY_CHECK(overflow == 0 || overflow == 1); + secp256k1_scalar_reduce(r, overflow); + return overflow; +} + +static void secp256k1_scalar_add_bit(secp256k1_scalar_t *r, unsigned int bit) { + VERIFY_CHECK(bit < 256); + uint128_t t = (uint128_t)r->d[0] + (((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F)); + r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; + t += (uint128_t)r->d[1] + (((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F)); + r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; + t += (uint128_t)r->d[2] + (((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F)); + r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; + t += (uint128_t)r->d[3] + (((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F)); + r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; +#ifdef VERIFY + VERIFY_CHECK((t >> 64) == 0); + VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0); +#endif } static void secp256k1_scalar_set_b32(secp256k1_scalar_t *r, const unsigned char *b32, int *overflow) { @@ -280,13 +317,11 
@@ static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint64_t *l secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r)); } -static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { +static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { /* 160 bit accumulator. */ uint64_t c0 = 0, c1 = 0; uint32_t c2 = 0; - uint64_t l[8]; - /* l[0..7] = a[0..3] * b[0..3]. */ muladd_fast(a->d[0], b->d[0]); extract_fast(l[0]); @@ -313,17 +348,13 @@ static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t extract_fast(l[6]); VERIFY_CHECK(c1 <= 0); l[7] = c0; - - secp256k1_scalar_reduce_512(r, l); } -static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) { +static void secp256k1_scalar_sqr_512(uint64_t l[8], const secp256k1_scalar_t *a) { /* 160 bit accumulator. */ uint64_t c0 = 0, c1 = 0; uint32_t c2 = 0; - uint64_t l[8]; - /* l[0..7] = a[0..3] * b[0..3]. */ muladd_fast(a->d[0], a->d[0]); extract_fast(l[0]); @@ -344,8 +375,6 @@ static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t extract_fast(l[6]); VERIFY_CHECK(c1 == 0); l[7] = c0; - - secp256k1_scalar_reduce_512(r, l); } #undef sumadd @@ -356,4 +385,47 @@ static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t #undef extract #undef extract_fast +static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { + uint64_t l[8]; + secp256k1_scalar_mul_512(l, a, b); + secp256k1_scalar_reduce_512(r, l); +} + +static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) { + uint64_t l[8]; + secp256k1_scalar_sqr_512(l, a); + secp256k1_scalar_reduce_512(r, l); +} + +static void secp256k1_scalar_split_128(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a) { + r1->d[0] = a->d[0]; + r1->d[1] = a->d[1]; + r1->d[2] = 0; + r1->d[3] = 0; + r2->d[0] = a->d[2]; + r2->d[1] = a->d[3]; + r2->d[2] = 0; + r2->d[3] = 0; +} + +SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { + return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0; +} + +SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b, unsigned int shift) { + VERIFY_CHECK(shift >= 256); + uint64_t l[8]; + secp256k1_scalar_mul_512(l, a, b); + unsigned int shiftlimbs = shift >> 6; + unsigned int shiftlow = shift & 0x3F; + unsigned int shifthigh = 64 - shiftlow; + r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[3] = shift < 320 ? 
(l[3 + shiftlimbs] >> shiftlow) : 0; + if ((l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1) { + secp256k1_scalar_add_bit(r, 0); + } +} + #endif diff --git a/src/secp256k1/src/scalar_8x32_impl.h b/src/secp256k1/src/scalar_8x32_impl.h index e58be1365..915cbcddb 100644 --- a/src/secp256k1/src/scalar_8x32_impl.h +++ b/src/secp256k1/src/scalar_8x32_impl.h @@ -45,9 +45,31 @@ SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar_t *r) { r->d[7] = 0; } -SECP256K1_INLINE static int secp256k1_scalar_get_bits(const secp256k1_scalar_t *a, int offset, int count) { - VERIFY_CHECK((offset + count - 1) / 32 == offset / 32); - return (a->d[offset / 32] >> (offset % 32)) & ((1 << count) - 1); +SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar_t *r, unsigned int v) { + r->d[0] = v; + r->d[1] = 0; + r->d[2] = 0; + r->d[3] = 0; + r->d[4] = 0; + r->d[5] = 0; + r->d[6] = 0; + r->d[7] = 0; +} + +SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar_t *a, unsigned int offset, unsigned int count) { + VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5); + return (a->d[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1); +} + +SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar_t *a, unsigned int offset, unsigned int count) { + VERIFY_CHECK(count < 32); + VERIFY_CHECK(offset + count <= 256); + if ((offset + count - 1) >> 5 == offset >> 5) { + return secp256k1_scalar_get_bits(a, offset, count); + } else { + VERIFY_CHECK((offset >> 5) + 1 < 8); + return ((a->d[offset >> 5] >> (offset & 0x1F)) | (a->d[(offset >> 5) + 1] << (32 - (offset & 0x1F)))) & ((((uint32_t)1) << count) - 1); + } } SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar_t *a) { @@ -89,7 +111,7 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar_t *r, uint3 return overflow; } -static void secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { +static int secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { uint64_t t = (uint64_t)a->d[0] + b->d[0]; r->d[0] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)a->d[1] + b->d[1]; @@ -106,7 +128,34 @@ static void secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t r->d[6] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)a->d[7] + b->d[7]; r->d[7] = t & 0xFFFFFFFFULL; t >>= 32; - secp256k1_scalar_reduce(r, t + secp256k1_scalar_check_overflow(r)); + int overflow = t + secp256k1_scalar_check_overflow(r); + VERIFY_CHECK(overflow == 0 || overflow == 1); + secp256k1_scalar_reduce(r, overflow); + return overflow; +} + +static void secp256k1_scalar_add_bit(secp256k1_scalar_t *r, unsigned int bit) { + VERIFY_CHECK(bit < 256); + uint64_t t = (uint64_t)r->d[0] + (((uint32_t)((bit >> 5) == 0)) << (bit & 0x1F)); + r->d[0] = t & 0xFFFFFFFFULL; t >>= 32; + t += (uint64_t)r->d[1] + (((uint32_t)((bit >> 5) == 1)) << (bit & 0x1F)); + r->d[1] = t & 0xFFFFFFFFULL; t >>= 32; + t += (uint64_t)r->d[2] + (((uint32_t)((bit >> 5) == 2)) << (bit & 0x1F)); + r->d[2] = t & 0xFFFFFFFFULL; t >>= 32; + t += (uint64_t)r->d[3] + (((uint32_t)((bit >> 5) == 3)) << (bit & 0x1F)); + r->d[3] = t & 0xFFFFFFFFULL; t >>= 32; + t += (uint64_t)r->d[4] + (((uint32_t)((bit >> 5) == 4)) << (bit & 0x1F)); + r->d[4] = t & 0xFFFFFFFFULL; t >>= 32; + t += (uint64_t)r->d[5] + (((uint32_t)((bit >> 5) == 5)) << (bit & 0x1F)); + r->d[5] = t & 0xFFFFFFFFULL; t >>= 32; + t += (uint64_t)r->d[6] + 
(((uint32_t)((bit >> 5) == 6)) << (bit & 0x1F)); + r->d[6] = t & 0xFFFFFFFFULL; t >>= 32; + t += (uint64_t)r->d[7] + (((uint32_t)((bit >> 5) == 7)) << (bit & 0x1F)); + r->d[7] = t & 0xFFFFFFFFULL; +#ifdef VERIFY + VERIFY_CHECK((t >> 32) == 0); + VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0); +#endif } static void secp256k1_scalar_set_b32(secp256k1_scalar_t *r, const unsigned char *b32, int *overflow) { @@ -405,12 +454,10 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint32_t *l secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r)); } -static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { +static void secp256k1_scalar_mul_512(uint32_t l[16], const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { /* 96 bit accumulator. */ uint32_t c0 = 0, c1 = 0, c2 = 0; - uint32_t l[16]; - /* l[0..15] = a[0..7] * b[0..7]. */ muladd_fast(a->d[0], b->d[0]); extract_fast(l[0]); @@ -493,16 +540,12 @@ static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t extract_fast(l[14]); VERIFY_CHECK(c1 == 0); l[15] = c0; - - secp256k1_scalar_reduce_512(r, l); } -static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) { +static void secp256k1_scalar_sqr_512(uint32_t l[16], const secp256k1_scalar_t *a) { /* 96 bit accumulator. */ uint32_t c0 = 0, c1 = 0, c2 = 0; - uint32_t l[16]; - /* l[0..15] = a[0..7]^2. */ muladd_fast(a->d[0], a->d[0]); extract_fast(l[0]); @@ -557,8 +600,6 @@ static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t extract_fast(l[14]); VERIFY_CHECK(c1 == 0); l[15] = c0; - - secp256k1_scalar_reduce_512(r, l); } #undef sumadd @@ -569,4 +610,59 @@ static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t #undef extract #undef extract_fast +static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { + uint32_t l[16]; + secp256k1_scalar_mul_512(l, a, b); + secp256k1_scalar_reduce_512(r, l); +} + +static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) { + uint32_t l[16]; + secp256k1_scalar_sqr_512(l, a); + secp256k1_scalar_reduce_512(r, l); +} + +static void secp256k1_scalar_split_128(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a) { + r1->d[0] = a->d[0]; + r1->d[1] = a->d[1]; + r1->d[2] = a->d[2]; + r1->d[3] = a->d[3]; + r1->d[4] = 0; + r1->d[5] = 0; + r1->d[6] = 0; + r1->d[7] = 0; + r2->d[0] = a->d[4]; + r2->d[1] = a->d[5]; + r2->d[2] = a->d[6]; + r2->d[3] = a->d[7]; + r2->d[4] = 0; + r2->d[5] = 0; + r2->d[6] = 0; + r2->d[7] = 0; +} + +SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { + return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0; +} + +SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b, unsigned int shift) { + VERIFY_CHECK(shift >= 256); + uint32_t l[16]; + secp256k1_scalar_mul_512(l, a, b); + unsigned int shiftlimbs = shift >> 5; + unsigned int shiftlow = shift & 0x1F; + unsigned int shifthigh = 32 - shiftlow; + r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 480 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[1] = shift < 480 ? 
(l[1 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[2] = shift < 448 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 416 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[3] = shift < 416 ? (l[3 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[4 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[4] = shift < 384 ? (l[4 + shiftlimbs] >> shiftlow | (shift < 352 && shiftlow ? (l[5 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[5] = shift < 352 ? (l[5 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[6 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0; + if ((l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1) { + secp256k1_scalar_add_bit(r, 0); + } +} + #endif diff --git a/src/secp256k1/src/scalar_impl.h b/src/secp256k1/src/scalar_impl.h index ddc5061c7..7fc159df7 100644 --- a/src/secp256k1/src/scalar_impl.h +++ b/src/secp256k1/src/scalar_impl.h @@ -9,6 +9,7 @@ #include +#include "group.h" #include "scalar.h" #if defined HAVE_CONFIG_H @@ -23,12 +24,132 @@ #error "Please select scalar implementation" #endif +typedef struct { +#ifndef USE_NUM_NONE + secp256k1_num_t order; +#endif +#ifdef USE_ENDOMORPHISM + secp256k1_scalar_t minus_lambda, minus_b1, minus_b2, g1, g2; +#endif +} secp256k1_scalar_consts_t; + +static const secp256k1_scalar_consts_t *secp256k1_scalar_consts = NULL; + +static void secp256k1_scalar_start(void) { + if (secp256k1_scalar_consts != NULL) + return; + + /* Allocate. */ + secp256k1_scalar_consts_t *ret = (secp256k1_scalar_consts_t*)malloc(sizeof(secp256k1_scalar_consts_t)); + +#ifndef USE_NUM_NONE + static const unsigned char secp256k1_scalar_consts_order[] = { + 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, + 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE, + 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B, + 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41 + }; + secp256k1_num_set_bin(&ret->order, secp256k1_scalar_consts_order, sizeof(secp256k1_scalar_consts_order)); +#endif +#ifdef USE_ENDOMORPHISM + /** + * Lambda is a scalar which has the property for secp256k1 that point multiplication by + * it is efficiently computable (see secp256k1_gej_mul_lambda). */ + static const unsigned char secp256k1_scalar_consts_lambda[32] = { + 0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0, + 0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a, + 0x12,0x2e,0x22,0xea,0x20,0x81,0x66,0x78, + 0xdf,0x02,0x96,0x7c,0x1b,0x23,0xbd,0x72 + }; + /** + * "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm + * (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1 + * and k2 have a small size. + * It relies on constants a1, b1, a2, b2. These constants for the value of lambda above are: + * + * - a1 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15} + * - b1 = -{0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3} + * - a2 = {0x01,0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8} + * - b2 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15} + * + * The algorithm then computes c1 = round(b1 * k / n) and c2 = round(b2 * k / n), and gives + * k1 = k - (c1*a1 + c2*a2) and k2 = -(c1*b1 + c2*b2). 
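+ * (In algorithm 3.74's own notation, c1 = round(b2 * k / n) and c2 = round(-b1 * k / n); because b1 is negative, both roundings act on non-negative values, and the signs are absorbed into the minus_b1 and minus_b2 constants below.)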
Instead, we use modular arithmetic, and + * compute k1 as k - k2 * lambda, avoiding the need for constants a1 and a2. + * + * g1, g2 are precomputed constants used to replace division with a rounded multiplication + * when decomposing the scalar for an endomorphism-based point multiplication. + * + * The possibility of using precomputed estimates is mentioned in "Guide to Elliptic Curve + * Cryptography" (Hankerson, Menezes, Vanstone) in section 3.5. + * + * The derivation is described in the paper "Efficient Software Implementation of Public-Key + * Cryptography on Sensor Networks Using the MSP430X Microcontroller" (Gouvea, Oliveira, Lopez), + * Section 4.3 (here we use a somewhat higher-precision estimate): + * d = a1*b2 - b1*a2 + * g1 = round((2^272)*b2/d) + * g2 = round((2^272)*b1/d) + * + * (Note that 'd' is also equal to the curve order here because [a1,b1] and [a2,b2] are found + * as outputs of the Extended Euclidean Algorithm on inputs 'order' and 'lambda'). + */ + static const unsigned char secp256k1_scalar_consts_minus_b1[32] = { + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28, + 0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3 + }; + static const unsigned char secp256k1_scalar_consts_b2[32] = { + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd, + 0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15 + }; + static const unsigned char secp256k1_scalar_consts_g1[32] = { + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x86, + 0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c, + 0x90,0xe4,0x92,0x84,0xeb,0x15,0x3d,0xab + }; + static const unsigned char secp256k1_scalar_consts_g2[32] = { + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0xe4,0x43, + 0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54, + 0x7f,0xa9,0x0a,0xbf,0xe4,0xc4,0x22,0x12 + }; + + secp256k1_scalar_set_b32(&ret->minus_lambda, secp256k1_scalar_consts_lambda, NULL); + secp256k1_scalar_negate(&ret->minus_lambda, &ret->minus_lambda); + secp256k1_scalar_set_b32(&ret->minus_b1, secp256k1_scalar_consts_minus_b1, NULL); + secp256k1_scalar_set_b32(&ret->minus_b2, secp256k1_scalar_consts_b2, NULL); + secp256k1_scalar_negate(&ret->minus_b2, &ret->minus_b2); + secp256k1_scalar_set_b32(&ret->g1, secp256k1_scalar_consts_g1, NULL); + secp256k1_scalar_set_b32(&ret->g2, secp256k1_scalar_consts_g2, NULL); +#endif + + /* Set the global pointer. */ + secp256k1_scalar_consts = ret; +} + +static void secp256k1_scalar_stop(void) { + if (secp256k1_scalar_consts == NULL) + return; + + secp256k1_scalar_consts_t *c = (secp256k1_scalar_consts_t*)secp256k1_scalar_consts; + secp256k1_scalar_consts = NULL; + free(c); +} + +#ifndef USE_NUM_NONE static void secp256k1_scalar_get_num(secp256k1_num_t *r, const secp256k1_scalar_t *a) { unsigned char c[32]; secp256k1_scalar_get_b32(c, a); secp256k1_num_set_bin(r, c, 32); } +static void secp256k1_scalar_order_get_num(secp256k1_num_t *r) { + *r = secp256k1_scalar_consts->order; +} +#endif static void secp256k1_scalar_inverse(secp256k1_scalar_t *r, const secp256k1_scalar_t *x) { /* First compute x ^ (2^N - 1) for some values of N. 
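These powers are then combined into an addition chain for the exponent n - 2: by Fermat's little theorem, x^(n-2) mod n is the inverse of x modulo the prime group order n.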
*/ @@ -181,4 +302,35 @@ static void secp256k1_scalar_inverse(secp256k1_scalar_t *r, const secp256k1_scal secp256k1_scalar_mul(r, t, &x6); /* 111111 */ } +static void secp256k1_scalar_inverse_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *x) { +#if defined(USE_SCALAR_INV_BUILTIN) + secp256k1_scalar_inverse(r, x); +#elif defined(USE_SCALAR_INV_NUM) + unsigned char b[32]; + secp256k1_scalar_get_b32(b, x); + secp256k1_num_t n; + secp256k1_num_set_bin(&n, b, 32); + secp256k1_num_mod_inverse(&n, &n, &secp256k1_scalar_consts->order); + secp256k1_num_get_bin(b, 32, &n); + secp256k1_scalar_set_b32(r, b, NULL); +#else +#error "Please select scalar inverse implementation" +#endif +} + +#ifdef USE_ENDOMORPHISM +static void secp256k1_scalar_split_lambda_var(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a) { + VERIFY_CHECK(r1 != a); + VERIFY_CHECK(r2 != a); + secp256k1_scalar_t c1, c2; + secp256k1_scalar_mul_shift_var(&c1, a, &secp256k1_scalar_consts->g1, 272); + secp256k1_scalar_mul_shift_var(&c2, a, &secp256k1_scalar_consts->g2, 272); + secp256k1_scalar_mul(&c1, &c1, &secp256k1_scalar_consts->minus_b1); + secp256k1_scalar_mul(&c2, &c2, &secp256k1_scalar_consts->minus_b2); + secp256k1_scalar_add(r2, &c1, &c2); + secp256k1_scalar_mul(r1, r2, &secp256k1_scalar_consts->minus_lambda); + secp256k1_scalar_add(r1, r1, a); +} +#endif + #endif diff --git a/src/secp256k1/src/secp256k1.c b/src/secp256k1/src/secp256k1.c index 1ab5b3722..20fc27df7 100644 --- a/src/secp256k1/src/secp256k1.c +++ b/src/secp256k1/src/secp256k1.c @@ -21,6 +21,8 @@ void secp256k1_start(unsigned int flags) { secp256k1_fe_start(); secp256k1_ge_start(); + secp256k1_scalar_start(); + secp256k1_ecdsa_start(); if (flags & SECP256K1_START_SIGN) { secp256k1_ecmult_gen_start(); } @@ -32,6 +34,8 @@ void secp256k1_start(unsigned int flags) { void secp256k1_stop(void) { secp256k1_ecmult_stop(); secp256k1_ecmult_gen_stop(); + secp256k1_ecdsa_stop(); + secp256k1_scalar_stop(); secp256k1_ge_stop(); secp256k1_fe_stop(); } @@ -43,11 +47,13 @@ int secp256k1_ecdsa_verify(const unsigned char *msg, int msglen, const unsigned DEBUG_CHECK(sig != NULL); DEBUG_CHECK(pubkey != NULL); + unsigned char msg32[32] = {0}; + memcpy(msg32 + 32 - msglen, msg, msglen); int ret = -3; - secp256k1_num_t m; + secp256k1_scalar_t m; secp256k1_ecdsa_sig_t s; secp256k1_ge_t q; - secp256k1_num_set_bin(&m, msg, msglen); + secp256k1_scalar_set_b32(&m, msg32, NULL); if (!secp256k1_eckey_pubkey_parse(&q, pubkey, pubkeylen)) { ret = -1; @@ -123,8 +129,8 @@ int secp256k1_ecdsa_sign_compact(const unsigned char *message, int messagelen, u ret = secp256k1_ecdsa_sig_sign(&sig, &sec, &msg, &non, recid); } if (ret) { - secp256k1_num_get_bin(sig64, 32, &sig.r); - secp256k1_num_get_bin(sig64 + 32, 32, &sig.s); + secp256k1_scalar_get_b32(sig64, &sig.r); + secp256k1_scalar_get_b32(sig64 + 32, &sig.s); } secp256k1_scalar_clear(&msg); secp256k1_scalar_clear(&non); @@ -142,11 +148,20 @@ int secp256k1_ecdsa_recover_compact(const unsigned char *msg, int msglen, const DEBUG_CHECK(recid >= 0 && recid <= 3); int ret = 0; - secp256k1_num_t m; + unsigned char msg32[32] = {0}; + memcpy(msg32 + 32 - msglen, msg, msglen); + secp256k1_scalar_t m; secp256k1_ecdsa_sig_t sig; - secp256k1_num_set_bin(&sig.r, sig64, 32); - secp256k1_num_set_bin(&sig.s, sig64 + 32, 32); - secp256k1_num_set_bin(&m, msg, msglen); + int overflow = 0; + secp256k1_scalar_set_b32(&sig.r, sig64, &overflow); + if (overflow) { + return 0; + } + secp256k1_scalar_set_b32(&sig.s, sig64 + 32, &overflow); + if 
(overflow) { + return 0; + } + secp256k1_scalar_set_b32(&m, msg32, NULL); secp256k1_ge_t q; if (secp256k1_ecdsa_sig_recover(&sig, &q, &m, recid)) { @@ -224,8 +239,12 @@ int secp256k1_ec_pubkey_tweak_add(unsigned char *pubkey, int pubkeylen, const un DEBUG_CHECK(pubkey != NULL); DEBUG_CHECK(tweak != NULL); - secp256k1_num_t term; - secp256k1_num_set_bin(&term, tweak, 32); + secp256k1_scalar_t term; + int overflow = 0; + secp256k1_scalar_set_b32(&term, tweak, &overflow); + if (overflow) { + return 0; + } secp256k1_ge_t p; int ret = secp256k1_eckey_pubkey_parse(&p, pubkey, pubkeylen); if (ret) { @@ -264,8 +283,12 @@ int secp256k1_ec_pubkey_tweak_mul(unsigned char *pubkey, int pubkeylen, const un DEBUG_CHECK(pubkey != NULL); DEBUG_CHECK(tweak != NULL); - secp256k1_num_t factor; - secp256k1_num_set_bin(&factor, tweak, 32); + secp256k1_scalar_t factor; + int overflow = 0; + secp256k1_scalar_set_b32(&factor, tweak, &overflow); + if (overflow) { + return 0; + } secp256k1_ge_t p; int ret = secp256k1_eckey_pubkey_parse(&p, pubkey, pubkeylen); if (ret) { diff --git a/src/secp256k1/src/tests.c b/src/secp256k1/src/tests.c index 5d9b8344d..78cdd67f2 100644 --- a/src/secp256k1/src/tests.c +++ b/src/secp256k1/src/tests.c @@ -23,23 +23,13 @@ static int count = 64; -/***** NUM TESTS *****/ - -void random_num_negate(secp256k1_num_t *num) { - if (secp256k1_rand32() & 1) - secp256k1_num_negate(num); -} - void random_field_element_test(secp256k1_fe_t *fe) { do { unsigned char b32[32]; secp256k1_rand256_test(b32); - secp256k1_num_t num; - secp256k1_num_set_bin(&num, b32, 32); - if (secp256k1_num_cmp(&num, &secp256k1_fe_consts->p) >= 0) - continue; - secp256k1_fe_set_b32(fe, b32); - break; + if (secp256k1_fe_set_b32(fe, b32)) { + break; + } } while(1); } @@ -75,19 +65,6 @@ void random_group_element_jacobian_test(secp256k1_gej_t *gej, const secp256k1_ge gej->infinity = ge->infinity; } -void random_num_order_test(secp256k1_num_t *num) { - do { - unsigned char b32[32]; - secp256k1_rand256_test(b32); - secp256k1_num_set_bin(num, b32, 32); - if (secp256k1_num_is_zero(num)) - continue; - if (secp256k1_num_cmp(num, &secp256k1_ge_consts->order) >= 0) - continue; - break; - } while(1); -} - void random_scalar_order_test(secp256k1_scalar_t *num) { do { unsigned char b32[32]; @@ -100,82 +77,36 @@ void random_scalar_order_test(secp256k1_scalar_t *num) { } while(1); } -void random_num_order(secp256k1_num_t *num) { +void random_scalar_order(secp256k1_scalar_t *num) { do { unsigned char b32[32]; secp256k1_rand256(b32); - secp256k1_num_set_bin(num, b32, 32); - if (secp256k1_num_is_zero(num)) - continue; - if (secp256k1_num_cmp(num, &secp256k1_ge_consts->order) >= 0) + int overflow = 0; + secp256k1_scalar_set_b32(num, b32, &overflow); + if (overflow || secp256k1_scalar_is_zero(num)) continue; break; } while(1); } -void test_num_copy_inc_cmp(void) { - secp256k1_num_t n1,n2; - random_num_order(&n1); - secp256k1_num_copy(&n2, &n1); - CHECK(secp256k1_num_eq(&n1, &n2)); - CHECK(secp256k1_num_eq(&n2, &n1)); - secp256k1_num_inc(&n2); - CHECK(!secp256k1_num_eq(&n1, &n2)); - CHECK(!secp256k1_num_eq(&n2, &n1)); -} - +/***** NUM TESTS *****/ -void test_num_get_set_hex(void) { - secp256k1_num_t n1,n2; - random_num_order_test(&n1); - char c[64]; - secp256k1_num_get_hex(c, 64, &n1); - secp256k1_num_set_hex(&n2, c, 64); - CHECK(secp256k1_num_eq(&n1, &n2)); - for (int i=0; i<64; i++) { - /* check whether the lower 4 bits correspond to the last hex character */ - int low1 = secp256k1_num_shift(&n1, 4); - int lowh = c[63]; - int low2 = 
((lowh>>6)*9+(lowh-'0'))&15; - CHECK(low1 == low2); - /* shift bits off the hex representation, and compare */ - memmove(c+1, c, 63); - c[0] = '0'; - secp256k1_num_set_hex(&n2, c, 64); - CHECK(secp256k1_num_eq(&n1, &n2)); - } +#ifndef USE_NUM_NONE +void random_num_negate(secp256k1_num_t *num) { + if (secp256k1_rand32() & 1) + secp256k1_num_negate(num); } -void test_num_get_set_bin(void) { - secp256k1_num_t n1,n2; - random_num_order_test(&n1); - unsigned char c[32]; - secp256k1_num_get_bin(c, 32, &n1); - secp256k1_num_set_bin(&n2, c, 32); - CHECK(secp256k1_num_eq(&n1, &n2)); - for (int i=0; i<32; i++) { - /* check whether the lower 8 bits correspond to the last byte */ - int low1 = secp256k1_num_shift(&n1, 8); - int low2 = c[31]; - CHECK(low1 == low2); - /* shift bits off the byte representation, and compare */ - memmove(c+1, c, 31); - c[0] = 0; - secp256k1_num_set_bin(&n2, c, 32); - CHECK(secp256k1_num_eq(&n1, &n2)); - } +void random_num_order_test(secp256k1_num_t *num) { + secp256k1_scalar_t sc; + random_scalar_order_test(&sc); + secp256k1_scalar_get_num(num, &sc); } -void run_num_int(void) { - secp256k1_num_t n1; - for (int i=-255; i<256; i++) { - unsigned char c1[3] = {}; - c1[2] = abs(i); - unsigned char c2[3] = {0x11,0x22,0x33}; - secp256k1_num_set_int(&n1, i); - secp256k1_num_get_bin(c2, 3, &n1); - CHECK(memcmp(c1, c2, 3) == 0); - } +void random_num_order(secp256k1_num_t *num) { + secp256k1_scalar_t sc; + random_scalar_order(&sc); + secp256k1_scalar_get_num(num, &sc); } void test_num_negate(void) { @@ -229,82 +160,84 @@ void test_num_add_sub(void) { void run_num_smalltests(void) { for (int i=0; i<100*count; i++) { - test_num_copy_inc_cmp(); - test_num_get_set_hex(); - test_num_get_set_bin(); test_num_negate(); test_num_add_sub(); } - run_num_int(); } +#endif /***** SCALAR TESTS *****/ -int secp256k1_scalar_eq(const secp256k1_scalar_t *s1, const secp256k1_scalar_t *s2) { - secp256k1_scalar_t t; - secp256k1_scalar_negate(&t, s2); - secp256k1_scalar_add(&t, &t, s1); - int ret = secp256k1_scalar_is_zero(&t); - return ret; -} - void scalar_test(void) { unsigned char c[32]; /* Set 's' to a random scalar, with value 'snum'. */ - secp256k1_rand256_test(c); secp256k1_scalar_t s; - secp256k1_scalar_set_b32(&s, c, NULL); - secp256k1_num_t snum; - secp256k1_num_set_bin(&snum, c, 32); - secp256k1_num_mod(&snum, &secp256k1_ge_consts->order); + random_scalar_order_test(&s); /* Set 's1' to a random scalar, with value 's1num'. */ - secp256k1_rand256_test(c); secp256k1_scalar_t s1; - secp256k1_scalar_set_b32(&s1, c, NULL); - secp256k1_num_t s1num; - secp256k1_num_set_bin(&s1num, c, 32); - secp256k1_num_mod(&s1num, &secp256k1_ge_consts->order); + random_scalar_order_test(&s1); /* Set 's2' to a random scalar, with value 'snum2', and byte array representation 'c'. 
*/ - secp256k1_rand256_test(c); secp256k1_scalar_t s2; - int overflow = 0; - secp256k1_scalar_set_b32(&s2, c, &overflow); - secp256k1_num_t s2num; - secp256k1_num_set_bin(&s2num, c, 32); - secp256k1_num_mod(&s2num, &secp256k1_ge_consts->order); + random_scalar_order_test(&s2); + secp256k1_scalar_get_b32(c, &s2); + +#ifndef USE_NUM_NONE + secp256k1_num_t snum, s1num, s2num; + secp256k1_scalar_get_num(&snum, &s); + secp256k1_scalar_get_num(&s1num, &s1); + secp256k1_scalar_get_num(&s2num, &s2); + + secp256k1_num_t order; + secp256k1_scalar_order_get_num(&order); + secp256k1_num_t half_order = order; + secp256k1_num_shift(&half_order, 1); +#endif { /* Test that fetching groups of 4 bits from a scalar and recursing n(i)=16*n(i-1)+p(i) reconstructs it. */ - secp256k1_num_t n, t, m; - secp256k1_num_set_int(&n, 0); - secp256k1_num_set_int(&m, 16); + secp256k1_scalar_t n; + secp256k1_scalar_set_int(&n, 0); for (int i = 0; i < 256; i += 4) { - secp256k1_num_set_int(&t, secp256k1_scalar_get_bits(&s, 256 - 4 - i, 4)); - secp256k1_num_mul(&n, &n, &m); - secp256k1_num_add(&n, &n, &t); + secp256k1_scalar_t t; + secp256k1_scalar_set_int(&t, secp256k1_scalar_get_bits(&s, 256 - 4 - i, 4)); + for (int j = 0; j < 4; j++) { + secp256k1_scalar_add(&n, &n, &n); + } + secp256k1_scalar_add(&n, &n, &t); } - CHECK(secp256k1_num_eq(&n, &snum)); + CHECK(secp256k1_scalar_eq(&n, &s)); } { - /* Test that get_b32 returns the same as get_bin on the number. */ - unsigned char r1[32]; - secp256k1_scalar_get_b32(r1, &s2); - unsigned char r2[32]; - secp256k1_num_get_bin(r2, 32, &s2num); - CHECK(memcmp(r1, r2, 32) == 0); - /* If no overflow occurred when assigning, it should also be equal to the original byte array. */ - CHECK((memcmp(r1, c, 32) == 0) == (overflow == 0)); + /* Test that fetching groups of randomly-sized bits from a scalar and recursing n(i)=b*n(i-1)+p(i) reconstructs it. */ + secp256k1_scalar_t n; + secp256k1_scalar_set_int(&n, 0); + int i = 0; + while (i < 256) { + int now = (secp256k1_rand32() % 15) + 1; + if (now + i > 256) { + now = 256 - i; + } + secp256k1_scalar_t t; + secp256k1_scalar_set_int(&t, secp256k1_scalar_get_bits_var(&s, 256 - now - i, now)); + for (int j = 0; j < now; j++) { + secp256k1_scalar_add(&n, &n, &n); + } + secp256k1_scalar_add(&n, &n, &t); + i += now; + } + CHECK(secp256k1_scalar_eq(&n, &s)); } +#ifndef USE_NUM_NONE { /* Test that adding the scalars together is equal to adding their numbers together modulo the order. */ secp256k1_num_t rnum; secp256k1_num_add(&rnum, &snum, &s2num); - secp256k1_num_mod(&rnum, &secp256k1_ge_consts->order); + secp256k1_num_mod(&rnum, &order); secp256k1_scalar_t r; secp256k1_scalar_add(&r, &s, &s2); secp256k1_num_t r2num; @@ -316,7 +249,7 @@ void scalar_test(void) { /* Test that multipying the scalars is equal to multiplying their numbers modulo the order. */ secp256k1_num_t rnum; secp256k1_num_mul(&rnum, &snum, &s2num); - secp256k1_num_mod(&rnum, &secp256k1_ge_consts->order); + secp256k1_num_mod(&rnum, &order); secp256k1_scalar_t r; secp256k1_scalar_mul(&r, &s, &s2); secp256k1_num_t r2num; @@ -333,14 +266,14 @@ void scalar_test(void) { /* Check that comparison with zero matches comparison with zero on the number. */ CHECK(secp256k1_num_is_zero(&snum) == secp256k1_scalar_is_zero(&s)); /* Check that comparison with the half order is equal to testing for high scalar. 
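A scalar is high exactly when it is numerically greater than its negation modulo the order.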
*/ - CHECK(secp256k1_scalar_is_high(&s) == (secp256k1_num_cmp(&snum, &secp256k1_ge_consts->half_order) > 0)); + CHECK(secp256k1_scalar_is_high(&s) == (secp256k1_num_cmp(&snum, &half_order) > 0)); secp256k1_scalar_t neg; secp256k1_scalar_negate(&neg, &s); secp256k1_num_t negnum; - secp256k1_num_sub(&negnum, &secp256k1_ge_consts->order, &snum); - secp256k1_num_mod(&negnum, &secp256k1_ge_consts->order); + secp256k1_num_sub(&negnum, &order, &snum); + secp256k1_num_mod(&negnum, &order); /* Check that comparison with the half order is equal to testing for high scalar after negation. */ - CHECK(secp256k1_scalar_is_high(&neg) == (secp256k1_num_cmp(&negnum, &secp256k1_ge_consts->half_order) > 0)); + CHECK(secp256k1_scalar_is_high(&neg) == (secp256k1_num_cmp(&negnum, &half_order) > 0)); /* Negating should change the high property, unless the value was already zero. */ CHECK((secp256k1_scalar_is_high(&s) == secp256k1_scalar_is_high(&neg)) == secp256k1_scalar_is_zero(&s)); secp256k1_num_t negnum2; @@ -355,16 +288,37 @@ void scalar_test(void) { CHECK(secp256k1_scalar_is_zero(&neg)); } + { + /* Test secp256k1_scalar_mul_shift_var. */ + secp256k1_scalar_t r; + unsigned int shift = 256 + (secp256k1_rand32() % 257); + secp256k1_scalar_mul_shift_var(&r, &s1, &s2, shift); + secp256k1_num_t rnum; + secp256k1_num_mul(&rnum, &s1num, &s2num); + secp256k1_num_shift(&rnum, shift - 1); + secp256k1_num_t one; + unsigned char cone[1] = {0x01}; + secp256k1_num_set_bin(&one, cone, 1); + secp256k1_num_add(&rnum, &rnum, &one); + secp256k1_num_shift(&rnum, 1); + secp256k1_num_t rnum2; + secp256k1_scalar_get_num(&rnum2, &r); + CHECK(secp256k1_num_eq(&rnum, &rnum2)); + } +#endif + { /* Test that scalar inverses are equal to the inverse of their number modulo the order. */ if (!secp256k1_scalar_is_zero(&s)) { secp256k1_scalar_t inv; secp256k1_scalar_inverse(&inv, &s); +#ifndef USE_NUM_NONE secp256k1_num_t invnum; - secp256k1_num_mod_inverse(&invnum, &snum, &secp256k1_ge_consts->order); + secp256k1_num_mod_inverse(&invnum, &snum, &order); secp256k1_num_t invnum2; secp256k1_scalar_get_num(&invnum2, &inv); CHECK(secp256k1_num_eq(&invnum, &invnum2)); +#endif secp256k1_scalar_mul(&inv, &inv, &s); /* Multiplying a scalar with its inverse must result in one. */ CHECK(secp256k1_scalar_is_one(&inv)); @@ -382,6 +336,23 @@ void scalar_test(void) { CHECK(secp256k1_scalar_eq(&r1, &r2)); } + { + /* Test add_bit. */ + int bit = secp256k1_rand32() % 256; + secp256k1_scalar_t b; + secp256k1_scalar_set_int(&b, 1); + CHECK(secp256k1_scalar_is_one(&b)); + for (int i = 0; i < bit; i++) { + secp256k1_scalar_add(&b, &b, &b); + } + secp256k1_scalar_t r1 = s1, r2 = s1; + if (!secp256k1_scalar_add(&r1, &r1, &b)) { + /* No overflow happened. */ + secp256k1_scalar_add_bit(&r2, bit); + CHECK(secp256k1_scalar_eq(&r1, &r2)); + } + } + { /* Test commutativity of mul. */ secp256k1_scalar_t r1, r2; @@ -428,20 +399,49 @@ void scalar_test(void) { secp256k1_scalar_mul(&r2, &s1, &s1); CHECK(secp256k1_scalar_eq(&r1, &r2)); } + } void run_scalar_tests(void) { for (int i = 0; i < 128 * count; i++) { scalar_test(); } + + { + /* (-1)+1 should be zero. */ + secp256k1_scalar_t s, o; + secp256k1_scalar_set_int(&s, 1); + secp256k1_scalar_negate(&o, &s); + secp256k1_scalar_add(&o, &o, &s); + CHECK(secp256k1_scalar_is_zero(&o)); + } + +#ifndef USE_NUM_NONE + { + /* A scalar with value of the curve order should be 0. 
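secp256k1_scalar_set_b32 is expected to reduce such input by the order and report this through the overflow flag, so the order itself must come back as zero with overflow set.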
*/ + secp256k1_num_t order; + secp256k1_scalar_order_get_num(&order); + unsigned char bin[32]; + secp256k1_num_get_bin(bin, 32, &order); + secp256k1_scalar_t zero; + int overflow = 0; + secp256k1_scalar_set_b32(&zero, bin, &overflow); + CHECK(overflow == 1); + CHECK(secp256k1_scalar_is_zero(&zero)); + } +#endif } /***** FIELD TESTS *****/ void random_fe(secp256k1_fe_t *x) { unsigned char bin[32]; - secp256k1_rand256(bin); - secp256k1_fe_set_b32(x, bin); + do { + secp256k1_rand256(bin); + if (secp256k1_fe_set_b32(x, bin)) { + return; + } + } while(1); } void random_fe_non_zero(secp256k1_fe_t *nz) { @@ -617,9 +617,17 @@ void gej_equals_gej(const secp256k1_gej_t *a, const secp256k1_gej_t *b) { } void test_ge(void) { + char ca[135]; + char cb[68]; + int rlen; secp256k1_ge_t a, b, i, n; random_group_element_test(&a); random_group_element_test(&b); + rlen = sizeof(ca); + secp256k1_ge_get_hex(ca,&rlen,&a); + CHECK(rlen > 4 && rlen <= (int)sizeof(ca)); + rlen = sizeof(cb); + secp256k1_ge_get_hex(cb,&rlen,&b); /* Intentionally undersized buffer. */ n = a; secp256k1_fe_normalize(&a.y); secp256k1_fe_negate(&n.y, &a.y, 1); @@ -697,39 +705,51 @@ void run_ge(void) { void run_ecmult_chain(void) { /* random starting point A (on the curve) */ - secp256k1_fe_t ax; secp256k1_fe_set_hex(&ax, "8b30bbe9ae2a990696b22f670709dff3727fd8bc04d3362c6c7bf458e2846004", 64); - secp256k1_fe_t ay; secp256k1_fe_set_hex(&ay, "a357ae915c4a65281309edf20504740f0eb3343990216b4f81063cb65f2f7e0f", 64); + secp256k1_fe_t ax; VERIFY_CHECK(secp256k1_fe_set_hex(&ax, "8b30bbe9ae2a990696b22f670709dff3727fd8bc04d3362c6c7bf458e2846004", 64)); + secp256k1_fe_t ay; VERIFY_CHECK(secp256k1_fe_set_hex(&ay, "a357ae915c4a65281309edf20504740f0eb3343990216b4f81063cb65f2f7e0f", 64)); secp256k1_gej_t a; secp256k1_gej_set_xy(&a, &ax, &ay); /* two random initial factors xn and gn */ - secp256k1_num_t xn; - secp256k1_num_set_hex(&xn, "84cc5452f7fde1edb4d38a8ce9b1b84ccef31f146e569be9705d357a42985407", 64); - secp256k1_num_t gn; - secp256k1_num_set_hex(&gn, "a1e58d22553dcd42b23980625d4c57a96e9323d42b3152e5ca2c3990edc7c9de", 64); + static const unsigned char xni[32] = { + 0x84, 0xcc, 0x54, 0x52, 0xf7, 0xfd, 0xe1, 0xed, + 0xb4, 0xd3, 0x8a, 0x8c, 0xe9, 0xb1, 0xb8, 0x4c, + 0xce, 0xf3, 0x1f, 0x14, 0x6e, 0x56, 0x9b, 0xe9, + 0x70, 0x5d, 0x35, 0x7a, 0x42, 0x98, 0x54, 0x07 + }; + secp256k1_scalar_t xn; + secp256k1_scalar_set_b32(&xn, xni, NULL); + static const unsigned char gni[32] = { + 0xa1, 0xe5, 0x8d, 0x22, 0x55, 0x3d, 0xcd, 0x42, + 0xb2, 0x39, 0x80, 0x62, 0x5d, 0x4c, 0x57, 0xa9, + 0x6e, 0x93, 0x23, 0xd4, 0x2b, 0x31, 0x52, 0xe5, + 0xca, 0x2c, 0x39, 0x90, 0xed, 0xc7, 0xc9, 0xde + }; + secp256k1_scalar_t gn; + secp256k1_scalar_set_b32(&gn, gni, NULL); /* two small multipliers to be applied to xn and gn in every iteration: */ - secp256k1_num_t xf; - secp256k1_num_set_hex(&xf, "1337", 4); - secp256k1_num_t gf; - secp256k1_num_set_hex(&gf, "7113", 4); + static const unsigned char xfi[32] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0x13,0x37}; + secp256k1_scalar_t xf; + secp256k1_scalar_set_b32(&xf, xfi, NULL); + static const unsigned char gfi[32] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0x71,0x13}; + secp256k1_scalar_t gf; + secp256k1_scalar_set_b32(&gf, gfi, NULL); /* accumulators with the resulting coefficients to A and G */ - secp256k1_num_t ae; - secp256k1_num_set_int(&ae, 1); - secp256k1_num_t ge; - secp256k1_num_set_int(&ge, 0); + secp256k1_scalar_t ae; + secp256k1_scalar_set_int(&ae, 1); + secp256k1_scalar_t ge; + 
@@ -697,39 +705,51 @@ void run_ge(void) {
 
 void run_ecmult_chain(void) {
     /* random starting point A (on the curve) */
-    secp256k1_fe_t ax; secp256k1_fe_set_hex(&ax, "8b30bbe9ae2a990696b22f670709dff3727fd8bc04d3362c6c7bf458e2846004", 64);
-    secp256k1_fe_t ay; secp256k1_fe_set_hex(&ay, "a357ae915c4a65281309edf20504740f0eb3343990216b4f81063cb65f2f7e0f", 64);
+    secp256k1_fe_t ax; VERIFY_CHECK(secp256k1_fe_set_hex(&ax, "8b30bbe9ae2a990696b22f670709dff3727fd8bc04d3362c6c7bf458e2846004", 64));
+    secp256k1_fe_t ay; VERIFY_CHECK(secp256k1_fe_set_hex(&ay, "a357ae915c4a65281309edf20504740f0eb3343990216b4f81063cb65f2f7e0f", 64));
     secp256k1_gej_t a; secp256k1_gej_set_xy(&a, &ax, &ay);
     /* two random initial factors xn and gn */
-    secp256k1_num_t xn;
-    secp256k1_num_set_hex(&xn, "84cc5452f7fde1edb4d38a8ce9b1b84ccef31f146e569be9705d357a42985407", 64);
-    secp256k1_num_t gn;
-    secp256k1_num_set_hex(&gn, "a1e58d22553dcd42b23980625d4c57a96e9323d42b3152e5ca2c3990edc7c9de", 64);
+    static const unsigned char xni[32] = {
+        0x84, 0xcc, 0x54, 0x52, 0xf7, 0xfd, 0xe1, 0xed,
+        0xb4, 0xd3, 0x8a, 0x8c, 0xe9, 0xb1, 0xb8, 0x4c,
+        0xce, 0xf3, 0x1f, 0x14, 0x6e, 0x56, 0x9b, 0xe9,
+        0x70, 0x5d, 0x35, 0x7a, 0x42, 0x98, 0x54, 0x07
+    };
+    secp256k1_scalar_t xn;
+    secp256k1_scalar_set_b32(&xn, xni, NULL);
+    static const unsigned char gni[32] = {
+        0xa1, 0xe5, 0x8d, 0x22, 0x55, 0x3d, 0xcd, 0x42,
+        0xb2, 0x39, 0x80, 0x62, 0x5d, 0x4c, 0x57, 0xa9,
+        0x6e, 0x93, 0x23, 0xd4, 0x2b, 0x31, 0x52, 0xe5,
+        0xca, 0x2c, 0x39, 0x90, 0xed, 0xc7, 0xc9, 0xde
+    };
+    secp256k1_scalar_t gn;
+    secp256k1_scalar_set_b32(&gn, gni, NULL);
     /* two small multipliers to be applied to xn and gn in every iteration: */
-    secp256k1_num_t xf;
-    secp256k1_num_set_hex(&xf, "1337", 4);
-    secp256k1_num_t gf;
-    secp256k1_num_set_hex(&gf, "7113", 4);
+    static const unsigned char xfi[32] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0x13,0x37};
+    secp256k1_scalar_t xf;
+    secp256k1_scalar_set_b32(&xf, xfi, NULL);
+    static const unsigned char gfi[32] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0x71,0x13};
+    secp256k1_scalar_t gf;
+    secp256k1_scalar_set_b32(&gf, gfi, NULL);
     /* accumulators with the resulting coefficients to A and G */
-    secp256k1_num_t ae;
-    secp256k1_num_set_int(&ae, 1);
-    secp256k1_num_t ge;
-    secp256k1_num_set_int(&ge, 0);
+    secp256k1_scalar_t ae;
+    secp256k1_scalar_set_int(&ae, 1);
+    secp256k1_scalar_t ge;
+    secp256k1_scalar_set_int(&ge, 0);
     /* the point being computed */
     secp256k1_gej_t x = a;
-    const secp256k1_num_t *order = &secp256k1_ge_consts->order;
     for (int i=0; i<200*count; i++) {
         /* in each iteration, compute X = xn*X + gn*G; */
         secp256k1_ecmult(&x, &x, &xn, &gn);
         /* also compute ae and ge: the actual accumulated factors for A and G */
         /* if X was (ae*A+ge*G), xn*X + gn*G results in (xn*ae*A + (xn*ge+gn)*G) */
-        secp256k1_num_mod_mul(&ae, &ae, &xn, order);
-        secp256k1_num_mod_mul(&ge, &ge, &xn, order);
-        secp256k1_num_add(&ge, &ge, &gn);
-        secp256k1_num_mod(&ge, order);
+        secp256k1_scalar_mul(&ae, &ae, &xn);
+        secp256k1_scalar_mul(&ge, &ge, &xn);
+        secp256k1_scalar_add(&ge, &ge, &gn);
 
         /* modify xn and gn */
-        secp256k1_num_mod_mul(&xn, &xn, &xf, order);
-        secp256k1_num_mod_mul(&gn, &gn, &gf, order);
+        secp256k1_scalar_mul(&xn, &xn, &xf);
+        secp256k1_scalar_mul(&gn, &gn, &gf);
 
         /* verify */
         if (i == 19999) {
@@ -749,17 +769,25 @@ void run_ecmult_chain(void) {
 }
 
 void test_point_times_order(const secp256k1_gej_t *point) {
-    /* multiplying a point by the order results in O */
-    const secp256k1_num_t *order = &secp256k1_ge_consts->order;
-    secp256k1_num_t zero;
-    secp256k1_num_set_int(&zero, 0);
-    secp256k1_gej_t res;
-    secp256k1_ecmult(&res, point, order, order); /* calc res = order * point + order * G; */
-    CHECK(secp256k1_gej_is_infinity(&res));
+    /* X * (point + G) + (order-X) * (point + G) = 0 */
+    secp256k1_scalar_t x;
+    random_scalar_order_test(&x);
+    secp256k1_scalar_t nx;
+    secp256k1_scalar_negate(&nx, &x);
+    secp256k1_gej_t res1, res2;
+    secp256k1_ecmult(&res1, point, &x, &x); /* calc res1 = x * point + x * G; */
+    secp256k1_ecmult(&res2, point, &nx, &nx); /* calc res2 = (order - x) * point + (order - x) * G; */
+    secp256k1_gej_add_var(&res1, &res1, &res2);
+    CHECK(secp256k1_gej_is_infinity(&res1));
+    CHECK(secp256k1_gej_is_valid(&res1) == 0);
+    secp256k1_ge_t res3;
+    secp256k1_ge_set_gej(&res3, &res1);
+    CHECK(secp256k1_ge_is_infinity(&res3));
+    CHECK(secp256k1_ge_is_valid(&res3) == 0);
 }
 
 void run_point_times_order(void) {
-    secp256k1_fe_t x; secp256k1_fe_set_hex(&x, "02", 2);
+    secp256k1_fe_t x; VERIFY_CHECK(secp256k1_fe_set_hex(&x, "02", 2));
     for (int i=0; i<500; i++) {
         secp256k1_ge_t p;
         if (secp256k1_ge_set_xo(&p, &x, 1)) {
@@ -776,15 +804,16 @@ void run_point_times_order(void) {
     CHECK(strcmp(c, "7603CB59B0EF6C63FE6084792A0C378CDB3233A80F8A9A09A877DEAD31B38C45") == 0);
 }
 
-void test_wnaf(const secp256k1_num_t *number, int w) {
-    secp256k1_num_t x, two, t;
-    secp256k1_num_set_int(&x, 0);
-    secp256k1_num_set_int(&two, 2);
-    int wnaf[257];
+void test_wnaf(const secp256k1_scalar_t *number, int w) {
+    secp256k1_scalar_t x, two, t;
+    secp256k1_scalar_set_int(&x, 0);
+    secp256k1_scalar_set_int(&two, 2);
+    int wnaf[256];
     int bits = secp256k1_ecmult_wnaf(wnaf, number, w);
+    CHECK(bits <= 256);
     int zeroes = -1;
     for (int i=bits-1; i>=0; i--) {
-        secp256k1_num_mul(&x, &x, &two);
+        secp256k1_scalar_mul(&x, &x, &two);
         int v = wnaf[i];
         if (v) {
             CHECK(zeroes == -1 || zeroes >= w-1); /* check that distance between non-zero elements is at least w-1 */
@@ -796,18 +825,23 @@ void test_wnaf(const secp256k1_num_t *number, int w) {
             CHECK(zeroes != -1); /* check that no unnecessary zero padding exists */
             zeroes++;
         }
-        secp256k1_num_set_int(&t, v);
-        secp256k1_num_add(&x, &x, &t);
+        if (v >= 0) {
+            secp256k1_scalar_set_int(&t, v);
+        } else {
+            secp256k1_scalar_set_int(&t, -v);
+            secp256k1_scalar_negate(&t, &t);
+        }
+        secp256k1_scalar_add(&x, &x, &t);
     }
-    CHECK(secp256k1_num_eq(&x, number)); /* check that wnaf represents number */
+    CHECK(secp256k1_scalar_eq(&x, number)); /* check that wnaf represents number */
 }
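
Aside: test_wnaf above rebuilds the scalar from its wNAF digits with Horner's rule (x = 2*x + digit, most significant digit first) and checks the spacing guarantee between non-zero digits. A toy standalone version for the ordinary NAF (w = 2) of a small integer (illustrative only; this is not the library's secp256k1_ecmult_wnaf):

    #include <assert.h>

    int main(void) {
        int digits[32];
        int n = 0;
        int v = 29;  /* example input, binary 11101 */
        /* NAF: when v is odd, pick digit d in {-1,+1} so (v - d) % 4 == 0;
         * this forces a zero digit after every non-zero digit. */
        while (v != 0) {
            int d = (v & 1) ? 2 - (v & 3) : 0;
            digits[n++] = d;
            v = (v - d) >> 1;
        }
        /* Rebuild, most significant digit first, like the test loop. */
        int x = 0;
        for (int i = n - 1; i >= 0; i--) {
            x = 2 * x + digits[i];
        }
        assert(x == 29);
        return 0;
    }
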
 
 void run_wnaf(void) {
-    secp256k1_num_t n;
+    secp256k1_scalar_t n;
     for (int i=0; i<count; i++) {
[... gap: the rest of this hunk (remainder of run_wnaf, random_sign, and the start of test_ecdsa_sign_verify) is missing from the source ...]
+    CHECK(recid >= 0 && recid < 4);
+    CHECK(secp256k1_ecdsa_sig_verify(&sig, &pub, &msg));
+    secp256k1_scalar_t one;
+    secp256k1_scalar_set_int(&one, 1);
+    secp256k1_scalar_add(&msg, &msg, &one);
+    CHECK(!secp256k1_ecdsa_sig_verify(&sig, &pub, &msg));
 }
 
 void run_ecdsa_sign_verify(void) {
@@ -846,11 +884,11 @@ void test_ecdsa_end_to_end(void) {
 
     /* Generate a random key and message. */
     {
-        secp256k1_num_t msg, key;
-        random_num_order_test(&msg);
-        random_num_order_test(&key);
-        secp256k1_num_get_bin(privkey, 32, &key);
-        secp256k1_num_get_bin(message, 32, &msg);
+        secp256k1_scalar_t msg, key;
+        random_scalar_order_test(&msg);
+        random_scalar_order_test(&key);
+        secp256k1_scalar_get_b32(privkey, &key);
+        secp256k1_scalar_get_b32(message, &msg);
     }
 
     /* Construct and verify corresponding public key. */
@@ -935,7 +973,8 @@ void run_ecdsa_end_to_end(void) {
     }
 }
 
-void test_ecdsa_infinity(void) {
+/* Tests several edge cases. */
+void test_ecdsa_edge_cases(void) {
     const unsigned char msg32[32] = {
         'T', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 'v', 'e', 'r', 'y', ' ', 's',
         'e', 'c', 'r', 'e', 't', ' ', 'm', 'e', 's', 's', 'a', 'g', 'e', '.', '.', '.'
     };
     const unsigned char sig64[64] = {
-        // Generated by signing the above message with nonce 'This is the nonce we will use...'
-        // and secret key 0 (which is not valid), resulting in recid 0.
+        /* Generated by signing the above message with nonce 'This is the nonce we will use...'
+         * and secret key 0 (which is not valid), resulting in recid 0. */
         0x67, 0xCB, 0x28, 0x5F, 0x9C, 0xD1, 0x94, 0xE8,
         0x40, 0xD6, 0x29, 0x39, 0x7A, 0xF5, 0x56, 0x96,
         0x62, 0xFD, 0xE4, 0x46, 0x49, 0x99, 0x59, 0x63,
@@ -960,10 +999,93 @@
     CHECK(secp256k1_ecdsa_recover_compact(msg32, 32, sig64, pubkey, &pubkeylen, 0, 1));
     CHECK(!secp256k1_ecdsa_recover_compact(msg32, 32, sig64, pubkey, &pubkeylen, 0, 2));
     CHECK(!secp256k1_ecdsa_recover_compact(msg32, 32, sig64, pubkey, &pubkeylen, 0, 3));
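
Aside: the recovery id exercised by these recover_compact calls is conventionally two bits describing the nonce point R: bit 0 is the parity of R.y, and bit 1 says whether R.x overflowed the group order (i.e. r = R.x - order). That is why recids 2 and 3 fail for sig64 above: this r plus the order no longer fits in the field. A trivial standalone illustration of the bit layout (variable names are illustrative):

    #include <stdio.h>

    int main(void) {
        for (int recid = 0; recid < 4; recid++) {
            int y_is_odd = recid & 1;       /* parity of R.y */
            int x_overflowed = recid >> 1;  /* whether R.x = r + order */
            printf("recid %d: y odd = %d, x overflowed = %d\n",
                   recid, y_is_odd, x_overflowed);
        }
        return 0;
    }
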
+
+    /* signature (r,s) = (4,4), which can be recovered with all 4 recids. */
+    const unsigned char sigb64[64] = {
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+    };
+    unsigned char pubkeyb[33];
+    int pubkeyblen = 33;
+    for (int recid = 0; recid < 4; recid++) {
+        /* (4,4) encoded in DER. */
+        unsigned char sigbder[8] = {0x30, 0x06, 0x02, 0x01, 0x04, 0x02, 0x01, 0x04};
+        /* (order + r,4) encoded in DER. */
+        unsigned char sigbderlong[40] = {
+            0x30, 0x26, 0x02, 0x21, 0x00, 0xFF, 0xFF, 0xFF,
+            0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+            0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xBA, 0xAE, 0xDC,
+            0xE6, 0xAF, 0x48, 0xA0, 0x3B, 0xBF, 0xD2, 0x5E,
+            0x8C, 0xD0, 0x36, 0x41, 0x45, 0x02, 0x01, 0x04
+        };
+        CHECK(secp256k1_ecdsa_recover_compact(msg32, 32, sigb64, pubkeyb, &pubkeyblen, 1, recid));
+        CHECK(secp256k1_ecdsa_verify(msg32, 32, sigbder, sizeof(sigbder), pubkeyb, pubkeyblen) == 1);
+        for (int recid2 = 0; recid2 < 4; recid2++) {
+            unsigned char pubkey2b[33];
+            int pubkey2blen = 33;
+            CHECK(secp256k1_ecdsa_recover_compact(msg32, 32, sigb64, pubkey2b, &pubkey2blen, 1, recid2));
+            /* Verifying with (order + r,4) should always fail. */
+            CHECK(secp256k1_ecdsa_verify(msg32, 32, sigbderlong, sizeof(sigbderlong), pubkey2b, pubkey2blen) != 1);
+        }
+        /* Damage signature. */
+        sigbder[7]++;
+        CHECK(secp256k1_ecdsa_verify(msg32, 32, sigbder, sizeof(sigbder), pubkeyb, pubkeyblen) == 0);
+    }
+
+    /* Test the case where ECDSA recomputes a point that is infinity. */
+    {
+        secp256k1_ecdsa_sig_t sig;
+        secp256k1_scalar_set_int(&sig.s, 1);
+        secp256k1_scalar_negate(&sig.s, &sig.s);
+        secp256k1_scalar_inverse(&sig.s, &sig.s);
+        secp256k1_scalar_set_int(&sig.r, 1);
+        secp256k1_gej_t keyj;
+        secp256k1_ecmult_gen(&keyj, &sig.r);
+        secp256k1_ge_t key;
+        secp256k1_ge_set_gej(&key, &keyj);
+        secp256k1_scalar_t msg = sig.s;
+        CHECK(secp256k1_ecdsa_sig_verify(&sig, &key, &msg) == 0);
+    }
+
+    /* Test r/s equal to zero */
+    {
+        /* (1,1) encoded in DER. */
+        unsigned char sigcder[8] = {0x30, 0x06, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01};
+        unsigned char sigc64[64] = {
+            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+        };
+        unsigned char pubkeyc[65];
+        int pubkeyclen = 65;
+        CHECK(secp256k1_ecdsa_recover_compact(msg32, 32, sigc64, pubkeyc, &pubkeyclen, 0, 0) == 1);
+        CHECK(secp256k1_ecdsa_verify(msg32, 32, sigcder, sizeof(sigcder), pubkeyc, pubkeyclen) == 1);
+        sigcder[4] = 0;
+        sigc64[31] = 0;
+        CHECK(secp256k1_ecdsa_recover_compact(msg32, 32, sigc64, pubkeyb, &pubkeyblen, 1, 0) == 0);
+        CHECK(secp256k1_ecdsa_verify(msg32, 32, sigcder, sizeof(sigcder), pubkeyc, pubkeyclen) == 0);
+        sigcder[4] = 1;
+        sigcder[7] = 0;
+        sigc64[31] = 1;
+        sigc64[63] = 0;
+        CHECK(secp256k1_ecdsa_recover_compact(msg32, 32, sigc64, pubkeyb, &pubkeyblen, 1, 0) == 0);
+        CHECK(secp256k1_ecdsa_verify(msg32, 32, sigcder, sizeof(sigcder), pubkeyc, pubkeyclen) == 0);
+    }
 }
 
-void run_ecdsa_infinity(void) {
-    test_ecdsa_infinity();
+void run_ecdsa_edge_cases(void) {
+    test_ecdsa_edge_cases();
 }
 
 #ifdef ENABLE_OPENSSL_TESTS
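
Aside: the fixed byte arrays in test_ecdsa_edge_cases are hand-written DER. An ECDSA signature is SEQUENCE { INTEGER r, INTEGER s }, so (r,s) = (4,4) with one-byte bodies is exactly the eight bytes of sigbder. A standalone sketch that rebuilds it (single-byte values below 0x80 only; full DER also needs minimal-length and sign-bit handling):

    #include <assert.h>
    #include <stddef.h>

    static size_t der_sig_1byte(unsigned char *out, unsigned char r, unsigned char s) {
        out[0] = 0x30;  /* SEQUENCE tag */
        out[1] = 0x06;  /* length of the two INTEGER TLVs */
        out[2] = 0x02;  /* INTEGER tag */
        out[3] = 0x01;  /* length 1 */
        out[4] = r;
        out[5] = 0x02;
        out[6] = 0x01;
        out[7] = s;
        return 8;
    }

    int main(void) {
        const unsigned char expect[8] = {0x30, 0x06, 0x02, 0x01, 0x04, 0x02, 0x01, 0x04};
        unsigned char buf[8];
        assert(der_sig_1byte(buf, 4, 4) == 8);
        for (int i = 0; i < 8; i++) {
            assert(buf[i] == expect[i]);
        }
        return 0;
    }
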
@@ -996,11 +1118,12 @@ void test_ecdsa_openssl(void) {
     CHECK(ECDSA_sign(0, message, sizeof(message), signature, &sigsize, ec_key));
     secp256k1_ecdsa_sig_t sig;
     CHECK(secp256k1_ecdsa_sig_parse(&sig, signature, sigsize));
-    secp256k1_num_t msg_num;
-    secp256k1_scalar_get_num(&msg_num, &msg);
-    CHECK(secp256k1_ecdsa_sig_verify(&sig, &q, &msg_num));
-    secp256k1_num_inc(&sig.r);
-    CHECK(!secp256k1_ecdsa_sig_verify(&sig, &q, &msg_num));
+    CHECK(secp256k1_ecdsa_sig_verify(&sig, &q, &msg));
+    secp256k1_scalar_t one;
+    secp256k1_scalar_set_int(&one, 1);
+    secp256k1_scalar_t msg2;
+    secp256k1_scalar_add(&msg2, &msg, &one);
+    CHECK(!secp256k1_ecdsa_sig_verify(&sig, &q, &msg2));
 
     random_sign(&sig, &key, &msg, NULL);
     int secp_sigsize = 80;
@@ -1042,8 +1165,19 @@ int main(int argc, char **argv) {
 
     /* initialize */
     secp256k1_start(SECP256K1_START_SIGN | SECP256K1_START_VERIFY);
+    /* initializing a second time shouldn't cause any harm or memory leaks. */
+    secp256k1_start(SECP256K1_START_SIGN | SECP256K1_START_VERIFY);
+
+    /* Likewise, re-running the internal init functions should be harmless. */
+    secp256k1_fe_start();
+    secp256k1_ge_start();
+    secp256k1_scalar_start();
+    secp256k1_ecdsa_start();
 
+#ifndef USE_NUM_NONE
     /* num tests */
     run_num_smalltests();
+#endif
 
     /* scalar tests */
     run_scalar_tests();
@@ -1067,7 +1201,7 @@ int main(int argc, char **argv) {
     /* ecdsa tests */
     run_ecdsa_sign_verify();
     run_ecdsa_end_to_end();
-    run_ecdsa_infinity();
+    run_ecdsa_edge_cases();
 #ifdef ENABLE_OPENSSL_TESTS
     run_ecdsa_openssl();
 #endif
@@ -1076,5 +1210,14 @@ int main(int argc, char **argv) {
 
     /* shutdown */
     secp256k1_stop();
+
+    /* shutting down twice shouldn't cause any double frees. */
+    secp256k1_stop();
+
+    /* Same for the internal shutdown functions. */
+    secp256k1_fe_stop();
+    secp256k1_ge_stop();
+    secp256k1_scalar_stop();
+    secp256k1_ecdsa_stop();
     return 0;
 }
diff --git a/src/secp256k1/src/util.h b/src/secp256k1/src/util.h
index 96b47057c..08b23a9d3 100644
--- a/src/secp256k1/src/util.h
+++ b/src/secp256k1/src/util.h
@@ -61,4 +61,21 @@
 #define VERIFY_CHECK(cond) do { (void)(cond); } while(0)
 #endif
 
+/* Macro for restrict, when available and not in a VERIFY build. */
+#if defined(SECP256K1_BUILD) && defined(VERIFY)
+# define SECP256K1_RESTRICT
+#else
+# if (!defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) )
+#  if SECP256K1_GNUC_PREREQ(3,0)
+#   define SECP256K1_RESTRICT __restrict__
+#  elif (defined(_MSC_VER) && _MSC_VER >= 1400)
+#   define SECP256K1_RESTRICT __restrict
+#  else
+#   define SECP256K1_RESTRICT
+#  endif
+# else
+#  define SECP256K1_RESTRICT restrict
+# endif
+#endif
+
 #endif
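
Aside: the new SECP256K1_RESTRICT macro maps C99's restrict qualifier onto compiler-specific spellings, and expands to nothing where the keyword is unavailable or in VERIFY builds of the library. A standalone illustration of the kind of use it enables (not from the patch; add_arrays is a hypothetical name, with a local fallback so the sketch compiles on its own):

    #include <stddef.h>

    #ifndef SECP256K1_RESTRICT
    #define SECP256K1_RESTRICT  /* fallback outside the library */
    #endif

    /* Promising the compiler that dst, a and b never alias lets it keep
     * values in registers and vectorize the loop more aggressively. */
    static void add_arrays(size_t n, int *SECP256K1_RESTRICT dst,
                           const int *SECP256K1_RESTRICT a,
                           const int *SECP256K1_RESTRICT b) {
        for (size_t i = 0; i < n; i++) {
            dst[i] = a[i] + b[i];
        }
    }

    int main(void) {
        int a[4] = {1, 2, 3, 4}, b[4] = {5, 6, 7, 8}, out[4];
        add_arrays(4, out, a, b);
        return out[3] == 12 ? 0 : 1;
    }
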