Commit 2896833

Implement endomorphism optimization for secp256k1_ecdh_point_multiply
Parent: 7f07b87

2 files changed, +147 -13 lines

src/ecdh_impl.h

Lines changed: 114 additions & 11 deletions
@@ -12,7 +12,11 @@
 #include "ecdh.h"
 #include "ecmult_impl.h"
 
-#define WNAF_BITS 256
+#ifdef USE_ENDOMORPHISM
+#define WNAF_BITS 128
+#else
+#define WNAF_BITS 256
+#endif
 #define WNAF_SIZE(w) ((WNAF_BITS + (w) - 1) / (w))
 
 /** Convert a number to WNAF notation. The number becomes represented by sum(2^{wi} * wnaf[i], i=0..return_val)
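
A quick sense of what the narrower width buys (assuming WINDOW_A is 5, as in ecmult_impl.h, so the ladder below runs at w = WINDOW_A - 1 = 4): without the endomorphism the main loop walks about WNAF_SIZE(4) = ceil(256/4) = 64 wNAF words and performs roughly 256 doublings, whereas with it each of the two half-width scalars needs only about ceil(128/4) = 32 words, roughly halving the doublings at the price of a second table addition per word.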
@@ -27,17 +31,47 @@
  *
  * Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on pp. 335
  */
-static void secp256k1_ecdh_wnaf(int *wnaf, const secp256k1_scalar_t *a, int w) {
-    secp256k1_scalar_t s = *a;
-    /* Negate to force oddness */
-    int is_even = secp256k1_scalar_is_even(&s);
-    int global_sign = secp256k1_scalar_wnaf_cond_negate(&s, is_even);
-
+static int secp256k1_ecdh_wnaf(int *wnaf, secp256k1_scalar_t s, int w) {
+    int global_sign = 1;
+    int skew = 0;
     int word = 0;
     /* 1 2 3 */
-    int u_last = secp256k1_scalar_shr_int(&s, w);
+    int u_last;
     int u;
+
+#ifdef USE_ENDOMORPHISM
+    /* If we are using the endomorphism, we cannot handle even numbers by negating
+     * them, since we are working with 128-bit numbers whose negations would be 256
+     * bits, eliminating the performance advantage. Instead we use a technique from
+     * Section 4.2 of the Okeya/Takagi paper, which is to add either 1 (for even)
+     * or 2 (for odd) to the number we are encoding, then compensating after the
+     * multiplication. */
+    /* Negative 128-bit numbers will be negated, since otherwise they are 256-bit */
+    int flip = secp256k1_scalar_is_high(&s);
+    /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */
+    int bit = flip ^ (s.d[0] & 1);
+    /* We check for negative one, since adding 2 to it will cause an overflow */
+    secp256k1_scalar_t neg_s;
+    int not_neg_one;
+    secp256k1_scalar_negate(&neg_s, &s);
+    not_neg_one = !secp256k1_scalar_is_one(&neg_s);
+    secp256k1_scalar_cadd_bit(&s, bit, not_neg_one);
+    /* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects
+     * that we added two to it and flipped it. In fact for -1 these operations are
+     * identical. We only flipped, but since skewing is required (in the sense that
+     * the skew must be 1 or 2, never zero) and flipping is not, we need to change
+     * our flags to claim that we only skewed. */
+    global_sign = secp256k1_scalar_wnaf_cond_negate(&s, flip);
+    global_sign *= not_neg_one * 2 - 1;
+    skew = 1 << bit;
+#else
+    /* Otherwise, we just negate to force oddness */
+    int is_even = secp256k1_scalar_is_even(&s);
+    global_sign = secp256k1_scalar_wnaf_cond_negate(&s, is_even);
+#endif
+
     /* 4 */
+    u_last = secp256k1_scalar_shr_int(&s, w);
     while (word * w < WNAF_BITS) {
         int sign;
         int even;
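
The skew rule above can be pictured on plain integers: pick a skew of 1 for an even input and 2 for an odd one, encode s + skew (which is always odd), and have the caller subtract skew * P from the multiplication result afterwards. A minimal sketch, with toy_skewed_encode a hypothetical helper rather than anything in the library:

/* Toy model of the Okeya/Takagi skew from Section 4.2, on ordinary ints.
 * Illustrative only; the real code works on secp256k1_scalar_t and also
 * handles negation of high scalars and the -1 corner case. */
static int toy_skewed_encode(int s, int *skew) {
    *skew = (s & 1) ? 2 : 1; /* even -> add 1, odd -> add 2 */
    return s + *skew;        /* always odd, so an odd-only wNAF ladder can encode it */
}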
@@ -59,6 +93,7 @@ static void secp256k1_ecdh_wnaf(int *wnaf, const secp256k1_scalar_t *a, int w) {
 
     VERIFY_CHECK(secp256k1_scalar_is_zero(&s));
     VERIFY_CHECK(word == WNAF_SIZE(w));
+    return skew;
 }
 
 
@@ -67,17 +102,37 @@ static void secp256k1_ecdh_point_multiply(secp256k1_gej_t *r, const secp256k1_ge
     secp256k1_ge_t tmpa;
     secp256k1_fe_t Z;
 
+#ifdef USE_ENDOMORPHISM
+    secp256k1_ge_t pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
+    int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];
+    int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)];
+    int skew_1;
+    int skew_lam;
+    secp256k1_scalar_t q_1, q_lam;
+#else
     int wnaf[1 + WNAF_SIZE(WINDOW_A - 1)];
+#endif
 
     int i;
-    int is_zero = secp256k1_scalar_is_zero(scalar);
     secp256k1_scalar_t sc = *scalar;
+
+    /* build wnaf representation for q. */
+#ifdef USE_ENDOMORPHISM
+    /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
+    secp256k1_scalar_split_lambda(&q_1, &q_lam, &sc);
+    /* no need for zero correction when using endomorphism since even
+     * numbers have one added to them anyway */
+    skew_1 = secp256k1_ecdh_wnaf(wnaf_1, q_1, WINDOW_A - 1);
+    skew_lam = secp256k1_ecdh_wnaf(wnaf_lam, q_lam, WINDOW_A - 1);
+#else
+    int is_zero = secp256k1_scalar_is_zero(scalar);
     /* the wNAF ladder cannot handle zero, so bump this to one .. we will
      * correct the result after the fact */
     sc.d[0] += is_zero;
+    VERIFY_CHECK(!secp256k1_scalar_is_zero(&sc));
 
-    /* build wnaf representation for q. */
-    secp256k1_ecdh_wnaf(wnaf, &sc, WINDOW_A - 1);
+    secp256k1_ecdh_wnaf(wnaf, sc, WINDOW_A - 1);
+#endif
 
     /* Calculate odd multiples of a.
@@ -87,6 +142,11 @@ static void secp256k1_ecdh_point_multiply(secp256k1_gej_t *r, const secp256k1_ge
      */
     secp256k1_gej_set_ge(r, a);
     secp256k1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r);
+#ifdef USE_ENDOMORPHISM
+    for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
+        secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
+    }
+#endif
     secp256k1_gej_set_infinity(r);
 
     for (i = WNAF_SIZE(WINDOW_A - 1); i >= 0; i--) {
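
The split written to q_1 and q_lam satisfies q = q_1 + q_lam * lambda (mod the group order), with both halves roughly 128 bits, and pre_a_lam above holds lambda times the precomputed odd multiples so each half can walk the same table shape. A hedged sketch of how that identity could be sanity-checked with the scalar primitives this file already relies on; the lambda parameter stands for the curve's lambda scalar, which the caller would have to supply (how it is exposed is an assumption, not something this commit adds):

/* Illustrative only: recombine the split halves and compare with the input. */
static void check_lambda_split(const secp256k1_scalar_t *q, const secp256k1_scalar_t *lambda) {
    secp256k1_scalar_t q_1, q_lam, recombined;
    secp256k1_scalar_split_lambda(&q_1, &q_lam, q);
    secp256k1_scalar_mul(&recombined, &q_lam, lambda);    /* q_lam * lambda */
    secp256k1_scalar_add(&recombined, &recombined, &q_1); /* + q_1 (mod order) */
    VERIFY_CHECK(secp256k1_scalar_eq(&recombined, q));
}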
@@ -95,18 +155,61 @@ static void secp256k1_ecdh_point_multiply(secp256k1_gej_t *r, const secp256k1_ge
         for (j = 0; j < WINDOW_A - 1; ++j) {
             secp256k1_gej_double_var(r, r, NULL);
         }
+#ifdef USE_ENDOMORPHISM
+        n = wnaf_1[i];
+        ECMULT_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
+        VERIFY_CHECK(n != 0);
+        secp256k1_gej_add_ge(r, r, &tmpa);
+
+        n = wnaf_lam[i];
+        ECMULT_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A);
+        VERIFY_CHECK(n != 0);
+        secp256k1_gej_add_ge(r, r, &tmpa);
+#else
         n = wnaf[i];
         VERIFY_CHECK(n != 0);
         ECMULT_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
         secp256k1_gej_add_ge(r, r, &tmpa);
+#endif
     }
 
     if (!r->infinity) {
         secp256k1_fe_mul(&r->z, &r->z, &Z);
     }
 
+#ifdef USE_ENDOMORPHISM
+    {
+        /* Correct for wNAF skew */
+        secp256k1_ge_t correction = *a;
+        secp256k1_ge_storage_t correction_1_stor;
+        secp256k1_ge_storage_t correction_lam_stor;
+        secp256k1_ge_storage_t a2_stor;
+        secp256k1_gej_t tmpj;
+        secp256k1_gej_set_ge(&tmpj, &correction);
+        secp256k1_gej_double_var(&tmpj, &tmpj, NULL);
+        secp256k1_ge_set_gej(&correction, &tmpj);
+        secp256k1_ge_to_storage(&correction_1_stor, a);
+        secp256k1_ge_to_storage(&correction_lam_stor, a);
+        secp256k1_ge_to_storage(&a2_stor, &correction);
+
+        /* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */
+        secp256k1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2);
+        secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2);
+
+        /* Apply the correction */
+        secp256k1_ge_from_storage(&correction, &correction_1_stor);
+        secp256k1_ge_neg(&correction, &correction);
+        secp256k1_gej_add_ge(r, r, &correction);
+
+        secp256k1_ge_from_storage(&correction, &correction_lam_stor);
+        secp256k1_ge_neg(&correction, &correction);
+        secp256k1_ge_mul_lambda(&correction, &correction);
+        secp256k1_gej_add_ge(r, r, &correction);
+    }
+#else
     /* correct for zero */
     r->infinity |= is_zero;
+#endif
 }
 
 #endif
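
Stated as an equation, the skews make the ladder compute (q_1 + skew_1)*A + lambda*(q_lam + skew_lam)*A, with each skew equal to 1 or 2. The correction block therefore subtracts skew_1*A and lambda*skew_lam*A, choosing between A and 2A in constant time via secp256k1_ge_storage_cmov, which leaves exactly (q_1 + lambda*q_lam)*A = q*A.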

src/tests.c

Lines changed: 33 additions & 2 deletions
@@ -1311,10 +1311,26 @@ void ecdh_mult_zero_one(void) {
     ge_equals_ge(&res2, &point);
 }
 
+void ecdh_mult_generator(void) {
+    secp256k1_gej_t ecdh_out, ecmult_out;
+    secp256k1_ge_t out;
+    secp256k1_scalar_t x;
+
+    int i;
+    for (i = 0; i < count; ++i) {
+        random_scalar_order(&x);
+        secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &ecmult_out, &x);
+        secp256k1_ecdh_point_multiply(&ecdh_out, &secp256k1_ge_const_g, &x);
+        secp256k1_ge_set_gej(&out, &ecdh_out);
+        ge_equals_gej(&out, &ecmult_out);
+    }
+}
+
 void run_ecdh_tests(void) {
     ecdh_mult_zero_one();
     ecdh_random_mult();
     ecdh_commutativity();
+    ecdh_mult_generator();
 }
 
 void run_ecdh_api_tests(void) {
@@ -1538,10 +1554,21 @@ void test_constant_wnaf(const secp256k1_scalar_t *number, int w) {
     secp256k1_scalar_t x, shift;
     int wnaf[256] = {0};
     int i;
+#ifdef USE_ENDOMORPHISM
+    int skew;
+#endif
+    secp256k1_scalar_t num = *number;
 
     secp256k1_scalar_set_int(&x, 0);
     secp256k1_scalar_set_int(&shift, 1 << w);
-    secp256k1_ecdh_wnaf(wnaf, number, w);
+    /* With USE_ENDOMORPHISM on we only consider 128-bit numbers */
+#ifdef USE_ENDOMORPHISM
+    for (i = 0; i < 16; ++i)
+        secp256k1_scalar_shr_int(&num, 8);
+    skew = secp256k1_ecdh_wnaf(wnaf, num, w);
+#else
+    secp256k1_ecdh_wnaf(wnaf, num, w);
+#endif
 
     for (i = WNAF_SIZE(w); i >= 0; --i) {
         secp256k1_scalar_t t;
@@ -1560,7 +1587,11 @@ void test_constant_wnaf(const secp256k1_scalar_t *number, int w) {
         }
         secp256k1_scalar_add(&x, &x, &t);
     }
-    CHECK(secp256k1_scalar_eq(&x, number));
+#ifdef USE_ENDOMORPHISM
+    /* Skew num because when encoding 128-bit numbers as odd we use an offset */
+    secp256k1_scalar_cadd_bit(&num, skew == 2, 1);
+#endif
+    CHECK(secp256k1_scalar_eq(&x, &num));
 }
 
 void run_wnaf(void) {
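
The final check works because secp256k1_ecdh_wnaf encoded num + skew: secp256k1_scalar_cadd_bit(&num, skew == 2, 1) adds 2^(skew == 2), i.e. 1 when the skew was 1 and 2 when it was 2, so the scalar x rebuilt from the wNAF digits should equal the adjusted num.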
