ecmult_const_impl.h
/***********************************************************************
 * Copyright (c) 2015 Pieter Wuille, Andrew Poelstra                   *
 * Distributed under the MIT software license, see the accompanying    *
 * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
 ***********************************************************************/

#ifndef SECP256K1_ECMULT_CONST_IMPL_H
#define SECP256K1_ECMULT_CONST_IMPL_H

#include "scalar.h"
#include "group.h"
#include "ecmult_const.h"
#include "ecmult_impl.h"

static void secp256k1_ecmult_odd_multiples_table_globalz_windowa(secp256k1_ge *pre, secp256k1_fe *globalz, const secp256k1_gej *a) {
    secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)];

    secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), pre, zr, globalz, a);
    secp256k1_ge_table_set_globalz(ECMULT_TABLE_SIZE(WINDOW_A), pre, zr);
}

/* This is like `ECMULT_TABLE_GET_GE` but is constant time */
#define ECMULT_CONST_TABLE_GET_GE(r,pre,n,w) do { \
    int m = 0; \
    /* Extract the sign-bit for a constant time absolute-value. */ \
    int mask = (n) >> (sizeof(n) * CHAR_BIT - 1); \
    int abs_n = ((n) + mask) ^ mask; \
    int idx_n = abs_n >> 1; \
    secp256k1_fe neg_y; \
    VERIFY_CHECK(((n) & 1) == 1); \
    VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
    VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \
    VERIFY_SETUP(secp256k1_fe_clear(&(r)->x)); \
    VERIFY_SETUP(secp256k1_fe_clear(&(r)->y)); \
    /* Unconditionally set r->x = (pre)[m].x and r->y = (pre)[m].y (m is 0 here): either this is \
     * already the correct entry or it will be replaced in a later iteration; either way it makes \
     * sure `r` is initialized. */ \
    (r)->x = (pre)[m].x; \
    (r)->y = (pre)[m].y; \
    for (m = 1; m < ECMULT_TABLE_SIZE(w); m++) { \
        /* This loop is used to avoid secret data in array indices. See
         * the comment in ecmult_gen_impl.h for rationale. */ \
        secp256k1_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \
        secp256k1_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \
    } \
    (r)->infinity = 0; \
    secp256k1_fe_negate(&neg_y, &(r)->y, 1); \
    secp256k1_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \
} while(0)

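/* A hypothetical illustration (an editorial addition, not from the upstream source): the
 * branch-free absolute value used by ECMULT_CONST_TABLE_GET_GE, shown on a plain int. For a
 * negative n the arithmetic right shift yields an all-ones mask (-1), so (n + mask) ^ mask
 * flips the two's-complement bits back to |n|; for a non-negative n the mask is 0 and the
 * value is unchanged. E.g. for n = -5: mask = -1, (-5 + -1) ^ -1 = (-6) ^ -1 = 5. Combined
 * with the cmov loop over every table index above, neither the selected value nor the memory
 * access pattern depends on the secret digit n. CHAR_BIT comes from <limits.h>, which is
 * already available wherever the macro above compiles. */
static int ecmult_const_abs_sketch(int n) {
    int mask = n >> (sizeof(n) * CHAR_BIT - 1); /* 0 for n >= 0, -1 for n < 0 (arithmetic shift) */
    return (n + mask) ^ mask;                   /* |n| without a data-dependent branch */
}
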
static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w, int size) {
    int global_sign;
    int skew;
    int word = 0;

    /* Steps 1-3 of the SPA-resistant odd width-w NAF algorithm (Okeya/Takagi; see the comment below). */
    int u_last;
    int u;

    int flip;
    secp256k1_scalar s = *scalar;

    VERIFY_CHECK(w > 0);
    VERIFY_CHECK(size > 0);

    /* Note that we cannot handle even numbers by negating them to be odd, as is
     * done in other implementations, since if our scalars were specified to have
     * width < 256 for performance reasons, their negations would have width 256
     * and we'd lose any performance benefit. Instead, we use a variation of a
     * technique from Section 4.2 of the Okeya/Takagi paper, which is to add 1 to the
     * number we are encoding when it is even, returning a skew value indicating
     * this, and having the caller compensate after doing the multiplication.
     *
     * In fact, we _do_ want to negate numbers to minimize their bit-lengths (and in
     * particular, to ensure that the outputs from the endomorphism split fit into
     * 128 bits). If we negate, the parity of our number flips, affecting whether
     * we want to add to the scalar to ensure that it's odd. */
    flip = secp256k1_scalar_is_high(&s);
    skew = flip ^ secp256k1_scalar_is_even(&s);
    secp256k1_scalar_cadd_bit(&s, 0, skew);
    global_sign = secp256k1_scalar_cond_negate(&s, flip);
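    /* A hypothetical worked example (editorial note, not from the upstream source):
     * for a small, "low" scalar s = 6: flip = 0, is_even = 1, so skew = 1 and s becomes 7;
     * global_sign stays +1. The digits below then encode 7, and the caller undoes the skew
     * by conditionally subtracting one copy of the point after the multiplication (see the
     * "Correct for wNAF skew" block in secp256k1_ecmult_const below). */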

    /* 4 */
    u_last = secp256k1_scalar_shr_int(&s, w);
    do {
        int even;

        /* 4.1 4.4 */
        u = secp256k1_scalar_shr_int(&s, w);
        /* 4.2 */
        even = ((u & 1) == 0);
        /* In contrast to the original algorithm, u_last is always > 0 and
         * therefore we do not need to check its sign. In particular, it's easy
         * to see that u_last is never < 0 because u is never < 0. Moreover,
         * u_last is never = 0 because u is never even after a loop
         * iteration. The same holds analogously for the initial value of
         * u_last (in the first loop iteration). */
        VERIFY_CHECK(u_last > 0);
        VERIFY_CHECK((u_last & 1) == 1);
        u += even;
        u_last -= even * (1 << w);

        /* 4.3, adapted for global sign change */
        wnaf[word++] = u_last * global_sign;

        u_last = u;
    } while (word * w < size);
    wnaf[word] = u * global_sign;

    VERIFY_CHECK(secp256k1_scalar_is_zero(&s));
    VERIFY_CHECK(word == WNAF_SIZE_BITS(size, w));
    return skew;
}
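
/* Hypothetical model (an editorial sketch, not from the upstream source): the same
 * "add 1 if even, then emit fixed-length odd digits" recoding on a plain unsigned integer,
 * with the conditional negation of "high" scalars omitted. Every digit is odd (hence nonzero)
 * and lies strictly between -(1 << w) and (1 << w), and sum(digits[i] * 2^(w*i)) == v + skew,
 * which a caller undoes by conditionally subtracting the point once. Assumes a small w (e.g.
 * w < 16) and size no larger than the value width of unsigned int; `digits` needs room for
 * (size + w - 1)/w + 1 entries. */
static int wnaf_const_model(int *digits, unsigned int v, int w, int size) {
    int skew = 1 - (int)(v & 1);             /* add 1 if v is even, so the encoded value is odd */
    unsigned int s = v + (unsigned int)skew;
    int word = 0;
    int u_last = (int)(s & ((1u << w) - 1)); /* lowest w-bit digit (odd by construction) */
    int u;
    s >>= w;
    do {
        int even;
        u = (int)(s & ((1u << w) - 1));      /* next w-bit digit */
        s >>= w;
        even = ((u & 1) == 0);
        u += even;                           /* make the next digit odd ...           */
        u_last -= even * (1 << w);           /* ... by borrowing 2^w from this one    */
        digits[word++] = u_last;
        u_last = u;
    } while (word * w < size);
    digits[word] = u;
    return skew;
}
/* E.g. wnaf_const_model(d, 6, 4, 8) returns skew 1 with digits {-9, -15, 1}:
 * -9 + (-15)*16 + 1*256 = 7 = 6 + 1. */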

static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *scalar, int size) {
    secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
    secp256k1_ge tmpa;
    secp256k1_fe Z;

    int skew_1;
    secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
    int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)];
    int skew_lam;
    secp256k1_scalar q_1, q_lam;
    int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];

    int i;

    /* build wnaf representation for q. */
    int rsize = size;
    if (size > 128) {
        rsize = 128;
        /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
        secp256k1_scalar_split_lambda(&q_1, &q_lam, scalar);
        skew_1 = secp256k1_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128);
        skew_lam = secp256k1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128);
    } else {
        skew_1 = secp256k1_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size);
        skew_lam = 0;
    }

    /* Calculate odd multiples of a.
     * All multiples are brought to the same Z 'denominator', which is stored
     * in Z. Due to secp256k1's isomorphism we can do all operations pretending
     * that the Z coordinate was 1, use affine addition formulae, and correct
     * the Z coordinate of the result once at the end.
     */
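    /* Editorial note on the isomorphism (not from the upstream source): a Jacobian point
     * (X, Y, Z) represents the affine point (X/Z^2, Y/Z^3). The table entries below share one
     * global denominator Z, i.e. pre_a[i] stores (x_i*Z^2, y_i*Z^3) for the true affine multiple
     * (x_i, y_i), so they can be treated as affine during the additions; multiplying r->z by Z
     * once in the last statement of this function maps the result back to the correct point. */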
    VERIFY_CHECK(!a->infinity);
    secp256k1_gej_set_ge(r, a);
    secp256k1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r);
    for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
        secp256k1_fe_normalize_weak(&pre_a[i].y);
    }
    if (size > 128) {
        for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
            secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
        }
    }

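    /* Editorial note (not from the upstream source): for secp256k1, lambda*P can be computed
     * cheaply because the curve has an endomorphism phi(x, y) = (beta*x, y) with phi(P) = lambda*P,
     * where beta is a cube root of unity mod p; that is what secp256k1_ge_mul_lambda uses. With
     * q = q_1 + q_lam*lambda, q*A = q_1*A + q_lam*(lambda*A), so pre_a_lam holds the odd multiples
     * of lambda*A that the second wNAF consumes below. */
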
    /* first loop iteration (separated out so we can directly set r, rather
     * than having it start at infinity, get doubled several times, then have
     * its new value added to it) */
    i = wnaf_1[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
    VERIFY_CHECK(i != 0);
    ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
    secp256k1_gej_set_ge(r, &tmpa);
    if (size > 128) {
        i = wnaf_lam[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
        VERIFY_CHECK(i != 0);
        ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A);
        secp256k1_gej_add_ge(r, r, &tmpa);
    }
    /* remaining loop iterations */
    for (i = WNAF_SIZE_BITS(rsize, WINDOW_A - 1) - 1; i >= 0; i--) {
        int n;
        int j;
        for (j = 0; j < WINDOW_A - 1; ++j) {
            secp256k1_gej_double(r, r);
        }

        n = wnaf_1[i];
        ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
        VERIFY_CHECK(n != 0);
        secp256k1_gej_add_ge(r, r, &tmpa);
        if (size > 128) {
            n = wnaf_lam[i];
            ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A);
            VERIFY_CHECK(n != 0);
            secp256k1_gej_add_ge(r, r, &tmpa);
        }
    }
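    /* Editorial note (not from the upstream source): after this loop,
     * r = (sum_i wnaf_1[i] * 2^((WINDOW_A-1)*i)) * A [+ the analogous wnaf_lam sum times lambda*A],
     * i.e. r = (q + skew_1 [+ skew_lam*lambda]) * A, so only the skew terms remain to be removed. */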

    {
        /* Correct for wNAF skew */
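        /* Editorial note (not from the upstream source): the wNAF digits encode q_1 + skew_1
         * (and q_lam + skew_lam), where each skew is 0 or 1. pre_a[0] is 1*A and pre_a_lam[0]
         * is 1*(lambda*A), so conditionally adding their negations removes exactly the extra
         * copy that was added to make each scalar odd; the cmov keeps this branch-free. */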
        secp256k1_gej tmpj;

        secp256k1_ge_neg(&tmpa, &pre_a[0]);
        secp256k1_gej_add_ge(&tmpj, r, &tmpa);
        secp256k1_gej_cmov(r, &tmpj, skew_1);

        if (size > 128) {
            secp256k1_ge_neg(&tmpa, &pre_a_lam[0]);
            secp256k1_gej_add_ge(&tmpj, r, &tmpa);
            secp256k1_gej_cmov(r, &tmpj, skew_lam);
        }
    }

    secp256k1_fe_mul(&r->z, &r->z, &Z);
}
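
/* Hypothetical usage sketch (an editorial addition, not part of the upstream file): computing
 * r = 42*P in constant time with the function above. secp256k1_scalar_set_int comes from
 * scalar.h, which this header already includes; passing size = 256 means the full scalar
 * width is processed (and the lambda split is used). */
static void secp256k1_ecmult_const_usage_sketch(secp256k1_gej *r, const secp256k1_ge *p) {
    secp256k1_scalar q;
    secp256k1_scalar_set_int(&q, 42);      /* q = 42; a runtime-secret scalar works the same way */
    secp256k1_ecmult_const(r, p, &q, 256); /* r = q*P, with a scalar-independent operation trace */
}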

#endif /* SECP256K1_ECMULT_CONST_IMPL_H */