#ifndef SECP256K1_ECMULT_IMPL_H
#define SECP256K1_ECMULT_IMPL_H

#if defined(EXHAUSTIVE_TEST_ORDER)
# if EXHAUSTIVE_TEST_ORDER > 128
# elif EXHAUSTIVE_TEST_ORDER > 8

#define WNAF_SIZE_BITS(bits, w) (((bits) + (w) - 1) / (w))
#define WNAF_SIZE(w) WNAF_SIZE_BITS(WNAF_BITS, w)

#define PIPPENGER_SCRATCH_OBJECTS 6
#define STRAUSS_SCRATCH_OBJECTS 5

#define PIPPENGER_MAX_BUCKET_WINDOW 12

#define ECMULT_PIPPENGER_THRESHOLD 88

#define ECMULT_MAX_POINTS_PER_BATCH 5000000
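/* WNAF_SIZE_BITS(bits, w) is a ceiling division: the number of w-bit wNAF
 * windows needed to cover a bits-bit scalar. For example,
 * WNAF_SIZE_BITS(256, 5) == (256 + 5 - 1) / 5 == 52.
 * The two thresholds above appear to be the tuning knobs for ecmult_multi:
 * ECMULT_PIPPENGER_THRESHOLD is the per-batch point count at which the
 * Pippenger path is preferred over Strauss, and ECMULT_MAX_POINTS_PER_BATCH
 * caps how many points a single batch may contain. */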
    secp256k1_gej_double_var(&d, a, NULL);

    secp256k1_ge_set_xy(&d_ge, &d.x, &d.y);
    secp256k1_ge_set_gej_zinv(&pre_a[0], a, &d.z);
    secp256k1_gej_set_ge(&ai, &pre_a[0]);

    for (i = 1; i < n; i++) {
        secp256k1_gej_add_ge_var(&ai, &ai, &d_ge, &zr[i]);
        secp256k1_ge_set_xy(&pre_a[i], &ai.x, &ai.y);
    }

    secp256k1_fe_mul(z, &ai.z, &d.z);
#define SECP256K1_ECMULT_TABLE_VERIFY(n,w) \
    VERIFY_CHECK(((n) & 1) == 1); \
    VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
    VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1));
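/* In other words, the macro checks that a wNAF digit n for window size w is an
 * odd integer in the range [-(2^(w-1) - 1), 2^(w-1) - 1]; with w == 5, for
 * instance, that is an odd value between -15 and 15 inclusive, matching the
 * set of odd multiples stored in the precomputed tables below. */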
    secp256k1_fe_negate(&(r->y), &(r->y), 1);
        secp256k1_ge_set_xy(r, &x[(n-1)/2], &pre[(n-1)/2].y);
    } else {
        secp256k1_ge_set_xy(r, &x[(-n-1)/2], &pre[(-n-1)/2].y);
        secp256k1_fe_negate(&(r->y), &(r->y), 1);

        secp256k1_ge_from_storage(r, &pre[(n-1)/2]);
    } else {
        secp256k1_ge_from_storage(r, &pre[(-n-1)/2]);
        secp256k1_fe_negate(&(r->y), &(r->y), 1);
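/* The tables store only the odd positive multiples 1*A, 3*A, 5*A, ..., so
 * entry (n-1)/2 holds n*A for odd n > 0. A negative digit n is looked up as
 * entry (-n-1)/2 and then negated, which on this curve only requires negating
 * the y coordinate. */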
    int last_set_bit = -1;

    if (secp256k1_scalar_get_bits(&s, 255, 1)) {
        secp256k1_scalar_negate(&s, &s);

        if (secp256k1_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) {

        word = secp256k1_scalar_get_bits_var(&s, bit, now) + carry;

        carry = (word >> (w-1)) & 1;

        CHECK(secp256k1_scalar_get_bits(&s, bit++, 1) == 0);

    return last_set_bit + 1;
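/* secp256k1_ecmult_wnaf returns last_set_bit + 1, i.e. the number of wNAF
 * digits actually produced. The digits satisfy
 *     s == sum(wnaf[i] * 2^i, i = 0..ret-1)  (mod group order),
 * where every non-zero wnaf[i] is odd with |wnaf[i]| < 2^(w-1), and any two
 * non-zero digits are at least w positions apart. The negation applied when
 * bit 255 is set keeps the working scalar in the lower half of its range and
 * is compensated for in the emitted digits, so the identity above still holds
 * for the original scalar. */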
    int wnaf_ng_128[129];

    secp256k1_fe_set_int(&Z, 1);
    for (np = 0; np < num; ++np) {
        if (secp256k1_scalar_is_zero(&na[np]) || secp256k1_gej_is_infinity(&a[np])) {

        secp256k1_scalar_split_lambda(&na_1, &na_lam, &na[np]);

        state->ps[no].bits_na_1   = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_1,   129, &na_1,   WINDOW_A);
        state->ps[no].bits_na_lam = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_lam, 129, &na_lam, WINDOW_A);

        if (state->ps[no].bits_na_1 > bits) {
            bits = state->ps[no].bits_na_1;
        }
        if (state->ps[no].bits_na_lam > bits) {
            bits = state->ps[no].bits_na_lam;
        }
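/* Each input scalar na[np] is split with the curve's lambda endomorphism into
 * two roughly half-length scalars na_1 and na_lam such that
 *     na == na_1 + na_lam * lambda  (mod n),
 * so na*A can be computed as na_1*A + na_lam*(lambda*A). Both halves get their
 * own wNAF expansion, and `bits` tracks the longest expansion seen so far,
 * which later bounds the main double-and-add loop. */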
    secp256k1_fe_normalize_var(&Z);

    secp256k1_gej_rescale(&tmp, &Z);

    for (np = 0; np < no; ++np) {

    secp256k1_scalar_split_128(&ng_1, &ng_128, ng);

    bits_ng_1   = secp256k1_ecmult_wnaf(wnaf_ng_1,   129, &ng_1,   WINDOW_G);
    bits_ng_128 = secp256k1_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G);
    if (bits_ng_1 > bits) {
        bits = bits_ng_1;
    }
    if (bits_ng_128 > bits) {
        bits = bits_ng_128;
    }

    secp256k1_gej_set_infinity(r);

    for (i = bits - 1; i >= 0; i--) {
        secp256k1_gej_double_var(r, r, NULL);
        for (np = 0; np < no; ++np) {
            if (i < state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) {
                secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);

                secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);

        if (i < bits_ng_1 && (n = wnaf_ng_1[i])) {
            secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);

        if (i < bits_ng_128 && (n = wnaf_ng_128[i])) {
            secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);

    secp256k1_fe_mul(&r->z, &r->z, &Z);
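/* The precomputed tables for the input points are built so that all entries
 * share one common denominator Z ("effective affine" coordinates): the main
 * loop can then add them with the plain affine addition, while the truly
 * affine generator tables are added through the zinv variant to account for
 * that shared Z. Multiplying r->z by Z at the end maps the accumulated result
 * back to the correct Jacobian representation without any field inversion. */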
    secp256k1_ecmult_strauss_wnaf(&state, r, 1, a, na, ng);
}

static size_t secp256k1_strauss_scratch_size(size_t n_points) {

    return n_points*point_size;
}
    const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);

    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n_points == 0) {

    if (points == NULL || scalars == NULL || state.aux == NULL || state.pre_a == NULL || state.ps == NULL) {
        secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);

    for (i = 0; i < n_points; i++) {

        if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) {
            secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);

        secp256k1_gej_set_ge(&points[i], &point);

    secp256k1_ecmult_strauss_wnaf(&state, r, n_points, points, scalars, inp_g_sc);
    secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);

    return secp256k1_ecmult_strauss_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0);

    return secp256k1_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1);
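/* The maximum Strauss batch size follows directly from the scratch space: the
 * largest usable allocation the scratch can provide when carved into
 * STRAUSS_SCRATCH_OBJECTS separate objects, divided by the per-point footprint
 * reported by secp256k1_strauss_scratch_size(1). */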
    if (secp256k1_scalar_is_zero(s)) {
        for (pos = 0; pos < WNAF_SIZE(w); pos++) {

    if (secp256k1_scalar_is_even(s)) {

    wnaf[0] = secp256k1_scalar_get_bits_var(work, 0, w) + skew;

    for (pos = WNAF_SIZE(w) - 1; pos > 0; pos--) {
        int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);

    while (pos <= max_pos) {
        int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
        if ((val & 1) == 0) {
            wnaf[pos - 1] -= (1 << w);
            wnaf[pos] = (val + 1);

        if (pos >= 2 && ((wnaf[pos - 1] == 1 && wnaf[pos - 2] < 0) || (wnaf[pos - 1] == -1 && wnaf[pos - 2] > 0))) {
            if (wnaf[pos - 1] == 1) {
                wnaf[pos - 2] += 1 << w;
            } else {
                wnaf[pos - 2] -= 1 << w;
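/* secp256k1_wnaf_fixed produces a fixed-length wNAF of exactly WNAF_SIZE(w)
 * digits, which is what the Pippenger loop needs: every point contributes a
 * digit, possibly zero, at every position. An even scalar cannot be written
 * with odd digits, so it is biased by a skew of 1 that the caller later
 * corrects by subtracting the point once; the function returns that skew. */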
    size_t n_wnaf = WNAF_SIZE(bucket_window+1);

    for (np = 0; np < num; ++np) {
        if (secp256k1_scalar_is_zero(&sc[np]) || secp256k1_ge_is_infinity(&pt[np])) {

        state->ps[no].input_pos = np;
        state->ps[no].skew_na = secp256k1_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1);

    secp256k1_gej_set_infinity(r);

    for (i = n_wnaf - 1; i >= 0; i--) {

        secp256k1_gej_set_infinity(&buckets[j]);

        for (np = 0; np < no; ++np) {
            int n = state->wnaf_na[np*n_wnaf + i];

            int skew = point_state.skew_na;

            secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]);
            secp256k1_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL);

            secp256k1_gej_add_ge_var(&buckets[idx], &buckets[idx], &pt[point_state.input_pos], NULL);

            secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]);
            secp256k1_gej_add_ge_var(&buckets[idx], &buckets[idx], &tmp, NULL);

        for(j = 0; j < bucket_window; j++) {
            secp256k1_gej_double_var(r, r, NULL);

        secp256k1_gej_set_infinity(&running_sum);

        secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[j], NULL);
        secp256k1_gej_add_var(r, r, &running_sum, NULL);

        secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL);
        secp256k1_gej_double_var(r, r, NULL);
        secp256k1_gej_add_var(r, r, &running_sum, NULL);
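/* Bucket accumulation: at each digit position every point is added to the
 * bucket indexed by (|digit| - 1) / 2 (negated first if the digit is
 * negative), so bucket j collects the points whose digit equals the odd value
 * 2j+1. The running-sum pass then recovers sum((2j+1) * bucket[j]) using only
 * additions plus one final doubling, and doubling r bucket_window times
 * between digit positions plays the role of the window shift in an ordinary
 * double-and-add loop. */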
static int secp256k1_pippenger_bucket_window(size_t n) {

    } else if (n <= 20) {
    } else if (n <= 57) {
    } else if (n <= 136) {
    } else if (n <= 235) {
    } else if (n <= 1260) {
    } else if (n <= 4420) {
    } else if (n <= 7880) {
    } else if (n <= 16050) {
static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) {
    switch(bucket_window) {
        case 10: return 7880;
        case 11: return 16050;
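/* secp256k1_pippenger_bucket_window maps a point count to the bucket window
 * expected to be cheapest for it (the thresholds look empirically tuned), and
 * secp256k1_pippenger_bucket_window_inv is its inverse: the largest number of
 * points for which a given window is still the one chosen, e.g. window 11 is
 * used for up to 16050 points. */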
    secp256k1_scalar_split_lambda(s1, s2, &tmp);
    secp256k1_ge_mul_lambda(p2, p1);

    if (secp256k1_scalar_is_high(s1)) {
        secp256k1_scalar_negate(s1, s1);
        secp256k1_ge_neg(p1, p1);
    }
    if (secp256k1_scalar_is_high(s2)) {
        secp256k1_scalar_negate(s2, s2);
        secp256k1_ge_neg(p2, p2);
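/* Endomorphism split for one input: with lambda a cube root of unity modulo
 * the group order,
 *     s * P == s1 * P + s2 * (lambda * P),
 * where s1 and s2 are roughly half-length. Negating a "high" half together
 * with its point leaves the product unchanged while keeping the scalar small,
 * so the fixed-length wNAF only has to cover half-size scalars. */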
static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_window) {
    size_t entries = 2*n_points + 2;

    const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);

    size_t entries = 2*n_points + 2;

    size_t point_idx = 0;
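/* entries == 2*n_points + 2 because every input point is split into two
 * endomorphism halves, and the optional generator term inp_g_sc contributes
 * one more (point, scalar) pair that is split the same way. */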
    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n_points == 0) {

    bucket_window = secp256k1_pippenger_bucket_window(n_points);

    points = (secp256k1_ge *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*points));
    scalars = (secp256k1_scalar *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars));
    state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(error_callback, scratch, sizeof(*state_space));
    if (points == NULL || scalars == NULL || state_space == NULL) {
        secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);

    state_space->wnaf_na = (int *) secp256k1_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
    buckets = (secp256k1_gej *) secp256k1_scratch_alloc(error_callback, scratch, (1<<bucket_window) * sizeof(*buckets));
    if (state_space->ps == NULL || state_space->wnaf_na == NULL || buckets == NULL) {
        secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);

    if (inp_g_sc != NULL) {
        scalars[0] = *inp_g_sc;
        points[0] = secp256k1_ge_const_g;

        secp256k1_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]);

    while (point_idx < n_points) {
        if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) {
            secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);

        secp256k1_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]);

    secp256k1_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx);

    for(i = 0; (size_t)i < idx; i++) {
        secp256k1_scalar_clear(&scalars[i]);
        state_space->ps[i].skew_na = 0;

    for(i = 0; i < 1<<bucket_window; i++) {
        secp256k1_gej_clear(&buckets[i]);

    secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);

    return secp256k1_ecmult_pippenger_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0);
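/* secp256k1_pippenger_max_points (whose body follows) scans the candidate
 * bucket windows and, for each one, works out how many points fit in the
 * scratch space once that window's fixed overhead (point state, wNAF arrays,
 * buckets) has been subtracted, capped at the point count for which the window
 * is still optimal; the best count seen is returned. */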
        size_t max_points = secp256k1_pippenger_bucket_window_inv(bucket_window);
        size_t space_for_points;
        size_t space_overhead;

        entry_size = 2*entry_size;

        if (space_overhead > max_alloc) {

        space_for_points = max_alloc - space_overhead;

        n_points = space_for_points/entry_size;
        n_points = n_points > max_points ? max_points : n_points;
        if (n_points > res) {

        if (n_points < max_points) {
    secp256k1_scalar_set_int(&szero, 0);
    secp256k1_gej_set_infinity(r);
    secp256k1_gej_set_infinity(&tmpj);

    secp256k1_ecmult(r, &tmpj, &szero, inp_g_sc);
    for (point_idx = 0; point_idx < n_points; point_idx++) {

        if (!cb(&scalar, &point, point_idx, cbdata)) {

        secp256k1_gej_set_ge(&pointj, &point);
        secp256k1_ecmult(&tmpj, &pointj, &scalar, NULL);
        secp256k1_gej_add_var(r, r, &tmpj, NULL);
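/* This is the scratch-free fallback (secp256k1_ecmult_multi_simple_var, per
 * its call sites below): the generator term inp_g_sc*G is obtained by calling
 * secp256k1_ecmult with a zero scalar and the point at infinity, and then
 * every (scalar, point) pair from the callback is multiplied independently and
 * accumulated into r. Linear work per point, but no scratch space needed. */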
static int secp256k1_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) {
    if (max_n_batch_points == 0) {

    *n_batches = 1 + (n - 1) / max_n_batch_points;
    *n_batch_points = 1 + (n - 1) / *n_batches;
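/* Both expressions are ceiling divisions for n > 0:
 *     n_batches      = ceil(n / max_n_batch_points)
 *     n_batch_points = ceil(n / n_batches)
 * so the points are spread as evenly as possible without exceeding the
 * per-batch maximum. For example, n = 100 with max_n_batch_points = 33 gives
 * n_batches = 4 and n_batch_points = 25. */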
    size_t n_batch_points;

    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n == 0) {

    secp256k1_scalar_set_int(&szero, 0);
    secp256k1_ecmult(r, r, &szero, inp_g_sc);

    if (scratch == NULL) {
        return secp256k1_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);

    if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_pippenger_max_points(error_callback, scratch), n)) {
        return secp256k1_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);

    f = secp256k1_ecmult_pippenger_batch;

    if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_strauss_max_points(error_callback, scratch), n)) {
        return secp256k1_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);

    f = secp256k1_ecmult_strauss_batch;

    for(i = 0; i < n_batches; i++) {
        size_t nbp = n < n_batch_points ? n : n_batch_points;
        size_t offset = n_batch_points*i;

        if (!f(error_callback, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {

        secp256k1_gej_add_var(r, r, &tmp, NULL);
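/* Batch dispatch: depending on whether the per-batch point count reaches
 * ECMULT_PIPPENGER_THRESHOLD, f points at either the Pippenger or the Strauss
 * batch routine, with the batch size derived from how many points the scratch
 * space can hold for that algorithm. Each batch result lands in tmp and is
 * accumulated into r; the generator scalar inp_g_sc is passed only to the
 * first batch so it is counted exactly once. */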