#ifndef SECP256K1_SCALAR_REPR_IMPL_H
#define SECP256K1_SCALAR_REPR_IMPL_H
/* Limbs of the secp256k1 order. */
#define SECP256K1_N_0 ((uint32_t)0xD0364141UL)
#define SECP256K1_N_1 ((uint32_t)0xBFD25E8CUL)
#define SECP256K1_N_2 ((uint32_t)0xAF48A03BUL)
#define SECP256K1_N_3 ((uint32_t)0xBAAEDCE6UL)
#define SECP256K1_N_4 ((uint32_t)0xFFFFFFFEUL)
#define SECP256K1_N_5 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_6 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_7 ((uint32_t)0xFFFFFFFFUL)
/* Limbs of 2^256 minus the secp256k1 order. */
#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
#define SECP256K1_N_C_1 (~SECP256K1_N_1)
#define SECP256K1_N_C_2 (~SECP256K1_N_2)
#define SECP256K1_N_C_3 (~SECP256K1_N_3)
#define SECP256K1_N_C_4 (1)
/* Limbs of half the secp256k1 order. */
#define SECP256K1_N_H_0 ((uint32_t)0x681B20A0UL)
#define SECP256K1_N_H_1 ((uint32_t)0xDFE92F46UL)
#define SECP256K1_N_H_2 ((uint32_t)0x57A4501DUL)
#define SECP256K1_N_H_3 ((uint32_t)0x5D576E73UL)
#define SECP256K1_N_H_4 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_H_5 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_H_6 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_H_7 ((uint32_t)0x7FFFFFFFUL)
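
/* Illustrative sanity check (not part of the original file): the N_C limbs
 * are constructed so that N + N_C == 2^256, which is why adding N_C acts as
 * subtracting the order modulo 2^256 in the reduction code below. A minimal
 * sketch, guarded by a hypothetical EXAMPLE_SKETCH flag. */
#ifdef EXAMPLE_SKETCH
#include <assert.h>
static void example_check_order_complement(void) {
    /* Add N and N_C limb by limb; every 32-bit limb of the sum must be zero,
     * with a single carry of 1 emerging from the top (the sum is 2^256). */
    uint64_t t = (uint64_t)SECP256K1_N_0 + SECP256K1_N_C_0;
    assert((uint32_t)t == 0); t >>= 32;
    t += (uint64_t)SECP256K1_N_1 + SECP256K1_N_C_1; assert((uint32_t)t == 0); t >>= 32;
    t += (uint64_t)SECP256K1_N_2 + SECP256K1_N_C_2; assert((uint32_t)t == 0); t >>= 32;
    t += (uint64_t)SECP256K1_N_3 + SECP256K1_N_C_3; assert((uint32_t)t == 0); t >>= 32;
    t += (uint64_t)SECP256K1_N_4 + SECP256K1_N_C_4; assert((uint32_t)t == 0); t >>= 32;
    t += (uint64_t)SECP256K1_N_5;                   assert((uint32_t)t == 0); t >>= 32;
    t += (uint64_t)SECP256K1_N_6;                   assert((uint32_t)t == 0); t >>= 32;
    t += (uint64_t)SECP256K1_N_7;                   assert((uint32_t)t == 0); t >>= 32;
    assert(t == 1);
}
#endif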
static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5);
    return (a->d[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1);
}
static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    VERIFY_CHECK(count < 32);
    VERIFY_CHECK(offset + count <= 256);
    if ((offset + count - 1) >> 5 == offset >> 5) {
        return secp256k1_scalar_get_bits(a, offset, count);
    } else {
        VERIFY_CHECK((offset >> 5) + 1 < 8);
        return ((a->d[offset >> 5] >> (offset & 0x1F)) | (a->d[(offset >> 5) + 1] << (32 - (offset & 0x1F)))) & ((((uint32_t)1) << count) - 1);
    }
}
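
/* Illustrative usage (not part of the original file): scanning a scalar in
 * fixed 5-bit windows, as a windowed multiplication routine might. Windows
 * can straddle a 32-bit limb boundary, which is why the _var variant above
 * handles the two-limb case. A minimal sketch, guarded by a hypothetical
 * EXAMPLE_SKETCH flag; the window width of 5 is an arbitrary choice. */
#ifdef EXAMPLE_SKETCH
static void example_scan_windows(const secp256k1_scalar *a) {
    unsigned int offset;
    for (offset = 0; offset + 5 <= 256; offset += 5) {
        unsigned int w = secp256k1_scalar_get_bits_var(a, offset, 5);
        (void)w;  /* w holds bits offset..offset+4; the final bit 255 is left
                     to the caller in this sketch */
    }
}
#endif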
static int secp256k1_scalar_reduce(secp256k1_scalar *r, uint32_t overflow) {
    uint64_t t;
    VERIFY_CHECK(overflow <= 1);
    t = (uint64_t)r->d[0] + overflow * SECP256K1_N_C_0;   r->d[0] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[1] + overflow * SECP256K1_N_C_1;  r->d[1] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[2] + overflow * SECP256K1_N_C_2;  r->d[2] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[3] + overflow * SECP256K1_N_C_3;  r->d[3] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[4] + overflow * SECP256K1_N_C_4;  r->d[4] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[5];                               r->d[5] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[6];                               r->d[6] = t & 0xFFFFFFFFUL; t >>= 32;
    t += (uint64_t)r->d[7];                               r->d[7] = t & 0xFFFFFFFFUL;
    return overflow;
}
static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    int overflow;
    uint64_t t = (uint64_t)a->d[0] + b->d[0];
    r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[1] + b->d[1];  r->d[1] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[2] + b->d[2];  r->d[2] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[3] + b->d[3];  r->d[3] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[4] + b->d[4];  r->d[4] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[5] + b->d[5];  r->d[5] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[6] + b->d[6];  r->d[6] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)a->d[7] + b->d[7];  r->d[7] = t & 0xFFFFFFFFULL; t >>= 32;
    overflow = t + secp256k1_scalar_check_overflow(r);
    VERIFY_CHECK(overflow == 0 || overflow == 1);
    secp256k1_scalar_reduce(r, overflow);
    return overflow;
}
static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
    uint64_t t;
    VERIFY_CHECK(bit < 256);
    bit += ((uint32_t) flag - 1) & 0x100;  /* forcing (bit >> 5) > 7 makes this a noop */
    t = (uint64_t)r->d[0] + (((uint32_t)((bit >> 5) == 0)) << (bit & 0x1F));
    r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[1] + (((uint32_t)((bit >> 5) == 1)) << (bit & 0x1F));  r->d[1] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[2] + (((uint32_t)((bit >> 5) == 2)) << (bit & 0x1F));  r->d[2] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[3] + (((uint32_t)((bit >> 5) == 3)) << (bit & 0x1F));  r->d[3] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[4] + (((uint32_t)((bit >> 5) == 4)) << (bit & 0x1F));  r->d[4] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[5] + (((uint32_t)((bit >> 5) == 5)) << (bit & 0x1F));  r->d[5] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[6] + (((uint32_t)((bit >> 5) == 6)) << (bit & 0x1F));  r->d[6] = t & 0xFFFFFFFFULL; t >>= 32;
    t += (uint64_t)r->d[7] + (((uint32_t)((bit >> 5) == 7)) << (bit & 0x1F));  r->d[7] = t & 0xFFFFFFFFULL;
}
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
    int over;
    r->d[0] = (uint32_t)b32[31] | (uint32_t)b32[30] << 8 | (uint32_t)b32[29] << 16 | (uint32_t)b32[28] << 24;
    r->d[1] = (uint32_t)b32[27] | (uint32_t)b32[26] << 8 | (uint32_t)b32[25] << 16 | (uint32_t)b32[24] << 24;
    r->d[2] = (uint32_t)b32[23] | (uint32_t)b32[22] << 8 | (uint32_t)b32[21] << 16 | (uint32_t)b32[20] << 24;
    r->d[3] = (uint32_t)b32[19] | (uint32_t)b32[18] << 8 | (uint32_t)b32[17] << 16 | (uint32_t)b32[16] << 24;
    r->d[4] = (uint32_t)b32[15] | (uint32_t)b32[14] << 8 | (uint32_t)b32[13] << 16 | (uint32_t)b32[12] << 24;
    r->d[5] = (uint32_t)b32[11] | (uint32_t)b32[10] << 8 | (uint32_t)b32[9] << 16 | (uint32_t)b32[8] << 24;
    r->d[6] = (uint32_t)b32[7] | (uint32_t)b32[6] << 8 | (uint32_t)b32[5] << 16 | (uint32_t)b32[4] << 24;
    r->d[7] = (uint32_t)b32[3] | (uint32_t)b32[2] << 8 | (uint32_t)b32[1] << 16 | (uint32_t)b32[0] << 24;
    over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
    if (overflow) {
        *overflow = over;
    }
}
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
    bin[0] = a->d[7] >> 24; bin[1] = a->d[7] >> 16; bin[2] = a->d[7] >> 8; bin[3] = a->d[7];
    bin[4] = a->d[6] >> 24; bin[5] = a->d[6] >> 16; bin[6] = a->d[6] >> 8; bin[7] = a->d[6];
    bin[8] = a->d[5] >> 24; bin[9] = a->d[5] >> 16; bin[10] = a->d[5] >> 8; bin[11] = a->d[5];
    bin[12] = a->d[4] >> 24; bin[13] = a->d[4] >> 16; bin[14] = a->d[4] >> 8; bin[15] = a->d[4];
    bin[16] = a->d[3] >> 24; bin[17] = a->d[3] >> 16; bin[18] = a->d[3] >> 8; bin[19] = a->d[3];
    bin[20] = a->d[2] >> 24; bin[21] = a->d[2] >> 16; bin[22] = a->d[2] >> 8; bin[23] = a->d[2];
    bin[24] = a->d[1] >> 24; bin[25] = a->d[1] >> 16; bin[26] = a->d[1] >> 8; bin[27] = a->d[1];
    bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
}
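
/* Illustrative usage (not part of the original file): parsing and
 * serialization round-trip. Scalars are kept fully reduced, so get_b32 after
 * set_b32 reproduces the input exactly when it was already below the group
 * order. A minimal sketch, guarded by a hypothetical EXAMPLE_SKETCH flag. */
#ifdef EXAMPLE_SKETCH
#include <string.h>  /* memcmp */
static int example_b32_roundtrip(const unsigned char in[32]) {
    secp256k1_scalar s;
    unsigned char out[32];
    int overflow;
    secp256k1_scalar_set_b32(&s, in, &overflow);  /* parse big-endian bytes, reducing mod n */
    secp256k1_scalar_get_b32(out, &s);            /* serialize back to big-endian bytes */
    return overflow == 0 && memcmp(in, out, 32) == 0;
}
#endif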
static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
    return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
}
static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
    uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(a) == 0);
    uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1;
    r->d[0] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[1]) + SECP256K1_N_1;  r->d[1] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[2]) + SECP256K1_N_2;  r->d[2] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[3]) + SECP256K1_N_3;  r->d[3] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[4]) + SECP256K1_N_4;  r->d[4] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[5]) + SECP256K1_N_5;  r->d[5] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[6]) + SECP256K1_N_6;  r->d[6] = t & nonzero; t >>= 32;
    t += (uint64_t)(~a->d[7]) + SECP256K1_N_7;  r->d[7] = t & nonzero;
}
static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
    return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
}
static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
    /* If flag = 0, mask = 00...00 and this is a no-op;
     * if flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate. */
    uint32_t mask = !flag - 1;
    uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(r) == 0);
    uint64_t t = (uint64_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask);
    r->d[0] = t & nonzero; t >>= 32;
    t += (uint64_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask);  r->d[1] = t & nonzero; t >>= 32;
    t += (uint64_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask);  r->d[2] = t & nonzero; t >>= 32;
    t += (uint64_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask);  r->d[3] = t & nonzero; t >>= 32;
    t += (uint64_t)(r->d[4] ^ mask) + (SECP256K1_N_4 & mask);  r->d[4] = t & nonzero; t >>= 32;
    t += (uint64_t)(r->d[5] ^ mask) + (SECP256K1_N_5 & mask);  r->d[5] = t & nonzero; t >>= 32;
    t += (uint64_t)(r->d[6] ^ mask) + (SECP256K1_N_6 & mask);  r->d[6] = t & nonzero; t >>= 32;
    t += (uint64_t)(r->d[7] ^ mask) + (SECP256K1_N_7 & mask);  r->d[7] = t & nonzero;
    return 2 * (mask == 0) - 1;
}
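
/* Illustrative usage (not part of the original file): conditional negation is
 * typically used to force a scalar into the low half-range, e.g. in signature
 * normalization. secp256k1_scalar_is_high (which compares against the N_H
 * limbs above) is part of the library but not shown in this excerpt. A
 * minimal sketch, guarded by a hypothetical EXAMPLE_SKETCH flag. */
#ifdef EXAMPLE_SKETCH
static int example_force_low(secp256k1_scalar *s) {
    int high = secp256k1_scalar_is_high(s);
    /* Negates in place iff high != 0; returns -1 if negated, 1 otherwise. */
    return secp256k1_scalar_cond_negate(s, high);
}
#endif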
/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd(a,b) { \
    uint32_t tl, th; \
    { uint64_t t = (uint64_t)a * b; th = t >> 32; tl = t; } \
    c0 += tl; th += (c0 < tl);  /* carry into th; th is at most 0xFFFFFFFF */ \
    c1 += th; c2 += (c1 < th);  /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
}

/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
#define muladd_fast(a,b) { \
    uint32_t tl, th; \
    { uint64_t t = (uint64_t)a * b; th = t >> 32; tl = t; } \
    c0 += tl; th += (c0 < tl); \
    c1 += th;  /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK(c1 >= th); \
}

/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
#define sumadd_fast(a) { \
    c0 += (a); \
    c1 += (c0 < (a));  /* never overflows by contract (verified the next line) */ \
    VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
    VERIFY_CHECK(c2 == 0); \
}

/** Extract the lowest 32 bits of (c0,c1,c2) into n, and left shift the number 32 bits. */
#define extract(n) { \
    (n) = c0; c0 = c1; c1 = c2; c2 = 0; \
}

/** Extract the lowest 32 bits of (c0,c1,c2) into n, and left shift the number 32 bits. c2 is assumed to be zero. */
#define extract_fast(n) { \
    (n) = c0; c0 = c1; c1 = 0; \
    VERIFY_CHECK(c2 == 0); \
}
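
/* Illustrative example (not part of the original file): how the (c0,c1,c2)
 * accumulator macros cooperate. This computes the low two limbs of a 64x64
 * bit product column by column, the same pattern secp256k1_scalar_mul_512
 * applies across 8x8 limbs. A minimal sketch, guarded by a hypothetical
 * EXAMPLE_SKETCH flag. */
#ifdef EXAMPLE_SKETCH
static void example_mul_columns(uint32_t *out2, const uint32_t *x, const uint32_t *y) {
    uint32_t c0 = 0, c1 = 0, c2 = 0;  /* the 96-bit accumulator */
    muladd_fast(x[0], y[0]);          /* column 0: only x0*y0 contributes */
    extract_fast(out2[0]);            /* out2[0] = lowest limb; shift accumulator */
    muladd(x[0], y[1]);               /* column 1: x0*y1 + x1*y0 */
    muladd(x[1], y[0]);
    extract(out2[1]);                 /* out2[1] = next limb */
    /* (c0,c1) now hold the high half of the product, ignored in this sketch. */
}
#endif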
static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint32_t *l) {
    uint64_t c;
    uint32_t n0 = l[8], n1 = l[9], n2 = l[10], n3 = l[11], n4 = l[12], n5 = l[13], n6 = l[14], n7 = l[15];
    uint32_t m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12;
    uint32_t p0, p1, p2, p3, p4, p5, p6, p7, p8;

    /* 96 bit accumulator. */
    uint32_t c0, c1, c2;

    /* Reduce 512 bits into 385. */
    /* m[0..12] = l[0..7] + n[0..7] * SECP256K1_N_C. */
    c0 = l[0]; c1 = 0; c2 = 0;
    /* Reduce 385 bits into 258. */
    /* p[0..8] = m[0..8] + m[9..12] * SECP256K1_N_C. */
    c0 = m0; c1 = 0; c2 = 0;
    /* Reduce 258 bits into 256. */
    /* r[0..7] = p[0..7] + p[8] * SECP256K1_N_C. */
    c = p0 + (uint64_t)SECP256K1_N_C_0 * p8;   r->d[0] = c & 0xFFFFFFFFUL; c >>= 32;
    c += p1 + (uint64_t)SECP256K1_N_C_1 * p8;  r->d[1] = c & 0xFFFFFFFFUL; c >>= 32;
    c += p2 + (uint64_t)SECP256K1_N_C_2 * p8;  r->d[2] = c & 0xFFFFFFFFUL; c >>= 32;
    c += p3 + (uint64_t)SECP256K1_N_C_3 * p8;  r->d[3] = c & 0xFFFFFFFFUL; c >>= 32;
    c += p4 + (uint64_t)p8;                    r->d[4] = c & 0xFFFFFFFFUL; c >>= 32;
    c += p5;                                   r->d[5] = c & 0xFFFFFFFFUL; c >>= 32;
    c += p6;                                   r->d[6] = c & 0xFFFFFFFFUL; c >>= 32;
    c += p7;                                   r->d[7] = c & 0xFFFFFFFFUL; c >>= 32;

    /* Final reduction of r. */
    secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
}
static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    uint32_t l[16];
    secp256k1_scalar_mul_512(l, a, b);
    secp256k1_scalar_reduce_512(r, l);
}
static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
    int ret;
    VERIFY_CHECK(n > 0);
    VERIFY_CHECK(n < 16);
    ret = r->d[0] & ((1 << n) - 1);
    r->d[0] = (r->d[0] >> n) + (r->d[1] << (32 - n));
    r->d[1] = (r->d[1] >> n) + (r->d[2] << (32 - n));
    r->d[2] = (r->d[2] >> n) + (r->d[3] << (32 - n));
    r->d[3] = (r->d[3] >> n) + (r->d[4] << (32 - n));
    r->d[4] = (r->d[4] >> n) + (r->d[5] << (32 - n));
    r->d[5] = (r->d[5] >> n) + (r->d[6] << (32 - n));
    r->d[6] = (r->d[6] >> n) + (r->d[7] << (32 - n));
    r->d[7] = (r->d[7] >> n);
    return ret;
}
static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
    return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0;
}
static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) {
    uint32_t l[16];
    unsigned int shiftlimbs;
    unsigned int shiftlow;
    unsigned int shifthigh;
    VERIFY_CHECK(shift >= 256);
    secp256k1_scalar_mul_512(l, a, b);
    shiftlimbs = shift >> 5;
    shiftlow = shift & 0x1F;
    shifthigh = 32 - shiftlow;
    r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 480 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[1] = shift < 480 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[2] = shift < 448 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 416 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[3] = shift < 416 ? (l[3 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[4 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[4] = shift < 384 ? (l[4 + shiftlimbs] >> shiftlow | (shift < 352 && shiftlow ? (l[5 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[5] = shift < 352 ? (l[5 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[6 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0;
    /* Round to nearest: conditionally add the highest bit that was shifted out. */
    secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1);
}
static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
    uint32_t mask0, mask1;
    VG_CHECK_VERIFY(r->d, sizeof(r->d));
    mask0 = flag + ~((uint32_t)0);
    mask1 = ~mask0;
    r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1);
    r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1);
    r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1);
    r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);
    r->d[4] = (r->d[4] & mask0) | (a->d[4] & mask1);
    r->d[5] = (r->d[5] & mask0) | (a->d[5] & mask1);
    r->d[6] = (r->d[6] & mask0) | (a->d[6] & mask1);
    r->d[7] = (r->d[7] & mask0) | (a->d[7] & mask1);
}
static void secp256k1_scalar_from_signed30(secp256k1_scalar *r, const secp256k1_modinv32_signed30 *a) {
    const uint32_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4],
                   a5 = a->v[5], a6 = a->v[6], a7 = a->v[7], a8 = a->v[8];

    /* Repack nine 30-bit limbs into eight 32-bit limbs. */
    r->d[0] = a0       | a1 << 30;
    r->d[1] = a1 >>  2 | a2 << 28;
    r->d[2] = a2 >>  4 | a3 << 26;
    r->d[3] = a3 >>  6 | a4 << 24;
    r->d[4] = a4 >>  8 | a5 << 22;
    r->d[5] = a5 >> 10 | a6 << 20;
    r->d[6] = a6 >> 12 | a7 << 18;
    r->d[7] = a7 >> 14 | a8 << 16;
}
static void secp256k1_scalar_to_signed30(secp256k1_modinv32_signed30 *r, const secp256k1_scalar *a) {
    const uint32_t M30 = UINT32_MAX >> 2;
    const uint32_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3],
                   a4 = a->d[4], a5 = a->d[5], a6 = a->d[6], a7 = a->d[7];

    /* Split eight 32-bit limbs into nine 30-bit limbs. */
    r->v[0] =  a0                   & M30;
    r->v[1] = (a0 >> 30 | a1 <<  2) & M30;
    r->v[2] = (a1 >> 28 | a2 <<  4) & M30;
    r->v[3] = (a2 >> 26 | a3 <<  6) & M30;
    r->v[4] = (a3 >> 24 | a4 <<  8) & M30;
    r->v[5] = (a4 >> 22 | a5 << 10) & M30;
    r->v[6] = (a5 >> 20 | a6 << 12) & M30;
    r->v[7] = (a6 >> 18 | a7 << 14) & M30;
    r->v[8] =  a7 >> 16;
}
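
/* Illustrative check (not part of the original file): the 30-bit repacking
 * above is lossless, so converting to signed30 form and back must reproduce
 * the scalar. A minimal sketch, guarded by a hypothetical EXAMPLE_SKETCH
 * flag. */
#ifdef EXAMPLE_SKETCH
static int example_signed30_roundtrip(const secp256k1_scalar *a) {
    secp256k1_modinv32_signed30 s;
    secp256k1_scalar b;
    secp256k1_scalar_to_signed30(&s, a);    /* 8x32-bit -> 9x30-bit limbs */
    secp256k1_scalar_from_signed30(&b, &s); /* 9x30-bit -> 8x32-bit limbs */
    return secp256k1_scalar_eq(a, &b);      /* expected to hold for all a */
}
#endif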
static const secp256k1_modinv32_modinfo secp256k1_const_modinfo_scalar = {
    {{0x10364141L, 0x3F497A33L, 0x348A03BBL, 0x2BB739ABL, -0x146L, 0, 0, 0, 65536}},
    0x2A774EC1L
};
static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
    secp256k1_modinv32_signed30 s;
#ifdef VERIFY
    int zero_in = secp256k1_scalar_is_zero(x);
#endif
    secp256k1_scalar_to_signed30(&s, x);
    secp256k1_modinv32(&s, &secp256k1_const_modinfo_scalar);
    secp256k1_scalar_from_signed30(r, &s);

#ifdef VERIFY
    VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
}
static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
    secp256k1_modinv32_signed30 s;
#ifdef VERIFY
    int zero_in = secp256k1_scalar_is_zero(x);
#endif
    secp256k1_scalar_to_signed30(&s, x);
    secp256k1_modinv32_var(&s, &secp256k1_const_modinfo_scalar);
    secp256k1_scalar_from_signed30(r, &s);

#ifdef VERIFY
    VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
}
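
/* Illustrative check (not part of the original file): a nonzero scalar times
 * its inverse is one; the inverse of zero is zero, matching the VERIFY check
 * above. A minimal sketch, guarded by a hypothetical EXAMPLE_SKETCH flag. */
#ifdef EXAMPLE_SKETCH
static int example_inverse_check(const secp256k1_scalar *x) {
    secp256k1_scalar xi, prod;
    if (secp256k1_scalar_is_zero(x)) {
        return 1;  /* zero maps to zero; nothing further to verify */
    }
    secp256k1_scalar_inverse_var(&xi, x);
    secp256k1_scalar_mul(&prod, x, &xi);
    return secp256k1_scalar_is_one(&prod);
}
#endif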
static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
    return !(a->d[0] & 1);
}

#endif /* SECP256K1_SCALAR_REPR_IMPL_H */