#ifndef SECP256K1_FIELD_REPR_IMPL_H
#define SECP256K1_FIELD_REPR_IMPL_H
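/* A field element is represented as ten uint32_t limbs n[0..9] in base 2^26
 * (n[9] carries only 22 bits), so that the value is sum(n[i] * 2^(26*i)).
 * Under VERIFY, the extra fields 'magnitude' and 'normalized' record how far
 * each limb may exceed its nominal width. */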
    const uint32_t *d = a->n;
    int m = a->normalized ? 1 : 2 * a->magnitude, r = 1;
    r &= (d[0] <= 0x3FFFFFFUL * m);
    r &= (d[1] <= 0x3FFFFFFUL * m);
    r &= (d[2] <= 0x3FFFFFFUL * m);
    r &= (d[3] <= 0x3FFFFFFUL * m);
    r &= (d[4] <= 0x3FFFFFFUL * m);
    r &= (d[5] <= 0x3FFFFFFUL * m);
    r &= (d[6] <= 0x3FFFFFFUL * m);
    r &= (d[7] <= 0x3FFFFFFUL * m);
    r &= (d[8] <= 0x3FFFFFFUL * m);
    r &= (d[9] <= 0x03FFFFFUL * m);
    r &= (a->magnitude >= 0);
    r &= (a->magnitude <= 32);
    if (a->normalized) {
        r &= (a->magnitude <= 1);
        if (r && (d[9] == 0x03FFFFFUL)) {
            uint32_t mid = d[8] & d[7] & d[6] & d[5] & d[4] & d[3] & d[2];
            if (mid == 0x3FFFFFFUL) {
                r &= ((d[1] + 0x40UL + ((d[0] + 0x3D1UL) >> 26)) <= 0x3FFFFFFUL);
            }
        }
    }
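/* The checks above (the VERIFY-only consistency check on a field element) enforce
 * the limb bounds implied by the magnitude: every limb must be at most its nominal
 * maximum times m, where m is 1 for normalized elements and 2*magnitude otherwise,
 * and a normalized element must additionally be strictly less than the prime p. */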
    r->n[0] = 0x3FFFFFFUL * 2 * m;
    r->n[1] = 0x3FFFFFFUL * 2 * m;
    r->n[2] = 0x3FFFFFFUL * 2 * m;
    r->n[3] = 0x3FFFFFFUL * 2 * m;
    r->n[4] = 0x3FFFFFFUL * 2 * m;
    r->n[5] = 0x3FFFFFFUL * 2 * m;
    r->n[6] = 0x3FFFFFFUL * 2 * m;
    r->n[7] = 0x3FFFFFFUL * 2 * m;
    r->n[8] = 0x3FFFFFFUL * 2 * m;
    r->n[9] = 0x03FFFFFUL * 2 * m;
    r->normalized = (m == 0);
    secp256k1_fe_verify(r);
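/* This fragment fills every limb with its largest value permitted for magnitude m
 * and marks the result as normalized only when m == 0 (presumably
 * secp256k1_fe_get_bounds in the full file). */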
    uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
             t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];

    /* Reduce t9 at the start so there is at most a single carry from the first pass. */
    uint32_t m;
    uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;

    /* First pass: fold the overflow back in and propagate carries; m tracks whether limbs 2..8 are all ones. */
    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; m = t2;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; m &= t3;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; m &= t4;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; m &= t5;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; m &= t6;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; m &= t7;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; m &= t8;

    /* Check whether the value is greater than or equal to p, in which case one more reduction is needed. */
    x = (t9 >> 22) | ((t9 == 0x03FFFFFUL) & (m == 0x3FFFFFFUL)
        & ((t1 + 0x40UL + ((t0 + 0x3D1UL) >> 26)) > 0x3FFFFFFUL));

    /* Second pass: apply the final reduction unconditionally for constant-time behaviour. */
    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL;

    /* Mask off the possible multiple of 2^256 produced by the final reduction. */
    t9 &= 0x03FFFFFUL;

    r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
    r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9;

    secp256k1_fe_verify(r);
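/* The normalization above uses 2^256 == 2^32 + 977 == 0x1000003D1 (mod p): an
 * overflow x weighted 2^256 is folded back in as x*0x3D1 into limb 0 and
 * (x << 6) into limb 1 (x * 2^6 * 2^26 = x * 2^32).  The second pass applies the
 * same reduction once more in case the first result still equals or exceeds p,
 * and it does so unconditionally so the function runs in constant time. */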
    uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
             t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];

    uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;

    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL;

    r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
    r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9;

    secp256k1_fe_verify(r);
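/* Same single-pass carry propagation as above, but without the final conditional
 * reduction: the result is only guaranteed to have magnitude 1, not to be the
 * unique representative below p (a "weak" normalization). */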
    uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
             t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];

    uint32_t m;
    uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;

    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; m = t2;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; m &= t3;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; m &= t4;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; m &= t5;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; m &= t6;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; m &= t7;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; m &= t8;

    x = (t9 >> 22) | ((t9 == 0x03FFFFFUL) & (m == 0x3FFFFFFUL)
        & ((t1 + 0x40UL + ((t0 + 0x3D1UL) >> 26)) > 0x3FFFFFFUL));

    if (x) {
        t0 += 0x3D1UL; t1 += (x << 6);
        t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
        t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
        t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL;
        t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL;
        t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL;
        t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL;
        t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL;
        t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL;
        t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL;

        /* Mask off the possible multiple of 2^256 from the final reduction. */
        t9 &= 0x03FFFFFUL;
    }

    r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
    r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9;

    secp256k1_fe_verify(r);
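/* Variable-time variant: the final reduction is performed only inside the
 * if (x) branch when it is actually needed, so this version must not be used on
 * secret data. */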
static int secp256k1_fe_normalizes_to_zero(const secp256k1_fe *r) {
    uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
             t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];

    /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of p. */
    uint32_t z0, z1;

    uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;

    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL; z0  = t0; z1  = t0 ^ 0x3D0UL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; z0 |= t1; z1 &= t1 ^ 0x40UL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; z0 |= t2; z1 &= t2;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; z0 |= t3; z1 &= t3;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; z0 |= t4; z1 &= t4;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; z0 |= t5; z1 &= t5;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; z0 |= t6; z1 &= t6;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; z0 |= t7; z1 &= t7;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; z0 |= t8; z1 &= t8;
    z0 |= t9; z1 &= t9 ^ 0x3C00000UL;

    return (z0 == 0) | (z1 == 0x3FFFFFFUL);
}
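/* z0 ORs together all weakly-normalized limbs and is 0 iff the raw value is 0.
 * z1 ANDs each limb XORed with a constant chosen so that the AND stays 0x3FFFFFF
 * iff every limb equals the corresponding limb of p (0x3FFFC2F, 0x3FFFFBF,
 * 0x3FFFFFF, ..., 0x03FFFFF); a raw value of exactly p also normalizes to zero. */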
static int secp256k1_fe_normalizes_to_zero_var(const secp256k1_fe *r) {
    uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9;
    uint32_t z0, z1, x;
    t0 = r->n[0];
    t9 = r->n[9];
    x = t9 >> 22;
    t0 += x * 0x3D1UL;
    z0 = t0 & 0x3FFFFFFUL;
    z1 = z0 ^ 0x3D0UL;
    /* Fast return path should catch the majority of cases. */
    if ((z0 != 0UL) & (z1 != 0x3FFFFFFUL)) {
        return 0;
    }
    t1 = r->n[1]; t2 = r->n[2]; t3 = r->n[3]; t4 = r->n[4];
    t5 = r->n[5]; t6 = r->n[6]; t7 = r->n[7]; t8 = r->n[8];
    t9 &= 0x03FFFFFUL;
    t1 += (x << 6);
    t1 += (t0 >> 26);
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; z0 |= t1; z1 &= t1 ^ 0x40UL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; z0 |= t2; z1 &= t2;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; z0 |= t3; z1 &= t3;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; z0 |= t4; z1 &= t4;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; z0 |= t5; z1 &= t5;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; z0 |= t6; z1 &= t6;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; z0 |= t7; z1 &= t7;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; z0 |= t8; z1 &= t8;
    z0 |= t9; z1 &= t9 ^ 0x3C00000UL;
    return (z0 == 0) | (z1 == 0x3FFFFFFUL);
}
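/* Variable-time zero test: the reduced low limb alone rules out most non-zero
 * inputs, so the function can return early before processing the remaining limbs. */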
    r->n[1] = r->n[2] = r->n[3] = r->n[4] = r->n[5] = r->n[6] = r->n[7] = r->n[8] = r->n[9] = 0;
    r->magnitude = (a != 0);
    secp256k1_fe_verify(r);
    const uint32_t *t = a->n;
    secp256k1_fe_verify(a);
    return (t[0] | t[1] | t[2] | t[3] | t[4] | t[5] | t[6] | t[7] | t[8] | t[9]) == 0;
    secp256k1_fe_verify(a);

    for (i=0; i<10; i++) {
        a->n[i] = 0;
    }
    secp256k1_fe_verify(a);
    secp256k1_fe_verify(b);

    for (i = 9; i >= 0; i--) {
        if (a->n[i] > b->n[i]) {
            return 1;
        }
        if (a->n[i] < b->n[i]) {
            return -1;
        }
    }
static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) {
    int ret;
    /* (the unpacking of the 32 big-endian input bytes into r->n[0..9] is not shown here) */
    ret = !((r->n[9] == 0x3FFFFFUL) & ((r->n[8] & r->n[7] & r->n[6] & r->n[5] & r->n[4] & r->n[3] & r->n[2]) == 0x3FFFFFFUL) &
            ((r->n[1] + 0x40UL + ((r->n[0] + 0x3D1UL) >> 26)) > 0x3FFFFFFUL));
    secp256k1_fe_verify(r);
    return ret;
}
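/* ret is 0 exactly when the 32-byte big-endian input encodes a value >= p: the
 * 22-bit top limb and the middle limbs are all ones and the two low limbs carry
 * past p (0x3D1 and 0x40 are the low limbs of 2^256 - p = 0x1000003D1). */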
static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) {
    secp256k1_fe_verify(a);
    r[0] = (a->n[9] >> 14) & 0xff;
    r[1] = (a->n[9] >> 6) & 0xff;
    r[2] = ((a->n[9] & 0x3F) << 2) | ((a->n[8] >> 24) & 0x3);
    r[3] = (a->n[8] >> 16) & 0xff;
    r[4] = (a->n[8] >> 8) & 0xff;
    r[5] = a->n[8] & 0xff;
    r[6] = (a->n[7] >> 18) & 0xff;
    r[7] = (a->n[7] >> 10) & 0xff;
    r[8] = (a->n[7] >> 2) & 0xff;
    r[9] = ((a->n[7] & 0x3) << 6) | ((a->n[6] >> 20) & 0x3f);
    r[10] = (a->n[6] >> 12) & 0xff;
    r[11] = (a->n[6] >> 4) & 0xff;
    r[12] = ((a->n[6] & 0xf) << 4) | ((a->n[5] >> 22) & 0xf);
    r[13] = (a->n[5] >> 14) & 0xff;
    r[14] = (a->n[5] >> 6) & 0xff;
    r[15] = ((a->n[5] & 0x3f) << 2) | ((a->n[4] >> 24) & 0x3);
    r[16] = (a->n[4] >> 16) & 0xff;
    r[17] = (a->n[4] >> 8) & 0xff;
    r[18] = a->n[4] & 0xff;
    r[19] = (a->n[3] >> 18) & 0xff;
    r[20] = (a->n[3] >> 10) & 0xff;
    r[21] = (a->n[3] >> 2) & 0xff;
    r[22] = ((a->n[3] & 0x3) << 6) | ((a->n[2] >> 20) & 0x3f);
    r[23] = (a->n[2] >> 12) & 0xff;
    r[24] = (a->n[2] >> 4) & 0xff;
    r[25] = ((a->n[2] & 0xf) << 4) | ((a->n[1] >> 22) & 0xf);
    r[26] = (a->n[1] >> 14) & 0xff;
    r[27] = (a->n[1] >> 6) & 0xff;
    r[28] = ((a->n[1] & 0x3f) << 2) | ((a->n[0] >> 24) & 0x3);
    r[29] = (a->n[0] >> 16) & 0xff;
    r[30] = (a->n[0] >> 8) & 0xff;
    r[31] = a->n[0] & 0xff;
}
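/* The output is big-endian: r[0] takes the top 8 bits of the 22-bit limb n[9],
 * each following byte stitches together bits of adjacent 26-bit limbs, and
 * r[31] is the low byte of n[0]. */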
    secp256k1_fe_verify(a);
    VERIFY_CHECK(0x3FFFC2FUL * 2 * (m + 1) >= 0x3FFFFFFUL * 2 * m);
    VERIFY_CHECK(0x3FFFFBFUL * 2 * (m + 1) >= 0x3FFFFFFUL * 2 * m);
    VERIFY_CHECK(0x3FFFFFFUL * 2 * (m + 1) >= 0x3FFFFFFUL * 2 * m);
    VERIFY_CHECK(0x03FFFFFUL * 2 * (m + 1) >= 0x03FFFFFUL * 2 * m);

    r->n[0] = 0x3FFFC2FUL * 2 * (m + 1) - a->n[0];
    r->n[1] = 0x3FFFFBFUL * 2 * (m + 1) - a->n[1];
    r->n[2] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[2];
    r->n[3] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[3];
    r->n[4] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[4];
    r->n[5] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[5];
    r->n[6] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[6];
    r->n[7] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[7];
    r->n[8] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[8];
    r->n[9] = 0x03FFFFFUL * 2 * (m + 1) - a->n[9];

    r->magnitude = m + 1;
    secp256k1_fe_verify(r);
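/* Negation computes 2*(m+1)*p - a limb by limb (the constants are p's limbs
 * scaled by 2*(m+1)); the VERIFY_CHECKs above confirm the multiple of p is large
 * enough that no limb can go negative for an input of magnitude at most m, and
 * the result is assigned magnitude m + 1. */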
    secp256k1_fe_verify(r);

    secp256k1_fe_verify(a);
    r->magnitude += a->magnitude;
    secp256k1_fe_verify(r);
#if defined(USE_EXTERNAL_ASM)

#define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
#define VERIFY_BITS(x, n) do { } while(0)
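/* The two VERIFY_BITS definitions above are alternatives: the first is used in
 * VERIFY builds, the second compiles to nothing otherwise.  In the multiply and
 * square routines below, M masks one 26-bit limb, and R0/R1 encode the reduction
 * constant: limb 9 carries only 22 bits, so a unit at limb index 10 is worth
 * 2^260 = 16 * 2^256, and 2^256 == 0x1000003D1 (mod p), giving
 * 16 * 0x1000003D1 = 0x1000003D10, which splits over 26-bit limbs as
 * R0 = 0x3D10 at position 0 and R1 = 0x400 at position 1. */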
    uint64_t c, d;
    uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8;
    uint32_t t9, t1, t0, t2, t3, t4, t5, t6, t7;
    const uint32_t M = 0x3FFFFFFUL, R0 = 0x3D10UL, R1 = 0x400UL;

    /* Reduction skeleton: c and d accumulate 64-bit partial products of the input
     * limbs (the multiply-accumulate expressions are not shown here).  Each step
     * extracts one 26-bit limb and folds the overflow back in via R0/R1. */
    t9 = d & M; d >>= 26;

    u0 = d & M; d >>= 26; c += u0 * R0;
    t0 = c & M; c >>= 26; c += u0 * R1;

    u1 = d & M; d >>= 26; c += u1 * R0;
    t1 = c & M; c >>= 26; c += u1 * R1;

    u2 = d & M; d >>= 26; c += u2 * R0;
    t2 = c & M; c >>= 26; c += u2 * R1;

    u3 = d & M; d >>= 26; c += u3 * R0;
    t3 = c & M; c >>= 26; c += u3 * R1;

    u4 = d & M; d >>= 26; c += u4 * R0;
    t4 = c & M; c >>= 26; c += u4 * R1;

    u5 = d & M; d >>= 26; c += u5 * R0;
    t5 = c & M; c >>= 26; c += u5 * R1;

    u6 = d & M; d >>= 26; c += u6 * R0;
    t6 = c & M; c >>= 26; c += u6 * R1;

    u7 = d & M; d >>= 26; c += u7 * R0;
    t7 = c & M; c >>= 26; c += u7 * R1;

    u8 = d & M; d >>= 26; c += u8 * R0;
    r[8] = c & M; c >>= 26; c += u8 * R1;

    r[9] = c & (M >> 4); c >>= 22; c += d * (R1 << 4);

    d = c * (R0 >> 4) + t0;
    r[0] = d & M; d >>= 26;
    d += c * (R1 >> 4) + t1;
    r[1] = d & M; d >>= 26;
    uint64_t c, d;
    uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8;
    uint32_t t9, t0, t1, t2, t3, t4, t5, t6, t7;
    const uint32_t M = 0x3FFFFFFUL, R0 = 0x3D10UL, R1 = 0x400UL;

    /* Same reduction skeleton as in the multiply above; the squaring-specific
     * multiply-accumulate expressions feeding c and d are not shown here. */
    t9 = d & M; d >>= 26;

    u0 = d & M; d >>= 26; c += u0 * R0;
    t0 = c & M; c >>= 26; c += u0 * R1;

    u1 = d & M; d >>= 26; c += u1 * R0;
    t1 = c & M; c >>= 26; c += u1 * R1;

    u2 = d & M; d >>= 26; c += u2 * R0;
    t2 = c & M; c >>= 26; c += u2 * R1;

    u3 = d & M; d >>= 26; c += u3 * R0;
    t3 = c & M; c >>= 26; c += u3 * R1;

    u4 = d & M; d >>= 26; c += u4 * R0;
    t4 = c & M; c >>= 26; c += u4 * R1;

    u5 = d & M; d >>= 26; c += u5 * R0;
    t5 = c & M; c >>= 26; c += u5 * R1;

    u6 = d & M; d >>= 26; c += u6 * R0;
    t6 = c & M; c >>= 26; c += u6 * R1;

    u7 = d & M; d >>= 26; c += u7 * R0;
    t7 = c & M; c >>= 26; c += u7 * R1;

    u8 = d & M; d >>= 26; c += u8 * R0;
    r[8] = c & M; c >>= 26; c += u8 * R1;

    r[9] = c & (M >> 4); c >>= 22; c += d * (R1 << 4);

    d = c * (R0 >> 4) + t0;
    r[0] = d & M; d >>= 26;
    d += c * (R1 >> 4) + t1;
    r[1] = d & M; d >>= 26;
    secp256k1_fe_verify(a);
    secp256k1_fe_verify(b);
    secp256k1_fe_mul_inner(r->n, a->n, b->n);
    secp256k1_fe_verify(r);

    secp256k1_fe_verify(a);
    secp256k1_fe_sqr_inner(r->n, a->n);
    secp256k1_fe_verify(r);
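/* The wrappers above pass the 10x26 limb arrays to the inner routines; callers are
 * expected to supply inputs of magnitude at most 8, and the result is produced
 * fully reduced into the limb bounds with magnitude 1. */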
    r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
    r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
    r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
    r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
    r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
    r->n[5] = (r->n[5] & mask0) | (a->n[5] & mask1);
    r->n[6] = (r->n[6] & mask0) | (a->n[6] & mask1);
    r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1);
    r->n[8] = (r->n[8] & mask0) | (a->n[8] & mask1);
    r->n[9] = (r->n[9] & mask0) | (a->n[9] & mask1);
    if (flag) {
        r->magnitude = a->magnitude;
        r->normalized = a->normalized;
    }
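/* Constant-time conditional move: mask0 and mask1 are complementary all-ones /
 * all-zeros masks derived from 'flag' earlier in the function, so each limb of r
 * is replaced by the corresponding limb of a exactly when flag is set, without
 * any data-dependent branch on the limb values. */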
    uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
             t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
    uint32_t one = (uint32_t)1;
    uint32_t mask = -(t0 & one) >> 6;

    secp256k1_fe_verify(r);

    /* Add p if the input is odd, so the sum is even and can be halved exactly. */
    t0 += 0x3FFFC2FUL & mask;
    t1 += 0x3FFFFBFUL & mask;
    t2 += mask;
    t3 += mask;
    t4 += mask;
    t5 += mask;
    t6 += mask;
    t7 += mask;
    t8 += mask;
    t9 += mask >> 4;

    r->n[0] = (t0 >> 1) + ((t1 & one) << 25);
    r->n[1] = (t1 >> 1) + ((t2 & one) << 25);
    r->n[2] = (t2 >> 1) + ((t3 & one) << 25);
    r->n[3] = (t3 >> 1) + ((t4 & one) << 25);
    r->n[4] = (t4 >> 1) + ((t5 & one) << 25);
    r->n[5] = (t5 >> 1) + ((t6 & one) << 25);
    r->n[6] = (t6 >> 1) + ((t7 & one) << 25);
    r->n[7] = (t7 >> 1) + ((t8 & one) << 25);
    r->n[8] = (t8 >> 1) + ((t9 & one) << 25);
    r->n[9] = (t9 >> 1);

    r->magnitude = (r->magnitude >> 1) + 1;
    secp256k1_fe_verify(r);
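/* Halving: when the value is odd, p is added first via the masked additions above
 * (mask is all-ones in its low 26 bits only for odd t0), making the sum even; each
 * limb is then shifted right by one bit, pulling in the low bit of the next limb
 * as its new bit 25. */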
    r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
    r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
    r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
    r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
    r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
    r->n[5] = (r->n[5] & mask0) | (a->n[5] & mask1);
    r->n[6] = (r->n[6] & mask0) | (a->n[6] & mask1);
    r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1);
    r->n[0] = a->n[0] | a->n[1] << 26;
    r->n[1] = a->n[1] >> 6 | a->n[2] << 20;
    r->n[2] = a->n[2] >> 12 | a->n[3] << 14;
    r->n[3] = a->n[3] >> 18 | a->n[4] << 8;
    r->n[4] = a->n[4] >> 24 | a->n[5] << 2 | a->n[6] << 28;
    r->n[5] = a->n[6] >> 4 | a->n[7] << 22;
    r->n[6] = a->n[7] >> 10 | a->n[8] << 16;
    r->n[7] = a->n[8] >> 16 | a->n[9] << 10;
    r->n[0] = a->n[0] & 0x3FFFFFFUL;
    r->n[1] = a->n[0] >> 26 | ((a->n[1] << 6) & 0x3FFFFFFUL);
    r->n[2] = a->n[1] >> 20 | ((a->n[2] << 12) & 0x3FFFFFFUL);
    r->n[3] = a->n[2] >> 14 | ((a->n[3] << 18) & 0x3FFFFFFUL);
    r->n[4] = a->n[3] >> 8 | ((a->n[4] << 24) & 0x3FFFFFFUL);
    r->n[5] = (a->n[4] >> 2) & 0x3FFFFFFUL;
    r->n[6] = a->n[4] >> 28 | ((a->n[5] << 4) & 0x3FFFFFFUL);
    r->n[7] = a->n[5] >> 22 | ((a->n[6] << 10) & 0x3FFFFFFUL);
    r->n[8] = a->n[6] >> 16 | ((a->n[7] << 16) & 0x3FFFFFFUL);
    r->n[9] = a->n[7] >> 10;

    secp256k1_fe_verify(r);
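/* The storage form packs a normalized element into eight 32-bit words with no
 * spare bits; the two routines above convert between that packed layout and the
 * ten 26-bit working limbs. */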
    const uint32_t M26 = UINT32_MAX >> 6;
    const uint32_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4],
                   a5 = a->v[5], a6 = a->v[6], a7 = a->v[7], a8 = a->v[8];

    r->n[0] = a0 & M26;
    r->n[1] = (a0 >> 26 | a1 << 4) & M26;
    r->n[2] = (a1 >> 22 | a2 << 8) & M26;
    r->n[3] = (a2 >> 18 | a3 << 12) & M26;
    r->n[4] = (a3 >> 14 | a4 << 16) & M26;
    r->n[5] = (a4 >> 10 | a5 << 20) & M26;
    r->n[6] = (a5 >> 6 | a6 << 24) & M26;
    r->n[7] = (a6 >> 2) & M26;
    r->n[8] = (a6 >> 28 | a7 << 2) & M26;
    r->n[9] = (a7 >> 24 | a8 << 6);

    secp256k1_fe_verify(r);
    const uint32_t M30 = UINT32_MAX >> 2;
    const uint64_t a0 = a->n[0], a1 = a->n[1], a2 = a->n[2], a3 = a->n[3], a4 = a->n[4],
                   a5 = a->n[5], a6 = a->n[6], a7 = a->n[7], a8 = a->n[8], a9 = a->n[9];

    r->v[0] = (a0 | a1 << 26) & M30;
    r->v[1] = (a1 >> 4 | a2 << 22) & M30;
    r->v[2] = (a2 >> 8 | a3 << 18) & M30;
    r->v[3] = (a3 >> 12 | a4 << 14) & M30;
    r->v[4] = (a4 >> 16 | a5 << 10) & M30;
    r->v[5] = (a5 >> 20 | a6 << 6) & M30;
    r->v[6] = (a6 >> 24 | a7 << 2 | a8 << 28) & M30;
    r->v[7] = (a8 >> 2 | a9 << 24) & M30;
    r->v[8] = a9 >> 6;
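/* These conversions re-pack the ten 26-bit limbs into the nine 30-bit limbs of
 * the signed30 representation used by the modular inversion code, and back. */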
    {{-0x3D1, -4, 0, 0, 0, 0, 0, 0, 65536}},
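/* The signed30 constant above encodes p = 2^256 - 2^32 - 977: limb 0 is -0x3D1
 * (-977), limb 1 is -4 (-4 * 2^30 = -2^32), and limb 8 is 65536
 * (65536 * 2^240 = 2^256), with the remaining limbs zero. */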
    secp256k1_fe_normalize(&tmp);
    secp256k1_fe_to_signed30(&s, &tmp);
    secp256k1_modinv32(&s, &secp256k1_const_modinfo_fe);
    secp256k1_fe_from_signed30(r, &s);

    VERIFY_CHECK(secp256k1_fe_normalizes_to_zero(r) == secp256k1_fe_normalizes_to_zero(&tmp));
    secp256k1_fe_normalize_var(&tmp);
    secp256k1_fe_to_signed30(&s, &tmp);
    secp256k1_modinv32_var(&s, &secp256k1_const_modinfo_fe);
    secp256k1_fe_from_signed30(r, &s);

    VERIFY_CHECK(secp256k1_fe_normalizes_to_zero(r) == secp256k1_fe_normalizes_to_zero(&tmp));
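/* Inversion normalizes a copy of the input, converts it to signed30 form, runs
 * the modular inverse (constant-time above, variable-time here), and converts the
 * result back; the VERIFY_CHECK confirms that only a zero input produces a zero
 * output. */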