#ifndef SECP256K1_FIELD_INNER5X52_IMPL_H
#define SECP256K1_FIELD_INNER5X52_IMPL_H

#include <stdint.h>

/* Compatibility stubs so this header is self-contained.
 * NOTE(review): in the full project these come from util.h — confirm the
 * guarded definitions below match the build's real ones. */
#ifndef SECP256K1_INLINE
#define SECP256K1_INLINE inline
#endif
#ifndef SECP256K1_RESTRICT
#define SECP256K1_RESTRICT
#endif
#ifndef VERIFY_CHECK
#define VERIFY_CHECK(cond)
#endif
#if defined(__SIZEOF_INT128__)
/* NOTE(review): upstream typedefs uint128_t elsewhere; a redundant
 * compatible typedef is harmless in C11. */
typedef unsigned __int128 uint128_t;
#endif

/* VERIFY_BITS(x, n) asserts that x fits in n bits.  It must be defined
 * exactly once: as a real check when VERIFY is enabled, and as a no-op
 * otherwise.  (The original text defined both variants unconditionally,
 * which is a macro redefinition error.) */
#ifdef VERIFY
#define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
#else
#define VERIFY_BITS(x, n) do { } while(0)
#endif

/** Multiply two field elements, r = a * b mod p.
 *
 *  Field elements are 5 little-endian limbs of 52 bits each (weight
 *  2^(52*i)); p is the secp256k1 prime, p = 2^256 - 0x1000003D1.  Since
 *  2^260 = 0x1000003D1 << 4 = 0x1000003D10 = R (mod p), overflow past the
 *  top limb is folded back by multiplying it by R.
 *
 *  On input, limbs a[0..3] and b[0..3] must fit in 56 bits and a[4], b[4]
 *  in 52 bits.  On output r's limbs fit in 52 bits (r[4] in 49 bits); the
 *  result is reduced mod p but not fully normalized.  r may alias a, but
 *  b must not alias r or a.
 */
static SECP256K1_INLINE void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t *SECP256K1_RESTRICT b) {
    uint128_t c, d;
    uint64_t t3, t4, tx, u0;
    uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4];
    const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL;

    VERIFY_BITS(a[0], 56);
    VERIFY_BITS(a[1], 56);
    VERIFY_BITS(a[2], 56);
    VERIFY_BITS(a[3], 56);
    VERIFY_BITS(a[4], 52);
    VERIFY_BITS(b[0], 56);
    VERIFY_BITS(b[1], 56);
    VERIFY_BITS(b[2], 56);
    VERIFY_BITS(b[3], 56);
    VERIFY_BITS(b[4], 52);
    VERIFY_CHECK(r != b);
    VERIFY_CHECK(a != b);

    /* px below denotes the partial product of total weight 2^(52*x):
     * px = sum(a[i]*b[x-i]).  Accumulators c and d are 128-bit. */

    /* d = p3 (weight 2^156). */
    d  = (uint128_t)a0 * b[3]
       + (uint128_t)a1 * b[2]
       + (uint128_t)a2 * b[1]
       + (uint128_t)a3 * b[0];
    /* c = p8 (weight 2^416 = 2^156 * 2^260); fold it into d via R. */
    c  = (uint128_t)a4 * b[4];
    d += (uint128_t)R * (uint64_t)c; c >>= 64;
    t3 = d & M; d >>= 52;
    VERIFY_BITS(t3, 52);

    /* d += p4; remaining high part of c (weight shifted by 64 bits,
     * i.e. an extra factor 2^12 relative to R) folds in as R<<12. */
    d += (uint128_t)a0 * b[4]
       + (uint128_t)a1 * b[3]
       + (uint128_t)a2 * b[2]
       + (uint128_t)a3 * b[1]
       + (uint128_t)a4 * b[0];
    d += (uint128_t)(R << 12) * (uint64_t)c;
    t4 = d & M; d >>= 52;
    /* Split t4 at bit 48: the top nibble tx re-enters at weight 2^256. */
    tx = (t4 >> 48); t4 &= (M >> 4);
    VERIFY_BITS(tx, 4);
    VERIFY_BITS(t4, 48);

    /* c = p0; d += p5.  u0 (the low 52 bits of p5, weight 2^260 relative
     * to limb 0) combines with tx and folds in as (R >> 4). */
    c  = (uint128_t)a0 * b[0];
    d += (uint128_t)a1 * b[4]
       + (uint128_t)a2 * b[3]
       + (uint128_t)a3 * b[2]
       + (uint128_t)a4 * b[1];
    u0 = d & M; d >>= 52;
    u0 = (u0 << 4) | tx;
    c += (uint128_t)u0 * (R >> 4);
    r[0] = c & M; c >>= 52;
    VERIFY_BITS(r[0], 52);

    /* Limb 1: c += p1; fold low 52 bits of d (p6 tail) via R. */
    c += (uint128_t)a0 * b[1]
       + (uint128_t)a1 * b[0];
    d += (uint128_t)a2 * b[4]
       + (uint128_t)a3 * b[3]
       + (uint128_t)a4 * b[2];
    c += (d & M) * R; d >>= 52;
    r[1] = c & M; c >>= 52;
    VERIFY_BITS(r[1], 52);

    /* Limb 2: c += p2; fold all of d (p7) via R in two 64-bit halves. */
    c += (uint128_t)a0 * b[2]
       + (uint128_t)a1 * b[1]
       + (uint128_t)a2 * b[0];
    d += (uint128_t)a3 * b[4]
       + (uint128_t)a4 * b[3];
    c += (uint128_t)R * (uint64_t)d; d >>= 64;
    r[2] = c & M; c >>= 52;
    VERIFY_BITS(r[2], 52);

    /* Limb 3: fold the high half of d, add back t3, then t4 as limb 4. */
    c += (uint128_t)(R << 12) * (uint64_t)d + t3;
    r[3] = c & M; c >>= 52;
    VERIFY_BITS(r[3], 52);
    r[4] = c + t4;
    VERIFY_BITS(r[4], 49);
}

/** Square a field element, r = a * a mod p.
 *
 *  Same representation, bounds and reduction strategy as
 *  secp256k1_fe_mul_inner; cross terms a[i]*a[j] (i != j) are computed
 *  once and doubled (either via an explicit *2 on an operand, or by
 *  doubling a4/a0 in place once they are no longer needed undoubled).
 *  r may alias a.
 */
static SECP256K1_INLINE void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t *a) {
    uint128_t c, d;
    uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4];
    /* uint64_t (not int64_t): all four hold masked values < 2^52, and mul
     * above uses uint64_t — keep the two functions consistent. */
    uint64_t t3, t4, tx, u0;
    const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL;

    VERIFY_BITS(a[0], 56);
    VERIFY_BITS(a[1], 56);
    VERIFY_BITS(a[2], 56);
    VERIFY_BITS(a[3], 56);
    VERIFY_BITS(a[4], 52);

    /* d = p3; c = p8 folded via R (see mul above for the px notation). */
    d  = (uint128_t)(a0*2) * a3
       + (uint128_t)(a1*2) * a2;
    c  = (uint128_t)a4 * a4;
    d += (uint128_t)R * (uint64_t)c; c >>= 64;
    t3 = d & M; d >>= 52;
    VERIFY_BITS(t3, 52);

    /* From here on every use of a4 is a cross term, so double it once. */
    a4 *= 2;
    d += (uint128_t)a0 * a4
       + (uint128_t)(a1*2) * a3
       + (uint128_t)a2 * a2;
    d += (uint128_t)(R << 12) * (uint64_t)c;
    t4 = d & M; d >>= 52;
    tx = (t4 >> 48); t4 &= (M >> 4);
    VERIFY_BITS(tx, 4);
    VERIFY_BITS(t4, 48);

    /* c = p0; d += p5; combine u0 with tx and fold via (R >> 4). */
    c  = (uint128_t)a0 * a0;
    d += (uint128_t)a1 * a4
       + (uint128_t)(a2*2) * a3;
    u0 = d & M; d >>= 52;
    u0 = (u0 << 4) | tx;
    c += (uint128_t)u0 * (R >> 4);
    r[0] = c & M; c >>= 52;
    VERIFY_BITS(r[0], 52);

    /* a0 is only used in cross terms below; double it once as well. */
    a0 *= 2;
    c += (uint128_t)a0 * a1;
    d += (uint128_t)a2 * a4
       + (uint128_t)a3 * a3;
    c += (d & M) * R; d >>= 52;
    r[1] = c & M; c >>= 52;
    VERIFY_BITS(r[1], 52);

    c += (uint128_t)a0 * a2
       + (uint128_t)a1 * a1;
    d += (uint128_t)a3 * a4;
    c += (uint128_t)R * (uint64_t)d; d >>= 64;
    r[2] = c & M; c >>= 52;
    VERIFY_BITS(r[2], 52);

    c += (uint128_t)(R << 12) * (uint64_t)d + t3;
    r[3] = c & M; c >>= 52;
    VERIFY_BITS(r[3], 52);
    r[4] = c + t4;
    VERIFY_BITS(r[4], 49);
}

#endif /* SECP256K1_FIELD_INNER5X52_IMPL_H */