|
#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
#define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL)
#define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
#define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
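
The four SECP256K1_N_* constants are the little-endian 64-bit limbs of the secp256k1 group order n. A minimal standalone sketch (not library code) that recomposes them into the familiar big-endian hex value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        /* Little-endian limbs of the group order n, copied from the defines above. */
        const uint64_t n_limbs[4] = {
            0xBFD25E8CD0364141ULL, /* SECP256K1_N_0, least significant */
            0xBAAEDCE6AF48A03BULL, /* SECP256K1_N_1 */
            0xFFFFFFFFFFFFFFFEULL, /* SECP256K1_N_2 */
            0xFFFFFFFFFFFFFFFFULL  /* SECP256K1_N_3, most significant */
        };
        int i;
        /* n = N_3*2^192 + N_2*2^128 + N_1*2^64 + N_0, printed as one hex number. */
        printf("n = 0x");
        for (i = 3; i >= 0; i--) {
            printf("%016llX", (unsigned long long)n_limbs[i]);
        }
        printf("\n");
        return 0;
    }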
|
#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
#define SECP256K1_N_C_1 (~SECP256K1_N_1)
#define SECP256K1_N_C_2 (1)
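
The SECP256K1_N_C_* constants are the low limbs of the complement 2^256 - n, so that 2^256 ≡ N_C (mod n). That identity is what makes reduction cheap: anything above bit 255 can be multiplied by N_C and folded back into the low 256 bits. A rough single-word folding sketch, assuming unsigned __int128 is available (the real secp256k1_scalar_reduce_512 below works with the muladd/extract macros instead):

    #include <stdint.h>

    /* Illustrative helper, not a library function: fold the word `hi` sitting
     * at weight 2^256 back into the four low limbs `lo`, using
     * hi*2^256 ≡ hi*(2^256 - n) (mod n). */
    static void fold_top_word(uint64_t lo[4], uint64_t hi) {
        const uint64_t n_c[3] = {
            0x402DA1732FC9BEBFULL, /* ~SECP256K1_N_0 + 1 */
            0x4551231950B75FC4ULL, /* ~SECP256K1_N_1 */
            1ULL                   /* SECP256K1_N_C_2 */
        };
        unsigned __int128 t = 0;
        int i;
        for (i = 0; i < 4; i++) {
            t += lo[i];
            if (i < 3) t += (unsigned __int128)hi * n_c[i];
            lo[i] = (uint64_t)t;
            t >>= 64;
        }
        /* A carry out of the loop, or a result still >= n, needs one more pass. */
    }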
|
#define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL)
#define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL)
#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)
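
The SECP256K1_N_H_* constants encode floor(n/2), the threshold used by secp256k1_scalar_is_high below. A hedged, variable-time sketch of that comparison (the library routine computes the same result without branching on the data):

    #include <stdint.h>

    /* Illustrative only: return 1 when the 4x64 little-endian scalar d is
     * strictly greater than n/2, comparing limbs from most to least significant. */
    static int is_high_sketch(const uint64_t d[4]) {
        static const uint64_t n_h[4] = {
            0xDFE92F46681B20A0ULL, /* SECP256K1_N_H_0 */
            0x5D576E7357A4501DULL, /* SECP256K1_N_H_1 */
            0xFFFFFFFFFFFFFFFFULL, /* SECP256K1_N_H_2 */
            0x7FFFFFFFFFFFFFFFULL  /* SECP256K1_N_H_3 */
        };
        int i;
        for (i = 3; i >= 0; i--) {
            if (d[i] > n_h[i]) return 1;
            if (d[i] < n_h[i]) return 0;
        }
        return 0; /* equal to n/2 counts as "not high" */
    }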
|
#define muladd(a, b)
    Add a*b to the number defined by (c0,c1,c2).

#define muladd_fast(a, b)
    Add a*b to the number defined by (c0,c1).

#define sumadd(a)
    Add a to the number defined by (c0,c1,c2).

#define sumadd_fast(a)
    Add a to the number defined by (c0,c1).

#define extract(n)
    Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits.
|
|
#define extract_fast(n)
    Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits; c2 must already be zero.
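
The muladd/sumadd/extract macro family implements a three-word column accumulator for schoolbook multiplication: (c0,c1,c2) hold the running sum for one 64-bit output column plus carries, partial products are folded in, and extract emits the finished column. A self-contained sketch of the same idea using unsigned __int128 (an assumption; the library macros track the carries with explicit 64-bit words instead):

    #include <stdint.h>

    /* Illustrative 192-bit column accumulator, not the library macros. */
    typedef struct { uint64_t c0, c1, c2; } acc192;

    /* Add the 128-bit product a*b into (c0,c1,c2), propagating carries. */
    static void acc_muladd(acc192 *c, uint64_t a, uint64_t b) {
        unsigned __int128 t = (unsigned __int128)a * b;
        unsigned __int128 s = (unsigned __int128)c->c0 + (uint64_t)t;  /* low half of a*b */
        c->c0 = (uint64_t)s;
        s = (s >> 64) + c->c1 + (uint64_t)(t >> 64);                   /* carry + high half */
        c->c1 = (uint64_t)s;
        c->c2 += (uint64_t)(s >> 64);
    }

    /* Return the finished low word and shift the accumulator down one limb. */
    static uint64_t acc_extract(acc192 *c) {
        uint64_t n = c->c0;
        c->c0 = c->c1;
        c->c1 = c->c2;
        c->c2 = 0;
        return n;
    }

Roughly, each of the eight output limbs of secp256k1_scalar_mul_512 below is produced by a run of muladd calls for one column followed by a single extract, with the _fast variants used where the stricter no-overflow preconditions hold.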
|
|
|
static SECP256K1_INLINE void secp256k1_scalar_clear (secp256k1_scalar *r)
static SECP256K1_INLINE void secp256k1_scalar_set_int (secp256k1_scalar *r, unsigned int v)
static SECP256K1_INLINE unsigned int secp256k1_scalar_get_bits (const secp256k1_scalar *a, unsigned int offset, unsigned int count)
static SECP256K1_INLINE unsigned int secp256k1_scalar_get_bits_var (const secp256k1_scalar *a, unsigned int offset, unsigned int count)
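
secp256k1_scalar_get_bits reads a window of count bits starting at bit offset from the 4x64 little-endian representation; the _var variant additionally copes with windows that straddle a limb boundary. A hedged sketch of the single-limb case, assuming 0 < count < 32 so the result fits an unsigned int and the mask shift is well defined:

    #include <stdint.h>

    /* Illustrative only: extract `count` bits at bit position `offset` when the
     * window lies entirely inside one 64-bit limb. */
    static unsigned int get_bits_sketch(const uint64_t d[4], unsigned int offset, unsigned int count) {
        return (unsigned int)((d[offset >> 6] >> (offset & 0x3F)) & ((1ULL << count) - 1));
    }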
|
static SECP256K1_INLINE int secp256k1_scalar_check_overflow (const secp256k1_scalar *a)
static SECP256K1_INLINE int secp256k1_scalar_reduce (secp256k1_scalar *r, unsigned int overflow)
static int secp256k1_scalar_add (secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b)
static void secp256k1_scalar_cadd_bit (secp256k1_scalar *r, unsigned int bit, int flag)
static void secp256k1_scalar_set_b32 (secp256k1_scalar *r, const unsigned char *b32, int *overflow)
static void secp256k1_scalar_get_b32 (unsigned char *bin, const secp256k1_scalar *a)
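
secp256k1_scalar_set_b32 parses a 32-byte big-endian value, reduces it modulo n, and reports through *overflow whether a reduction was needed; secp256k1_scalar_get_b32 writes the canonical 32-byte big-endian encoding back out. A hedged usage sketch, assuming the library's internal scalar headers are included:

    unsigned char buf[32] = {0};            /* 32 big-endian input bytes */
    unsigned char out[32];
    secp256k1_scalar s;
    int overflow = 0;

    secp256k1_scalar_set_b32(&s, buf, &overflow);
    if (overflow) {
        /* input was >= n and has been reduced modulo n */
    }
    secp256k1_scalar_get_b32(out, &s);      /* out matches buf whenever overflow == 0 */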
|
static SECP256K1_INLINE int secp256k1_scalar_is_zero (const secp256k1_scalar *a)
static void secp256k1_scalar_negate (secp256k1_scalar *r, const secp256k1_scalar *a)
static SECP256K1_INLINE int secp256k1_scalar_is_one (const secp256k1_scalar *a)
static int secp256k1_scalar_is_high (const secp256k1_scalar *a)
static int secp256k1_scalar_cond_negate (secp256k1_scalar *r, int flag)
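
secp256k1_scalar_is_high (scalar greater than n/2) and secp256k1_scalar_cond_negate together give a branch-free "low-S" style normalisation: cond_negate replaces the scalar with its negation modulo n only when the flag is set. A hedged usage sketch, assuming the internal scalar headers are included:

    secp256k1_scalar s;                     /* some scalar, e.g. an ECDSA s value */
    int high = secp256k1_scalar_is_high(&s);
    secp256k1_scalar_cond_negate(&s, high); /* s becomes n - s only if s was > n/2 */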
|
static void secp256k1_scalar_reduce_512 (secp256k1_scalar *r, const uint64_t *l)
static void secp256k1_scalar_mul_512 (uint64_t l[8], const secp256k1_scalar *a, const secp256k1_scalar *b)
static void secp256k1_scalar_mul (secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b)
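
secp256k1_scalar_mul is naturally structured as a full 512-bit schoolbook product (secp256k1_scalar_mul_512) followed by a reduction modulo n (secp256k1_scalar_reduce_512). A hedged sketch of that composition:

    static void scalar_mul_sketch(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
        uint64_t l[8];                       /* 512-bit product, little-endian limbs */
        secp256k1_scalar_mul_512(l, a, b);   /* l = a * b */
        secp256k1_scalar_reduce_512(r, l);   /* r = l mod n */
    }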
|
static int secp256k1_scalar_shr_int (secp256k1_scalar *r, int n)
static void secp256k1_scalar_split_128 (secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k)
static SECP256K1_INLINE int secp256k1_scalar_eq (const secp256k1_scalar *a, const secp256k1_scalar *b)
static SECP256K1_INLINE void secp256k1_scalar_mul_shift_var (secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift)
static SECP256K1_INLINE void secp256k1_scalar_cmov (secp256k1_scalar *r, const secp256k1_scalar *a, int flag)
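
secp256k1_scalar_cmov conditionally copies a into r without branching on the flag, which keeps control flow and memory access independent of secret data. A hedged sketch of the usual mask-and-blend technique over the four limbs:

    #include <stdint.h>

    /* Illustrative only: copy a into r when flag is non-zero, leave r unchanged
     * otherwise, using an all-ones/all-zeros mask instead of a branch. */
    static void cmov_sketch(uint64_t r[4], const uint64_t a[4], int flag) {
        uint64_t mask = -(uint64_t)(flag != 0);
        int i;
        for (i = 0; i < 4; i++) {
            r[i] = (r[i] & ~mask) | (a[i] & mask);
        }
    }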
|
static void secp256k1_scalar_from_signed62 (secp256k1_scalar *r, const secp256k1_modinv64_signed62 *a)
static void secp256k1_scalar_to_signed62 (secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a)
static void secp256k1_scalar_inverse (secp256k1_scalar *r, const secp256k1_scalar *x)
static void secp256k1_scalar_inverse_var (secp256k1_scalar *r, const secp256k1_scalar *x)
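
Both inversion routines compute 1/x modulo n, converting to and from the signed62 form used by the modinv64 code; secp256k1_scalar_inverse avoids secret-dependent branches and is intended for secret inputs, while secp256k1_scalar_inverse_var is faster but variable time. A hedged usage sketch, assuming the internal scalar headers are included:

    secp256k1_scalar x;                       /* non-zero scalar to invert */
    secp256k1_scalar x_inv;

    secp256k1_scalar_inverse(&x_inv, &x);     /* constant time: fine for secret x */
    secp256k1_scalar_inverse_var(&x_inv, &x); /* variable time: public x only */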
|
static SECP256K1_INLINE int secp256k1_scalar_is_even (const secp256k1_scalar *a)
|