#include "ecdsa.h"
#include "stdio.h"
#include "stdlib.h"

/* One digit is uint64_t qword. */
#define ECC_CURVE_NIST_P192_DIGITS  3
#define ECC_CURVE_NIST_P256_DIGITS  4
#define ECC_MAX_DIGITS             (512 / 64)

#define ECC_DIGITS_TO_BYTES_SHIFT 3

#ifndef bool
#define bool int
#define true 1
#define false 0
#endif

#define max(a, b) ((a) > (b) ? (a) : (b))
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

/**
 * struct ecc_point - elliptic curve point in affine coordinates
 *
 * @x:        X coordinate in vli form.
 * @y:        Y coordinate in vli form.
 * @ndigits:    Length of vlis in uint64_t qwords.
 *
 * Coordinates are stored least-significant digit first (vli order).
 * The pair (0, 0) encodes the point at infinity (see ecc_point_is_zero()).
 */
struct ecc_point {
    u64 *x;
    u64 *y;
    u8 ndigits;
};

#define ECC_POINT_INIT(x, y, ndigits)    (struct ecc_point) { x, y, ndigits }

/**
 * struct ecc_curve - definition of elliptic curve
 *
 * @name:    Short name of the curve.
 * @g:        Generator point of the curve.
 * @p:        Prime number, if Barrett's reduction is used for this curve
 *        pre-calculated value 'mu' is appended to the @p after ndigits.
 *        Use of Barrett's reduction is heuristically determined in
 *        vli_mmod_fast().
 * @n:        Order of the curve group.
 * @a:        Curve parameter a.
 * @b:        Curve parameter b.
 *
 * All parameter vlis are stored least-significant 64-bit digit first and
 * have @g.ndigits digits.
 */
struct ecc_curve {
    char *name;
    struct ecc_point g;
    u64 *p;
    u64 *n;
    u64 *a;
    u64 *b;
};

/* NIST P-192: a = p - 3 */
/* Constants are little-endian vlis: index 0 is the least significant
 * 64-bit digit.
 */
static u64 nist_p192_g_x[] = { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull,
                0x188DA80EB03090F6ull };
static u64 nist_p192_g_y[] = { 0x73F977A11E794811ull, 0x631011ED6B24CDD5ull,
                0x07192B95FFC8DA78ull };
static u64 nist_p192_p[] = { 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFEull,
                0xFFFFFFFFFFFFFFFFull };
static u64 nist_p192_n[] = { 0x146BC9B1B4D22831ull, 0xFFFFFFFF99DEF836ull,
                0xFFFFFFFFFFFFFFFFull };
static u64 nist_p192_a[] = { 0xFFFFFFFFFFFFFFFCull, 0xFFFFFFFFFFFFFFFEull,
                0xFFFFFFFFFFFFFFFFull };
static u64 nist_p192_b[] = { 0xFEB8DEECC146B9B1ull, 0x0FA7E9AB72243049ull,
                0x64210519E59C80E7ull };
static struct ecc_curve nist_p192 = {
    .name = "nist_192",
    .g = {
        .x = nist_p192_g_x,
        .y = nist_p192_g_y,
        .ndigits = 3,
    },
    .p = nist_p192_p,
    .n = nist_p192_n,
    .a = nist_p192_a,
    .b = nist_p192_b
};

/* NIST P-256: a = p - 3 */
/* Constants are little-endian vlis: index 0 is the least significant
 * 64-bit digit.
 */
static u64 nist_p256_g_x[] = { 0xF4A13945D898C296ull, 0x77037D812DEB33A0ull,
                0xF8BCE6E563A440F2ull, 0x6B17D1F2E12C4247ull };
static u64 nist_p256_g_y[] = { 0xCBB6406837BF51F5ull, 0x2BCE33576B315ECEull,
                0x8EE7EB4A7C0F9E16ull, 0x4FE342E2FE1A7F9Bull };
static u64 nist_p256_p[] = { 0xFFFFFFFFFFFFFFFFull, 0x00000000FFFFFFFFull,
                0x0000000000000000ull, 0xFFFFFFFF00000001ull };
static u64 nist_p256_n[] = { 0xF3B9CAC2FC632551ull, 0xBCE6FAADA7179E84ull,
                0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFF00000000ull };
static u64 nist_p256_a[] = { 0xFFFFFFFFFFFFFFFCull, 0x00000000FFFFFFFFull,
                0x0000000000000000ull, 0xFFFFFFFF00000001ull };
static u64 nist_p256_b[] = { 0x3BCE3C3E27D2604Bull, 0x651D06B0CC53B0F6ull,
                0xB3EBBD55769886BCull, 0x5AC635D8AA3A93E7ull };
static struct ecc_curve nist_p256 = {
    .name = "nist_256",
    .g = {
        .x = nist_p256_g_x,
        .y = nist_p256_g_y,
        .ndigits = 4,
    },
    .p = nist_p256_p,
    .n = nist_p256_n,
    .a = nist_p256_a,
    .b = nist_p256_b
};

/* secp256k1: a = 0, b = 7 (the special-cased doubling in
 * ecc_point_double_jacobian_secp256k1() relies on a == 0).
 */
static u64 nist_k256_g_x[] = { 0x59F2815B16F81798ull, 0x029BFCDB2DCE28D9ull,
                0x55A06295CE870B07ull, 0x79BE667EF9DCBBACull };
static u64 nist_k256_g_y[] = { 0x9C47D08FFB10D4B8ull, 0xFD17B448A6855419ull,
                0x5DA4FBFC0E1108A8ull, 0x483ADA7726A3C465ull };
static u64 nist_k256_p[] = { 0xFFFFFFFEFFFFFC2Full, 0xFFFFFFFFFFFFFFFFull,
                0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
static u64 nist_k256_n[] = { 0xBFD25E8CD0364141ull, 0xBAAEDCE6AF48A03Bull,
                0xFFFFFFFFFFFFFFFEull, 0xFFFFFFFFFFFFFFFFull };
static u64 nist_k256_a[] = { 0x0000000000000000ull, 0x0000000000000000ull,
                0x0000000000000000ull, 0x0000000000000000ull };
static u64 nist_k256_b[] = { 0x0000000000000007ull, 0x0000000000000000ull,
                0x0000000000000000ull, 0x0000000000000000ull };
/* NOTE(review): 'sepc256k1' looks like a typo for 'secp256k1'; it is
 * referenced by name elsewhere in this file, so it is left unchanged here.
 */
static struct ecc_curve sepc256k1 = {
    .name = "secp256k1",
    .g = {
        .x = nist_k256_g_x,
        .y = nist_k256_g_y,
        .ndigits = 4,
    },
    .p = nist_k256_p,
    .n = nist_k256_n,
    .a = nist_k256_a,
    .b = nist_k256_b
};

/* Portable 128-bit accumulator for 64x64-bit products: m_low holds the
 * low 64 bits, m_high the high 64 bits (see mul_64_64()/add_128_128()).
 */
typedef struct {
    u64 m_low;
    u64 m_high;
} uint128_t;


/* Forward declarations for routines defined later in this file (or in
 * the companion translation unit) that are used before their definitions.
 */
int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve, struct ecc_point *pk);
void ecc_swap_digits(const u64 *in, u64 *out, unsigned int ndigits);
void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right, const u64 *mod, unsigned int ndigits);
void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod, unsigned int ndigits);
void ecc_point_mult_shamir(const struct ecc_point *result,
               const u64 *u1, const struct ecc_point *p,
               const u64 *u2, const struct ecc_point *q,
               const struct ecc_curve *curve);

/* Basic vli helpers shared with other translation units. */
bool vli_is_zero(const u64 *vli, unsigned int ndigits);
int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits);
u64 vli_sub(u64 *result, const u64 *left, const u64 *right, unsigned int ndigits);


#ifdef LINUX_DEV_RANDOM
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>

#ifndef O_CLOEXEC
#define O_CLOEXEC 0
#endif

/* Fill the nbytes buffer at p_vli from the system CSPRNG.
 * Prefers the non-blocking /dev/urandom and falls back to /dev/random.
 * Returns 0 on success, -1 on failure.
 */
static int get_random_number(u64 *p_vli, unsigned int nbytes)
{
    int l_fd = open("/dev/urandom", O_RDONLY | O_CLOEXEC);
    if(l_fd == -1)
    {
        l_fd = open("/dev/random", O_RDONLY | O_CLOEXEC);
        if(l_fd == -1)
        {
            return -1;
        }
    }

    char *l_ptr = (char *)p_vli;
    size_t l_left = nbytes;
    while(l_left > 0)
    {
        /* read() returns ssize_t; the old 'int' could truncate the
         * result on LP64 platforms.  0 (EOF) is treated as failure too.
         */
        ssize_t l_read = read(l_fd, l_ptr, l_left);
        if(l_read <= 0)
        {
            close(l_fd);
            return -1;
        }
        l_left -= (size_t)l_read;
        l_ptr += l_read;
    }

    close(l_fd);
    return 0;
}
#else
/* Hook for the embedder to supply a cryptographic RNG; must fill nbytes
 * at p_vli and return 0 on success.
 */
int (*external_get_random_number)(u64 *p_vli, unsigned int nbytes) = NULL;

/* Fill nbytes at p_vli from the registered external RNG.
 * Returns -1 when no RNG has been registered: the previous code returned
 * 0 in that case, falsely reporting success while leaving the buffer
 * uninitialized, so callers would derive keys from indeterminate memory.
 */
static int get_random_number(u64 *p_vli, unsigned int nbytes)
{
    if(external_get_random_number)
        return external_get_random_number(p_vli, nbytes);
    return -1;
}

/* Register the RNG callback; rejects NULL with -EINVAL. */
int register_external_get_random_number(int (*external)(u64 *p_vli, unsigned int nbytes))
{
    if(!external)
        return -EINVAL;
    external_get_random_number = external;
    return 0;
}
#endif
/* Map a curve identifier onto its static parameter table.
 * Returns NULL for an unknown curve id.
 */
static inline const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
{
    if (curve_id == ECC_CURVE_NIST_P192)
        return &nist_p192;
    if (curve_id == ECC_CURVE_NIST_P256)
        return &nist_p256;
    if (curve_id == ECC_CURVE_SECP256K1)
        return &sepc256k1;

    return NULL;
}

/* Allocate an uninitialized vli of ndigits 64-bit digits.
 * Returns NULL when ndigits is 0 or allocation fails; free with
 * ecc_free_digits_space().
 */
static u64 *ecc_alloc_digits_space(unsigned int ndigits)
{
    size_t len = ndigits * sizeof(u64);

    if (!len)
        return NULL;

    /* No cast: malloc() returns void *, which converts implicitly in C. */
    return malloc(len);
}

/* Counterpart of ecc_alloc_digits_space(); free(NULL) is a safe no-op. */
static void ecc_free_digits_space(u64 *space)
{
    free(space);
}

/* Allocate an ecc_point with ndigits-sized x/y coordinate vlis.
 * Returns NULL on any allocation failure; partial allocations are
 * released via the goto cleanup chain so nothing leaks.  Free the
 * result with ecc_free_point().
 */
static struct ecc_point *ecc_alloc_point(unsigned int ndigits)
{
    /* No malloc cast needed in C; sizeof(*p) tracks the variable's type. */
    struct ecc_point *p = malloc(sizeof(*p));

    if (!p)
        return NULL;

    p->x = ecc_alloc_digits_space(ndigits);
    if (!p->x)
        goto err_alloc_x;

    p->y = ecc_alloc_digits_space(ndigits);
    if (!p->y)
        goto err_alloc_y;

    p->ndigits = ndigits;

    return p;

err_alloc_y:
    ecc_free_digits_space(p->x);
err_alloc_x:
    free(p);
    return NULL;
}

/* Release a point allocated by ecc_alloc_point(); NULL is ignored. */
static void ecc_free_point(struct ecc_point *p)
{
    if (p) {
        free(p->x);
        free(p->y);
        free(p);
    }
}

/* Set every 64-bit digit of the vli to zero. */
static void vli_clear(u64 *vli, unsigned int ndigits)
{
    while (ndigits--)
        *vli++ = 0;
}

/* Returns true if vli == 0, false otherwise.
 * A vli is zero iff every one of its digits is zero.
 */
bool vli_is_zero(const u64 *vli, unsigned int ndigits)
{
    unsigned int i = 0;

    while (i < ndigits) {
        if (vli[i] != 0)
            return false;
        i++;
    }

    return true;
}

/* Returns nonzero if bit 'bit' of vli is set.
 * Note: the return value is the masked bit itself (a power of two), not
 * 0/1; callers use it only for truthiness.
 */
static u64 vli_test_bit(const u64 *vli, unsigned int bit)
{
    return (vli[bit / 64] & ((u64)1 << (bit % 64)));
}

/* Counts the number of 64-bit "digits" in vli, i.e. the index of the
 * most significant nonzero digit plus one (0 for a zero vli).
 */
static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits)
{
    int i = (int)ndigits - 1;

    /* Scan down from the top; most inputs have their top digit set,
     * so this usually stops immediately.
     */
    while (i >= 0 && vli[i] == 0)
        i--;

    return (unsigned int)(i + 1);
}

/* Counts the number of bits required to represent vli (0 for zero). */
static unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits)
{
    unsigned int used = vli_num_digits(vli, ndigits);
    unsigned int bits = 0;
    u64 top;

    if (!used)
        return 0;

    /* 64 bits for each full digit below the top one, plus the bit
     * length of the top digit itself.
     */
    for (top = vli[used - 1]; top; top >>= 1)
        bits++;

    return (used - 1) * 64 + bits;
}

/* Sets dest = src, digit by digit. */
static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
{
    while (ndigits--)
        *dest++ = *src++;
}

/* Returns the sign of left - right: 1, -1, or 0.
 * Compares from the most significant digit down; the first digit that
 * differs decides the ordering.
 */
int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
{
    int i;

    for (i = (int)ndigits - 1; i >= 0; i--) {
        if (left[i] != right[i])
            return left[i] > right[i] ? 1 : -1;
    }

    return 0;
}

/* Computes vli = vli >> 1 (a whole-number halving), propagating the
 * dropped low bit of each digit down into the digit below it.
 */
static void vli_rshift1(u64 *vli, unsigned int ndigits)
{
    u64 carry = 0;
    unsigned int i = ndigits;

    while (i-- > 0) {
        u64 cur = vli[i];

        vli[i] = (cur >> 1) | carry;
        carry = cur << 63;
    }
}

/* Computes result = left + right, returning carry. Can modify in place. */
static u64 vli_add(u64 *result, const u64 *left, const u64 *right,
           unsigned int ndigits)
{
    u64 carry = 0;
    unsigned int i;

    for (i = 0; i < ndigits; i++) {
        u64 sum;

        sum = left[i] + right[i] + carry;
        /* If sum == left[i], then right[i] + carry wrapped a full 2^64
         * (or was 0), and the previous carry value is still the right
         * one to propagate; otherwise the new carry is (sum < left[i]).
         */
        if (sum != left[i])
            carry = (sum < left[i]);

        result[i] = sum;
    }

    return carry;
}

/* Computes result = left - right, returning borrow. Can modify in place. */
u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
           unsigned int ndigits)
{
    u64 borrow = 0;
    unsigned int i;

    for (i = 0; i < ndigits; i++) {
        u64 diff;

        diff = left[i] - right[i] - borrow;
        /* Mirror of the vli_add() carry trick: if diff == left[i], the
         * subtrahend wrapped a full 2^64 and the previous borrow still
         * applies; otherwise the new borrow is (diff > left[i]).
         */
        if (diff != left[i])
            borrow = (diff > left[i]);

        result[i] = diff;
    }

    return borrow;
}

/* Full 64x64 -> 128-bit multiply.  Uses the compiler's native 128-bit
 * integer when available; otherwise falls back to schoolbook
 * multiplication on 32-bit halves.
 */
static uint128_t mul_64_64(u64 left, u64 right)
{
    uint128_t result;
#if defined(CONFIG_ARCH_SUPPORTS_INT128)
    unsigned __int128 m = (unsigned __int128)left * right;

    result.m_low  = m;
    result.m_high = m >> 64;
#else
    /* Split each operand into 32-bit halves: left = a1:a0, right = b1:b0. */
    u64 a0 = left & 0xffffffffull;
    u64 a1 = left >> 32;
    u64 b0 = right & 0xffffffffull;
    u64 b1 = right >> 32;
    u64 m0 = a0 * b0;
    u64 m1 = a0 * b1;
    u64 m2 = a1 * b0;
    u64 m3 = a1 * b1;

    /* Accumulate the two middle partial products (weight 2^32). */
    m2 += (m0 >> 32);
    m2 += m1;

    /* Overflow */
    if (m2 < m1)
        m3 += 0x100000000ull;

    result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
    result.m_high = m3 + (m2 >> 32);
#endif
    return result;
}

/* 128-bit addition of two uint128_t accumulators (wraps mod 2^128). */
static uint128_t add_128_128(uint128_t a, uint128_t b)
{
    uint128_t sum;

    sum.m_low = a.m_low + b.m_low;
    /* Carry out of the low word iff the low sum wrapped. */
    sum.m_high = a.m_high + b.m_high + (sum.m_low < b.m_low);

    return sum;
}

/* Computes result = left * right as a full 2*ndigits-digit product.
 * Column-wise (Comba-style) schoolbook multiplication: for each output
 * digit k, all partial products left[i] * right[k - i] are summed into
 * the 192-bit accumulator r2:r01 before the digit is emitted.
 */
static void vli_mult(u64 *result, const u64 *left, const u64 *right,
             unsigned int ndigits)
{
    uint128_t r01 = { 0, 0 };
    u64 r2 = 0;
    unsigned int i, k;

    /* Compute each digit of result in sequence, maintaining the
     * carries.
     */
    for (k = 0; k < ndigits * 2 - 1; k++) {
        unsigned int min;

        /* Smallest i with both i and k - i in range. */
        if (k < ndigits)
            min = 0;
        else
            min = (k + 1) - ndigits;

        for (i = min; i <= k && i < ndigits; i++) {
            uint128_t product;

            product = mul_64_64(left[i], right[k - i]);

            r01 = add_128_128(r01, product);
            /* Overflow out of the 128-bit accumulator goes into r2. */
            r2 += (r01.m_high < product.m_high);
        }

        /* Emit digit k and shift the accumulator right by one digit. */
        result[k] = r01.m_low;
        r01.m_low = r01.m_high;
        r01.m_high = r2;
        r2 = 0;
    }

    result[ndigits * 2 - 1] = r01.m_low;
}

/* Computes result = left^2 as a full 2*ndigits-digit product.
 * Same column-wise scheme as vli_mult(), but each off-diagonal partial
 * product left[i]*left[k-i] (i != k-i) appears twice in the square, so
 * it is computed once and doubled in 128 bits before accumulation.
 */
static void vli_square(u64 *result, const u64 *left, unsigned int ndigits)
{
    uint128_t r01 = { 0, 0 };
    u64 r2 = 0;
    unsigned int i, k;

    for (k = 0; k < ndigits * 2 - 1; k++) {
        unsigned int min;

        if (k < ndigits)
            min = 0;
        else
            min = (k + 1) - ndigits;

        /* i <= k - i restricts to the lower triangle plus diagonal. */
        for (i = min; i <= k && i <= k - i; i++) {
            uint128_t product;

            product = mul_64_64(left[i], left[k - i]);

            if (i < k - i) {
                /* Double the off-diagonal product; the bit shifted
                 * out of the high word is carried into r2.
                 */
                r2 += product.m_high >> 63;
                product.m_high = (product.m_high << 1) |
                         (product.m_low >> 63);
                product.m_low <<= 1;
            }

            r01 = add_128_128(r01, product);
            r2 += (r01.m_high < product.m_high);
        }

        result[k] = r01.m_low;
        r01.m_low = r01.m_high;
        r01.m_high = r2;
        r2 = 0;
    }

    result[ndigits * 2 - 1] = r01.m_low;
}

/* Computes result = (left + right) % mod.
 * Assumes that left < mod and right < mod, result != mod.
 * Under those preconditions the sum is < 2*mod, so at most one
 * conditional subtraction of mod is needed.
 */
static void vli_mod_add(u64 *result, const u64 *left, const u64 *right,
            const u64 *mod, unsigned int ndigits)
{
    u64 carry;

    carry = vli_add(result, left, right, ndigits);

    /* result > mod (result = mod + remainder), so subtract mod to
     * get remainder.
     */
    if (carry || vli_cmp(result, mod, ndigits) >= 0)
        vli_sub(result, result, mod, ndigits);
}

/* Computes result = (left - right) % mod.
 * Assumes that left < mod and right < mod, result != mod.
 * A borrow means the subtraction went negative; adding mod back (with
 * the wraparound cancelling the borrow) yields the correct residue.
 */
static void vli_mod_sub(u64 *result, const u64 *left, const u64 *right,
            const u64 *mod, unsigned int ndigits)
{
    u64 borrow = vli_sub(result, left, right, ndigits);

    /* In this case, p_result == -diff == (max int) - diff.
     * Since -x % d == d - x, we can get the correct result from
     * result + mod (with overflow).
     */
    if (borrow)
        vli_add(result, result, mod, ndigits);
}

/*
 * Computes result = product % mod, where product is 2N words long.
 * Reference: Ken MacKay's micro-ecc.
 * Currently only designed to work for curve_p or curve_n.
 *
 * Classic shift-and-subtract reduction: mod is left-shifted so its top
 * bit aligns with the top of the 2N-word product, then repeatedly
 * trial-subtracted while shifting right one bit per iteration.  The two
 * buffers in v[] hold "current value" and "value after trial subtract";
 * the index i selects whichever is valid, so no copy-back is needed.
 */
static void vli_mmod_slow(u64 *result, u64 *product, const u64 *mod,
              unsigned int ndigits)
{
    u64 mod_m[2 * ECC_MAX_DIGITS];
    u64 tmp[2 * ECC_MAX_DIGITS];
    u64 *v[2] = { tmp, product };
    u64 carry = 0;
    unsigned int i;
    /* Shift mod so its highest set bit is at the maximum position. */
    int shift = (ndigits * 2 * 64) - vli_num_bits(mod, ndigits);
    int word_shift = shift / 64;
    int bit_shift = shift % 64;

    vli_clear(mod_m, word_shift);
    if (bit_shift > 0) {
        for (i = 0; i < ndigits; ++i) {
            mod_m[word_shift + i] = (mod[i] << bit_shift) | carry;
            carry = mod[i] >> (64 - bit_shift);
        }
    } else
        vli_set(mod_m + word_shift, mod, ndigits);

    /* One trial subtraction per bit position, shift + 1 iterations. */
    for (i = 1; shift >= 0; --shift) {
        u64 borrow = 0;
        unsigned int j;

        /* v[1 - i] = v[i] - mod_m; a final borrow means v[i] < mod_m
         * and the subtraction must be discarded.
         */
        for (j = 0; j < ndigits * 2; ++j) {
            u64 diff = v[i][j] - mod_m[j] - borrow;

            if (diff != v[i][j])
                borrow = (diff > v[i][j]);
            v[1 - i][j] = diff;
        }
        i = !(i ^ borrow); /* Swap the index if there was no borrow */
        /* Shift the 2N-word mod_m right by one bit. */
        vli_rshift1(mod_m, ndigits);
        mod_m[ndigits - 1] |= mod_m[ndigits] << (64 - 1);
        vli_rshift1(mod_m + ndigits, ndigits);
    }
    vli_set(result, v[i], ndigits);
}

/* Computes result = (left * right) % mod.
 * Assumes that mod is big enough curve order.
 * Composes the full 2N-digit product with the generic slow reduction.
 */
void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
               const u64 *mod, unsigned int ndigits)
{
    u64 wide[ECC_MAX_DIGITS * 2];

    vli_mult(wide, left, right, ndigits);
    vli_mmod_slow(result, wide, mod, ndigits);
}

/* Computes result = left^2 % curve_prime, via the dedicated squaring
 * routine followed by the generic slow reduction.
 */
static void vli_mod_square_slow(u64 *result, const u64 *left,
                const u64 *curve_prime, unsigned int ndigits)
{
    u64 wide[ECC_MAX_DIGITS * 2];

    vli_square(wide, left, ndigits);
    vli_mmod_slow(result, wide, curve_prime, ndigits);
}

#define EVEN(vli) (!(vli[0] & 1))
/* Computes result = (1 / p_input) % mod. All VLIs are the same size.
 * See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
 * https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf
 *
 * Binary extended Euclid.  Invariants maintained throughout:
 *   a * input^-1 == u (mod mod)  and  b * input^-1 == v (mod mod)
 * so when a == b (the gcd), u holds the inverse.  Halving u or v while
 * keeping it integral may require first adding mod (when it is odd);
 * the carry out of that addition is restored as the top bit after the
 * shift.
 */
void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
            unsigned int ndigits)
{
    u64 a[ECC_MAX_DIGITS], b[ECC_MAX_DIGITS];
    u64 u[ECC_MAX_DIGITS], v[ECC_MAX_DIGITS];
    u64 carry;
    int cmp_result;

    /* 0 has no inverse; return 0 by convention. */
    if (vli_is_zero(input, ndigits)) {
        vli_clear(result, ndigits);
        return;
    }

    vli_set(a, input, ndigits);
    vli_set(b, mod, ndigits);
    vli_clear(u, ndigits);
    u[0] = 1;
    vli_clear(v, ndigits);

    while ((cmp_result = vli_cmp(a, b, ndigits)) != 0) {
        carry = 0;

        if (EVEN(a)) {
            /* a /= 2, u /= 2 (mod mod). */
            vli_rshift1(a, ndigits);

            if (!EVEN(u))
                carry = vli_add(u, u, mod, ndigits);

            vli_rshift1(u, ndigits);
            if (carry)
                u[ndigits - 1] |= 0x8000000000000000ull;
        } else if (EVEN(b)) {
            /* b /= 2, v /= 2 (mod mod). */
            vli_rshift1(b, ndigits);

            if (!EVEN(v))
                carry = vli_add(v, v, mod, ndigits);

            vli_rshift1(v, ndigits);
            if (carry)
                v[ndigits - 1] |= 0x8000000000000000ull;
        } else if (cmp_result > 0) {
            /* a = (a - b) / 2, u = (u - v) / 2 (mod mod). */
            vli_sub(a, a, b, ndigits);
            vli_rshift1(a, ndigits);

            if (vli_cmp(u, v, ndigits) < 0)
                vli_add(u, u, mod, ndigits);

            vli_sub(u, u, v, ndigits);
            if (!EVEN(u))
                carry = vli_add(u, u, mod, ndigits);

            vli_rshift1(u, ndigits);
            if (carry)
                u[ndigits - 1] |= 0x8000000000000000ull;
        } else {
            /* b = (b - a) / 2, v = (v - u) / 2 (mod mod). */
            vli_sub(b, b, a, ndigits);
            vli_rshift1(b, ndigits);

            if (vli_cmp(v, u, ndigits) < 0)
                vli_add(v, v, mod, ndigits);

            vli_sub(v, v, u, ndigits);
            if (!EVEN(v))
                carry = vli_add(v, v, mod, ndigits);

            vli_rshift1(v, ndigits);
            if (carry)
                v[ndigits - 1] |= 0x8000000000000000ull;
        }
    }

    vli_set(result, u, ndigits);
}

/* ------ Point operations ------ */

/* Returns true if p_point is the point at infinity, false otherwise. */
static bool ecc_point_is_zero(const struct ecc_point *point)
{
    return (vli_is_zero(point->x, point->ndigits) &&
        vli_is_zero(point->y, point->ndigits));
}

/* Point multiplication algorithm using Montgomery's ladder with co-Z
 * coordinates. From http://eprint.iacr.org/2011/338.pdf
 */

/* Double in place */
/* In-place Jacobian point doubling for curves with a = p - 3 (the NIST
 * primes): on entry (x1, y1, z1) is the point; on exit the registers
 * hold the doubled point.  The a = -3 assumption appears in the slope
 * computation 3*(x1 - z1^2)*(x1 + z1^2) = 3*x1^2 + a*z1^4.
 * A zero z1 (point at infinity) is left unchanged.
 */
static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
                      u64 *curve_prime, unsigned int ndigits)
{
    /* t1 = x, t2 = y, t3 = z */
    u64 t4[ECC_MAX_DIGITS];
    u64 t5[ECC_MAX_DIGITS];

    if (vli_is_zero(z1, ndigits))
        return;

    /* t4 = y1^2 */
    vli_mod_square_slow(t4, y1, curve_prime, ndigits);
    /* t5 = x1*y1^2 = A */
    vli_mod_mult_slow(t5, x1, t4, curve_prime, ndigits);
    /* t4 = y1^4 */
    vli_mod_square_slow(t4, t4, curve_prime, ndigits);
    /* t2 = y1*z1 = z3 */
    vli_mod_mult_slow(y1, y1, z1, curve_prime, ndigits);
    /* t3 = z1^2 */
    vli_mod_square_slow(z1, z1, curve_prime, ndigits);

    /* t1 = x1 + z1^2 */
    vli_mod_add(x1, x1, z1, curve_prime, ndigits);
    /* t3 = 2*z1^2 */
    vli_mod_add(z1, z1, z1, curve_prime, ndigits);
    /* t3 = x1 - z1^2 */
    vli_mod_sub(z1, x1, z1, curve_prime, ndigits);
    /* t1 = x1^2 - z1^4 */
    vli_mod_mult_slow(x1, x1, z1, curve_prime, ndigits);

    /* t3 = 2*(x1^2 - z1^4) */
    vli_mod_add(z1, x1, x1, curve_prime, ndigits);
    /* t1 = 3*(x1^2 - z1^4) */
    vli_mod_add(x1, x1, z1, curve_prime, ndigits);
    /* Exact halving mod p: if x1 is odd, add p first so the shift does
     * not lose the low bit; the carry becomes the new top bit.
     */
    if (vli_test_bit(x1, 0)) {
        u64 carry = vli_add(x1, x1, curve_prime, ndigits);

        vli_rshift1(x1, ndigits);
        x1[ndigits - 1] |= carry << 63;
    } else {
        vli_rshift1(x1, ndigits);
    }
    /* t1 = 3/2*(x1^2 - z1^4) = B */

    /* t3 = B^2 */
    vli_mod_square_slow(z1, x1, curve_prime, ndigits);
    /* t3 = B^2 - A */
    vli_mod_sub(z1, z1, t5, curve_prime, ndigits);
    /* t3 = B^2 - 2A = x3 */
    vli_mod_sub(z1, z1, t5, curve_prime, ndigits);
    /* t5 = A - x3 */
    vli_mod_sub(t5, t5, z1, curve_prime, ndigits);
    /* t1 = B * (A - x3) */
    vli_mod_mult_slow(x1, x1, t5, curve_prime, ndigits);
    /* t4 = B * (A - x3) - y1^4 = y3 */
    vli_mod_sub(t4, x1, t4, curve_prime, ndigits);

    /* Shuffle results back into the caller's (x, y, z) registers. */
    vli_set(x1, z1, ndigits);
    vli_set(z1, y1, ndigits);
    vli_set(y1, t4, ndigits);
}

/* In-place Jacobian point doubling specialised for a = 0 (secp256k1):
 * the slope numerator reduces to 3*x1^2, so the z1^4 terms of the
 * generic a = -3 routine are not needed.  A zero Z1 (point at infinity)
 * is left unchanged.
 */
static void ecc_point_double_jacobian_secp256k1(u64 * X1, u64 * Y1,u64 * Z1,
                      u64 *curve_prime, unsigned int ndigits)
{

    /* t1 = X, t2 = Y, t3 = Z */
    u64 t4[ECC_MAX_DIGITS];
    u64 t5[ECC_MAX_DIGITS];

    if (vli_is_zero(Z1, ndigits)) {
        return;
    }

    /* t5 = y1^2 */
    vli_mod_square_slow(t5, Y1, curve_prime, ndigits);
    /* t4 = x1*y1^2 = A */
    vli_mod_mult_slow(t4, X1, t5, curve_prime, ndigits);
    /* t1 = x1^2 */
    vli_mod_square_slow(X1, X1, curve_prime, ndigits);
    /* t5 = y1^4 */
    vli_mod_square_slow(t5, t5, curve_prime, ndigits);
    /* t3 = y1*z1 = z3 */
    vli_mod_mult_slow(Z1, Y1, Z1, curve_prime, ndigits);

    /* t2 = 2*x1^2 */
    vli_mod_add(Y1, X1, X1, curve_prime, ndigits);
    /* t2 = 3*x1^2 */
    vli_mod_add(Y1, Y1, X1, curve_prime, ndigits);
    /* Exact halving mod p: add p first when odd so the low bit is not
     * lost; the carry becomes the new top bit.
     */
    if (vli_test_bit(Y1, 0)) {
        u64 carry = vli_add(Y1, Y1, curve_prime, ndigits);
        vli_rshift1(Y1, ndigits);
        Y1[ndigits - 1] |= carry << 63;
    } else {
        vli_rshift1(Y1, ndigits);
    }
    /* t2 = 3/2*(x1^2) = B */

    /* t1 = B^2 */
    vli_mod_square_slow(X1, Y1, curve_prime, ndigits);
    /* t1 = B^2 - A */
    vli_mod_sub(X1, X1, t4, curve_prime, ndigits);
    /* t1 = B^2 - 2A = x3 */
    vli_mod_sub(X1, X1, t4, curve_prime, ndigits);

    /* t4 = A - x3 */
    vli_mod_sub(t4, t4, X1, curve_prime, ndigits);
    /* t2 = B * (A - x3) */
    vli_mod_mult_slow(Y1, Y1, t4, curve_prime, ndigits);
    /* t2 = B * (A - x3) - y1^4 = y3 */
    vli_mod_sub(Y1, Y1, t5, curve_prime, ndigits);
}

/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */
/* Jacobian-to-affine scaling step: with z the (inverse) Z coordinate,
 * this maps the stored coordinates to x * z^2, y * z^3 in place.
 */
static void apply_z(u64 *x1, u64 *y1, u64 *z, u64 *curve_prime,
            unsigned int ndigits)
{
    u64 t1[ECC_MAX_DIGITS];

    vli_mod_square_slow(t1, z, curve_prime, ndigits);    /* z^2 */
    vli_mod_mult_slow(x1, x1, t1, curve_prime, ndigits); /* x1 * z^2 */
    vli_mod_mult_slow(t1, t1, z, curve_prime, ndigits);  /* z^3 */
    vli_mod_mult_slow(y1, y1, t1, curve_prime, ndigits); /* y1 * z^3 */
}

/* P = (x1, y1) => 2P, (x2, y2) => P' */
/* Initial step of the co-Z Montgomery ladder: produces 2P in (x1, y1)
 * and P (rescaled to the same Z) in (x2, y2).  p_initial_z, when
 * non-NULL, supplies a randomised starting Z (coordinate blinding);
 * otherwise Z = 1.  The secp256k1 curve gets its a = 0 doubling.
 */
static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
                u64 *p_initial_z, u64 *curve_prime,
                unsigned int ndigits, const struct ecc_curve *curve)
{
    u64 z[ECC_MAX_DIGITS];

    vli_set(x2, x1, ndigits);
    vli_set(y2, y1, ndigits);

    vli_clear(z, ndigits);
    z[0] = 1;

    if (p_initial_z)
        vli_set(z, p_initial_z, ndigits);

    apply_z(x1, y1, z, curve_prime, ndigits);

    if(curve == &sepc256k1)
        ecc_point_double_jacobian_secp256k1(x1, y1, z, curve_prime, ndigits);
    else
        ecc_point_double_jacobian(x1, y1, z, curve_prime, ndigits);

    apply_z(x2, y2, z, curve_prime, ndigits);
}

/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
 * Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
 * or P => P', Q => P + Q
 *
 * Co-Z point addition (both inputs share the same implicit Z); the new
 * common Z3 is tracked implicitly by the caller.
 */
static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
             unsigned int ndigits)
{
    /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
    u64 t5[ECC_MAX_DIGITS];

    /* t5 = x2 - x1 */
    vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
    /* t5 = (x2 - x1)^2 = A */
    vli_mod_square_slow(t5, t5, curve_prime, ndigits);
    /* t1 = x1*A = B */
    vli_mod_mult_slow(x1, x1, t5, curve_prime, ndigits);
    /* t3 = x2*A = C */
    vli_mod_mult_slow(x2, x2, t5, curve_prime, ndigits);
    /* t4 = y2 - y1 */
    vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
    /* t5 = (y2 - y1)^2 = D */
    vli_mod_square_slow(t5, y2, curve_prime, ndigits);

    /* t5 = D - B */
    vli_mod_sub(t5, t5, x1, curve_prime, ndigits);
    /* t5 = D - B - C = x3 */
    vli_mod_sub(t5, t5, x2, curve_prime, ndigits);
    /* t3 = C - B */
    vli_mod_sub(x2, x2, x1, curve_prime, ndigits);
    /* t2 = y1*(C - B) */
    vli_mod_mult_slow(y1, y1, x2, curve_prime, ndigits);
    /* t3 = B - x3 */
    vli_mod_sub(x2, x1, t5, curve_prime, ndigits);
    /* t4 = (y2 - y1)*(B - x3) */
    vli_mod_mult_slow(y2, y2, x2, curve_prime, ndigits);
    /* t4 = y3 */
    vli_mod_sub(y2, y2, y1, curve_prime, ndigits);

    vli_set(x2, t5, ndigits);
}

/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
 * Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
 * or P => P - Q, Q => P + Q
 *
 * Co-Z conjugate addition: computes both the sum and the difference of
 * the two co-Z inputs in one pass; the ladder in ecc_point_mult() uses
 * the pair to advance without ever branching on secret bits.
 */
static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
               unsigned int ndigits)
{
    /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
    u64 t5[ECC_MAX_DIGITS];
    u64 t6[ECC_MAX_DIGITS];
    u64 t7[ECC_MAX_DIGITS];

    /* t5 = x2 - x1 */
    vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
    /* t5 = (x2 - x1)^2 = A */
    vli_mod_square_slow(t5, t5, curve_prime, ndigits);
    /* t1 = x1*A = B */
    vli_mod_mult_slow(x1, x1, t5, curve_prime, ndigits);
    /* t3 = x2*A = C */
    vli_mod_mult_slow(x2, x2, t5, curve_prime, ndigits);
    /* t4 = y2 + y1 */
    vli_mod_add(t5, y2, y1, curve_prime, ndigits);
    /* t4 = y2 - y1 */
    vli_mod_sub(y2, y2, y1, curve_prime, ndigits);

    /* t6 = C - B */
    vli_mod_sub(t6, x2, x1, curve_prime, ndigits);
    /* t2 = y1 * (C - B) */
    vli_mod_mult_slow(y1, y1, t6, curve_prime, ndigits);
    /* t6 = B + C */
    vli_mod_add(t6, x1, x2, curve_prime, ndigits);
    /* t3 = (y2 - y1)^2 */
    vli_mod_square_slow(x2, y2, curve_prime, ndigits);
    /* t3 = x3 */
    vli_mod_sub(x2, x2, t6, curve_prime, ndigits);

    /* t7 = B - x3 */
    vli_mod_sub(t7, x1, x2, curve_prime, ndigits);
    /* t4 = (y2 - y1)*(B - x3) */
    vli_mod_mult_slow(y2, y2, t7, curve_prime, ndigits);
    /* t4 = y3 */
    vli_mod_sub(y2, y2, y1, curve_prime, ndigits);

    /* t7 = (y2 + y1)^2 = F */
    vli_mod_square_slow(t7, t5, curve_prime, ndigits);
    /* t7 = x3' */
    vli_mod_sub(t7, t7, t6, curve_prime, ndigits);
    /* t6 = x3' - B */
    vli_mod_sub(t6, t7, x1, curve_prime, ndigits);
    /* t6 = (y2 + y1)*(x3' - B) */
    vli_mod_mult_slow(t6, t6, t5, curve_prime, ndigits);
    /* t2 = y3' */
    vli_mod_sub(y1, t6, y1, curve_prime, ndigits);

    vli_set(x1, t7, ndigits);
}

/* Scalar multiplication result = scalar * point using the Montgomery
 * ladder with co-Z coordinates (http://eprint.iacr.org/2011/338.pdf).
 *
 * The scalar is regularized by adding the curve order once or twice
 * (sk[0] = scalar + n, sk[1] = scalar + 2n) and selecting whichever did
 * not overflow, so the ladder always walks exactly num_bits bits — the
 * iteration count does not depend on the secret scalar.  initial_z, when
 * non-NULL, supplies a randomized starting Z (coordinate blinding).
 */
static void ecc_point_mult(struct ecc_point *result,
               const struct ecc_point *point, const u64 *scalar,
               u64 *initial_z, const struct ecc_curve *curve,
               unsigned int ndigits)
{
    /* R0 and R1 */
    u64 rx[2][ECC_MAX_DIGITS];
    u64 ry[2][ECC_MAX_DIGITS];
    u64 z[ECC_MAX_DIGITS];
    u64 sk[2][ECC_MAX_DIGITS];
    u64 *curve_prime = curve->p;
    int i, nb;
    int num_bits;
    int carry;

    carry = vli_add(sk[0], scalar, curve->n, ndigits);
    vli_add(sk[1], sk[0], curve->n, ndigits);
    scalar = sk[!carry];
    num_bits = sizeof(u64) * ndigits * 8 + 1;

    vli_set(rx[1], point->x, ndigits);
    vli_set(ry[1], point->y, ndigits);

    xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z, curve_prime,
                ndigits, curve);

    /* Ladder step per scalar bit: a conjugate addition followed by an
     * addition, with the register pair indexed by the bit value.
     */
    for (i = num_bits - 2; i > 0; i--) {
        nb = !vli_test_bit(scalar, i);
        xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime,
               ndigits);
        xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime,
             ndigits);
    }

    nb = !vli_test_bit(scalar, 0);
    xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime,
           ndigits);

    /* Find final 1/Z value. */
    /* X1 - X0 */
    vli_mod_sub(z, rx[1], rx[0], curve_prime, ndigits);
    /* Yb * (X1 - X0) */
    vli_mod_mult_slow(z, z, ry[1 - nb], curve_prime, ndigits);
    /* xP * Yb * (X1 - X0) */
    vli_mod_mult_slow(z, z, point->x, curve_prime, ndigits);

    /* 1 / (xP * Yb * (X1 - X0)).
     * Fix: use the same 'ndigits' as every other step in this function;
     * the old code passed point->ndigits here, which is wrong whenever
     * the caller's ndigits differs from the point's own digit count.
     */
    vli_mod_inv(z, z, curve_prime, ndigits);

    /* yP / (xP * Yb * (X1 - X0)) */
    vli_mod_mult_slow(z, z, point->y, curve_prime, ndigits);
    /* Xb * yP / (xP * Yb * (X1 - X0)) */
    vli_mod_mult_slow(z, z, rx[1 - nb], curve_prime, ndigits);
    /* End 1/Z calculation */

    xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime, ndigits);

    apply_z(rx[0], ry[0], z, curve_prime, ndigits);

    vli_set(result->x, rx[0], ndigits);
    vli_set(result->y, ry[0], ndigits);
}

/* Computes R = P + Q mod p */
/* Affine point addition built on the co-Z primitive: z accumulates the
 * implicit Z introduced by xycz_add(), which is then inverted and
 * applied to recover affine coordinates in result.
 * NOTE(review): no handling for P == Q or P == -Q is visible here;
 * presumably callers guarantee distinct, non-inverse inputs — confirm.
 */
static void ecc_point_add(const struct ecc_point *result,
           const struct ecc_point *p, const struct ecc_point *q,
           const struct ecc_curve *curve)
{
    u64 z[ECC_MAX_DIGITS];
    u64 px[ECC_MAX_DIGITS];
    u64 py[ECC_MAX_DIGITS];
    unsigned int ndigits = curve->g.ndigits;

    vli_set(result->x, q->x, ndigits);
    vli_set(result->y, q->y, ndigits);
    /* z = x2 - x1: the Z factor produced by the co-Z addition below. */
    vli_mod_sub(z, result->x, p->x, curve->p, ndigits);
    vli_set(px, p->x, ndigits);
    vli_set(py, p->y, ndigits);
    xycz_add(px, py, result->x, result->y, curve->p, ndigits);
    vli_mod_inv(z, z, curve->p, ndigits);
    apply_z(result->x, result->y, z, curve->p, ndigits);
}

/* Computes R = u1P + u2Q mod p using Shamir's trick.
 * Based on: Kenneth MacKay's micro-ecc (2014).
 *
 * A single double-and-add pass over both scalars simultaneously: at
 * each bit position the pair (bit of u1, bit of u2) selects one of
 * {nothing, P, Q, P+Q} to add.  Used for signature verification where
 * the scalars are public, so no constant-time precautions are taken.
 *
 * NOTE(review): if u1 and u2 are both zero, num_bits is 0 and the
 * unsigned num_bits - 1 underflows before being assigned to i —
 * presumably callers reject zero scalars beforehand; confirm.
 */
void ecc_point_mult_shamir(const struct ecc_point *result,
               const u64 *u1, const struct ecc_point *p,
               const u64 *u2, const struct ecc_point *q,
               const struct ecc_curve *curve)
{
    u64 z[ECC_MAX_DIGITS];
    u64 sump[2][ECC_MAX_DIGITS];
    u64 *rx = result->x;
    u64 *ry = result->y;
    unsigned int ndigits = curve->g.ndigits;
    unsigned int num_bits;
    struct ecc_point sum = ECC_POINT_INIT(sump[0], sump[1], ndigits);
    const struct ecc_point *points[4];
    const struct ecc_point *point;
    unsigned int idx;
    int i;

    /* Precompute P + Q; table index is (u1 bit) | (u2 bit << 1). */
    ecc_point_add(&sum, p, q, curve);
    points[0] = NULL;
    points[1] = p;
    points[2] = q;
    points[3] = &sum;

    num_bits = max(vli_num_bits(u1, ndigits),
               vli_num_bits(u2, ndigits));
    /* At the top bit position at least one scalar has a 1, so idx != 0
     * and the initial 'point' is never NULL.
     */
    i = num_bits - 1;
    idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
    point = points[idx];

    vli_set(rx, point->x, ndigits);
    vli_set(ry, point->y, ndigits);
    vli_clear(z + 1, ndigits - 1);
    z[0] = 1;

    for (--i; i >= 0; i--) {

        if(curve == &sepc256k1)
            ecc_point_double_jacobian_secp256k1(rx, ry, z, curve->p, ndigits);
        else
            ecc_point_double_jacobian(rx, ry, z, curve->p, ndigits);

        idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
        point = points[idx];
        if (point) {
            u64 tx[ECC_MAX_DIGITS];
            u64 ty[ECC_MAX_DIGITS];
            u64 tz[ECC_MAX_DIGITS];

            /* Scale the table point to the current Z, add it, and
             * fold the Z factor from xycz_add() into z.
             */
            vli_set(tx, point->x, ndigits);
            vli_set(ty, point->y, ndigits);
            apply_z(tx, ty, z, curve->p, ndigits);
            vli_mod_sub(tz, rx, tx, curve->p, ndigits);
            xycz_add(tx, ty, rx, ry, curve->p, ndigits);
            vli_mod_mult_slow(z, z, tz, curve->p, ndigits);
        }
    }
    /* Back to affine coordinates. */
    vli_mod_inv(z, z, curve->p, ndigits);
    apply_z(rx, ry, z, curve->p, ndigits);
}

/* Runtime endianness probe: returns 1 on little-endian hosts, 0 on
 * big-endian ones, by inspecting the first byte of an int holding 1.
 */
static inline int check_cpu_type(void)
{
    union {
        int word;
        char byte;
    } probe;

    probe.word = 1;
    return probe.byte == 1 ? 1 : 0;
}

/*
 * Convert a big-endian digit array into the CPU-native u64 digit array:
 * the digit order is reversed, and on little-endian hosts each 64-bit
 * digit is additionally byte-swapped. @in and @out must not overlap.
 */
void ecc_swap_digits(const u64 *in, u64 *out,
                   unsigned int ndigits)
{
    const int little_endian = check_cpu_type();
    unsigned int i;

    for (i = 0; i < ndigits; i++) {
        u64 digit = in[ndigits - 1 - i];

        if (little_endian) {
            /* Byte-reverse into out[i]; 'digit' is a private copy. */
            ecc_big_u8_to_little_u64((u8 *)&digit, &out[i], 8);
        } else {
            out[i] = digit;
        }
    }
}

/**
 * ecc_big_u8_to_little_u64() - byte-reverse each 8-byte chunk of @in into @out.
 * @in:  source byte buffer, @len bytes (must be a multiple of 8)
 * @out: destination u64 buffer, at least @len bytes
 * @len: number of bytes to convert
 *
 * Each consecutive group of 8 bytes from @in is written to @out in reversed
 * byte order, converting big-endian 64-bit words to little-endian (and vice
 * versa). @in and @out must not overlap.
 */
void ecc_big_u8_to_little_u64(const u8 *in, u64 *out, unsigned int len)
{
    unsigned int i, j;
    u8 *dest = (u8 *)out;

    for (i = 0; i < len; i = i + 8) {
        for (j = 0; j < 8; j++) {
            /*
             * Reverse within the current chunk: output byte j of the
             * chunk comes from input byte (7 - j) of the same chunk.
             * The previous index expression, in[7 + i - j], re-read
             * the FIRST chunk's bytes for every chunk with i > 0,
             * corrupting any conversion longer than 8 bytes.
             */
            dest[i + j] = in[i + 7 - j];
        }
    }
}

/*
 * Validate that @private_key is a usable scalar for @curve: the digit
 * count must match the curve and the value must lie in [2, n-3].
 * Returns 0 when valid, -EINVAL otherwise.
 */
static int __ecc_is_key_valid(const struct ecc_curve *curve,
                  const u64 *private_key, unsigned int ndigits)
{
    u64 one[ECC_MAX_DIGITS] = { 1, };
    u64 upper[ECC_MAX_DIGITS];

    if (!private_key)
        return -EINVAL;

    if (curve->g.ndigits != ndigits)
        return -EINVAL;

    /* Reject private_key <= 1. */
    if (vli_cmp(one, private_key, ndigits) != -1)
        return -EINVAL;

    /* upper = n - 2; reject private_key >= n - 2. */
    vli_sub(upper, curve->n, one, ndigits);
    vli_sub(upper, upper, one, ndigits);
    if (vli_cmp(upper, private_key, ndigits) != 1)
        return -EINVAL;

    return 0;
}

/**
 * ecc_is_key_valid() - validate a raw private key buffer for a curve.
 * @curve_id:        identifier of the curve to validate against
 * @ndigits:         expected number of u64 digits in the key
 * @private_key:     the private scalar, in vli digit form
 * @private_key_len: length of @private_key in bytes
 *
 * Returns 0 when the key is valid for the curve, -EINVAL otherwise.
 */
int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
             const u64 *private_key, unsigned int private_key_len)
{
    unsigned int nbytes;
    const struct ecc_curve *curve = ecc_get_curve(curve_id);

    /*
     * ecc_get_curve() returns NULL for unknown curve ids; without this
     * check __ecc_is_key_valid() would dereference a NULL curve.
     */
    if (!curve)
        return -EINVAL;

    nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;

    if (private_key_len != nbytes)
        return -EINVAL;

    return __ecc_is_key_valid(curve, private_key, ndigits);
}

/*
 * ECC private keys are generated using the method of extra random bits,
 * equivalent to that described in FIPS 186-4, Appendix B.4.1.
 *
 * d = (c mod(n–1)) + 1    where c is a string of random bits, 64 bits longer
 *                         than requested
 * 0 <= c mod(n-1) <= n-2  and implies that
 * 1 <= d <= n-1
 *
 * This method generates a private key uniformly distributed in the range
 * [1, n-1].
 *
 * Returns 0 on success and writes the big-endian key to @privkey;
 * returns -EINVAL on unknown curve, RNG failure or out-of-range key.
 */
int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
{
    const struct ecc_curve *curve = ecc_get_curve(curve_id);
    u64 priv[ECC_MAX_DIGITS];
    unsigned int nbytes;
    unsigned int nbits;
    int err;

    /*
     * ecc_get_curve() returns NULL for unknown curve ids; the original
     * code dereferenced curve->n below without checking.
     */
    if (!curve)
        return -EINVAL;

    nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
    nbits = vli_num_bits(curve->n, ndigits);

    /* Check that N is included in Table 1 of FIPS 186-4, section 6.1.1 */
    if (nbits < 160 || ndigits > ARRAY_SIZE(priv))
        return -EINVAL;

    /*
     * FIPS 186-4 recommends that the private key should be obtained from a
     * RBG with a security strength equal to or greater than the security
     * strength associated with N.
     *
     * The maximum security strength identified by NIST SP800-57pt1r4 for
     * ECC is 256 (N >= 512).
     *
     * This condition is met by the default RNG because it selects a favored
     * DRBG with a security strength of 256.
     */

    err = get_random_number(priv, nbytes);
    if (err)
        return -EINVAL;

    /* Make sure the private key is in the valid range. */
    if (__ecc_is_key_valid(curve, priv, ndigits))
        return -EINVAL;

    ecc_swap_digits(priv, privkey, ndigits);

    /*
     * Wipe the stack copy of the secret. NOTE(review): vli_clear() is a
     * plain store loop and may be elided by the optimizer; confirm or
     * switch to a guaranteed-memzero primitive if one is available.
     */
    vli_clear(priv, ndigits);

    return 0;
}

/*
 * Derive the public key Q = d*G from @private_key (big-endian digits) on
 * the curve identified by @curve_id. The result is written to @public_key
 * as two big-endian coordinate arrays, x followed by y.
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ENOMEM when point
 * allocation fails, and -EAGAIN when the product is the point at infinity
 * (caller should retry with a different key).
 */
int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
             const u64 *private_key, u64 *public_key)
{
    const struct ecc_curve *curve = ecc_get_curve(curve_id);
    u64 d[ECC_MAX_DIGITS];
    struct ecc_point *pt;
    int rc;

    if (!private_key || !curve || ndigits > ARRAY_SIZE(d))
        return -EINVAL;

    /* Convert the key into native little-endian digit order. */
    ecc_swap_digits(private_key, d, ndigits);

    pt = ecc_alloc_point(ndigits);
    if (!pt)
        return -ENOMEM;

    ecc_point_mult(pt, &curve->g, d, NULL, curve, ndigits);

    if (ecc_point_is_zero(pt)) {
        rc = -EAGAIN;
    } else {
        ecc_swap_digits(pt->x, public_key, ndigits);
        ecc_swap_digits(pt->y, &public_key[ndigits], ndigits);
        rc = 0;
    }

    ecc_free_point(pt);
    return rc;
}

/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
                struct ecc_point *pk)
{
    u64 lhs[ECC_MAX_DIGITS];  /* y^2 */
    u64 cube[ECC_MAX_DIGITS]; /* x^2, then x^3 */
    u64 rhs[ECC_MAX_DIGITS];  /* a*x, then a*x + b, then x^3 + a*x + b */
    const unsigned int nd = pk->ndigits;

    /* Digit counts must agree before any vli arithmetic is attempted. */
    if (nd != curve->g.ndigits)
        return -EINVAL;

    /* Check 1: the point at infinity is never a valid public key. */
    if (ecc_point_is_zero(pk))
        return -EINVAL;

    /* Check 2: both coordinates must be strictly less than p. */
    if (vli_cmp(curve->p, pk->x, nd) != 1 ||
        vli_cmp(curve->p, pk->y, nd) != 1)
        return -EINVAL;

    /* Check 3: the point must satisfy y^2 == x^3 + a*x + b (mod p). */
    vli_mod_square_slow(lhs, pk->y, curve->p, nd);         /* y^2 */
    vli_mod_square_slow(cube, pk->x, curve->p, nd);        /* x^2 */
    vli_mod_mult_slow(cube, cube, pk->x, curve->p, nd);    /* x^3 */
    vli_mod_mult_slow(rhs, curve->a, pk->x, curve->p, nd); /* a*x */
    vli_mod_add(rhs, rhs, curve->b, curve->p, nd);         /* a*x + b */
    vli_mod_add(rhs, rhs, cube, curve->p, nd);             /* x^3 + a*x + b */

    return vli_cmp(lhs, rhs, nd) ? -EINVAL : 0;
}

/**
 * ecdsa_sign() - produce an ECDSA signature over a prehashed message.
 * @curve_id:    identifier of the curve to sign with
 * @private_key: private scalar d, big-endian digits
 * @p_hash:      message digest z, big-endian digits (curve-sized)
 * @p_signature: output buffer, r followed by s, big-endian digits
 *
 * Returns 0 on success, -EINVAL on bad arguments or RNG failure,
 * -ENOMEM on allocation failure, and -EAGAIN when the random nonce
 * produced a degenerate value (k == 0, r == 0 or s == 0); the caller
 * should simply retry in the -EAGAIN case.
 */
int ecdsa_sign(const unsigned int curve_id, const u64 *private_key, const u64 *p_hash, u64 *p_signature)
{
    const struct ecc_curve *curve = ecc_get_curve(curve_id);
    if (!curve)
        return -EINVAL;
    unsigned int ndigits = curve->g.ndigits;
    unsigned int nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
    struct ecc_point *x1y1 = NULL;
    /* Fixed-size buffers instead of VLAs; bounded by the check below. */
    u64 z[ECC_MAX_DIGITS], d[ECC_MAX_DIGITS];
    u64 k[ECC_MAX_DIGITS], k_inv[ECC_MAX_DIGITS];
    u64 r[ECC_MAX_DIGITS], s[ECC_MAX_DIGITS];
    u64 dr[ECC_MAX_DIGITS], zdr[ECC_MAX_DIGITS];
    int err;

    if (!private_key || !p_hash || !p_signature)
        return -EINVAL;
    if (ndigits > ECC_MAX_DIGITS)
        return -EINVAL;

    /* z: the message digest, converted to native digit order */
    ecc_swap_digits(p_hash, z, ndigits);

    /* d: the private scalar, converted to native digit order */
    ecc_swap_digits(private_key, d, ndigits);

    /*
     * Ephemeral nonce k. NOTE(review): reducing k mod n by a single
     * conditional subtraction leaves a small modulo bias; FIPS 186-4
     * Appendix B.5 describes unbiased per-message secret generation.
     */
    err = get_random_number(k, nbytes);
    if (err) {
        err = -EINVAL;
        goto out_clear;
    }
    if (vli_cmp(curve->n, k, ndigits) != 1)
        vli_sub(k, k, curve->n, ndigits);

    /* k == 0 would make k^-1 undefined; ask the caller to retry. */
    if (vli_num_bits(k, ndigits) == 0) {
        err = -EAGAIN;
        goto out_clear;
    }

    x1y1 = ecc_alloc_point(ndigits);
    if (!x1y1) {
        err = -ENOMEM;
        goto out_clear;
    }

    err = -EAGAIN;

    /* (x1, y1) = kG */
    ecc_point_mult(x1y1, &curve->g, k, NULL, curve, ndigits);

    /* r = x1 mod n */
    if (vli_cmp(x1y1->x, curve->n, ndigits) == 1)
        vli_sub(x1y1->x, x1y1->x, curve->n, ndigits);
    vli_set(r, x1y1->x, ndigits);
    if (vli_num_bits(r, ndigits) == 0) /* r == 0: invalid, retry */
        goto out_free;

    /* k^-1 mod n */
    vli_mod_inv(k_inv, k, curve->n, ndigits);

    /* dr = d * r mod n */
    vli_mod_mult_slow(dr, d, r, curve->n, ndigits);

    /* zdr = z + dr mod n */
    vli_mod_add(zdr, z, dr, curve->n, ndigits);

    /* s = k^-1 * (z + dr) mod n */
    vli_mod_mult_slow(s, k_inv, zdr, curve->n, ndigits);
    if (vli_num_bits(s, ndigits) == 0) /* s == 0: invalid, retry */
        goto out_free;

    ecc_swap_digits(r, p_signature, ndigits);
    ecc_swap_digits(s, &p_signature[ndigits], ndigits);
    err = 0;

out_free:
    ecc_free_point(x1y1);
out_clear:
    /*
     * Wipe stack copies of the secrets (d, k, k^-1). NOTE(review):
     * vli_clear() is a plain store loop and might be optimized away;
     * use a guaranteed-memzero primitive if one is available.
     */
    vli_clear(d, ndigits);
    vli_clear(k, ndigits);
    vli_clear(k_inv, ndigits);
    return err;
}


/**
 * ecdsa_verify() - verify an ECDSA signature over a prehashed message.
 * @curve_id:    identifier of the curve
 * @public_key:  Q = (x, y), big-endian digits, x followed by y
 * @p_hash:      message digest z, big-endian digits (curve-sized)
 * @p_signature: signature, r followed by s, big-endian digits
 *
 * Returns 0 when the signature is valid, -EINVAL otherwise.
 */
int ecdsa_verify(const unsigned int curve_id, const u64 *public_key, const u64 *p_hash, const u64 *p_signature)
{
    u64 r[ECC_MAX_DIGITS]; /* witness (r) */
    u64 s[ECC_MAX_DIGITS]; /* second part of sig (s) */
    u64 u1[ECC_MAX_DIGITS];
    u64 u2[ECC_MAX_DIGITS];

    u64 pubx[ECC_MAX_DIGITS];
    u64 puby[ECC_MAX_DIGITS];
    u64 z[ECC_MAX_DIGITS];

    u64 rx[ECC_MAX_DIGITS];
    u64 ry[ECC_MAX_DIGITS];

    const struct ecc_curve *curve = ecc_get_curve(curve_id);
    if (!curve)
        return -EINVAL;

    unsigned int ndigits = curve->g.ndigits;
    struct ecc_point R = ECC_POINT_INIT(rx, ry, ndigits);
    struct ecc_point pub_k = ECC_POINT_INIT(pubx, puby, ndigits);

    if (!public_key || !p_hash || !p_signature)
        return -EINVAL;
    if (ndigits > ECC_MAX_DIGITS)
        return -EINVAL;

    /* init public key */
    ecc_swap_digits(public_key, pubx, ndigits);
    ecc_swap_digits(&public_key[ndigits], puby, ndigits);

    /* init signature */
    ecc_swap_digits(p_signature, r, ndigits);
    ecc_swap_digits(&p_signature[ndigits], s, ndigits);

    /*
     * r and s must be in [1, n-1] (FIPS 186-4 / SEC 1 verification
     * step 1). In particular, s == 0 would make the modular inverse
     * below undefined; the original code skipped these checks.
     */
    if (vli_num_bits(r, ndigits) == 0 ||
        vli_num_bits(s, ndigits) == 0 ||
        vli_cmp(curve->n, r, ndigits) != 1 ||
        vli_cmp(curve->n, s, ndigits) != 1)
        return -EINVAL;

    /* Calculate u1 and u2. */
    vli_mod_inv(z, s, curve->n, ndigits); /* Z = s^-1 */

    ecc_swap_digits(p_hash, u1, ndigits);
    vli_mod_mult_slow(u1, u1, z, curve->n, ndigits); /* u1 = e/s */
    vli_mod_mult_slow(u2, r, z, curve->n, ndigits); /* u2 = r/s */

    /* Use Shamir's trick to calculate R = u1*G + u2*Q */
    ecc_point_mult_shamir(&R, u1, &curve->g, u2, &pub_k, curve);

    if (vli_cmp(R.x, curve->n, ndigits) == 1)
        vli_sub(R.x, R.x, curve->n, ndigits);

    /* if R.x == r the signature is valid */
    if (!vli_cmp(R.x, r, ndigits))
        return 0;
    else
        return -EINVAL;
}

