use crate::U256;

#[inline]
/// Montgomery reduction: returns y * R^(-1) mod prime, where R = 2^256.
/// `K0` is -prime^(-1) mod 2^64 (see `calcK0`); when `K0 == 1` the
/// per-round multiply by K0 is skipped at compile time.
pub fn mont_reduction<const K0: u64>(y: &U256, prime: &U256) -> U256 {
    use crate::u256::u64_smul;
    let mut r: U256 = y.clone();
    // One reduction round per 64-bit limb: fold the lowest limb into a
    // multiple of `prime`, add it, then shift the accumulator right a word.
    for _round in 0..4 {
        let u = if K0 == 1 {
            r.as_slice()[0]
        } else {
            u64_smul(r.as_slice()[0], K0)
        };
        let (s, mut carry) = prime.umult(u);
        carry += r.add_to(&s) as u64;
        r.rshift1w(carry);
    }
    // Final conditional subtraction to bring the result below `prime`.
    r.modp(prime, 0)
}

#[inline]
/// Montgomery multiplication: computes x * y * R^(-1) mod p, R = 2^256,
/// by interleaving the schoolbook product with word-wise reduction
/// (one x * y[i] partial product followed by one reduction per limb of y).
/// `K0` is -p^(-1) mod 2^64 (see `calcK0`); `K0 == 1` skips the multiply.
pub fn mont_mult<const K0: u64>(x: &U256, y: &U256, p: &U256) -> U256 {
    use crate::u256::{u64_addc, u64_smul};
    // r,rc forms [u64;5],  s,sc forms [u64;5]
    // r/rc is the 5-limb accumulator; s/sc holds each 5-limb partial product.
    let mut r: U256;
    let mut s: U256;
    let mut rc: u64;
    let mut sc: u64;
    let mut cc: u64;
    let ys = y.as_slice();
    // step 1: accumulator starts as x * y[0], then one reduction round:
    // add u * p (which zeroes limb 0) and shift right one word.
    (r, rc) = x.umult(ys[0]);
    let mut u = if K0 == 1 {
        r.as_slice()[0]
    } else {
        u64_smul(r.as_slice()[0], K0)
    };
    (s, sc) = p.umult(u);
    cc = r.add_to(&s) as u64;
    // combine the two top-limb carries; `u` temporarily holds the carry-out
    (rc, u) = u64_addc(rc, sc, cc);
    r.rshift1w(rc);
    rc = u;
    // step 2: accumulate x * y[1], then reduce and shift again
    (s, sc) = x.umult(ys[1]);
    cc = r.add_to(&s) as u64;
    (rc, _) = u64_addc(rc, sc, cc);
    u = if K0 == 1 {
        r.as_slice()[0]
    } else {
        u64_smul(r.as_slice()[0], K0)
    };
    (s, sc) = p.umult(u);
    cc = r.add_to(&s) as u64;
    (rc, u) = u64_addc(rc, sc, cc);
    r.rshift1w(rc);
    rc = u;
    // step 3: same round for y[2]
    (s, sc) = x.umult(ys[2]);
    cc = r.add_to(&s) as u64;
    (rc, _) = u64_addc(rc, sc, cc);
    u = if K0 == 1 {
        r.as_slice()[0]
    } else {
        u64_smul(r.as_slice()[0], K0)
    };
    (s, sc) = p.umult(u);
    cc = r.add_to(&s) as u64;
    (rc, u) = u64_addc(rc, sc, cc);
    r.rshift1w(rc);
    rc = u;
    // step 4: final round for y[3]
    (s, sc) = x.umult(ys[3]);
    cc = r.add_to(&s) as u64;
    (rc, _) = u64_addc(rc, sc, cc);
    u = if K0 == 1 {
        r.as_slice()[0]
    } else {
        u64_smul(r.as_slice()[0], K0)
    };
    (s, sc) = p.umult(u);
    cc = r.add_to(&s) as u64;
    (rc, u) = u64_addc(rc, sc, cc);
    r.rshift1w(rc);
    // `u` carries the final top limb into the conditional subtraction
    r.modp(p, u)
}

/// trait Montgomery for common montgomery algos
/// the only method MUST impl is prime
pub trait Montgomery<const K0: u64 = 1> {
    /// The prime modulus p.
    fn prime(&self) -> &U256;
    /// R^2 mod p (R = 2^256). Implementors holding a precomputed constant
    /// should override this to avoid recomputing the doubling chain.
    fn rr(&self) -> U256 {
        // fixed: was `let rr = ...; rr` (redundant binding, clippy let_and_return)
        calcRR(self.prime())
    }
    /// 1 in Montgomery form, i.e. R mod p, computed as 0 - p in 256-bit
    /// wrapping arithmetic (equals R mod p when p > 2^255 -- true for the
    /// SM2 moduli used here; confirm for any new prime).
    fn mont_one(&self) -> U256 {
        let mut res: U256 = Default::default();
        res.sub_from(self.prime());
        res
    }
    /// Converts x into Montgomery form: x * R mod p = mont_mult(x, R^2).
    fn to_montgomery(&self, x: &U256) -> U256 {
        self.mult(x, &self.rr())
    }
    /// Converts y out of Montgomery form: y * R^(-1) mod p.
    fn reduction(&self, y: &U256) -> U256 {
        mont_reduction::<K0>(y, self.prime())
    }
    /// Montgomery product: x * y * R^(-1) mod p.
    fn mult(&self, x: &U256, y: &U256) -> U256 {
        mont_mult::<K0>(x, y, self.prime())
    }
    /// n successive Montgomery squarings of x (n >= 1).
    fn sqr(&self, x: &U256, n: u32) -> U256 {
        let mut res = self.mult(x, x);
        for _i in 1..n {
            res = self.mult(&res, &res);
        }
        res
    }
    /// exp calc xp^y, xp in form of montgomery
    ///
    /// Sliding-window exponentiation with 4-bit windows over a table of the
    /// odd powers xp^1, xp^3, xp^5, xp^7, xp^15. Returns None when y has
    /// fewer than 4 significant bits.
    fn exp(&self, xp: &U256, y: &U256) -> Option<U256> {
        // For each 4-bit window value (index = value - 1):
        //   .0 = bits consumed by the table entry, .1 = trailing squarings,
        //   .2 = index into bn_tbl of the odd power to multiply in.
        struct BitsTable(u32, u32, u32);
        const MAPTBL: [BitsTable; 15] = [
            BitsTable(4, 0, 0), // _1
            BitsTable(3, 1, 0), // _10
            BitsTable(4, 0, 1), // _11
            BitsTable(2, 2, 0), // _100
            BitsTable(4, 0, 2), // _101
            BitsTable(3, 1, 1), // _110
            BitsTable(4, 0, 3), // _111
            BitsTable(1, 3, 0), // _1000
            BitsTable(1, 2, 0), // _1001, proc _100
            BitsTable(3, 0, 2), // _1010, proc _101
            BitsTable(3, 0, 2), // _1011, proc _101
            BitsTable(2, 2, 1), // _1100
            BitsTable(2, 1, 1), // _1101, proc _110
            BitsTable(3, 1, 3), // _1110
            BitsTable(4, 0, 4), // _1111
        ];
        let bn_tbl0: U256; // _1
        let bn_tbl1: U256; // _11
        let bn_tbl2: U256; // _101
        let bn_tbl3: U256; // _111
        let bn_tbl4: U256; // _1111
        // precompute the odd-power table
        bn_tbl0 = xp.clone();
        let tmp = self.sqr(xp, 1); // _10
        bn_tbl1 = self.mult(&tmp, xp); // _11
        bn_tbl2 = self.mult(&tmp, &bn_tbl1); // _101
        bn_tbl3 = self.mult(&tmp, &bn_tbl2); // _111
        let tmp = self.mult(&bn_tbl2, &bn_tbl2); // _1010
        bn_tbl4 = self.mult(&tmp, &bn_tbl2); //_1111
        let bn_tbl = [bn_tbl0, bn_tbl1, bn_tbl2, bn_tbl3, bn_tbl4];
        let mut idx = y.num_bits();
        if idx < 4 {
            return None;
        }
        // seed the result from the top window (its leading bit is 1)
        let bits = y.get_bits::<4>(idx as i32 - 4);
        let tb = &MAPTBL[(bits - 1) as usize & 0xf];
        let mut res = bn_tbl[tb.2 as usize].clone();
        if tb.1 != 0 {
            res = self.sqr(&res, tb.1);
        }
        idx -= tb.0 + tb.1;
        // consume the remaining bits window by window
        while idx >= 4 {
            let bits = y.get_bits::<4>(idx as i32 - 4);
            if bits == 0 {
                res = self.sqr(&res, 4);
                idx -= 4;
                continue;
            }
            let tb = &MAPTBL[bits as usize - 1];
            res = self.sqr(&res, tb.0);
            res = self.mult(&res, &bn_tbl[tb.2 as usize]);
            if tb.1 != 0 {
                res = self.sqr(&res, tb.1);
            }
            idx -= tb.0 + tb.1;
        }
        // proc remains: fewer than 4 bits left at the bottom
        if idx > 0 {
            let bits = y.get_bits::<4>(0);
            let bits = bits & ((1 << idx) - 1);
            if bits == 0 {
                res = self.sqr(&res, idx);
            } else {
                let tb = &MAPTBL[bits as usize - 1];
                if idx > tb.1 {
                    res = self.sqr(&res, idx - tb.1);
                } else if idx < tb.1 {
                    panic!("no way to here");
                }
                res = self.mult(&res, &bn_tbl[tb.2 as usize]);
                if tb.1 != 0 {
                    res = self.sqr(&res, tb.1);
                }
            }
        }
        Some(res)
    }
    /// exp_quadp calc xp^y, xp in form of montgomery
    ///
    /// Computes xp^(p >> 2), i.e. the exponent is floor(p / 4).
    #[inline]
    fn exp_quadp(&self, xp: &U256) -> Option<U256> {
        let mut quad = self.prime().clone();
        quad.rshift1(0);
        quad.rshift1(0);
        self.exp(xp, &quad)
    }
    /// sqrt cals square root of xp in GF(p), xp in form of montgomery
    ///
    /// Valid only for p = 4k + 3 (uses the x^((p+1)/4) shortcut); returns
    /// None when xp is not a quadratic residue.
    fn sqrt(&self, xp: &U256) -> Option<U256> {
        // only for prime specially as 4 * k + 3
        #[cfg(test)]
        if (self.prime().as_slice()[0] & 3) != 3 {
            return None;
        }
        if let Some(a1) = self.exp_quadp(xp) {
            let res = self.mult(&a1, xp);
            // verify: res^2 / xp == 1, i.e. a0 = res * a1 must be mont-one
            let a0 = self.mult(&res, &a1);
            if a0 == self.mont_one() {
                Some(res)
            } else {
                None
            }
        } else {
            None
        }
    }
    /// inverse calculates the inverse of k in GF(P) using Fermat's method.
    /// This has better constant-time properties than Euclid's method
    /// (implemented in U256::mod_inverse) although isn't strictly
    /// constant-time so it's not perfect.
    fn inverse(&self, x: &U256) -> U256 {
        let mut p_minus2 = self.prime().clone();
        p_minus2.usub(2);
        let xp = self.to_montgomery(x);
        if let Some(res) = self.exp(&xp, &p_minus2) {
            self.reduction(&res)
        } else {
            // unreachable: p - 2 always has >= 4 bits for a 256-bit prime
            panic!("No Way to here");
        }
    }
}

#[non_exhaustive]
/// Generic Montgomery context: a prime modulus together with its
/// precomputed R^2 mod p. `K0` is -p^(-1) mod 2^64 (checked against
/// `calcK0` in test builds by `Mont::new`).
pub struct Mont<const K0: u64 = 1> {
    p: U256,  // the prime modulus
    rr: U256, // R^2 mod p, used to convert into Montgomery form
}

impl<const K0: u64> Montgomery<K0> for Mont<{ K0 }> {
    // The stored prime modulus.
    fn prime(&self) -> &U256 {
        &self.p
    }
    // Overrides the trait default: return the cached R^2 mod p instead of
    // recomputing it with calcRR on every call.
    fn rr(&self) -> U256 {
        self.rr.clone()
    }
}

impl<const K0: u64> Mont<K0> {
    /// Builds a Montgomery context for prime `p`, precomputing R^2 mod p.
    pub fn new(p: &U256) -> Mont<K0> {
        // Test builds verify the K0 const parameter is really -p^(-1) mod 2^64.
        #[cfg(test)]
        assert_eq!(calcK0(p), K0);
        Mont {
            p: p.clone(),
            rr: calcRR(p),
        }
    }
}

/// special for sm2p, maybe optimized using asm
///
/// The single field caches 1 in Montgomery form (R mod SM2_P); it is
/// initialized by `MontSM2p::new`.
pub struct MontSM2p(U256);
use crate::ecc_const::{SM2_P, SM2_P_RR};

impl Montgomery for MontSM2p {
    // The SM2 prime is a compile-time constant.
    fn prime(&self) -> &U256 {
        &SM2_P
    }
    // R^2 mod SM2_P is precomputed as a constant.
    fn rr(&self) -> U256 {
        SM2_P_RR.clone()
    }
    // Cached at construction time (see MontSM2p::new).
    fn mont_one(&self) -> U256 {
        self.0.clone()
    }
    #[cfg(feature = "asm")]
    fn to_montgomery(&self, x: &U256) -> U256 {
        sm2p_mult_asm(x, &SM2_P_RR)
    }
    #[cfg(feature = "asm")]
    fn reduction(&self, y: &U256) -> U256 {
        sm2p_reduction_asm(y)
    }
    #[cfg(feature = "asm")]
    fn mult(&self, x: &U256, y: &U256) -> U256 {
        sm2p_mult_asm(x, y)
    }
    #[cfg(feature = "asm")]
    fn sqr(&self, x: &U256, n: u32) -> U256 {
        let mut res = sm2p_sqr_asm(x);
        for _i in 1..n {
            res = sm2p_sqr_asm(&res);
        }
        res
    }
    /// exp_quadp calc xp^y, xp in form of montgomery
    ///
    /// Overrides the generic table-driven exponentiation with a fixed
    /// square-and-multiply addition chain specialized to the exponent
    /// SM2_P >> 2 (the trait default computes the same exponent generically).
    #[inline]
    fn exp_quadp(&self, xp: &U256) -> Option<U256> {
        // felem_t	bn_tbl[5];	// _1, _11, _101, _111, _1111
        // precompute small odd powers, then x8/x16/x32 blocks of ones
        //let bn_tbl0 = xp.clone();
        let tmp = self.sqr(xp, 1);
        let bn_tbl1 = self.mult(&tmp, xp);
        let bn_tbl2 = self.mult(&tmp, &bn_tbl1); // _101
        let bn_tbl3 = self.mult(&tmp, &bn_tbl2); // _111
        let tmp = self.sqr(&bn_tbl2, 1); // _1010
        let bn_tbl4 = self.mult(&bn_tbl2, &tmp); // _1111
        let tmp = self.sqr(&bn_tbl4, 4);
        let x8 = self.mult(&tmp, &bn_tbl4); // 8 one-bits
        let tmp = self.sqr(&x8, 8);
        let x16 = self.mult(&tmp, &x8); // 16 one-bits
        let tmp = self.sqr(&x16, 16);
        let x32 = self.mult(&tmp, &x16); // 32 one-bits

        // top 32 bits of the exponent
        let res = self.sqr(&x16, 8);
        let res = self.mult(&res, &x8);
        let res = self.sqr(&res, 4);
        let res = self.mult(&res, &bn_tbl4); // _1111
        let res = self.sqr(&res, 3);
        let res = self.mult(&res, &bn_tbl3); // _111
        let res = self.sqr(&res, 1);

        // four all-ones 32-bit words
        let res = self.sqr(&res, 32);
        let res = self.mult(&res, &x32);
        let res = self.sqr(&res, 32);
        let res = self.mult(&res, &x32);
        let res = self.sqr(&res, 32);
        let res = self.mult(&res, &x32);
        let res = self.sqr(&res, 32);
        let res = self.mult(&res, &x32);

        // remaining low words of the exponent
        let res = self.sqr(&res, 32);
        let res = self.sqr(&res, 32);
        let res = self.mult(&res, &x32);
        let res = self.sqr(&res, 16);
        let res = self.mult(&res, &x16);
        let res = self.sqr(&res, 8);
        let res = self.mult(&res, &x8);
        let res = self.sqr(&res, 4);
        let res = self.mult(&res, &bn_tbl4); // _1111
        let res = self.sqr(&res, 2);
        let res = self.mult(&res, &bn_tbl1); // _11
        Some(res)
    }
}

impl MontSM2p {
    /// Creates the SM2 context, caching 1 in Montgomery form
    /// (R mod SM2_P, obtained as 0 - SM2_P in 256-bit wrapping arithmetic).
    pub fn new() -> MontSM2p {
        let mut mont_one: U256 = Default::default();
        mont_one.sub_from(&SM2_P);
        MontSM2p(mont_one)
    }
}

#[inline]
/// One Montgomery reduction round specialized to the SM2 prime.
/// Folds limb s0 into the accumulator by adding u = s0 at limb positions
/// 1 and 4 and subtracting the (u<<32, u>>32) pattern -- this is u * SM2_P
/// expressed through p's sparse 32-bit structure (mirrors the asm reduction
/// steps below). `carry` is the incoming 5th-limb value; the returned tuple
/// is the shifted 4-limb result plus the outgoing carry.
fn sm2p_reduction_step(
    s0: u64,
    s1: u64,
    s2: u64,
    s3: u64,
    carry: u64,
) -> (u64, u64, u64, u64, u64) {
    use crate::u256::{u64_add, u64_addc, u64_sub, u64_subc};
    let u = s0;
    // r0 becomes the new top limb (the word shift is implicit in how the
    // caller rotates the returned limbs).
    let mut r0 = carry;
    let mut cc: u64;
    let mut r1: u64;
    let mut r2: u64;
    let mut r3: u64;
    // add u at limbs 1 and 4
    (r1, cc) = u64_add(s1, u);
    (r2, cc) = u64_addc(s2, 0, cc);
    (r3, cc) = u64_addc(s3, 0, cc);
    (r0, cc) = u64_addc(r0, u, cc);
    // subtract u * 2^32 pattern across the limbs
    let t_low: u64 = u << 32;
    let t_high: u64 = u >> 32;
    let mut cc1: u64;
    (r1, cc1) = u64_sub(r1, t_low);
    (r2, cc1) = u64_subc(r2, t_high, cc1);
    (r3, cc1) = u64_subc(r3, t_low, cc1);
    (r0, cc1) = u64_subc(r0, t_high, cc1);
    // net carry = add-carry minus subtract-borrow
    (cc, _) = u64_sub(cc, cc1);
    (r0, r1, r2, r3, cc)
}

#[inline]
/// Montgomery reduction specialized to the SM2 prime: y * R^(-1) mod SM2_P.
/// Runs four `sm2p_reduction_step` rounds, rotating the limb roles instead
/// of physically shifting, then performs the final conditional subtraction.
pub fn sm2p_reduction(y: &U256) -> U256 {
    let limbs = y.as_slice();
    let (mut r0, mut r1, mut r2, mut r3) = (limbs[0], limbs[1], limbs[2], limbs[3]);
    let mut carry: u64 = 0;
    // Each round folds the current lowest limb; the rotated bindings play
    // the role of the one-word right shift.
    (r0, r1, r2, r3, carry) = sm2p_reduction_step(r0, r1, r2, r3, carry);
    (r1, r2, r3, r0, carry) = sm2p_reduction_step(r1, r2, r3, r0, carry);
    (r2, r3, r0, r1, carry) = sm2p_reduction_step(r2, r3, r0, r1, carry);
    (r3, r0, r1, r2, carry) = sm2p_reduction_step(r3, r0, r1, r2, carry);
    U256::new(r0, r1, r2, r3).modp(&SM2_P, carry)
}

#[inline]
/// Montgomery multiplication specialized to the SM2 prime:
/// x * y * R^(-1) mod SM2_P. Interleaves the four x * y[i] partial
/// products with one `sm2p_reduction_step` each; the rotating limb
/// bindings stand in for the one-word right shift after each round.
pub fn sm2p_mult(x: &U256, y: &U256) -> U256 {
    let mut r0: u64;
    let mut r1: u64;
    let mut r2: u64;
    let mut r3: u64;
    let mut carry: u64;
    let mut s: U256;
    let mut sc: u64;
    // round 0: accumulator = x * y[0], then reduce
    (s, carry) = x.umult(y.as_slice()[0]);
    let ss = s.as_slice();
    (r0, r1, r2, r3, carry) = sm2p_reduction_step(ss[0], ss[1], ss[2], ss[3], carry);
    // round 1: add x * y[1] into the (rotated) accumulator, then reduce
    (s, sc) = x.umult(y.as_slice()[1]);
    (r1, r2, r3, r0, carry) = s.addc4(r1, r2, r3, r0, sc, carry);
    (r1, r2, r3, r0, carry) = sm2p_reduction_step(r1, r2, r3, r0, carry);
    // round 2
    (s, sc) = x.umult(y.as_slice()[2]);
    (r2, r3, r0, r1, carry) = s.addc4(r2, r3, r0, r1, sc, carry);
    (r2, r3, r0, r1, carry) = sm2p_reduction_step(r2, r3, r0, r1, carry);
    // round 3
    (s, sc) = x.umult(y.as_slice()[3]);
    (r3, r0, r1, r2, carry) = s.addc4(r3, r0, r1, r2, sc, carry);
    (r3, r0, r1, r2, carry) = sm2p_reduction_step(r3, r0, r1, r2, carry);
    // final conditional subtraction of SM2_P
    let res = U256::new(r0, r1, r2, r3);
    res.modp(&SM2_P, carry)
}

#[inline]
#[cfg(feature = "asm")]
// FIX: gate on the supported architectures. Without this, building with the
// `asm` feature on any other target leaves r0..r3 unassigned below, which is
// a compile error ("use of possibly-uninitialized variable"). The public
// wrappers (sm2p_reduction_asm etc.) already carry this gate.
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
#[allow(non_snake_case)]
/// Stage 0 of the SM2 asm reduction: four Montgomery reduction rounds over
/// the limbs (s0..s3), no multiplications needed thanks to SM2_P's sparse
/// form. The final conditional subtraction is done by
/// `sm2p_reductionS2_asm`.
fn sm2p_reductionS0_asm(s0: u64, s1: u64, s2: u64, s3: u64) -> (u64, u64, u64, u64) {
    use std::arch::asm;
    let r0: u64;
    let r1: u64;
    let r2: u64;
    let r3: u64;
    #[cfg(target_arch = "x86_64")]
    unsafe {
        asm!(
	// Only reduce, no multiplications are needed
	// First stage
		"MOV rax, r8",
		"MOV rdx, r8",
		"SHL rax, 32",
		"SHR rdx, 32",
		"ADD r9, r8",
		"ADC r10, 0",
		"ADC r11, 0",
		"ADC r8, 0",
		"sub r9, rax",
		"sbb r10, rdx",
		"sbb r11, rax",
		"sbb r8, rdx",
	// Second stage
		"MOV rax, r9",
		"MOV rdx, r9",
		"SHL rax, 32",
		"SHR rdx, 32",
		"ADD r10, r9",
		"adc r11, 0",
		"adc r8, 0",
		"adc r9, 0",
		"sub r10, rax",
		"sbb r11, rdx",
		"sbb r8, rax",
		"sbb r9, rdx",
	// Third stage
		"MOV rax, r10",
		"MOV rdx, r10",
		"SHL rax, 32",
		"SHR rdx, 32",
		"ADD r11, r10",
		"adc r8, 0",
		"adc r9, 0",
		"adc r10, 0",
		"sub r11, rax",
		"sbb r8, rdx",
		"sbb r9, rax",
		"sbb r10, rdx",
	// Last stage
		"MOV rax, r11",
		"MOV rdx, r11",
		"SHL rax, 32",
		"SHR rdx, 32",
		"ADD r8, r11",
		"adc r9, 0",
		"adc r10, 0",
		"adc r11, 0",
		"sub r8, rax",
		"sbb r9, rdx",
		"sbb r10, rax",
		"sbb r11, rdx",
        inout("r8") s0 => r0, inout("r9") s1 => r1,
        inout("r10") s2 => r2, inout("r11") s3 => r3,
        out("rax") _, out("rdx") _,
		options(nomem, nostack) );
    }
    #[cfg(target_arch = "aarch64")]
    unsafe {
        asm!(
	// Only reduce, no multiplications are needed
	// First reduction step
		"LSR x13, x4, 32",
		"LSL x14, x4, 32",
		"ADDS x5, x5, x4",
		"ADCS x6, x6, XZR",
		"ADCS x7, x7, XZR",
		"ADCS x4, x4, xzr",
		"SUBS x5, x5, x14",
		"SBCS x6, x6, x13",
		"SBCS x7, x7, x14",
		"SBCS x4, x4, x13",
	// Second reduction step
		"LSR x13, x5, 32",
		"LSL x14, x5, 32",
		"ADDS x6, x6, x5",
		"ADCS x7, x7, XZR",
		"ADCS x4, x4, XZR",
		"ADCS x5, x5, xzr",
		"SUBS x6, x6, x14",
		"SBCS x7, x7, x13",
		"SBCS x4, x4, x14",
		"SBCS x5, x5, x13",
	// Third reduction step
		"LSR x13, x6, 32",
		"LSL x14, x6, 32",
		"ADDS x7, x7, x6",
		"ADCS x4, x4, XZR",
		"ADCS x5, x5, XZR",
		"ADCS x6, x6, xzr",
		"SUBS x7, x7, x14",
		"SBCS x4, x4, x13",
		"SBCS x5, x5, x14",
		"SBCS x6, x6, x13",
	// Last reduction step
		"LSR x13, x7, 32",
		"LSL x14, x7, 32",
		"ADDS x4, x4, x7",
		"ADCS x5, x5, XZR",
		"ADCS x6, x6, XZR",
		"ADCS x7, x7, xzr",
		"SUBS x4, x4, x14",
		"SBCS x5, x5, x13",
		"SBCS x6, x6, x14",
		"SBCS x7, x7, x13",
        inout("x4") s0 => r0, inout("x5") s1 => r1,
        inout("x6") s2 => r2, inout("x7") s3 => r3,
        out("x13") _, out("x14") _,
		options(nomem, nostack) );
    }
    (r0, r1, r2, r3)
}

#[inline]
#[cfg(feature = "asm")]
// FIX: gate on the supported architectures. Without this, building with the
// `asm` feature on any other target leaves r0..r3 unassigned below, which is
// a compile error. The public wrappers already carry this gate.
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
#[allow(non_snake_case)]
/// Stage 2 of the SM2 asm reduction: the final conditional subtraction.
/// Subtracts SM2_P from (cc, s3..s0) and keeps the subtracted value only
/// when no borrow propagates past the incoming carry `cc`.
fn sm2p_reductionS2_asm(s0: u64, s1: u64, s2: u64, s3: u64, cc: u64) -> U256 {
    use std::arch::asm;
    let r0: u64;
    let r1: u64;
    let r2: u64;
    let r3: u64;
    #[cfg(target_arch = "x86_64")]
    unsafe {
        asm!(
    	"MOV r12, r8",
    	"MOV r13, r9",
    	"MOV r14, r10",
    	"MOV r15, r11",
    	// Subtract sm2_p
    	"SUB r8, $-1",
        "mov rdx, 0ffffffff00000000h",
    	"SBB r9, rdx",
    	"SBB r10, $-1",
        "mov rdx, 0fffffffeffffffffh",
    	"SBB r11, rdx",
    	"SBB rax, 0",
        // if borrow < cc mov
    	"CMOVC r8, r12",
    	"CMOVC r9, r13",
    	"CMOVC r10, r14",
    	"CMOVC r11, r15",
        inout("r8") s0 => r0, inout("r9") s1 => r1,
        inout("r10") s2 => r2, inout("r11") s3 => r3,
        in("rax") cc,
        out("rdx") _,
        out("r12") _, out("r13") _, out("r14") _, out("r15") _,
		options(nomem, nostack) );
    }
    #[cfg(target_arch = "aarch64")]
    unsafe {
        asm!(
		//"MOV	x9, #-1",
        "MOV    x10, #0xffffffff00000000",
		"MOV	x11, #-1",
        "MOV    x12, #0xfffffffeffffffff",

		"SUBS	x9, x4, #-1",
		"SBCS	x10, x5, x10",
		"SBCS	x11, x6, x11",
		"SBCS	x12, x7, x12",
		"SBCS	x0, x0, XZR",

		"CSEL	x4, x4, x9, cc",
		"CSEL	x5, x5, x10, cc",
		"CSEL	x6, x6, x11, cc",
		"CSEL	x7, x7, x12, cc",

        inout("x4") s0 => r0,
        inout("x5") s1 => r1,
        inout("x6") s2 => r2,
        inout("x7") s3 => r3,
        in("x0") cc,
        out("x9") _, out("x10") _, out("x11") _, out("x12") _,
		options(nomem, nostack) );
    }
    U256::new(r0, r1, r2, r3)
}

#[inline]
#[cfg(feature = "asm")]
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
/// Assembly version of `sm2p_reduction`: reduce the four limbs, then apply
/// the final conditional subtraction with no incoming carry.
pub fn sm2p_reduction_asm(y: &U256) -> U256 {
    let limbs = y.as_slice();
    let (a, b, c, d) = sm2p_reductionS0_asm(limbs[0], limbs[1], limbs[2], limbs[3]);
    sm2p_reductionS2_asm(a, b, c, d, 0)
}

#[cfg(feature = "asm")]
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
/// Assembly counterpart of `sm2p_mult`: computes x * y * R^(-1) mod SM2_P
/// (R = 2^256), interleaving the four x * y[i] partial products with one
/// reduction step each, then a final conditional subtraction of SM2_P.
pub fn sm2p_mult_asm(x: &U256, y: &U256) -> U256 {
    use std::arch::asm;
    let r0: u64;
    let r1: u64;
    let r2: u64;
    let r3: u64;
    // x86_64: x is loaded through rsi, y through rdi (see the operand list);
    // the accumulator lives in r8..r13 and rotates one register per round.
    #[cfg(target_arch = "x86_64")]
    unsafe {
        asm!(
        // x * y[0]
		"MOV r14, (8*0)[rdi]",

		"MOV rax, (8*0)[rsi]",
		"MUL r14",
		"MOV r8, RAX",
		"MOV r9, RDX",

		"MOV rax, (8*1)[rsi]",
		"MUL r14",
		"ADD r9, RAX",
		"ADC rdx, 0",
		"MOV r10, RDX",

		"MOV rax, (8*2)[rsi]",
		"MUL r14",
		"ADD r10, RAX",
		"ADC rdx, 0",
		"MOV r11, RDX",

		"MOV rax, (8*3)[rsi]",
		"MUL r14",
		"ADD r11, RAX",
		"ADC rdx, 0",
		"MOV r12, RDX",
		"XOR r13, r13",
    	// First reduction step
        "MOV rax, r8",
		"MOV r15, r8",
		"SHL r8, 32",
		"SHR r15, 32",
		"ADD r9, rax",
		"ADC r10, 0",
		"ADC r11, 0",
		"ADC r12, rax",
		"ADC r13, 0",
		"SUB r9, r8",
		"SBB r10, r15",
		"SBB r11, r8",
		"SBB r12, R15",
		"SBB r13, 0",
		"XOR r8, r8",
    	// x * y[1]
		"MOV r14, (8*1)[rdi]",

		"MOV rax, (8*0)[rsi]",
		"MUL r14",
		"ADD r9, RAX",
		"ADC rdx, 0",
		"MOV r15, RDX",

		"MOV rax, (8*1)[rsi]",
		"MUL r14",
		"ADD r10, r15",
		"ADC rdx, 0",
		"ADD r10, RAX",
		"ADC rdx, 0",
		"MOV r15, RDX",

		"MOV rax, (8*2)[rsi]",
		"MUL r14",
		"ADD r11, r15",
		"ADC rdx, 0",
		"ADD r11, RAX",
		"ADC rdx, 0",
		"MOV r15, RDX",

		"MOV rax, (8*3)[rsi]",
		"MUL r14",
		"ADD r12, r15",
		"ADC rdx, 0",
		"ADD r12, RAX",
		"ADC r13, RDX",
		"ADC r8, 0",
    	// Second reduction step
		"MOV rax, r9",
		"MOV r15, r9",
		"SHL r9, 32",
		"SHR r15, 32",
		"ADD r10, rax",
		"ADC r11, 0",
		"ADC r12, 0",
		"ADC r13, rax",
		"ADC r8, 0",
		"SUB r10, r9",
		"SBB r11, r15",
		"SBB r12, r9",
		"SBB r13, r15",
		"SBB r8, 0",
		"XOR r9, r9",
    	// x * y[2]
		"MOV r14, (8*2)[rdi]",

		"MOV rax, (8*0)[rsi]",
		"MUL r14",
		"ADD r10, RAX",
		"ADC rdx, 0",
		"MOV r15, RDX",

		"MOV rax, (8*1)[rsi]",
		"MUL r14",
		"ADD r11, r15",
		"ADC rdx, 0",
		"ADD r11, RAX",
		"ADC rdx, 0",
		"MOV r15, RDX",

		"MOV rax, (8*2)[rsi]",
		"MUL r14",
		"ADD r12, r15",
		"ADC rdx, 0",
		"ADD r12, RAX",
		"ADC rdx, 0",
		"MOV r15, RDX",

		"MOV rax, (8*3)[rsi]",
		"MUL r14",
		"ADD r13, r15",
		"ADC rdx, 0",
		"ADD r13, RAX",
		"ADC r8, RDX",
		"ADC r9, 0",
    	// Third reduction step
		"MOV rax, r10",
		"MOV r15, r10",
		"SHL r10, 32",
		"SHR r15, 32",
		"ADD r11, rax",
		"ADC r12, 0",
		"ADC r13, 0",
		"ADC r8, rax",
		"ADC r9, 0",
		"SUB r11, r10",
		"SBB r12, r15",
		"SBB r13, r10",
		"SBB r8, r15",
		"SBB r9, 0",
		"XOR r10, r10",
    	// x * y[3]
		"MOV r14, (8*3)[rdi]",

		"MOV rax, (8*0)[rsi]",
		"MUL r14",
		"ADD r11, RAX",
		"ADC rdx, 0",
		"MOV r15, RDX",

		"MOV rax, (8*1)[rsi]",
		"MUL r14",
		"ADD r12, r15",
		"ADC rdx, 0",
		"ADD r12, RAX",
		"ADC rdx, 0",
		"MOV r15, RDX",

		"MOV rax, (8*2)[rsi]",
		"MUL r14",
		"ADD r13, r15",
		"ADC rdx, 0",
		"ADD r13, RAX",
		"ADC rdx, 0",
		"MOV r15, RDX",

		"MOV rax, (8*3)[rsi]",
		"MUL r14",
		"ADD r8, r15",
		"ADC rdx, 0",
		"ADD r8, RAX",
		"ADC r9, RDX",
		"ADC r10, 0",
    	// Last reduction step
		"MOV rax, r11",
		"MOV r15, r11",
		"SHL r11, 32",
		"SHR r15, 32",
		"ADD r12, rax",
		"ADC r13, 0",
		"ADC r8, 0",
		"ADC r9, rax",
		"ADC r10, 0",
		"SUB r12, r11",
		"SBB r13, r15",
		"SBB r8, r11",
		"SBB r9, r15",
		"SBB r10, 0",
    	// Copy result [255:0]
		"MOV rax, r12",
		"MOV r11, r13",
		"MOV r14, r8",
		"MOV r15, r9",
    	// Subtract sm2_p
		"SUB r12, -1",
        "mov rdx, 0ffffffff00000000h",
		"SBB r13, rdx",
		"SBB r8, -1",
        "mov rdx, 0fffffffeffffffffh",
		"SBB r9, rdx",
		"SBB r10, 0",

		"CMOVC r12, rax",
		"CMOVC r13, r11",
		"CMOVC r8, r14",
		"CMOVC r9, r15",

        out("r12") r0, out("r13") r1, out("r8") r2, out("r9") r3,
        out("rax") _, out("rdx") _, out("r10") _,
        out("r11") _, out("r14") _, out("r15") _,
        in("rdi") y, in("rsi") x,
		options(nostack) );
    }
    // aarch64: x's limbs are passed in x4..x7; y's limbs are loaded from
    // memory one at a time through the {y} pointer operand.
    #[cfg(target_arch = "aarch64")]
    unsafe {
        // x0 -- x3   register %%x4 -- %%x7
        let xs = x.as_slice();
        asm!(
    	// y[0] * x
	    // load y0, y1
		"LDR x3, [{y}]",
		"MUL	x9, x3, x4",
		"UMULH	x10, x3, x4",

		"MUL	x14, x3, x5",
		"ADDS	x10, x14, x10",
		"UMULH	x11, x3, x5",

		"MUL	x14, x3, x6",
		"ADCS	x11, x14, x11",
		"UMULH	x12, x3, x6",

		"MUL	x14, x3, x7",
		"ADCS	x12, x14, x12",
		"UMULH	x13, x3, x7",
		"ADC	x13, xzr, x13",
    	// First reduction step
		"LSR	x14, x9, #32",
		"LSL	x15, x9, #32",
		"ADDS	x10, x10, x9",
		"ADCS	x11, x11, xzr",
		"ADCS	x12, x12, xzr",
		"ADCS	x13, x13, x9",
		"ADC	x9, xzr, xzr",
		"SUBS	x10, x10, x15",
		"SBCS	x11, x11, x14",
		"SBCS	x12, x12, x15",
		"SBCS	x13, x13, x14",
		"SBC	x9, x9, xzr",
    	// y[1] * x
		"LDR x3, [{y}, 8]",
		"MUL	x14, x3, x4",
		"ADDS	x10, x14, x10",
		"UMULH	x15, x3, x4",
		"ADC	x15, x15, xzr",

		"MUL	x14, x3, x5",
		"ADDS	x11, x15, x11",
		"UMULH	x15, x3, x5",
		"ADC	x15, x15, xzr",
		"ADDS	x11, x14, x11",
		"ADC	x15, x15, xzr",

		"MUL	x14, x3, x6",
		"ADDS	x12, x15, x12",
		"UMULH	x15, x3, x6",
		"ADC	x15, x15, xzr",
		"ADDS	x12, x14, x12",
		"ADC	x15, x15, xzr",

		"MUL	x14, x3, x7",
		"ADDS	x13, x15, x13",
		"UMULH	x15, x3, x7",
		"ADC	x15, x15, xzr",
		"ADDS	x13, x14, x13",
		"ADC	x9, x15, x9",

    	// Second reduction step
		"LSR	x14, x10, 32",
		"LSL	x15, x10, 32",
		"ADDS	x11, x11, x10",
		"ADCS	x12, x12, xzr",
		"ADCS	x13, x13, xzr",
		"ADCS	x9, x9, x10",
		"ADC	x10, xzr, xzr",
		"SUBS	x11, x11, x15",
		"SBCS	x12, x12, x14",
		"SBCS	x13, x13, x15",
		"SBCS	x9, x9, x14",
		"SBC	x10, x10, xzr",
    	// y[2] * x
	    // load y2, y3
		"LDR x3, [{y}, 16]",
		"MUL	x14, x3, x4",
		"ADDS	x11, x14, x11",
		"UMULH	x15, x3, x4",
		"ADC	x15, x15, xzr",

		"MUL	x14, x3, x5",
		"ADDS	x12, x15, x12",
		"UMULH	x15, x3, x5",
		"ADC	x15, x15, xzr",
		"ADDS	x12, x14, x12",
		"ADC	x15, x15, xzr",

		"MUL	x14, x3, x6",
		"ADDS	x13, x15, x13",
		"UMULH	x15, x3, x6",
		"ADC	x15, x15, xzr",
		"ADDS	x13, x14, x13",
		"ADC	x15, x15, xzr",

		"MUL	x14, x3, x7",
		"ADDS	x9, x15, x9",
		"UMULH	x15, x3, x7",
		"ADC	x15, x15, xzr",
		"ADDS	x9, x14, x9",
		"ADC	x10, x15, x10",

    	// Third reduction step
		"LSR	x14, x11, 32",
		"LSL	x15, x11, 32",
		"ADDS	x12, x12, x11",
		"ADCS	x13, x13, xzr",
		"ADCS	x9, x9, xzr",
		"ADCS	x10, x10, x11",
		"ADC	x11, xzr, xzr",
		"SUBS	x12, x12, x15",
		"SBCS	x13, x13, x14",
		"SBCS	x9, x9, x15",
		"SBCS	x10, x10, x14",
		"SBC	x11, x11, xzr",
    	// y[3] * x
		"LDR x3, [{y}, 24]",
		"MUL	x14, x3, x4",
		"ADDS	x12, x14, x12",
		"UMULH	x15, x3, x4",
		"ADC	x15, x15, xzr",

		"MUL	x14, x3, x5",
		"ADDS	x13, x15, x13",
		"UMULH	x15, x3, x5",
		"ADC	x15, x15, xzr",
		"ADDS	x13, x14, x13",
		"ADC	x15, x15, xzr",

		"MUL	x14, x3, x6",
		"ADDS	x9, x15, x9",
		"UMULH	x15, x3, x6",
		"ADC	x15, x15, xzr",
		"ADDS	x9, x14, x9",
		"ADC	x15, x15, xzr",

		"MUL	x14, x3, x7",
		"ADDS	x10, x15, x10",
		"UMULH	x15, x3, x7",
		"ADC	x15, x15, xzr",
		"ADDS	x10, x14, x10",
		"ADC	x11, x15, x11",

    	// Last reduction step
		"LSR	x14, x12, 32",
		"LSL	x15, x12, 32",
		"ADDS	x13, x13, x12",
		"ADCS	x9, x9, xzr",
		"ADCS	x10, x10, xzr",
		"ADCS	x11, x11, x12",
		"ADC	x12, xzr, xzr",
		"SUBS	x13, x13, x15",
		"SBCS	x9, x9, x14",
		"SBCS	x10, x10, x15",
		"SBCS	x11, x11, x14",
		"SBC	x12, x12, xzr",

        //"MOV   x4, -1",
        "MOV   x5, #0xffffffff00000000",
        "MOV   x6, #-1",
        "MOV   x7, #0xfffffffeffffffff",
		"SUBS	x4, x13, #-1",
		"SBCS	x5, x9, x5",
		"SBCS	x6, x10, x6",
		"SBCS	x7, x11, x7",
		"SBCS	x12, x12, xzr",

		"CSEL	x4, x4, x13, cs",
		"CSEL	x5, x5, x9, cs",
		"CSEL	x6, x6, x10, cs",
		"CSEL	x7, x7, x11, cs",
        y = in(reg) y,
        inout("x4") xs[0] => r0,
        inout("x5") xs[1] => r1,
        inout("x6") xs[2] => r2,
        inout("x7") xs[3] => r3,
        out("x3") _, out("x9") _, out("x10") _, out("x11") _, out("x12") _,
        out("x13") _, out("x14") _, out("x15") _,
		options(nostack) );
    }
    U256::new(r0, r1, r2, r3)
}

#[cfg(feature = "asm")]
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
/// Assembly Montgomery squaring for the SM2 prime: x^2 * R^(-1) mod SM2_P.
/// Builds the 512-bit square via the "off-diagonal products, doubled, plus
/// squared diagonal" scheme, runs four reduction steps, adds the high half,
/// and finishes with a conditional subtraction of SM2_P.
pub fn sm2p_sqr_asm(x: &U256) -> U256 {
    use std::arch::asm;
    let mut r0: u64;
    let mut r1: u64;
    let mut r2: u64;
    let mut r3: u64;
    // x86_64: x's limbs are loaded through rsi; the whole square, reduction
    // and final subtraction happen in one asm block.
    #[cfg(target_arch = "x86_64")]
    unsafe {
        asm!(
    // y[1:] * y[0]
        "MOV r14, (8*0)[rsi]",

        "MOV rax, (8*1)[rsi]",
        "MUL r14",
        "MOV r9, rax",
        "MOV r10, rdx",

        "MOV rax, (8*2)[rsi]",
        "MUL r14",
        "ADD r10, rax",
        "ADC rdx, 0",
        "MOV r11, rdx",

        "MOV rax, (8*3)[rsi]",
        "MUL r14",
        "ADD r11, rax",
        "ADC rdx, 0",
        "MOV r12, rdx",
	// y[2:] * y[1]
        "MOV r14, (8*1)[rsi]",

        "MOV rax, (8*2)[rsi]",
        "MUL r14",
        "ADD r11, rax",
        "ADC rdx, 0",
        "MOV r15, rdx",

    	"MOV rax, (8*3)[rsi]",
	    "MUL r14",
        "ADD r12, r15",
	    "ADC rdx, 0",
    	"ADD r12, rax",
    	"ADC rdx, 0",
	    "MOV r13, rdx",
	// y[3] * y[2]
    	"MOV r14, (8*2)[rsi]",

    	"MOV rax, (8*3)[rsi]",
    	"MUL r14",
    	"ADD r13, rax",
    	"ADC rdx, 0",
    	"MOV rcx, rdx",
    	"XOR r15, r15",
	// *2
    	"ADD r9, r9",
	    "ADC r10, r10",
    	"ADC r11, r11",
	    "ADC r12, r12",
    	"ADC r13, r13",
    	"ADC rcx, rcx",
    	"ADC r15, 0",
	// Missing products
    	"MOV rax, (8*0)[rsi]",
    	"MUL rax",
    	"MOV r8, rax",
    	"MOV r14, rdx",

    	"MOV rax, (8*1)[rsi]",
    	"MUL rax",
    	"ADD r9, r14",
    	"ADC r10, rax",
    	"ADC rdx, 0",
    	"MOV r14, rdx",

    	"MOV rax, (8*2)[rsi]",
    	"MUL rax",
    	"ADD r11, r14",
    	"ADC r12, rax",
    	"ADC rdx, 0",
    	"MOV r14, rdx",

    	"MOV rax, (8*3)[rsi]",
    	"MUL rax",
    	"ADD r13, r14",
    	"ADC rcx, rax",
    	"ADC r15, rdx",
    	"MOV rdi, r15",
	// First reduction step
    	"MOV rax, r8",
    	"MOV rdx, r8",
    	"MOV r15, r8",
    	"SHL r8, 32",
    	"SHR r15, 32",
    	"ADD r9, rax",
    	"ADC r10, 0",
    	"ADC r11, 0",
    	"ADC rdx, 0",
    	"SUB r9, r8",
    	"SBB r10, r15",
    	"SBB r11, r8",
    	"SBB rdx, R15",
    	"MOV r8, rdx",
	// Second reduction step
    	"MOV rax, r9",
    	"MOV rdx, r9",
    	"MOV r15, r9",
    	"SHL r9, 32",
    	"SHR r15, 32",
    	"ADD r10, rax",
    	"ADC r11, 0",
    	"ADC r8, 0",
    	"ADC rdx, 0",
    	"SUB r10, r9",
    	"SBB r11, r15",
    	"SBB r8, r9",
    	"SBB rdx, r15",
    	"MOV r9, rdx",
	// Third reduction step
    	"MOV rax, r10",
    	"MOV rdx, r10",
    	"MOV r15, r10",
    	"SHL r10, 32",
    	"SHR r15, 32",
    	"ADD r11, rax",
    	"ADC r8, 0",
    	"ADC r9, 0",
    	"ADC rdx, 0",
    	"SUB r11, r10",
    	"SBB r8, r15",
    	"SBB r9, r10",
    	"SBB rdx, r15",
    	"MOV r10, rdx",
	// Last reduction step
    	"XOR r14, r14",
    	"MOV rax, r11",
    	"MOV rdx, r11",
    	"MOV r15, r11",
    	"SHL r11, 32",
    	"SHR r15, 32",
    	"ADD r8, rax",
    	"ADC r9, 0",
    	"ADC r10, 0",
    	"ADC rdx, 0",
    	"SUB r8, r11",
    	"SBB r9, r15",
    	"SBB r10, r11",
    	"SBB rdx, r15",
    	"MOV r11, rdx",
	// Add bits [511:256] of the sqr result
    	"ADC r8, r12",
    	"ADC r9, r13",
    	"ADC r10, rcx",
    	"ADC r11, rdi",
    	"ADC r14, 0",

    	"MOV r12, r8",
    	"MOV r13, r9",
    	"MOV rcx, r10",
    	"MOV r15, r11",
	// Subtract sm2_p
    	"SUB r8, $-1",
        "mov rdx, 0ffffffff00000000h",
    	"SBB r9, rdx",
    	"SBB r10, $-1",
        "mov rdx, 0fffffffeffffffffh",
        "SBB r11, rdx",
    	"SBB r14, 0",

    	"CMOVC r8, r12",
    	"CMOVC r9, r13",
    	"CMOVC r10, rcx",
    	"CMOVC r11, r15",

        out("r8") r0, out("r9") r1, out("r10") r2, out("r11") r3,
        out("rax") _, out("rcx") _, out("rdx") _,
        out("r12") _, out("r13") _, out("r14") _, out("r15") _,
        out("rdi") _, in("rsi") x,
        options(nostack) );
    }
    // aarch64: square-and-reduce in the first asm block (limbs in x0..x3),
    // then reuse the stage-2 conditional-subtraction sequence in a second
    // block, with `cc` carrying the overflow between the two.
    #[cfg(target_arch = "aarch64")]
    unsafe {
        let xs = x.as_slice();
        let cc: u64;
        asm!(
	// x[1:] * x[0]
		"MUL	x5, x0, x1",		// acc1
		"UMULH	x6, x0, x1",		// acc2

		"MUL	x13, x0, x2",
		"UMULH	x7, x0, x2",		//acc3

		"MUL	x14, x0, x3",
		"UMULH	x9, x0, x3",		//acc4
		"ADDS	x6, x13, x6",		//acc2
		"ADCS	x7, x14, x7",		//acc3
		"ADC	x9, xzr, x9",
	// x[2:] * x[1]
		"MUL	x13, x1, x2",
		"UMULH	x14, x1, x2",
		"ADDS	x7, x13, x7",		//acc3
		"ADCS	x9, x14, x9",		//acc4
		"ADC	x10, xzr, xzr",	//acc5

		"MUL	x13, x1, x3",
		"UMULH	x14, x1, x3",
		"ADDS	x9, x13, x9",		//acc4
		"ADC	x10, x14, x10",	//acc5
	// x[3] * x[2]
		"MUL	x13, x2, x3",
		"UMULH	x11, x2, x3",
		"ADDS	x10, x13, x10",	//acc5
		"ADC	x11, xzr, x11",	//acc6

	// *2
		"ADDS	x5, x5, x5",	// acc1
		"ADCS	x6, x6, x6",	//acc2
		"ADCS	x7, x7, x7",	//acc3
		"ADCS	x9, x9, x9",	//acc4
		"ADCS	x10, x10, x10",	//acc5
		"ADCS	x11, x11, x11",	//acc6
		"ADC	x12, xzr, xzr",	//acc7
	// Missing products
		"MUL	x4, x0, x0",	//acc0
		"UMULH	x13, x0, x0",
		"ADDS	x5, x13, x5",	//acc1

		"MUL	x13, x1, x1",
		"UMULH	x14, x1, x1",
		"ADCS	x6, x13, x6",	//acc2
		"ADCS	x7, x14, x7",	//acc3

		"MUL	x13, x2, x2",
		"UMULH	x14, x2, x2",
		"ADCS	x9, x13, x9",		//acc4
		"ADCS	x10, x14, x10",	//acc5

		"MUL	x13, x3, x3",
		"UMULH	x14, x3, x3",
		"ADCS	x11, x13, x11",	//acc6
		"ADC	x12, x14, x12",	//acc7
	// First reduction step
		"LSL	x13, x4, 32",	// LSL $32, acc0, t0
		"LSR	x14, x4, 32",	// LSR $32, acc0, t1
		"ADDS	x5, x5, x4",	// ADDS acc0, acc1
		"ADCS	x6, x6, xzr",
		"ADCS	x7, x7, xzr",
		"ADC	x4, x4, xzr",
		"SUBS	x5, x5, x13",	// SUBS t0, acc1
		"SBCS	x6, x6, x14",
		"SBCS	x7, x7, x13",
		"SBC	x4, x4, x14",
	// Second reduction step
		"LSL	x13, x5, 32",	// LSL $32, acc1, t0
		"LSR	x14, x5, 32",	// LSR $32, acc1, t1
		"ADDS	x6, x6, x5",	// ADDS acc1, acc2
		"ADCS	x7, x7, xzr",
		"ADCS	x4, x4, xzr",
		"ADC	x5, x5, xzr",
		"SUBS	x6, x6, x13",	// SUBS t0, acc2
		"SBCS	x7, x7, x14",
		"SBCS	x4, x4, x13",	// SBCS t0, acc0
		"SBC	x5, x5, x14",
	// Third reduction step
		"LSL	x13, x6, 32",	// LSL $32, acc2, t0
		"LSR	x14, x6, 32",	// LSR $32, acc2, t1
		"ADDS	x7, x7, x6",	// ADDS acc2, acc3
		"ADCS	x4, x4, xzr",
		"ADCS	x5, x5, xzr",
		"ADC	x6, x6, xzr",
		"SUBS	x7, x7, x13",	// SUBS t0, acc3
		"SBCS	x4, x4, x14",
		"SBCS	x5, x5, x13",	// SBCS t0, acc1
		"SBC	x6, x6, x14",
	// Last reduction step
		"LSL	x13, x7, 32",	// LSL $32, acc3, t0
		"LSR	x14, x7, 32",	// LSR $32, acc3, t1
		"ADDS	x4, x4, x7",	// ADDS acc3, acc0
		"ADCS	x5, x5, xzr",
		"ADCS	x6, x6, xzr",
		"ADC	x7, x7, xzr",
		"SUBS	x4, x4, x13",	// SUBS t0, acc0
		"SBCS	x5, x5, x14",
		"SBCS	x6, x6, x13",	// SBCS t0, acc2
		"SBC	x7, x7, x14",
	// Add bits [511:256] of the sqr result
		"ADDS	x4, x9, x4",
		"ADCS	x5, x10, x5",
		"ADCS	x6, x11, x6",
		"ADCS	x7, x12, x7",
		"ADC	x0, xzr, xzr",	// t0 = carry
        inout("x0") xs[0] => cc,
        in("x1") xs[1], in("x2") xs[2], in("x3") xs[3],
        out("x4") r0, out("x5") r1, out("x6") r2, out("x7") r3,
        out("x9") _, out("x10") _, out("x11") _, out("x12") _,
        out("x13") _, out("x14") _,
		options(nomem, nostack) );
        // r0,r1,r2,r3,cc  sm2p_reductionS2
        // (same conditional-subtraction sequence as sm2p_reductionS2_asm)
        asm!(
		//"MOV	x9, #-1",
        "MOV    x10, #0xffffffff00000000",
		"MOV	x11, #-1",
        "MOV    x12, #0xfffffffeffffffff",

		"SUBS	x9, x4, #-1",
		"SBCS	x10, x5, x10",
		"SBCS	x11, x6, x11",
		"SBCS	x12, x7, x12",
		"SBCS	x0, x0, XZR",

		"CSEL	x4, x4, x9, cc",
		"CSEL	x5, x5, x10, cc",
		"CSEL	x6, x6, x11, cc",
		"CSEL	x7, x7, x12, cc",

        inout("x4") r0 => r0,
        inout("x5") r1 => r1,
        inout("x6") r2 => r2,
        inout("x7") r3 => r3,
        in("x0") cc,
        out("x9") _, out("x10") _, out("x11") _, out("x12") _,
		options(nomem, nostack) );
    }
    U256::new(r0, r1, r2, r3)
}

#[cfg(test)]
#[inline]
#[allow(non_snake_case)]
/// Test helper: computes K0 = -p^(-1) mod 2^64 from the lowest limb of p.
/// Uses square-and-multiply: after 63 rounds t = n^(2^63 - 1) mod 2^64,
/// which is n^(-1) for odd n (the unit group mod 2^64 has exponent 2^62).
fn calcK0(p: &U256) -> u64 {
    use crate::u256::{u64_smul, u64_sub};
    let n = p.as_slice()[0];
    let mut t: u64 = 1;
    for _ in 1..64 {
        t = u64_smul(u64_smul(t, t), n);
    }
    // negate to get -n^(-1) mod 2^64
    let (k0, _) = u64_sub(0, t);
    k0
}

/// Compute RR = 2^512 mod `prime`, the Montgomery conversion factor
/// (Montgomery-multiplying x by RR maps x into the Montgomery domain).
///
/// The accumulator is seeded with 0 - prime, i.e. 2^256 - prime, which is
/// congruent to 2^256 (mod prime) — assumes `sub_from` wraps on underflow;
/// TODO confirm against crate::u256. It is then doubled 256 times modulo
/// prime, yielding 2^256 * 2^256 = 2^512 (mod prime).
#[inline]
#[allow(non_snake_case)]
fn calcRR(prime: &U256) -> U256 {
    let mut acc = U256::from(0);
    acc.sub_from(prime);
    // 256 modular doublings lift 2^256 to 2^512 (mod prime).
    for _ in 0..256 {
        acc = acc.mod_add(&acc, prime);
    }
    acc
}

#[cfg(test)]
mod tests {
    //! Unit tests and (ignored) micro-benchmarks for the Montgomery
    //! arithmetic in this module. Test vectors (dx1, dx1_m, dx1_n, dx1_sq,
    //! x1_inv, ...) come from crate::test_data; curve constants (SM2_P,
    //! SM2_N and their K0/RR Montgomery parameters) from crate::ecc_const.
    //! Equality checks use `assert!(a == b)` rather than `assert_eq!`
    //! throughout — presumably U256 does not implement Debug; verify
    //! before switching.
    use super::*;
    use crate::ecc_const::{SM2_N, SM2_N_K0, SM2_N_RR, SM2_P, SM2_P_RR};
    use crate::test_data::tests::*;
    use log::{info, warn};
    use measure::Measure;
    use simple_logger::SimpleLogger;

    // Sanity-check the curve constants: SM2 uses a = p - 3.
    #[test]
    fn test_sm2_params() {
        use crate::ecc_const::SM2_A;
        let mut rr = SM2_A.clone();
        // uadd returns a carry flag; a + 3 must not overflow 256 bits.
        assert!(!rr.uadd(3));
        // assert a + 3 == p
        assert!(rr == SM2_P);
    }

    // calcK0 must reproduce the precomputed -p^{-1} mod 2^64 constants
    // for both the field prime P and the group order N.
    #[test]
    #[allow(non_snake_case)]
    fn test_calcK0() {
        use crate::ecc_const::{SM2_N, SM2_N_K0, SM2_P_K0};
        let k0 = calcK0(&SM2_P);
        assert_eq!(k0, SM2_P_K0);
        let k0 = calcK0(&SM2_N);
        assert_eq!(k0, SM2_N_K0);
    }

    // calcRR must reproduce the precomputed 2^512 mod m constants.
    #[test]
    #[allow(non_snake_case)]
    fn test_calcRR() {
        use crate::ecc_const::{SM2_N, SM2_N_RR, SM2_P_RR};
        let rr = calcRR(&SM2_P);
        assert!(rr == SM2_P_RR);
        let rr = calcRR(&SM2_N);
        assert!(rr == SM2_N_RR);
    }

    // Montgomery reduction maps a Montgomery-domain value back to the
    // plain domain. K0 == 1 exercises the special-cased fast path in
    // mont_reduction (valid for SM2_P); SM2_N takes the generic path.
    #[test]
    fn test_mont_reduction() {
        let dx1_r1 = mont_reduction::<1>(&dx1_m, &SM2_P);
        assert!(dx1_r1 == dx1);
        let dx1_r1 = mont_reduction::<SM2_N_K0>(&dx1_n, &SM2_N);
        assert!(dx1_r1 == dx1);
    }

    // The SM2_P-specialized reduction must agree with the generic one.
    #[test]
    fn test_sm2p_reduction() {
        let dx1_r1 = sm2p_reduction(&dx1_m);
        assert!(dx1_r1 == dx1);
    }

    // Same check for the inline-assembly reduction (asm feature only).
    #[test]
    #[cfg(feature = "asm")]
    fn test_sm2p_asm_reduction() {
        let dx1_r1 = sm2p_reduction_asm(&dx1_m);
        assert!(dx1_r1 == dx1);
    }

    // Multiplying by RR in the Montgomery domain converts a plain value
    // into its Montgomery form (x * RR * R^{-1} == x * R mod m).
    #[test]
    fn test_mont_mult() {
        let dx1_r1 = mont_mult::<1>(&dx1, &SM2_P_RR, &SM2_P);
        assert!(dx1_r1 == dx1_m);
        let dx1_r1 = mont_mult::<SM2_N_K0>(&dx1, &SM2_N_RR, &SM2_N);
        assert!(dx1_r1 == dx1_n);
    }

    // The Mont wrapper type must behave like the free mont_mult function,
    // both with an explicit K0 parameter and with the default.
    #[test]
    fn test_mont_trait() {
        // following assignes forms ok
        //let mont: Mont<1> = Mont::new(&SM2_P);
        let mont: Mont = Mont::<1>::new(&SM2_P);
        let dx1_r1 = mont.mult(&dx1, &SM2_P_RR);
        assert!(dx1_r1 == dx1_m);
        // test default K0
        let mont: Mont = Mont::new(&SM2_P);
        let dx1_r1 = mont.mult(&dx1, &SM2_P_RR);
        assert!(dx1_r1 == dx1_m);
    }

    // K0 == 1 is wrong for SM2_N; presumably Mont::new (or mult) validates
    // K0 against the modulus and panics — confirm against Mont's impl.
    #[test]
    #[should_panic]
    fn test_mont_struct() {
        let mont = Mont::<1>::new(&SM2_N);
        let _dx1_r1 = mont.mult(&dx1, &SM2_N_RR);
    }

    // Modular inverse through the generic Mont type, checked against
    // precomputed inverses of two test values.
    #[test]
    fn test_mont_inverse() {
        let mont: Mont = Mont::new(&SM2_P);
        let r1 = mont.inverse(&dx1);
        assert!(r1 == x1_inv);
        let r2 = mont.inverse(&dx2);
        assert!(r2 == x2_inv);
    }

    // Same inverses through the SM2_P-specialized MontSM2p type.
    #[test]
    fn test_mont_sm2p_inverse() {
        let mont = MontSM2p::new();
        let r1 = mont.inverse(&dx1);
        assert!(r1 == x1_inv);
        let r2 = mont.inverse(&dx2);
        assert!(r2 == x2_inv);
    }

    // x eq y or x eq -y modulo SM2_P
    // (a square root is only determined up to sign, so sqrt tests accept
    // either root; x + y == 0 mod p means x == -y).
    fn is_eq_or_neg(x: &U256, y: &U256) -> bool {
        if x == y {
            return true;
        }
        warn!("try cmp x, -y");
        x.mod_add(y, &SM2_P).is_zero()
    }

    // sqrt(x^2) must return x or -x, via both the generic Mont and the
    // asm-optimized MontSM2p implementations.
    #[test]
    fn test_sqrt() {
        // SimpleLogger::init fails if a logger is already set; not fatal.
        if let Err(s) = SimpleLogger::new().init() {
            warn!("SimpleLogger init: {}", s);
        }
        info!("test sqrt via Mont::SM2_P");
        let mont: Mont = Mont::new(&SM2_P);
        let x1s = mont.sqr(&dx1, 1);
        assert!(x1s == dx1_sq);
        if let Some(r1) = mont.sqrt(&x1s) {
            assert!(is_eq_or_neg(&r1, &dx1));
        } else {
            assert!(false, "sqrt return None");
        }
        let x1s = mont.sqr(&dx2, 1);
        if let Some(r1) = mont.sqrt(&x1s) {
            assert!(is_eq_or_neg(&r1, &dx2));
        } else {
            assert!(false, "sqrt return None");
        }
        info!("test sqrt via MontSM2p");
        // MontSM2p for asm optimized
        let mont = MontSM2p::new();
        let x1s = mont.sqr(&dx1, 1);
        assert!(x1s == dx1_sq);
        if let Some(r1) = mont.sqrt(&x1s) {
            assert!(is_eq_or_neg(&r1, &dx1));
        } else {
            assert!(false, "sqrt return None");
        }
        let x1s = mont.sqr(&dx2, 1);
        if let Some(r1) = mont.sqrt(&x1s) {
            assert!(is_eq_or_neg(&r1, &dx2));
        } else {
            assert!(false, "sqrt return None");
        }
    }

    // Round trip: multiply into the Montgomery domain, reduce back out.
    #[test]
    fn test_mult_reduction() {
        let dx1_mo = sm2p_mult(&dx1, &SM2_P_RR);
        assert!(dx1_mo == dx1_m);
        let r_dx1 = sm2p_reduction(&dx1_m);
        assert!(dx1 == r_dx1);
    }

    // Same round trip with the asm multiplication (reduction stays on the
    // portable path; the asm reduction is covered separately above).
    #[test]
    #[cfg(feature = "asm")]
    fn test_mult_asm_reduction() {
        let dx1_mo = sm2p_mult_asm(&dx1, &SM2_P_RR);
        assert!(dx1_mo == dx1_m);
        let r_dx1 = sm2p_reduction(&dx1_m);
        assert!(dx1 == r_dx1);
    }

    // The dedicated asm squaring must match asm mult(x, x), and the
    // MontSM2p::sqr wrapper must match both.
    #[test]
    #[cfg(feature = "asm")]
    fn test_sm2p_sqr_asm() {
        let dx1_mo = sm2p_mult_asm(&dx1, &dx1);
        let dx1_s = sm2p_sqr_asm(&dx1);
        assert!(dx1_mo == dx1_s);
        let mont = MontSM2p::new();
        let r1 = mont.sqr(&dx1, 1);
        assert!(r1 == dx1_mo);
    }

    // Micro-benchmarks below are #[ignore]d; run explicitly with
    // `cargo test -- --ignored`.
    #[test]
    #[ignore]
    fn bench_sm2p_reduction() {
        const N: u32 = 100_000;
        let mut measure = Measure::start("sm2p_reduction bench");
        for _it in 0..N {
            let _r1 = sm2p_reduction(&dx1_m);
        }
        measure.stop();
        let ns_ops = measure.as_ns() / (N as u64);
        println!("sm2p_reduction cost {} ns per Op", ns_ops);
    }

    #[test]
    #[ignore]
    fn bench_sm2p_mult() {
        const N: u32 = 100_000;
        let mut measure = Measure::start("sm2p_mult bench");
        for _it in 0..N {
            let _r1 = sm2p_mult(&dx1, &SM2_P_RR);
        }
        measure.stop();
        let ns_ops = measure.as_ns() / (N as u64);
        println!("sm2p_mult cost {} ns per Op", ns_ops);
    }

    #[test]
    #[ignore]
    fn bench_sm2n_mult() {
        const N: u32 = 100_000;
        let mut measure = Measure::start("sm2n_mult bench");
        for _it in 0..N {
            let _r1 = mont_mult::<SM2_N_K0>(&dx1, &SM2_N_RR, &SM2_N);
        }
        measure.stop();
        let ns_ops = measure.as_ns() / (N as u64);
        println!("sm2n_mult cost {} ns per Op", ns_ops);
    }
}
