use std::arch::asm;
use std::cmp::Ordering;
use std::fmt;
//use log::info;

/// Add with carry-in: computes `a + b + carry` and returns `(sum, carry_out)`
/// with `carry_out` in {0, 1}.  The asm paths fold `carry` in with a real
/// add, so callers are expected to pass 0 or 1; the portable path normalizes
/// any non-zero carry to 1.
#[inline]
pub fn u64_addc(a: u64, b: u64, carry: u64) -> (u64, u64) {
    let res: u64;
    let cc: u64;
    #[cfg(target_arch = "x86_64")]
    unsafe {
        // x = a + carry, capture CF into c, then x += b and fold the second
        // CF into c.  For a 0/1 carry-in at most one carry-out fires in total.
        asm!(
            "add {x}, {c}",
            "mov {c}, 0",
            "adc {c}, 0",
            "add {x}, {b}",
            "adc {c}, 0",
            x = inout(reg) a => res,
            b = in(reg) b,
            c = inout(reg) carry => cc,
            options(nomem, nostack),
        );
    }
    #[cfg(target_arch = "aarch64")]
    unsafe {
        // Same two-step add; `adc c, xzr, xzr` materializes the carry flag.
        asm!(
            "adds {x}, {x}, {c}",
            "adc {c}, xzr, xzr",
            "adds {x}, {x}, {b}",
            "adc {c}, {c}, xzr",
            x = inout(reg) a => res,
            b = in(reg) b,
            c = inout(reg) carry => cc,
            options(nomem, nostack),
        );
    }
    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
    {
        // NOTE(review): `carrying_add` is nightly-only (bigint_helper_methods),
        // so this fallback only builds on a nightly toolchain.
        let mut cc1: bool = carry != 0;
        (res, cc1) = a.carrying_add(b, cc1);
        cc = cc1 as u64;
    }
    (res, cc)
}

/// Plain add: computes `a + b` and returns `(sum, carry_out)` with
/// `carry_out` in {0, 1}.
#[inline]
pub fn u64_add(a: u64, b: u64) -> (u64, u64) {
    let res: u64;
    let cc: u64;
    #[cfg(target_arch = "x86_64")]
    unsafe {
        // x += b, then capture CF into c (mov does not disturb flags).
        asm!(
            "add {x}, {b}",
            "mov {c}, 0",
            "adc {c}, 0",
            x = inlateout(reg) a => res,
            b = in(reg) b,
            c = lateout(reg) cc,
            options(nomem, nostack),
        );
    }
    #[cfg(target_arch = "aarch64")]
    unsafe {
        // adds sets the carry flag; adc with xzr materializes it into c.
        asm!(
            "adds {x}, {x}, {b}",
            "adc {c}, xzr, xzr",
            x = inlateout(reg) a => res,
            b = in(reg) b,
            c = lateout(reg) cc,
            options(nomem, nostack),
        );
    }
    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
    {
        // NOTE(review): `carrying_add` is nightly-only (bigint_helper_methods).
        let cc1: bool;
        (res, cc1) = a.carrying_add(b, false);
        cc = cc1 as u64;
    }
    (res, cc)
}

/// Subtract with borrow-in: computes `a - b - carry` and returns
/// `(difference, borrow_out)` with `borrow_out` in {0, 1}.
#[inline]
pub fn u64_subc(a: u64, b: u64, carry: u64) -> (u64, u64) {
    let res: u64;
    let mut cc: u64;
    #[cfg(target_arch = "x86_64")]
    unsafe {
        // x = a - carry (c becomes 0 or all-ones via sbb), then x -= b
        // folding the second borrow into c.  The two borrows cannot both
        // fire for a 0/1 carry-in, so masking with 1 yields 0 or 1.
        asm!(
            "sub {x}, {c}",
            "mov {c}, 0",
            "sbb {c}, 0",
            "sub {x}, {b}",
            "sbb {c}, 0",
            x = inout(reg) a => res,
            b = in(reg) b,
            c = inout(reg) carry => cc,
            options(nomem, nostack),
        );
        cc &= 1;
    }
    #[cfg(target_arch = "aarch64")]
    unsafe {
        // aarch64 inverts the carry flag on subtraction; `sbc c, xzr, xzr`
        // yields 0 (no borrow) or all-ones (borrow), masked to 0/1 below.
        asm!(
            "subs {x}, {x}, {c}",
            "sbc {c}, xzr, xzr",
            "subs {x}, {x}, {b}",
            "sbc {c}, {c}, xzr",
            x = inout(reg) a => res,
            b = in(reg) b,
            c = inout(reg) carry => cc,
            options(nomem, nostack),
        );
        cc &= 1;
    }
    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
    {
        // NOTE(review): `borrowing_sub` is nightly-only (bigint_helper_methods).
        let mut cc1: bool = carry != 0;
        (res, cc1) = a.borrowing_sub(b, cc1);
        cc = cc1 as u64;
    }
    (res, cc)
}

/// Plain subtract: computes `a - b` and returns `(difference, borrow_out)`
/// with `borrow_out` in {0, 1} (1 when a < b).
#[inline]
pub fn u64_sub(a: u64, b: u64) -> (u64, u64) {
    let res: u64;
    let mut cc: u64;
    #[cfg(target_arch = "x86_64")]
    unsafe {
        // x -= b, then capture the borrow (CF) into c via sbb and mask.
        asm!(
            "sub {x}, {b}",
            "mov {c}, 0",
            "sbb {c}, 0",
            x = inlateout(reg) a => res,
            b = in(reg) b,
            c = lateout(reg) cc,
            options(nomem, nostack),
        );
        cc &= 1;
    }
    #[cfg(target_arch = "aarch64")]
    unsafe {
        // sbc with xzr yields 0 or all-ones (borrow); masked to 0/1 below.
        asm!(
            "subs {x}, {x}, {b}",
            "sbc {c}, xzr, xzr",
            x = inlateout(reg) a => res,
            b = in(reg) b,
            c = lateout(reg) cc,
            options(nomem, nostack),
        );
        cc &= 1;
    }
    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
    {
        // NOTE(review): `borrowing_sub` is nightly-only (bigint_helper_methods).
        let cc1: bool;
        (res, cc1) = a.borrowing_sub(b, false);
        cc = cc1 as u64;
    }
    (res, cc)
}

/// u64_mulc calc: (low u64, high u64) = a * b + carry
///
/// The result cannot overflow 128 bits: the maximum value,
/// (2^64-1)^2 + (2^64-1), is exactly (2^64-1) * 2^64.
#[inline]
pub fn u64_mulc(a: u64, b: u64, carry: u64) -> (u64, u64) {
    let res: u64;
    let cc: u64;
    #[cfg(target_arch = "x86_64")]
    unsafe {
        // mul rdx: rdx:rax = rax * rdx; then fold `carry` into the 128-bit
        // product (add low, propagate into high).
        asm!(
            "mul rdx",
            "add rax, {c}",
            "adc rdx, 0",
            c = in(reg) carry,
            inout("rax") a => res,
            inout("rdx") b => cc,
            options(nomem, nostack),
        );
    }
    #[cfg(target_arch = "aarch64")]
    unsafe {
        // MUL/UMULH produce the 128-bit product, then add the carry.
        asm!(
            "MUL {y}, {x}, {b}",
            "UMULH {b}, {x}, {b}",
            "ADDS {y}, {y}, {c}",
            "ADC {b}, {b}, xzr",
            x = in(reg) a,
            y = out(reg) res,
            b = inout(reg) b => cc,
            c = in(reg) carry,
            options(nomem, nostack),
        );
    }
    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
    {
        // Portable path via stable u128 widening arithmetic.  This replaces
        // the previous call to the nightly-only `carrying_mul`, which was
        // also invoked with the wrong arity and did not compile.
        let wide = (a as u128) * (b as u128) + (carry as u128);
        res = wide as u64;
        cc = (wide >> 64) as u64;
    }
    (res, cc)
}

/// u64_mul calc: (low u64, high u64) = a * b
#[inline]
pub fn u64_mul(a: u64, b: u64) -> (u64, u64) {
    let res: u64;
    let cc: u64;
    #[cfg(target_arch = "x86_64")]
    unsafe {
        // mul rdx: rdx:rax = rax * rdx.
        // Fixed: `preserves_flags` removed — x86 `mul` writes CF/OF and
        // leaves SF/ZF/AF/PF undefined, so claiming the flags are preserved
        // was unsound.
        asm!(
            "mul rdx",
            inout("rax") a => res,
            inout("rdx") b => cc,
            options(nomem, nostack),
        );
    }
    #[cfg(target_arch = "aarch64")]
    unsafe {
        // MUL/UMULH do not touch NZCV, so preserves_flags is correct here.
        asm!(
            "MUL {y}, {x}, {b}",
            "UMULH {b}, {x}, {b}",
            x = in(reg) a,
            y = out(reg) res,
            b = inout(reg) b => cc,
            options(nomem, nostack, preserves_flags),
        );
    }
    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
    {
        // Portable path via stable u128 widening (the previous nightly-only
        // `carrying_mul` call had the wrong arity and did not compile).
        let wide = (a as u128) * (b as u128);
        res = wide as u64;
        cc = (wide >> 64) as u64;
    }
    (res, cc)
}

/// u64_smul: low u64 = a * b, drop high result
#[inline]
pub fn u64_smul(a: u64, b: u64) -> u64 {
    let res: u64;
    #[cfg(target_arch = "x86_64")]
    unsafe {
        // Fixed: `preserves_flags` removed — x86 `mul` writes CF/OF and
        // leaves SF/ZF/AF/PF undefined, so claiming the flags are preserved
        // was unsound.
        asm!(
            "mul rdx",
            inlateout("rax") a => res,
            inlateout("rdx") b => _,
            options(nomem, nostack),
        );
    }
    #[cfg(target_arch = "aarch64")]
    unsafe {
        // MUL does not touch NZCV, so preserves_flags is correct here.
        asm!(
            "MUL {x}, {x}, {b}",
            x = inlateout(reg) a => res,
            b = in(reg) b,
            options(nomem, nostack, preserves_flags),
        );
    }
    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
    {
        // Low 64 bits of the product == wrapping multiplication (stable);
        // replaces the wrong-arity nightly `carrying_mul` call.
        res = a.wrapping_mul(b);
    }
    res
}

/// 256-bit unsigned integer stored as four little-endian u64 limbs
/// (`data[0]` is the least significant).
/// NOTE(review): `#[repr(C)]` pins the field layout — presumably so the raw
/// pointers from `ptr`/`mut_ptr` have a guaranteed limb order; confirm
/// against the consumers of those pointers.
#[repr(C)]
#[derive(Eq, Clone, Default)]
pub struct U256 {
    data: [u64; 4],
}

impl PartialEq for U256 {
    /// Limb-wise equality over all four limbs.
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.data.iter().eq(other.data.iter())
    }
}

impl PartialOrd for U256 {
    /// Always defined: delegates to the total order from `Ord`.
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(Ord::cmp(self, other))
    }
}

impl Ord for U256 {
    /// Compares via a full borrow-propagating subtraction: every limb is
    /// processed in a straight line (no per-limb early exit).
    #[inline]
    fn cmp(&self, b: &Self) -> Ordering {
        let mut carry: u64;
        let mut res: u64;
        // `res` accumulates the OR of all difference limbs (zero iff equal);
        // `carry` ends as the final borrow (1 iff self < b).
        (res, carry) = u64_sub(self.data[0], b.data[0]);
        let mut rr: u64;
        (rr, carry) = u64_subc(self.data[1], b.data[1], carry);
        res |= rr;
        (rr, carry) = u64_subc(self.data[2], b.data[2], carry);
        res |= rr;
        (rr, carry) = u64_subc(self.data[3], b.data[3], carry);
        res |= rr;
        if res == 0 {
            Ordering::Equal
        } else if carry != 0 {
            Ordering::Less
        } else {
            Ordering::Greater
        }
    }
}

impl fmt::Display for U256 {
    /// Prints the limbs least-significant first as an uppercase-hex tuple,
    /// e.g. `(0x2A, 0x0, 0x0, 0x0)`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let [d0, d1, d2, d3] = self.data;
        write!(f, "({d0:#X}, {d1:#X}, {d2:#X}, {d3:#X})")
    }
}

// Widening conversion: the u64 becomes the least-significant limb.
// (Smaller unsigned types go through `from_uint` instead.)
impl From<u64> for U256 {
    fn from(a0: u64) -> U256 {
        let mut data = [0_u64; 4];
        data[0] = a0;
        U256 { data }
    }
}

impl U256 {
    /// Constructs a U256 from four little-endian limbs (`a0` least significant).
    #[inline]
    pub const fn new(a0: u64, a1: u64, a2: u64, a3: u64) -> U256 {
        let data = [a0, a1, a2, a3];
        U256 { data }
    }
    /// Builds a U256 from any integer convertible to i64.  The value is
    /// reinterpreted bitwise as u64: a negative input sign-extends into
    /// limb 0 only, the upper limbs stay zero.
    #[inline]
    pub fn from_int<T>(a0: T) -> U256
    where
        T: Into<i64>,
    {
        let signed: i64 = a0.into();
        U256 {
            data: [signed as u64, 0, 0, 0],
        }
    }
    /// Builds a U256 from any unsigned integer convertible to u64;
    /// the value lands in the least-significant limb.
    #[inline]
    pub fn from_uint<T>(a0: T) -> U256
    where
        T: Into<u64>,
    {
        let low: u64 = a0.into();
        U256 {
            data: [low, 0, 0, 0],
        }
    }
    /// Returns a random non-zero U256 drawn from `rand::thread_rng`.
    #[inline]
    pub fn random() -> U256 {
        use rand::Rng;
        let mut rng = rand::thread_rng();
        let mut res = U256 {
            data: rng.gen::<[u64; 4]>(),
        };
        // Re-draw once on the (astronomically unlikely) all-zero result;
        // if that is zero too, patch limb 0 with a fresh draw, substituting
        // a fixed non-zero constant should that draw also be zero.
        if res.is_zero() {
            res.data = rng.gen::<[u64; 4]>();
            if res.is_zero() {
                let mut r0 = rng.gen::<u64>();
                r0 = if r0 == 0 { 0x200901035858 } else { r0 };
                res.data[0] = r0;
            }
        }
        res
    }
    /// Fixed-width hex representation, most-significant limb first:
    /// `0X` followed by exactly 64 uppercase hex digits.
    /// NOTE: this inherent method shadows `ToString::to_string`, and its
    /// output format differs from the `Display` impl.
    #[inline]
    pub fn to_string(&self) -> String {
        let mut out = String::with_capacity(66);
        out.push_str("0X");
        for limb in self.data.iter().rev() {
            out.push_str(&format!("{limb:016X}"));
        }
        out
    }
    /// Parses a 64-hex-digit string (optional `0x`/`0X` prefix,
    /// case-insensitive) into a U256.  Returns `None` when the digit count
    /// is not exactly 64 or any character is not a hex digit.
    #[inline]
    pub fn from_string(v: &str) -> Option<U256> {
        let lower = v.to_lowercase();
        let hex = lower.strip_prefix("0x").unwrap_or(lower.as_str()).as_bytes();
        if hex.len() != 64 {
            return None;
        }
        // Pack two hex digits per byte, big-endian over the whole string.
        let mut bytes = [0_u8; 32];
        for (i, &ch) in hex.iter().enumerate() {
            let nibble = match ch {
                b'0'..=b'9' => ch - b'0',
                b'a'..=b'f' => ch - b'a' + 10,
                _ => return None,
            };
            if i % 2 == 0 {
                bytes[i / 2] = nibble << 4;
            } else {
                bytes[i / 2] |= nibble;
            }
        }
        // The string is big-endian; limbs are stored little-endian.
        let r3 = u64::from_be_bytes(bytes[0..8].try_into().unwrap());
        let r2 = u64::from_be_bytes(bytes[8..16].try_into().unwrap());
        let r1 = u64::from_be_bytes(bytes[16..24].try_into().unwrap());
        let r0 = u64::from_be_bytes(bytes[24..].try_into().unwrap());
        Some(U256 {
            data: [r0, r1, r2, r3],
        })
    }
    /// Borrows the four little-endian limbs as a slice.
    #[inline]
    pub fn as_slice(&self) -> &[u64] {
        self.data.as_slice()
    }
    /// Raw mutable pointer to the first (least-significant) limb.
    /// Valid for 4 u64s while `self` is alive and not moved.
    #[inline]
    pub fn mut_ptr(&mut self) -> *mut u64 {
        self.data.as_mut_ptr()
    }
    /// Raw const pointer to the first (least-significant) limb.
    /// Valid for 4 u64s while `self` is alive and not moved.
    #[inline]
    pub fn ptr(&self) -> *const u64 {
        self.data.as_ptr()
    }
    /// Full 256-bit addition: returns `(self + b, carry_out)` where
    /// `carry_out` is 0 or 1 (1 when the sum wraps mod 2^256).
    #[inline]
    pub fn add(&self, b: &Self) -> (U256, u64) {
        let mut carry: u64;
        let r0: u64;
        let r1: u64;
        let r2: u64;
        let r3: u64;
        #[cfg(target_arch = "x86_64")]
        unsafe {
            // Limb-wise add/adc chain; {c} captures the final carry flag.
            asm!(
            "add {r4}, {r0}",
            "adc {r5}, {r1}",
            "adc {r6}, {r2}",
            "adc {r7}, {r3}",
            "mov {c}, 0",
            "adc {c}, 0",
            r0 = in(reg) b.data[0],
            r1 = in(reg) b.data[1],
            r2 = in(reg) b.data[2],
            r3 = in(reg) b.data[3],
            c = lateout(reg) carry,
            r4 = inout(reg) self.data[0] => r0,
            r5 = inout(reg) self.data[1] => r1,
            r6 = inout(reg) self.data[2] => r2,
            r7 = inout(reg) self.data[3] => r3,
            options(nomem, nostack),
            );
        }
        #[cfg(target_arch = "aarch64")]
        unsafe {
            // adds/adcs chain; the final adc materializes the carry into {c}.
            asm!(
            "adds {r4}, {r4}, {r0}",
            "adcs {r5}, {r5}, {r1}",
            "adcs {r6}, {r6}, {r2}",
            "adcs {r7}, {r7}, {r3}",
            "adc {c}, xzr, xzr",
            r0 = in(reg) b.data[0],
            r1 = in(reg) b.data[1],
            r2 = in(reg) b.data[2],
            r3 = in(reg) b.data[3],
            c = lateout(reg) carry,
            r4 = inout(reg) self.data[0] => r0,
            r5 = inout(reg) self.data[1] => r1,
            r6 = inout(reg) self.data[2] => r2,
            r7 = inout(reg) self.data[3] => r3,
            options(nomem, nostack),
            );
        }
        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
        {
            // Portable carry-chain fallback.
            (r0, carry) = u64_add(self.data[0], b.data[0]);
            (r1, carry) = u64_addc(self.data[1], b.data[1], carry);
            (r2, carry) = u64_addc(self.data[2], b.data[2], carry);
            (r3, carry) = u64_addc(self.data[3], b.data[3], carry);
        }
        (
            U256 {
                data: [r0, r1, r2, r3],
            },
            carry,
        )
    }
    /// In-place addition: `self += b`; returns the carry out of the top limb.
    #[inline]
    pub fn add_to(&mut self, b: &Self) -> u64 {
        let (sum, carry) = self.add(b);
        self.data = sum.data;
        carry
    }
    /// Full 256-bit subtraction: returns `(self - b, borrow)` with borrow
    /// 0 or 1 (1 when self < b; the difference then wraps mod 2^256).
    #[inline]
    pub fn sub(&self, b: &Self) -> (U256, u64) {
        let mut carry: u64;
        let r0: u64;
        let r1: u64;
        let r2: u64;
        let r3: u64;
        #[cfg(target_arch = "x86_64")]
        unsafe {
            // Limb-wise sub/sbb chain; {c} captures the final borrow.
            asm!(
            "sub {r4}, {r0}",
            "sbb {r5}, {r1}",
            "sbb {r6}, {r2}",
            "sbb {r7}, {r3}",
            "mov {c}, 0",
            "sbb {c}, 0",
            r0 = in(reg) b.data[0],
            r1 = in(reg) b.data[1],
            r2 = in(reg) b.data[2],
            r3 = in(reg) b.data[3],
            c = lateout(reg) carry,
            r4 = inout(reg) self.data[0] => r0,
            r5 = inout(reg) self.data[1] => r1,
            r6 = inout(reg) self.data[2] => r2,
            r7 = inout(reg) self.data[3] => r3,
            options(nomem, nostack),
            );
            carry &= 1;
        }
        #[cfg(target_arch = "aarch64")]
        unsafe {
            // subs/sbcs chain; sbc with xzr yields 0 or all-ones, masked below.
            asm!(
            "subs {r4}, {r4}, {r0}",
            "sbcs {r5}, {r5}, {r1}",
            "sbcs {r6}, {r6}, {r2}",
            "sbcs {r7}, {r7}, {r3}",
            "sbc {c}, xzr, xzr",
            r0 = in(reg) b.data[0],
            r1 = in(reg) b.data[1],
            r2 = in(reg) b.data[2],
            r3 = in(reg) b.data[3],
            c = lateout(reg) carry,
            r4 = inout(reg) self.data[0] => r0,
            r5 = inout(reg) self.data[1] => r1,
            r6 = inout(reg) self.data[2] => r2,
            r7 = inout(reg) self.data[3] => r3,
            options(nomem, nostack),
            );
            carry &= 1;
        }
        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
        {
            // Portable borrow-chain fallback.
            (r0, carry) = u64_sub(self.data[0], b.data[0]);
            (r1, carry) = u64_subc(self.data[1], b.data[1], carry);
            (r2, carry) = u64_subc(self.data[2], b.data[2], carry);
            (r3, carry) = u64_subc(self.data[3], b.data[3], carry);
        }
        (
            U256 {
                data: [r0, r1, r2, r3],
            },
            carry,
        )
    }
    /// In-place subtraction: `self -= b`; returns the final borrow (0 or 1).
    #[inline]
    pub fn sub_from(&mut self, b: &Self) -> u64 {
        let (diff, borrow) = self.sub(b);
        self.data = diff.data;
        borrow
    }
    /// Adds `(r0..r3)` limb-wise to `self` and returns the four sums plus a
    /// fifth limb equal to `c1 + c2 + carry_out` (that final addition's own
    /// overflow is discarded).
    #[inline]
    pub fn addc4(
        &self,
        r0: u64,
        r1: u64,
        r2: u64,
        r3: u64,
        c1: u64,
        c2: u64,
    ) -> (u64, u64, u64, u64, u64) {
        let (sum0, c) = u64_add(r0, self.data[0]);
        let (sum1, c) = u64_addc(r1, self.data[1], c);
        let (sum2, c) = u64_addc(r2, self.data[2], c);
        let (sum3, c) = u64_addc(r3, self.data[3], c);
        let (top, _) = u64_addc(c1, c2, c);
        (sum0, sum1, sum2, sum3, top)
    }
    /// Conditional reduction: treats `(cc, self)` as a 257-bit value and
    /// returns `self - p` when that value is >= p, otherwise `self`.
    /// NOTE(review): expects `cc` in {0, 1} and the 257-bit value below 2p
    /// for a fully reduced result — confirm at call sites.
    #[inline]
    pub fn modp(&self, p: &Self, cc: u64) -> U256 {
        let mut borrow: u64;
        let mut r0: u64;
        let mut r1: u64;
        let mut r2: u64;
        let mut r3: u64;
        #[cfg(target_arch = "x86_64")]
        unsafe {
            // (r0..r3) = self - p, borrow flag into {c}.
            asm!(
            "sub {r4}, {r0}",
            "sbb {r5}, {r1}",
            "sbb {r6}, {r2}",
            "sbb {r7}, {r3}",
            "mov {c}, 0",
            "sbb {c}, 0",
            r0 = in(reg) p.data[0],
            r1 = in(reg) p.data[1],
            r2 = in(reg) p.data[2],
            r3 = in(reg) p.data[3],
            c = lateout(reg) borrow,
            r4 = inout(reg) self.data[0] => r0,
            r5 = inout(reg) self.data[1] => r1,
            r6 = inout(reg) self.data[2] => r2,
            r7 = inout(reg) self.data[3] => r3,
            options(nomem, nostack),
            );
            borrow &= 1;
        }
        #[cfg(target_arch = "aarch64")]
        unsafe {
            // (r0..r3) = self - p; sbc with xzr yields 0 or all-ones, masked.
            asm!(
            "subs {r4}, {r4}, {r0}",
            "sbcs {r5}, {r5}, {r1}",
            "sbcs {r6}, {r6}, {r2}",
            "sbcs {r7}, {r7}, {r3}",
            "sbc {c}, xzr, xzr",
            r0 = in(reg) p.data[0],
            r1 = in(reg) p.data[1],
            r2 = in(reg) p.data[2],
            r3 = in(reg) p.data[3],
            c = lateout(reg) borrow,
            r4 = inout(reg) self.data[0] => r0,
            r5 = inout(reg) self.data[1] => r1,
            r6 = inout(reg) self.data[2] => r2,
            r7 = inout(reg) self.data[3] => r3,
            options(nomem, nostack),
            );
            borrow &= 1;
        }
        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
        {
            (r0, borrow) = u64_sub(self.data[0], p.data[0]);
            (r1, borrow) = u64_subc(self.data[1], p.data[1], borrow);
            (r2, borrow) = u64_subc(self.data[2], p.data[2], borrow);
            (r3, borrow) = u64_subc(self.data[3], p.data[3], borrow);
        }
        // Keep the un-reduced value only when the subtraction borrowed more
        // than the incoming carry covers (cc < borrow, i.e. self + cc*2^256 < p).
        r0 = if cc < borrow { self.data[0] } else { r0 };
        r1 = if cc < borrow { self.data[1] } else { r1 };
        r2 = if cc < borrow { self.data[2] } else { r2 };
        r3 = if cc < borrow { self.data[3] } else { r3 };
        U256 {
            data: [r0, r1, r2, r3],
        }
    }
    /// uadd   add u64 to U256 in place, return true on carry out of the top
    /// limb (i.e. the value wrapped mod 2^256).
    #[inline]
    pub fn uadd(&mut self, y: u64) -> bool {
        let r0: u64;
        let r1: u64;
        let r2: u64;
        let r3: u64;
        let mut cc: u64;
        #[cfg(target_arch = "x86_64")]
        unsafe {
            // x86 `inc` does not update CF, so an explicit add/adc chain is
            // required to propagate the carry through all four limbs.
            asm!(
            "add {r0}, {y}",
            "adc {r1}, 0",
            "adc {r2}, 0",
            "adc {r3}, 0",
            "mov {c}, 0",
            "adc {c}, 0",
            c = lateout(reg) cc,
            y = in(reg) y,
            r0 = inout(reg) self.data[0] => r0,
            r1 = inout(reg) self.data[1] => r1,
            r2 = inout(reg) self.data[2] => r2,
            r3 = inout(reg) self.data[3] => r3,
            options(nomem, nostack),
            );
        }
        #[cfg(target_arch = "aarch64")]
        unsafe {
            // adds/adcs chain propagating the scalar add through the limbs.
            asm!(
            "adds {r0}, {r0}, {y}",
            "adcs {r1}, {r1}, xzr",
            "adcs {r2}, {r2}, xzr",
            "adcs {r3}, {r3}, xzr",
            "adc {c}, xzr, xzr",
            c = lateout(reg) cc,
            y = in(reg) y,
            r0 = inout(reg) self.data[0] => r0,
            r1 = inout(reg) self.data[1] => r1,
            r2 = inout(reg) self.data[2] => r2,
            r3 = inout(reg) self.data[3] => r3,
            options(nomem, nostack),
            );
        }
        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
        {
            (r0, cc) = u64_add(self.data[0], y);
            (r1, cc) = u64_addc(self.data[1], 0, cc);
            (r2, cc) = u64_addc(self.data[2], 0, cc);
            (r3, cc) = u64_addc(self.data[3], 0, cc);
        }
        self.data = [r0, r1, r2, r3];
        cc != 0
    }
    /// usub   sub u64 from U256 in place, return true on borrow out of the
    /// top limb (i.e. self < y and the value wrapped mod 2^256).
    #[inline]
    pub fn usub(&mut self, y: u64) -> bool {
        let r0: u64;
        let r1: u64;
        let r2: u64;
        let r3: u64;
        let mut cc: u64;
        #[cfg(target_arch = "x86_64")]
        unsafe {
            // x86 `dec` does not update CF, so an explicit sub/sbb chain is
            // required to propagate the borrow through all four limbs.
            asm!(
            "sub {r0}, {y}",
            "sbb {r1}, 0",
            "sbb {r2}, 0",
            "sbb {r3}, 0",
            "mov {c}, 0",
            "sbb {c}, 0",
            c = lateout(reg) cc,
            y = in(reg) y,
            r0 = inout(reg) self.data[0] => r0,
            r1 = inout(reg) self.data[1] => r1,
            r2 = inout(reg) self.data[2] => r2,
            r3 = inout(reg) self.data[3] => r3,
            options(nomem, nostack),
            );
        }
        #[cfg(target_arch = "aarch64")]
        unsafe {
            // subs/sbcs chain propagating the scalar subtract through the limbs.
            asm!(
            "subs {r0}, {r0}, {y}",
            "sbcs {r1}, {r1}, xzr",
            "sbcs {r2}, {r2}, xzr",
            "sbcs {r3}, {r3}, xzr",
            "sbc {c}, xzr, xzr",
            c = lateout(reg) cc,
            y = in(reg) y,
            r0 = inout(reg) self.data[0] => r0,
            r1 = inout(reg) self.data[1] => r1,
            r2 = inout(reg) self.data[2] => r2,
            r3 = inout(reg) self.data[3] => r3,
            options(nomem, nostack),
            );
        }
        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
        {
            (r0, cc) = u64_sub(self.data[0], y);
            (r1, cc) = u64_subc(self.data[1], 0, cc);
            (r2, cc) = u64_subc(self.data[2], 0, cc);
            (r3, cc) = u64_subc(self.data[3], 0, cc);
        }
        self.data = [r0, r1, r2, r3];
        cc != 0
    }
    /// Right-shifts the whole value by one bit, inserting the low bit of
    /// `carry` as the new most-significant bit.
    #[inline]
    pub fn rshift1(&mut self, carry: u64) {
        // Walk from the most-significant limb down, threading each limb's
        // outgoing low bit into the next limb's top position.
        let mut incoming = carry << 63;
        for limb in self.data.iter_mut().rev() {
            let old = *limb;
            *limb = (old >> 1) | incoming;
            incoming = old << 63;
        }
    }
    /// Left-shifts the whole value by one bit; returns the bit shifted out
    /// of the top limb (0 or 1).
    #[inline]
    pub fn lshift1(&mut self) -> u64 {
        // Walk from the least-significant limb up, threading each limb's
        // outgoing top bit into the next limb's low position.
        let mut incoming = 0_u64;
        for limb in self.data.iter_mut() {
            let old = *limb;
            *limb = (old << 1) | incoming;
            incoming = old >> 63;
        }
        incoming
    }
    /// Multiplies by a u64 scalar: returns the low 256 bits of the product
    /// and the overflow limb (the product's bits 256..320).
    #[inline]
    pub fn umult(&self, u: u64) -> (U256, u64) {
        let (p0, c) = u64_mul(self.data[0], u);
        let (p1, c) = u64_mulc(self.data[1], u, c);
        let (p2, c) = u64_mulc(self.data[2], u, c);
        let (p3, c) = u64_mulc(self.data[3], u, c);
        (
            U256 {
                data: [p0, p1, p2, p3],
            },
            c,
        )
    }
    /// In-place scalar multiply: `self *= u`; returns the overflow limb.
    ///
    /// Fixed: the previous version fed `self.data[0]` — already overwritten
    /// by the first step — into every subsequent `u64_mulc`, so limbs 1..3
    /// were computed from the wrong inputs.  Each limb must be multiplied
    /// from its own original value (RHS is evaluated before the limb is
    /// assigned), mirroring `umult`.
    #[inline]
    pub fn umult_to(&mut self, u: u64) -> u64 {
        let mut cc: u64;
        (self.data[0], cc) = u64_mul(self.data[0], u);
        (self.data[1], cc) = u64_mulc(self.data[1], u, cc);
        (self.data[2], cc) = u64_mulc(self.data[2], u, cc);
        (self.data[3], cc) = u64_mulc(self.data[3], u, cc);
        cc
    }
    /// Right-shifts by one whole limb (64 bits): drops the low limb and
    /// inserts `cc` as the new top limb.
    #[inline]
    pub fn rshift1w(&mut self, cc: u64) {
        self.data.copy_within(1.., 0);
        self.data[3] = cc;
    }
    /// True when the low bit is clear.
    #[inline]
    pub fn is_even(&self) -> bool {
        self.data[0] & 1 == 0
    }
    /// True when the low bit is set.
    #[inline]
    pub fn is_odd(&self) -> bool {
        self.data[0] & 1 == 1
    }
    /// True when every limb is zero.  ORs all limbs together (straight-line,
    /// no early exit) before the single comparison.
    #[inline]
    pub fn is_zero(&self) -> bool {
        self.data.iter().fold(0_u64, |acc, &w| acc | w) == 0
    }
}

impl U256 {
    /// Number of significant limbs: 0 for the value zero, otherwise the
    /// index of the highest non-zero limb plus one (1..=4).
    #[inline]
    fn num_digits(&self) -> u32 {
        match self.data.iter().rposition(|&w| w != 0) {
            Some(idx) => idx as u32 + 1,
            None => 0,
        }
    }
    /// Position of the highest set bit plus one (0 for the value zero).
    #[inline]
    pub fn num_bits(&self) -> u32 {
        match self.num_digits() {
            0 => 0,
            // (n-1)*64 + (64 - leading_zeros) == n*64 - leading_zeros.
            n => n * 64 - self.data[n as usize - 1].leading_zeros(),
        }
    }
    /// Extracts a W-bit window starting at bit position `bit` — wNAF digit
    /// extraction for scalar multiplication.  `bit == -1` returns the low
    /// window of the value shifted left by one; windows past bit 255 read
    /// as zero.
    #[inline]
    pub fn get_bits<const W: u64>(&self, bit: i32) -> u32 {
        debug_assert!(W <= 8, "w of wNAF MUST no larger than 8");
        if bit < 0 {
            // bit MUST BE -1
            let rr: u64;
            rr = self.data[0] << 1;
            return rr as u32 & ((1 << W) - 1);
        }
        // Limb index holding the window's low bit.
        let mut off = bit as usize >> 6;
        if off >= 4 {
            return 0;
        }
        // Bit offset of the window inside that limb.
        let rem = bit as u64 & 0x3f;
        let mut res = self.data[off] >> rem;
        // Window straddles a limb boundary: pull the low bits of the next
        // limb (only possible while a next limb exists, i.e. bit < 192).
        if bit < 3 * 64 && rem > (64 - W) {
            off += 1;
            res |= self.data[off] << (64 - rem);
        }
        res as u32 & ((1 << W) - 1)
    }
    /// Modular addition: returns `(self + b) mod prime`.
    /// Computes the raw 257-bit sum (r0..r3 plus `carry`) and the sum minus
    /// `prime` (r4..r7 plus borrow `bo`), then selects between them.
    /// NOTE(review): assumes both operands are already reduced below
    /// `prime` — confirm at call sites.
    #[inline]
    pub fn mod_add(&self, b: &Self, prime: &Self) -> U256 {
        let mut carry: u64;
        let r0: u64;
        let r1: u64;
        let r2: u64;
        let r3: u64;
        let r4: u64;
        let r5: u64;
        let r6: u64;
        let r7: u64;
        let mut bo: u64;
        // --- (r0..r3, carry) = self + b ---
        #[cfg(target_arch = "x86_64")]
        unsafe {
            asm!(
            "add {r4}, {r0}",
            "adc {r5}, {r1}",
            "adc {r6}, {r2}",
            "adc {r7}, {r3}",
            "mov {c}, 0",
            "adc {c}, 0",
            r0 = in(reg) b.data[0],
            r1 = in(reg) b.data[1],
            r2 = in(reg) b.data[2],
            r3 = in(reg) b.data[3],
            c = lateout(reg) carry,
            r4 = inout(reg) self.data[0] => r0,
            r5 = inout(reg) self.data[1] => r1,
            r6 = inout(reg) self.data[2] => r2,
            r7 = inout(reg) self.data[3] => r3,
            options(nomem, nostack),
            );
        }
        #[cfg(target_arch = "aarch64")]
        unsafe {
            asm!(
            "adds {r4}, {r4}, {r0}",
            "adcs {r5}, {r5}, {r1}",
            "adcs {r6}, {r6}, {r2}",
            "adcs {r7}, {r7}, {r3}",
            "adc {c}, xzr, xzr",
            r0 = in(reg) b.data[0],
            r1 = in(reg) b.data[1],
            r2 = in(reg) b.data[2],
            r3 = in(reg) b.data[3],
            c = lateout(reg) carry,
            r4 = inout(reg) self.data[0] => r0,
            r5 = inout(reg) self.data[1] => r1,
            r6 = inout(reg) self.data[2] => r2,
            r7 = inout(reg) self.data[3] => r3,
            options(nomem, nostack),
            );
        }
        // --- (r4..r7, bo) = (r0..r3) - prime ---
        #[cfg(target_arch = "x86_64")]
        unsafe {
            asm!(
            "sub {r4}, {r0}",
            "sbb {r5}, {r1}",
            "sbb {r6}, {r2}",
            "sbb {r7}, {r3}",
            "mov {c}, 0",
            "sbb {c}, 0",
            r0 = in(reg) prime.data[0],
            r1 = in(reg) prime.data[1],
            r2 = in(reg) prime.data[2],
            r3 = in(reg) prime.data[3],
            c = lateout(reg) bo,
            r4 = inout(reg) r0 => r4,
            r5 = inout(reg) r1 => r5,
            r6 = inout(reg) r2 => r6,
            r7 = inout(reg) r3 => r7,
            options(nomem, nostack),
            );
            bo &= 1;
        }
        #[cfg(target_arch = "aarch64")]
        unsafe {
            asm!(
            "subs {r4}, {r4}, {r0}",
            "sbcs {r5}, {r5}, {r1}",
            "sbcs {r6}, {r6}, {r2}",
            "sbcs {r7}, {r7}, {r3}",
            "sbc {c}, xzr, xzr",
            r0 = in(reg) prime.data[0],
            r1 = in(reg) prime.data[1],
            r2 = in(reg) prime.data[2],
            r3 = in(reg) prime.data[3],
            c = lateout(reg) bo,
            r4 = inout(reg) r0 => r4,
            r5 = inout(reg) r1 => r5,
            r6 = inout(reg) r2 => r6,
            r7 = inout(reg) r3 => r7,
            options(nomem, nostack),
            );
            bo &= 1;
        }
        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
        {
            (r0, carry) = u64_add(self.data[0], b.data[0]);
            (r1, carry) = u64_addc(self.data[1], b.data[1], carry);
            (r2, carry) = u64_addc(self.data[2], b.data[2], carry);
            (r3, carry) = u64_addc(self.data[3], b.data[3], carry);
            (r4, bo) = u64_sub(r0, prime.data[0]);
            (r5, bo) = u64_subc(r1, prime.data[1], bo);
            (r6, bo) = u64_subc(r2, prime.data[2], bo);
            (r7, bo) = u64_subc(r3, prime.data[3], bo);
        }
        // carry < bo means the sum did not overflow and was below prime:
        // keep the raw sum; otherwise the reduced difference is correct.
        if carry < bo {
            U256 {
                data: [r0, r1, r2, r3],
            }
        } else {
            U256 {
                data: [r4, r5, r6, r7],
            }
        }
    }
    /// In-place modular addition: `self = (self + b) mod prime`.
    #[inline]
    pub fn mod_add_to(&mut self, b: &Self, prime: &Self) {
        let reduced = self.mod_add(b, prime);
        self.data = reduced.data;
    }
    /// Modular subtraction: returns `(self - b) mod prime`.
    /// Computes the raw difference (r0..r3 plus borrow `carry`) and the
    /// difference plus `prime` (r4..r7); uses the latter when the raw
    /// subtraction borrowed.  NOTE(review): assumes both operands are
    /// already reduced below `prime` — confirm at call sites.
    #[inline]
    pub fn mod_sub(&self, b: &Self, prime: &Self) -> U256 {
        let mut carry: u64;
        let r0: u64;
        let r1: u64;
        let r2: u64;
        let r3: u64;
        let r4: u64;
        let r5: u64;
        let r6: u64;
        let r7: u64;
        // --- (r0..r3, carry) = self - b ---
        #[cfg(target_arch = "x86_64")]
        unsafe {
            asm!(
            "sub {r4}, {r0}",
            "sbb {r5}, {r1}",
            "sbb {r6}, {r2}",
            "sbb {r7}, {r3}",
            "mov {c}, 0",
            "sbb {c}, 0",
            r0 = in(reg) b.data[0],
            r1 = in(reg) b.data[1],
            r2 = in(reg) b.data[2],
            r3 = in(reg) b.data[3],
            c = lateout(reg) carry,
            r4 = inout(reg) self.data[0] => r0,
            r5 = inout(reg) self.data[1] => r1,
            r6 = inout(reg) self.data[2] => r2,
            r7 = inout(reg) self.data[3] => r3,
            options(nomem, nostack),
            );
        }
        #[cfg(target_arch = "aarch64")]
        unsafe {
            asm!(
            "subs {r4}, {r4}, {r0}",
            "sbcs {r5}, {r5}, {r1}",
            "sbcs {r6}, {r6}, {r2}",
            "sbcs {r7}, {r7}, {r3}",
            "sbc {c}, xzr, xzr",
            r0 = in(reg) b.data[0],
            r1 = in(reg) b.data[1],
            r2 = in(reg) b.data[2],
            r3 = in(reg) b.data[3],
            c = lateout(reg) carry,
            r4 = inout(reg) self.data[0] => r0,
            r5 = inout(reg) self.data[1] => r1,
            r6 = inout(reg) self.data[2] => r2,
            r7 = inout(reg) self.data[3] => r3,
            options(nomem, nostack),
            );
        }
        // --- (r4..r7) = (r0..r3) + prime (wrap-around correction) ---
        #[cfg(target_arch = "x86_64")]
        unsafe {
            asm!(
            "add {r4}, {r0}",
            "adc {r5}, {r1}",
            "adc {r6}, {r2}",
            "adc {r7}, {r3}",
            r0 = in(reg) prime.data[0],
            r1 = in(reg) prime.data[1],
            r2 = in(reg) prime.data[2],
            r3 = in(reg) prime.data[3],
            r4 = inout(reg) r0 => r4,
            r5 = inout(reg) r1 => r5,
            r6 = inout(reg) r2 => r6,
            r7 = inout(reg) r3 => r7,
            options(nomem, nostack),
            );
        }
        #[cfg(target_arch = "aarch64")]
        unsafe {
            asm!(
            "adds {r4}, {r4}, {r0}",
            "adcs {r5}, {r5}, {r1}",
            "adcs {r6}, {r6}, {r2}",
            "adcs {r7}, {r7}, {r3}",
            r0 = in(reg) prime.data[0],
            r1 = in(reg) prime.data[1],
            r2 = in(reg) prime.data[2],
            r3 = in(reg) prime.data[3],
            r4 = inout(reg) r0 => r4,
            r5 = inout(reg) r1 => r5,
            r6 = inout(reg) r2 => r6,
            r7 = inout(reg) r3 => r7,
            options(nomem, nostack),
            );
        }
        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
        {
            (r0, carry) = u64_sub(self.data[0], b.data[0]);
            (r1, carry) = u64_subc(self.data[1], b.data[1], carry);
            (r2, carry) = u64_subc(self.data[2], b.data[2], carry);
            (r3, carry) = u64_subc(self.data[3], b.data[3], carry);
            let mut cc: u64;
            (r4, cc) = u64_add(r0, prime.data[0]);
            (r5, cc) = u64_addc(r1, prime.data[1], cc);
            (r6, cc) = u64_addc(r2, prime.data[2], cc);
            (r7, _) = u64_addc(r3, prime.data[3], cc);
        }
        // A borrow means self < b: the wrapped difference needs prime added
        // back; otherwise the raw difference is already in range.
        if carry != 0 {
            U256 {
                data: [r4, r5, r6, r7],
            }
        } else {
            U256 {
                data: [r0, r1, r2, r3],
            }
        }
    }
    #[inline]
    /// In-place variant of `mod_sub`: replaces `self` with
    /// `(self - b) mod prime`, leaving `b` and `prime` untouched.
    pub fn mod_sub_from(&mut self, b: &Self, prime: &Self) {
        let difference = self.mod_sub(b, prime);
        self.data = difference.data;
    }
    #[inline]
    /// Conditionally copies `b` into `self`: when `cond` is true, `self`
    /// becomes a limb-for-limb copy of `b`; when false, `self` is left
    /// unchanged.
    ///
    /// All three paths (x86_64 `cmovne`, aarch64 `csel`, portable mask/xor)
    /// select without branching on the data, which looks intended as a
    /// constant-time select for side-channel resistance.
    /// NOTE(review): confirm callers actually rely on constant-time behavior.
    pub fn copy_conditional(&mut self, b: &Self, cond: bool) {
        #[cfg(target_arch = "x86_64")]
        unsafe {
            // `test c, c` sets ZF from cond; cmovne then moves each limb of
            // `b` into the corresponding limb of `self` only when cond != 0.
            asm!(
            "test {c}, {c}",
            "cmovne {r4}, {r0}",
            "cmovne {r5}, {r1}",
            "cmovne {r6}, {r2}",
            "cmovne {r7}, {r3}",
            r0 = in(reg) b.data[0],
            r1 = in(reg) b.data[1],
            r2 = in(reg) b.data[2],
            r3 = in(reg) b.data[3],
            c = in(reg) cond as u64,
            r4 = inout(reg) self.data[0] => self.data[0] ,
            r5 = inout(reg) self.data[1] => self.data[1] ,
            r6 = inout(reg) self.data[2] => self.data[2] ,
            r7 = inout(reg) self.data[3] => self.data[3] ,
            options(nomem, nostack),
            );
            //info!("copy_conditional via x86_64 asm");
        }
        #[cfg(target_arch = "aarch64")]
        unsafe {
            // csel keeps self's limb when cond == 0 (eq), otherwise takes b's.
            asm!(
            "cmp {c}, xzr",
            "csel {r4}, {r4}, {r0}, eq",
            "csel {r5}, {r5}, {r1}, eq",
            "csel {r6}, {r6}, {r2}, eq",
            "csel {r7}, {r7}, {r3}, eq",
            r0 = in(reg) b.data[0],
            r1 = in(reg) b.data[1],
            r2 = in(reg) b.data[2],
            r3 = in(reg) b.data[3],
            c = in(reg) cond as u64,
            r4 = inout(reg) self.data[0] => self.data[0] ,
            r5 = inout(reg) self.data[1] => self.data[1] ,
            r6 = inout(reg) self.data[2] => self.data[2] ,
            r7 = inout(reg) self.data[3] => self.data[3] ,
            options(nomem, nostack),
            );
            //info!("copy_conditional via arm64 asm");
        }
        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
        {
            // Branchless fallback: mask is all-ones when cond is true, so
            // `x ^= mask & (y ^ x)` replaces x with y; all-zeros leaves x.
            let mask = if cond { !0 } else { 0 };
            let tmp = mask & (b.data[0] ^ self.data[0]);
            self.data[0] ^= tmp;
            let tmp = mask & (b.data[1] ^ self.data[1]);
            self.data[1] ^= tmp;
            let tmp = mask & (b.data[2] ^ self.data[2]);
            self.data[2] ^= tmp;
            let tmp = mask & (b.data[3] ^ self.data[3]);
            self.data[3] ^= tmp;
            //info!("copy_conditional via rust");
        }
    }
    #[inline]
    /// Branchless conditional select returning a new value: yields a copy of
    /// `b` when `cond == 0`, and a copy of `self` when `cond != 0`.
    ///
    /// Note the condition sense is the OPPOSITE of `copy_conditional`
    /// (x86 uses `cmovz` here vs `cmovne` there): here a ZERO `cond` picks
    /// the other operand `b`.
    pub fn move_cond(&self, b: &Self, cond: u64) -> U256 {
        let r0: u64;
        let r1: u64;
        let r2: u64;
        let r3: u64;
        #[cfg(target_arch = "x86_64")]
        unsafe {
            // cmovz copies b's limbs into the outputs only when cond == 0;
            // otherwise the outputs keep self's limbs loaded via inout.
            asm!(
            "test {c}, {c}",
            "cmovz {r4}, {r0}",
            "cmovz {r5}, {r1}",
            "cmovz {r6}, {r2}",
            "cmovz {r7}, {r3}",
            r0 = in(reg) b.data[0],
            r1 = in(reg) b.data[1],
            r2 = in(reg) b.data[2],
            r3 = in(reg) b.data[3],
            c = in(reg) cond,
            r4 = inout(reg) self.data[0] => r0 ,
            r5 = inout(reg) self.data[1] => r1 ,
            r6 = inout(reg) self.data[2] => r2 ,
            r7 = inout(reg) self.data[3] => r3 ,
            options(nomem, nostack),
            );
            //info!("move_cond via x86_64 asm");
        }
        #[cfg(target_arch = "aarch64")]
        unsafe {
            // csel keeps self's limb when cond != 0 (ne), else takes b's.
            asm!(
            "cmp {c}, xzr",
            "csel {r4}, {r4}, {r0}, ne",
            "csel {r5}, {r5}, {r1}, ne",
            "csel {r6}, {r6}, {r2}, ne",
            "csel {r7}, {r7}, {r3}, ne",
            r0 = in(reg) b.data[0],
            r1 = in(reg) b.data[1],
            r2 = in(reg) b.data[2],
            r3 = in(reg) b.data[3],
            c = in(reg) cond as u64, // `as u64` is a no-op: cond is already u64
            r4 = inout(reg) self.data[0] => r0 ,
            r5 = inout(reg) self.data[1] => r1 ,
            r6 = inout(reg) self.data[2] => r2 ,
            r7 = inout(reg) self.data[3] => r3 ,
            options(nomem, nostack),
            );
            //info!("move_cond via arm64 asm");
        }
        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
        {
            // Branchless fallback: all-ones mask (cond == 0) swaps in b's
            // limbs via xor-select; zero mask keeps self's limbs.
            let mask = if cond == 0 { !0 } else { 0 };
            let tmp = mask & (b.data[0] ^ self.data[0]);
            r0 = self.data[0] ^ tmp;
            let tmp = mask & (b.data[1] ^ self.data[1]);
            r1 = self.data[1] ^ tmp;
            let tmp = mask & (b.data[2] ^ self.data[2]);
            r2 = self.data[2] ^ tmp;
            let tmp = mask & (b.data[3] ^ self.data[3]);
            r3 = self.data[3] ^ tmp;
            //info!("move_cond via rust");
        }
        U256 {
            data: [r0, r1, r2, r3],
        }
    }
    /// Computes the modular inverse of `self` modulo `prime` with the binary
    /// extended-gcd (Stein-style) algorithm: even working values are halved,
    /// odd pairs are subtracted, while `u`/`v` track the Bezout coefficients
    /// reduced modulo `prime`.
    ///
    /// Returns zero when `self` is zero. The result is a true inverse only
    /// when gcd(self, prime) == 1 (the loop then terminates with a == b == 1);
    /// `prime` must be odd, since an odd coefficient is made even by adding
    /// `prime` before halving.
    /// NOTE(review): `rshift1(carry)` presumably shifts the add_to overflow
    /// carry in as the new top bit -- confirm against rshift1's definition.
    pub fn mod_inverse(&self, prime: &U256) -> U256 {
        let mut a: U256;
        let mut b: U256;
        // Invariants maintained: a = u * self (mod prime),
        //                        b = v * self (mod prime).
        let mut u: U256 = U256::from(1);
        let mut v: U256 = Default::default();
        if self.is_zero() {
            // Zero has no inverse; return 0 as a sentinel.
            return v;
        }
        a = self.clone();
        b = prime.clone();
        loop {
            let cmp_res = a.cmp(&b);
            if cmp_res == Ordering::Equal {
                break;
            }
            let mut carry: u64 = 0;
            if a.is_even() {
                // Halve a; keep u in step (make it even first if needed).
                a.rshift1(0);
                if !u.is_even() {
                    carry = u.add_to(prime);
                }
                u.rshift1(carry);
            } else if b.is_even() {
                // Symmetric case: halve b and its coefficient v.
                b.rshift1(0);
                if !v.is_even() {
                    carry = v.add_to(prime);
                }
                v.rshift1(carry);
            } else if cmp_res == Ordering::Greater {
                // Both odd, a > b: a <- (a - b) / 2, u <- (u - v) / 2 mod prime.
                a.sub_from(&b);
                a.rshift1(0);

                u.mod_sub_from(&v, prime);
                if !u.is_even() {
                    carry = u.add_to(prime);
                }
                u.rshift1(carry);
            } else {
                // Both odd, b > a: b <- (b - a) / 2, v <- (v - u) / 2 mod prime.
                b.sub_from(&a);
                b.rshift1(0);

                v.mod_sub_from(&u, prime);
                if !v.is_even() {
                    carry = v.add_to(prime);
                }
                v.rshift1(carry);
            }
        }
        u
    }
}

#[cfg(test)]
mod tests {
    use super::U256;
    use crate::ecc_const::SM2_P;
    use crate::test_data::tests::*;
    use measure::Measure;

    /// Layout sanity: U256 is exactly four u64 limbs (32 bytes), and
    /// `ptr()`/`as_slice()` expose limb 0 (least significant) first.
    #[test]
    fn test_datatype() {
        use std::mem;
        assert_eq!(mem::size_of::<U256>(), 32);
        let bn = U256::random();
        let ptr = bn.ptr();
        // SAFETY: ptr() points at the first limb of the live `bn` value.
        let r0 = unsafe { *ptr };
        assert_eq!(r0, bn.as_slice()[0]);
        assert!(r0 != 0);
        println!("dump U256: {}", bn);
        println!("dump U256 str: {}", bn.to_string());
        let bn: U256 = Default::default();
        assert!(bn.is_zero());
        println!("datatype test pass: \u{2714} \u{1f4af}");
    }

    /// Round-trip: from_string(to_string(x)) == x for random values.
    #[test]
    fn test_string_conv() {
        for _i in 0..16 {
            let bn = U256::random();
            let bn_str = bn.to_string();
            // expect() replaces the old `if let ... else assert!(false, ...)`
            // anti-pattern (clippy::assertions_on_constants) with the same
            // failure semantics and a clearer panic message.
            let bn_r = U256::from_string(&bn_str).expect("from_string failed");
            assert!(bn == bn_r);
        }
        println!("string convert test pass: \u{2705} \u{1f4af}");
    }

    /// Addition with and without a carry out of the top limb.
    #[test]
    fn test_add() {
        let a = U256::new(1, 1, 1, 1);
        let b = U256::new(0xfffffffe, 2, 2, 0xfffffffffffffffe);
        let c = U256::new(0xffffffff, 3, 3, 0xffffffffffffffff);
        let (_res, cc) = a.add(&b);
        assert_eq!(cc, 0);
        let (res, cc) = a.add(&c);
        assert_eq!(cc, 1);
        assert!(res == U256::new(0x100000000, 4, 4, 0));
    }

    /// Fixed-width bit-window extraction, including a negative offset
    /// and a window straddling a limb boundary (offset 62).
    #[test]
    fn test_get_bits() {
        let b = U256::new(0xfffffffe, 3, 3, 0xffffffffffffffff);
        assert_eq!(b.get_bits::<5>(-1), 28);
        assert_eq!(b.get_bits::<5>(4), 0x1f);
        assert_eq!(b.get_bits::<5>(62), 0xc);
        assert_eq!(b.get_bits::<5>(254), 3);
    }

    /// num_bits() reports the index of the highest set bit + 1,
    /// whichever limb it lives in.
    #[test]
    fn test_num_bits() {
        let a = U256::new(0, 0, 0, u64::MAX >> 2);
        assert_eq!(a.num_bits(), 254);
        let a = U256::new(0, 0, u64::MAX >> 2, 0);
        assert_eq!(a.num_bits(), 190);
        let a = U256::new(0, u64::MAX >> 5, 0, 0);
        assert_eq!(a.num_bits(), 123);
    }

    /// In-place addition mirrors `add`, returning the carry-out.
    #[test]
    fn test_add_to() {
        let a = U256::new(1, 1, 1, 1);
        let b = U256::new(0xfffffffe, 2, 2, 0xfffffffffffffffe);
        let c = U256::new(0xffffffff, 3, 3, 0xffffffffffffffff);
        let mut res = a.clone();
        assert_eq!(res.add_to(&b), 0);
        res = a.clone();
        assert_eq!(res.add_to(&c), 1);
        assert!(res == U256::new(0x100000000, 4, 4, 0));
    }

    /// uadd(1) propagates the carry from limb 0 into limb 1.
    #[test]
    fn test_inc() {
        let mut a = U256::new(0xffffffffffffffff, 3, 3, 0xffffffffffffffff);
        let c = U256::new(0, 4, 3, 0xffffffffffffffff);
        assert!(!a.uadd(1));
        assert!(a == c);
    }

    /// usub(1) borrows from limb 1 into limb 0.
    #[test]
    fn test_dec() {
        let a = U256::new(0xffffffffffffffff, 3, 3, 0xffffffffffffffff);
        let mut c = U256::new(0, 4, 3, 0xffffffffffffffff);
        assert!(!c.usub(1));
        assert!(a == c);
    }

    /// Subtraction: no-borrow case, and borrows propagating through
    /// different limb positions.
    #[test]
    fn test_sub() {
        let a = U256::new(1, 1, 1, 1);
        let b = U256::new(0xfffffffe, 2, 2, 0xfffffffffffffffe);
        let c = U256::new(2, 0, 1, 2);
        let d = U256::new(2, 1, 0, 2);
        let (_res, cc) = b.sub(&a);
        assert_eq!(cc, 0);
        let (res, cc) = a.sub(&c);
        assert_eq!(cc, 1);
        assert!(res == U256::new(0xffffffffffffffff, 0, 0, 0xffffffffffffffff));
        let (res, cc) = a.sub(&d);
        assert_eq!(cc, 1);
        assert!(
            res == U256::new(
                0xffffffffffffffff,
                0xffffffffffffffff,
                0,
                0xffffffffffffffff
            )
        );
    }

    /// copy_conditional copies `b` only when cond is true.
    #[test]
    fn test_copy_cond() {
        use log::{info, warn};
        use simple_logger::SimpleLogger;
        if let Err(s) = SimpleLogger::new().init() {
            warn!("SimpleLogger init: {}", s);
        }
        let a = U256::new(1, 1, 1, 1);
        let b = U256::new(0xfffffffe, 2, 2, 0xfffffffffffffffe);
        let mut c = b.clone();
        c.copy_conditional(&a, false);
        assert!(c == b);
        assert!(c != a);
        c.copy_conditional(&a, true);
        assert!(c != b);
        assert!(c == a);
        info!("test copy_conditional ok");
    }

    /// move_cond has the opposite condition sense: it takes `b` when
    /// cond == 0 and keeps `self` when cond != 0.
    #[test]
    fn test_move_cond() {
        use log::{info, warn};
        use simple_logger::SimpleLogger;
        if let Err(s) = SimpleLogger::new().init() {
            warn!("SimpleLogger init: {}", s);
        }
        let a = U256::new(1, 1, 1, 1);
        let b = U256::new(0xfffffffe, 2, 2, 0xfffffffffffffffe);
        let c = b.clone();
        let c = c.move_cond(&a, 1);
        assert!(c == b);
        assert!(c != a);
        let c = c.move_cond(&a, 0);
        assert!(c != b);
        assert!(c == a);
        info!("test move_cond ok");
    }

    /// Known-answer tests against precomputed inverses mod the SM2 prime.
    #[test]
    fn test_mod_inverse() {
        let r1 = dx1.mod_inverse(&SM2_P);
        assert!(r1 == x1_inv);
        let r2 = dx2.mod_inverse(&SM2_P);
        assert!(r2 == x2_inv);
    }

    /// lshift1 (multiply by two) must agree with self-addition,
    /// including the bit carried out of the top limb.
    #[test]
    fn test_lshift1() {
        let mut c = U256::new(0xffffffff, 3, 3, 0xffffffffffffffff);
        let (res, cc) = c.add(&c);
        let carry = c.lshift1();
        assert_eq!(cc as u64, carry);
        assert!(res == c);
    }

    /// Single-limb subtract-with-borrow edge cases.
    #[test]
    fn test_u64_subc() {
        use super::u64_subc;
        assert_eq!(u64_subc(1, 0, 0), (1, 0));
        assert_eq!(u64_subc(1, 0, 1), (0, 0));
        assert_eq!(u64_subc(1, 1, 0), (0, 0));
        assert_eq!(u64_subc(1, 1, 1), (0xffffffffffffffff, 1));
        assert_eq!(u64_subc(1, 2, 0), (0xffffffffffffffff, 1));
    }

    #[test]
    fn test_u64_sub() {
        use super::u64_sub;
        assert_eq!(u64_sub(1, 0), (1, 0));
        assert_eq!(u64_sub(1, 1), (0, 0));
        assert_eq!(u64_sub(1, 2), (0xffffffffffffffff, 1));
    }

    /// 64x64 -> 128 multiply, with and without an added carry word.
    #[test]
    fn test_u64_mul() {
        use super::{u64_mul, u64_mulc};
        let (l, h) = u64_mul(0x5555888811112222, 0x6666777744445555);
        assert_eq!(l, 3443358450896969546);
        assert_eq!(h, 2459594648461432586);
        let (l, h) = u64_mulc(0x5555888811112222, 0x6666777744445555, 11);
        assert_eq!(l, 3443358450896969557);
        assert_eq!(h, 2459594648461432586);
        let (l, h) = u64_mulc(0x5555888811112222, 0x6666777744445555, 0xfffffffffffffff0);
        assert_eq!(l, 3443358450896969530);
        assert_eq!(h, 2459594648461432587);
    }

    /// Ordering, zero/even predicates and the integer conversion helpers.
    #[test]
    fn test_cmp() {
        let a = U256::new(1000, 1, 10, 1);
        let b = U256::new(0xffffffff, 2, 2, 0xfffffffffffffffe);
        let c = U256::new(2, 0, 1, 2);
        assert!(a.lt(&b));
        assert!(a < b);
        assert!(a < c);
        assert!(b > c);
        let d = a.clone();
        assert!(a == d);
        let d = U256::from(0);
        assert!(d.is_zero());
        assert!(!a.is_zero());
        assert!(a.is_even());
        assert!(!b.is_even());
        let d = U256::from_uint(0u32);
        assert!(d.is_zero());
        let d = U256::from_uint(0u16);
        assert!(d.is_zero());
        // from_int(-1) sign-extends into limb 0 only, so +1 carries into limb 1.
        let mut d = U256::from_int(-1i32);
        let c = U256::new(0, 1, 0, 0);
        assert!(!d.uadd(1));
        assert!(d == c);
    }

    /// Throughput benchmark for mod_inverse; run explicitly with
    /// `cargo test -- --ignored`.
    #[test]
    #[ignore]
    fn bench_mod_inverse() {
        // Digit grouping fixed: 1000_000 -> 1_000_000 (same value,
        // conventional grouping; clippy::inconsistent_digit_grouping).
        const N: u32 = 1_000_000;
        let mut measure = Measure::start("mod_inverse bench");
        for _it in 0..N {
            let _r1 = dx1.mod_inverse(&SM2_P);
        }
        measure.stop();
        let ns_ops = measure.as_ns() / (N as u64);
        println!("mod_inverse cost {} ns per Op", ns_ops);
        #[cfg(feature = "tsc")]
        {
            let ns_ops = measure.as_ticks() / (N as u64);
            println!("mod_inverse cost {} ticks per Op", ns_ops);
        }
    }
}
