#[path = "utils.rs"]
#[macro_use]
mod comparator;

use crate::mods::utils::fill;
use std::cmp::Ordering;

#[cfg(target_arch = "x86_64")]
#[inline]
/// Remainder of the multi-word little-endian integer `a` (least
/// significant word first) modulo the single word `b`.
///
/// Iterates from the most significant word down; each step divides the
/// 128-bit value `rdx:word` by `b`, leaving the running remainder in
/// `rdx`. Assumes `a` is non-empty and `b != 0` (`div` faults on a
/// zero divisor).
///
/// NOTE(review): `llvm_asm!` was a nightly-only feature removed in
/// Rust 1.59; this file requires an old nightly toolchain.
pub fn modulo_block(a: &[u64], b: u64) -> u64 {
    let rem: u64;
    let alen = a.len();
    // SAFETY: `pa` points at the most significant word of `a`; the loop
    // reads exactly `alen` words, stepping 8 bytes down per iteration.
    unsafe {
        let pa = a.as_ptr().add(alen - 1);
        llvm_asm!(
            "
                xor rdx, rdx
            2:
                mov rax, [rsi]
                div rbx
                sub rsi, 08h
                dec rcx
                jnz 2b
            "
            :   "={rdx}"(rem)
            :   "{rsi}"(pa), "{rcx}"(alen), "{rbx}"(b)
            :   "rax", "rsi", "rcx"
            :   "intel"
        );
    }
    rem
}

#[cfg(not(target_arch = "x86_64"))]
#[inline]
/// Portable remainder of the multi-word little-endian integer `a`
/// (least significant word first) modulo the single word `b`.
/// Panics on `b == 0` when `a` is non-empty, like the asm path faults.
pub fn modulo_block(a: &[u64], b: u64) -> u64 {
    let divisor = b as u128;
    let mut rem: u128 = 0;
    // Classic base-2^64 long division: fold each word into the running
    // remainder, starting from the most significant word. `rem < b`
    // after every step, so `rem << 64 | word` cannot overflow a u128.
    for &word in a.iter().rev() {
        rem = ((rem << 64) | word as u128) % divisor;
    }
    rem as u64
}

#[cfg(target_arch = "x86_64")]
#[inline]
/// Shifts the multi-word little-endian integer `a` left (toward more
/// significant bits) by `b` bits, in place. Only `b & 63` takes effect
/// because x86 shift/rotate instructions mask the `cl` count to 6 bits;
/// whole-word shifts must be handled by the caller. Assumes `a` is
/// non-empty.
pub fn shl(a: &mut [u64], b: usize) {
    let alen = a.len();
    // SAFETY: `pa` points at the most significant word; the asm walks
    // down through all `alen` words, writing each exactly once. r8/r9
    // are complementary bit masks splitting each rotated lower word
    // into the part carried up and the part that stays.
    unsafe {
        let pa = a.as_mut_ptr().add(alen - 1);
        llvm_asm!(
            "
                mov r8, -1
                shl r8, cl
                mov r9, -1
                xor r9, r8
                mov r10, [rsi]
                shl r10, cl
                dec rbx
                jz 4f
            3:
                sub rsi, 08h
                mov rax, [rsi]
                rol rax, cl
                mov rdx, rax
                and rax, r9
                xor r10, rax
                mov [rsi + 08h], r10
                and rdx, r8
                mov r10, rdx
                dec rbx
                jnz 3b
            4:
                mov [rsi], r10
            "
            :
            :   "{rsi}"(pa), "{rbx}"(alen), "{rcx}"(b)
            :   "rax", "rbx", "rdx", "rsi", "r8", "r9", "r10"
            :   "intel"
        );
    }
}

#[cfg(not(target_arch = "x86_64"))]
#[inline]
/// Portable left shift of the multi-word little-endian integer `a` by
/// `b` bits, in place. Mirrors the asm path: only `b % 64` takes
/// effect, so whole-word shifts must be handled by the caller.
pub fn shl(a: &mut [u64], b: usize) {
    let (alen, b) = (a.len(), b % 64);
    // A zero (mod 64) shift is a no-op. Returning early also avoids
    // `a[i - 1] >> (64 - b)` becoming a shift by 64, which panicked in
    // debug builds (and corrupted the result in release, where the
    // shift count wraps to 0) in the original implementation.
    if b == 0 || alen == 0 {
        return;
    }
    (1..alen)
        .rev()
        .for_each(|i| a[i] = (a[i] << b) | (a[i - 1] >> (64 - b)));
    a[0] <<= b;
}

#[cfg(target_arch = "x86_64")]
#[inline]
/// Shifts the multi-word little-endian integer `a` right (toward less
/// significant bits) by `b` bits, in place. Only `b & 63` takes effect
/// because x86 shift/rotate instructions mask the `cl` count to 6 bits.
/// Assumes `a` is non-empty.
pub fn shr(a: &mut [u64], b: usize) {
    let alen = a.len();
    // SAFETY: `pa` points at the least significant word; the asm walks
    // upward through all `alen` words, writing each exactly once. r8/r9
    // are complementary bit masks splitting each rotated upper word
    // into the part carried down and the part that stays.
    unsafe {
        let pa = a.as_mut_ptr();
        llvm_asm!(
            "
                mov r8, -1
                shr r8, cl
                mov r9, -1
                xor r9, r8
                mov r10, [rsi]
                shr r10, cl
                dec rbx
                jz 6f
            5:
                add rsi, 08h
                mov rax, [rsi]
                ror rax, cl
                mov rdx, rax
                and rax, r9
                xor r10, rax
                mov [rsi - 08h], r10
                and rdx, r8
                mov r10, rdx
                dec rbx
                jnz 5b
            6:
                mov [rsi], r10
            "
            :
            :   "{rsi}"(pa), "{rbx}"(alen), "{rcx}"(b)
            :   "rax", "rbx", "rdx", "rsi", "r8", "r9", "r10"
            :   "intel"
        );
    }
}

#[cfg(not(target_arch = "x86_64"))]
#[inline]
/// Portable right shift of the multi-word little-endian integer `a` by
/// `b` bits, in place. Mirrors the asm path: only `b % 64` takes
/// effect, so whole-word shifts must be handled by the caller.
pub fn shr(a: &mut [u64], b: usize) {
    let (alen, b) = (a.len(), b % 64);
    // A zero (mod 64) shift is a no-op; proceeding would shift by
    // `64 - 0 == 64`, which the original version did (a panic in debug
    // builds, a wrong result in release).
    if b == 0 || alen == 0 {
        return;
    }
    // Walk upward so each word combines with the *unshifted* value of
    // the word above it (as the asm path does). The original iterated
    // in reverse and read already-shifted neighbours, corrupting every
    // word except the top two.
    for i in 0..alen - 1 {
        a[i] = (a[i] >> b) | (a[i + 1] << (64 - b));
    }
    a[alen - 1] >>= b;
}

#[cfg(target_arch = "x86_64")]
#[inline]
/// Compares `a` and `b` as little-endian unsigned multi-word integers,
/// implicitly zero-padding the shorter operand with high words of 0.
/// Returns 1 when `a > b`, -1 when `a < b`, and 0 when equal.
pub fn unsigned_compare(a: &[u64], b: &[u64]) -> i8 {
    let mut res: i8;
    let (alen, blen) = (a.len(), b.len());
    // `max!` comes from the macro module pulled in at the top of the
    // file; the scan starts at the longer operand's word count.
    let offset = max!(alen, blen);
    // SAFETY: the asm walks word index rcx from `offset` down to 1, but
    // only dereferences a slice when rcx is within its length (checked
    // against r8/r9); out-of-range words are treated as zero (rax/rbx
    // keep their previous value only until overwritten — both start 0
    // and the shorter operand's register stays 0 past its length).
    unsafe {
        let (pa, pb) = (a.as_ptr(), b.as_ptr());
        llvm_asm!(
            "
                xor rax, rax
                xor rbx, rbx
                xor dl, dl
                cmp rcx, 0
                je 8f
            7:
                cmp r8, rcx
                jb 9f
                mov rax, [rsi + 8 * rcx - 8]
            9:
                cmp r9, rcx
                jb 10f
                mov rbx, [rdi + 8 * rcx - 8]
            10:
                cmp rax, rbx
                jne 11f
                dec rcx
                jnz 7b
                jmp 8f
            11:
                jb 12f
                mov dl, 1
                jmp 8f
            12:
                mov dl, -1
            8:

            "
            :   "={dl}"(res)
            :   "{rsi}"(pa), "{rdi}"(pb), "{r8}"(alen), "{r9}"(blen), "{rcx}"(offset)
            :   "rax", "rbx", "rcx"
            :   "intel"
        );
    }
    res
}

#[cfg(not(target_arch = "x86_64"))]
#[inline]
/// Compares `a` and `b` as little-endian unsigned multi-word integers,
/// implicitly zero-padding the shorter operand with high words of 0.
/// Returns 1 when `a > b`, -1 when `a < b`, and 0 when equal.
pub fn unsigned_compare(a: &[u64], b: &[u64]) -> i8 {
    let words = if a.len() > b.len() { a.len() } else { b.len() };
    // Scan from the most significant word down; the first differing
    // word decides the ordering. Missing high words count as zero.
    for i in (0..words).rev() {
        let x = a.get(i).copied().unwrap_or(0);
        let y = b.get(i).copied().unwrap_or(0);
        if x != y {
            return if x > y { 1 } else { -1 };
        }
    }
    0
}

#[cfg(all(target_arch = "x86_64", target_feature = "lzcnt"))]
#[inline]
/// Counts trailing zero bits of the little-endian multi-word integer
/// `a`, scanning from the least significant word with `tzcnt`; a word
/// containing any set bit (tzcnt < 64) stops the scan. Returns
/// `64 * a.len()` when every word is zero. Assumes `a` is non-empty.
///
/// NOTE(review): the gate checks `lzcnt` but `tzcnt` belongs to BMI1;
/// on CPUs without BMI1, `tzcnt` decodes as `bsf`, whose destination is
/// undefined for a zero input — confirm the feature gate is intended.
pub fn trailing_zero(a: &[u64]) -> u32 {
    let mut cnt: u32;
    let alen = a.len();
    // SAFETY: the asm reads at most `alen` words starting at `pa`,
    // stepping 8 bytes up per iteration.
    unsafe {
        let pa = a.as_ptr();
        llvm_asm!(
            "
                xor rax, rax
            13:
                tzcnt rbx, [rsi]
                add rax, rbx
                cmp rbx, 40h
                jb 14f
                add rsi, 08h
                dec rcx
                jnz 13b
            14:
            "
            :   "={rax}"(cnt)
            :   "{rsi}"(pa), "{rcx}"(alen)
            :   "rbx", "rcx", "rsi"
            :   "intel"
        );
    }
    cnt
}

#[cfg(not(all(target_arch = "x86_64", target_feature = "lzcnt")))]
#[inline]
/// Portable count of trailing zero bits of the little-endian
/// multi-word integer `a`. Returns `64 * a.len()` when all words are 0.
pub fn trailing_zero(a: &[u64]) -> u32 {
    let mut total = 0u32;
    // Scan from the least significant word; the first non-zero word
    // contributes its own trailing-zero count and ends the scan.
    for &word in a {
        if word != 0 {
            return total + word.trailing_zeros();
        }
        total += 64;
    }
    total
}

#[cfg(all(target_arch = "x86_64", target_feature = "lzcnt"))]
#[inline]
/// Counts leading zero bits of the little-endian multi-word integer
/// `a`, scanning from the most significant word with `lzcnt`; a word
/// containing any set bit (lzcnt < 64) stops the scan. Returns
/// `64 * a.len()` when every word is zero. Assumes `a` is non-empty.
pub fn leading_zero(a: &[u64]) -> u32 {
    let mut cnt: u32;
    let alen = a.len();
    // SAFETY: `pa` points at the most significant word; the asm reads
    // at most `alen` words, stepping 8 bytes down per iteration.
    unsafe {
        let pa = a.as_ptr().add(alen - 1);
        llvm_asm!(
            "
                xor rax, rax
            15:
                lzcnt rbx, [rsi]
                add rax, rbx
                cmp rbx, 40h
                jb 16f
                sub rsi, 08h
                dec rcx
                jnz 15b
            16:
            "
            :   "={rax}"(cnt)
            :   "{rsi}"(pa), "{rcx}"(alen)
            :   "rbx", "rcx", "rsi"
            :   "intel"
        );
    }
    cnt
}

#[cfg(not(all(target_arch = "x86_64", target_feature = "lzcnt")))]
#[inline]
/// Portable count of leading zero bits of the little-endian multi-word
/// integer `a`. Returns `64 * a.len()` when all words are 0.
pub fn leading_zero(a: &[u64]) -> u32 {
    let mut total = 0u32;
    // Scan from the most significant word; the first non-zero word
    // contributes its own leading-zero count and ends the scan.
    for &word in a.iter().rev() {
        if word != 0 {
            return total + word.leading_zeros();
        }
        total += 64;
    }
    total
}

/*
 * add two source slices into the destination;
 * the first source's length must not be less than the second's
 */
#[cfg(target_arch = "x86_64")]
#[inline]
/// `dst = a + b` over little-endian multi-word integers. Requires
/// `a.len() >= b.len()` and `dst.len() == a.len() + 1` (asserted); the
/// top word of `dst` receives the final carry-out.
pub fn addcarry(dst: &mut [u64], a: &[u64], b: &[u64]) {
    let (ldst, lx, ly) = (dst.len(), a.len(), b.len());
    assert!(lx >= ly && ldst == lx + 1);
    // SAFETY: the asm writes exactly `lx + 1` words of `dst` and reads
    // within the asserted bounds of `a` and `b`. Loop 17 adds paired
    // words with the saved carry in cl (`add cl, 0ffh` re-materializes
    // it into CF for `adc`), loop 18 propagates the carry through the
    // remaining words of `a`, and the final store writes the carry-out.
    unsafe {
        let (pdst, px, py) = (dst.as_mut_ptr(), a.as_ptr(), b.as_ptr());
        llvm_asm!(
            "
                xor rax, rax
                xor rcx, rcx
                cmp rax, r10
                je 19f
                cmp rax, r11
                je 18f
            17:
                mov rbx, [r8 + 8 * rax]
                add cl, 0ffh
                adc rbx, [r9 + 8 * rax]
                setb cl
                mov [rsi + 8 * rax], rbx
                inc rax
                cmp rax, r11
                jne 17b
                cmp rax, r10
                je 19f
            18:
                mov rbx, [r8 + 8 * rax]
                add rbx, rcx
                setb cl
                mov [rsi + 8 * rax], rbx
                inc rax
                cmp rax, r10
                jne 18b
            19:
                mov [rsi + 8 * rax], rcx
            "
            :
            :   "{rsi}"(pdst), "{r8}"(px), "{r9}"(py), "{r10}"(lx), "{r11}"(ly)
            :   "rax", "rbx", "rcx"
            :   "intel"
        )
    }
}

#[cfg(not(target_arch = "x86_64"))]
#[inline]
/// Portable `dst = a + b` over little-endian multi-word integers.
/// Requires `a.len() >= b.len()` and `dst.len() == a.len() + 1`
/// (asserted); the top word of `dst` receives the final carry-out.
pub fn addcarry(dst: &mut [u64], a: &[u64], b: &[u64]) {
    let (ldst, lx, ly) = (dst.len(), a.len(), b.len());
    assert!(lx >= ly && ldst == lx + 1);
    let mut carry = 0u64;
    for i in 0..ly {
        // At most one of the two additions can wrap (if the first does,
        // its result is 0 and the second cannot), so OR-ing the flags
        // gives the word's carry-out. The original used `&&`, which is
        // always false here and silently dropped every carry.
        let (x, cf1) = a[i].overflowing_add(carry);
        let (x, cf2) = x.overflowing_add(b[i]);
        dst[i] = x;
        carry = (cf1 || cf2) as u64;
    }
    // Propagate the carry through the remaining high words of `a`.
    for i in ly..lx {
        let (x, cf) = a[i].overflowing_add(carry);
        dst[i] = x;
        carry = cf as u64;
    }
    dst[lx] = carry;
}

#[cfg(target_arch = "x86_64")]
#[inline]
/// `dst = a - b` over little-endian multi-word integers (wrapping if
/// `b > a`). Requires `a.len() >= b.len()` and `dst.len() == a.len()`
/// (asserted); any final borrow is discarded.
pub fn subborrow(dst: &mut [u64], a: &[u64], b: &[u64]) {
    let (ldst, lx, ly) = (dst.len(), a.len(), b.len());
    assert!(lx >= ly && ldst == lx);
    // SAFETY: the asm writes exactly `lx` words of `dst` and reads
    // within the asserted bounds of `a` and `b`. Loop 20 subtracts
    // paired words with the saved borrow in cl (`add cl, 0ffh`
    // re-materializes it into CF for `sbb`), loop 21 propagates the
    // borrow through the remaining words of `a`.
    unsafe {
        let (pdst, px, py) = (dst.as_mut_ptr(), a.as_ptr(), b.as_ptr());
        llvm_asm!(
            "
                xor rax, rax
                xor rcx, rcx
                cmp rax, r10
                je 22f
                cmp rax, r11
                je 21f
            20:
                mov rbx, [r8 + 8 * rax]
                add cl, 0ffh
                sbb rbx, [r9 + 8 * rax]
                setb cl
                mov [rsi + 8 * rax], rbx
                inc rax
                cmp rax, r11
                jne 20b
                cmp rax, r10
                je 22f
            21:
                mov rbx, [r8 + 8 * rax]
                sub rbx, rcx
                setb cl
                mov [rsi + 8 * rax], rbx
                inc rax
                cmp rax, r10
                jne 21b
            22:
                
            "
            :
            :   "{rsi}"(pdst), "{r8}"(px), "{r9}"(py), "{r10}"(lx), "{r11}"(ly)
            :   "rax", "rbx", "rcx"
            :   "intel"
        )
    }
}

#[cfg(not(target_arch = "x86_64"))]
#[inline]
/// Portable `dst = a - b` over little-endian multi-word integers
/// (wrapping if `b > a`). Requires `a.len() >= b.len()` and
/// `dst.len() == a.len()` (asserted); any final borrow is discarded.
pub fn subborrow(dst: &mut [u64], a: &[u64], b: &[u64]) {
    let (ldst, lx, ly) = (dst.len(), a.len(), b.len());
    assert!(lx >= ly && ldst == lx);
    let mut borrow = 0u64;
    for i in 0..ly {
        // At most one of the two subtractions can wrap (if the first
        // does, its result is u64::MAX and the second cannot), so
        // OR-ing the flags gives the word's borrow-out. The original
        // used `&&`, which is always false and lost every borrow.
        let (x, cf1) = a[i].overflowing_sub(borrow);
        let (x, cf2) = x.overflowing_sub(b[i]);
        dst[i] = x;
        borrow = (cf1 || cf2) as u64;
    }
    // Propagate the borrow through the remaining high words of `a`.
    for i in ly..lx {
        let (x, cf) = a[i].overflowing_sub(borrow);
        dst[i] = x;
        borrow = cf as u64;
    }
}

#[cfg(target_arch = "x86_64")]
/// Long division of the little-endian multi-word integer `a` by the
/// single word `b`: quotient words are stored in `quo` (which must be
/// at least `a.len()` words), and the remainder is returned. Same loop
/// as `modulo_block`, but the per-word quotient in rax is stored.
/// Assumes `a` is non-empty and `b != 0` (`div` faults on zero).
pub fn divide_block(quo: &mut [u64], a: &[u64], b: u64) -> u64 {
    let rem: u64;
    let alen = a.len();
    // SAFETY: both pointers start at word `alen - 1` and the loop walks
    // down exactly `alen` words, writing each quotient word once.
    unsafe {
        let (pquo, pa) = (quo.as_mut_ptr().add(alen - 1), a.as_ptr().add(alen - 1));
        llvm_asm!(
            "
                xor rdx, rdx
            23:
                mov rax, [rsi]
                div rbx
                mov [rdi], rax
                sub rsi, 08h
                sub rdi, 08h
                dec rcx
                jnz 23b
            "
            :   "={rdx}"(rem)
            :   "{rsi}"(pa), "{rdi}"(pquo), "{rcx}"(alen), "{rbx}"(b)
            :   "rax", "rcx", "rsi", "rdi"
            :   "intel"
        );
    }
    rem
}

#[cfg(not(target_arch = "x86_64"))]
/// Portable long division of the little-endian multi-word integer `a`
/// by the single word `b`: quotient words are stored in `quo` (which
/// must be at least `a.len()` words), and the remainder is returned.
/// Panics if `b == 0` and `a` is non-empty.
pub fn divide_block(quo: &mut [u64], a: &[u64], b: u64) -> u64 {
    let divisor = b as u128;
    let mut val = 0u128;
    // Schoolbook base-2^64 division from the most significant word
    // down: each step divides `remainder:word` and carries the new
    // remainder up. The original did not compile (it called `.len()`
    // on the scalar `b`) and its loop `0..a.len() - 1` skipped the
    // most significant word entirely.
    for i in (0..a.len()).rev() {
        val += a[i] as u128;
        quo[i] = (val / divisor) as u64;
        val = (val % divisor) << 64;
    }
    (val >> 64) as u64
}

#[cfg(all(
    target_arch = "x86_64",
    target_feature = "avx2",
    target_feature = "bmi2"
))]
/// Vectorized schoolbook multiplication: `dst = a * b`, accumulating
/// into `dst` — the routine reads `[rsi]` before adding, so callers
/// must pass `dst` zero-filled. Lengths are doubled up front because
/// this path works in 32-bit limbs: each `u64` is treated as two
/// 32-bit digits multiplied with `vpmuludq`.
///
/// NOTE(review): outline reconstructed from the code — confirm:
/// * label 24: main loop consuming 4 limbs of `a` per iteration, with
///   a horizontal carry-resolving reduction of the ymm accumulator;
/// * labels 26/27/28: tails for 3, 2 or 1 remaining limbs;
/// * label 29: inner subroutine multiplying the loaded limbs of `a`
///   by every limb of `b`, splitting partial products into low/high
///   32-bit halves via the masks in ymm7/ymm6 and carrying in ymm2;
/// * labels 33-35: lane-rotation helper (`vpermq`) selecting the
///   shuffle pattern by the limb count in `dl`.
pub fn multiply(dst: &mut [u64], a: &[u64], b: &[u64]) {
    // Limb counts (u32 digits), asserted to satisfy ldst == lx + ly.
    let (ldst, lx, ly) = (dst.len() * 2, a.len() * 2, b.len() * 2);
    assert!(ldst == lx + ly);
    // SAFETY: the asm indexes `dst`, `a` and `b` in 4-byte limbs within
    // the asserted bounds. NOTE(review): the routine pushes/pops rcx
    // and uses `call`/`ret` inside inline asm, which assumes a usable
    // stack — fragile, worth auditing.
    unsafe {
        let (pdst, px, py) = (dst.as_mut_ptr(), a.as_ptr(), b.as_ptr());
        llvm_asm!(
            "
                lea r15, [4 * r11 - 4]
                mov eax, -1
                movd xmm7, eax
                vpbroadcastq ymm7, xmm7	
                vpsllq ymm6, ymm7, 020h	
                vpxor ymm5, ymm5, ymm5	
                xor rcx, rcx
                mov rdx, 0ch
                sub r10, 04h
                cmp r10, 0
                js 25f
            24:
                movdqu xmm0, [r8 + 4 * rcx]	
                movdqu xmm1, [rsi]				
                call 29f
                push rcx                
                vpaddq ymm4, ymm1, ymm2			
                pextrd eax, xmm4, 0h
                mov DWORD PTR [rsi], eax		
                add rsi, 04h
                pextrd eax, xmm4, 01h
                pextrd ebx, xmm4, 02h
                add eax, ebx
                setb cl
                mov DWORD PTR [rsi], eax		
                add rsi, 04h
                pextrd eax, xmm4, 03h
                vextracti128 xmm4, ymm4, 01h
                pextrd ebx, xmm4, 0h
                add cl, 0ffh
                adc eax, ebx
                setb cl
                mov DWORD PTR [rsi], eax		
                add rsi, 04h
                pextrd eax, xmm4, 01h
                pextrd ebx, xmm4, 02h
                add cl, 0ffh
                adc eax, ebx
                mov DWORD PTR [rsi], eax		
                sub rsi, r15						
                pop rcx                        
                add rcx, 04h	
                cmp rcx, r10	
                jbe 24b					
            25:
                sub r10, rcx
                add r10, 04h			
                jz 36f
                cmp r10, 03h
                je 26f					
                cmp r10, 02h
                je 27f					
                jmp 28f	
            26:
                movq xmm0, QWORD PTR [r8 + 4 * rcx]
                mov eax, DWORD PTR [r8 + 4 * rcx + 8]
                pinsrd xmm0, eax, 02h
                movq xmm1, QWORD PTR [rsi]
                mov eax, DWORD PTR [rsi + 8]
                pinsrd xmm1, eax, 02h
                mov edx, 08h
                call 29f
                vpaddq ymm4, ymm1, ymm2
                pextrd eax, xmm4, 0h
                mov DWORD PTR [rsi], eax
                add rsi, 04h
                pextrd eax, xmm4, 01h
                pextrd ebx, xmm4, 02h
                add eax, ebx
                setb cl
                mov DWORD PTR [rsi], eax
                add rsi, 04h
                pextrd eax, xmm4, 03h
                vextracti128 xmm4, ymm4, 01h
                pextrd ebx, xmm4, 0h
                add cl, 0ffh
                adc eax, ebx
                mov DWORD PTR [rsi], eax
                jmp 36f
            27:
                movq xmm0, QWORD PTR [r8 + 4 * rcx]
                movq xmm1, QWORD PTR [rsi]	
                mov edx, 04h
                call 29f
                vpaddq ymm4, ymm1, ymm2
                pextrd eax, xmm4, 0h
                mov DWORD PTR [rsi], eax
                add rsi, 04h
                pextrd eax, xmm4, 01h
                pextrd ebx, xmm4, 02h
                add eax, ebx
                mov DWORD PTR [rsi], eax
                jmp 36f
            28:
                movd xmm0, [r8 + 4 * rcx]
                movd xmm1, [rsi]
                xor edx, edx
                call 29f
                movd eax, xmm2
                mov DWORD PTR [rsi], eax
                jmp 36f

            29:
                vpmovzxdq ymm0, xmm0		
                vpmovzxdq ymm1, xmm1
                vpxor  ymm2, ymm2, ymm2	
                xor rbx, rbx				
                cmp rbx, r11				
                jne 30f 		
                ret
            30:
                cmp dl, 0ch
                jne 32f
            31:
                movd xmm3, [r9 + 4 * rbx]		
                vpbroadcastq ymm3, xmm3			
                vpmuludq ymm4, ymm0, ymm3		
                vpaddq ymm4, ymm4, ymm1		
                vpaddq ymm4, ymm4, ymm2		
                vpand ymm2, ymm4, ymm6			
                vpsrlq ymm2, ymm2, 020h			
                vpand ymm1, ymm4, ymm7			
                movd eax, xmm1					
                mov DWORD PTR [rsi], eax			
                inc rbx								
                add rsi, 04h			
                mov eax, DWORD PTR [rsi + 0ch]		
                pinsrd xmm1, eax, 0h	
                vpermq ymm1, ymm1, 039h				
                cmp rbx, r11						
                jne 31b 							
                ret
            32:
                movd xmm3, [r9 + 4 * rbx]		
                vpbroadcastq ymm3, xmm3				
                vpmuludq ymm4, ymm0, ymm3			
                vpaddq ymm4, ymm4, ymm1				
                vpaddq ymm4, ymm4, ymm2				
                vpand ymm2, ymm4, ymm6				
                vpsrlq ymm2, ymm2, 020h				
                vpand ymm1, ymm4, ymm7				
                movd eax, xmm1						
                mov DWORD PTR [rsi], eax			
                inc rbx								
                add rsi, 04h						
                mov eax, DWORD PTR [rsi + rdx]						
                pinsrd xmm1, eax, 0h				
                call 33f
                cmp rbx, r11							
                jne 32b												
                ret

            33:
                cmp dl, 8h
                jne 34f
                vpermq ymm1, ymm1, 0c9h
                ret
            34:
                cmp dl, 04h
                jne 35f
                vpermq ymm1, ymm1, 0e1h
            35:
                ret

            36:       
            "
            :
            :   "{rsi}"(pdst), "{r8}"(px), "{r9}"(py), "{r10}"(lx), "{r11}"(ly)
            :   "rax", "rbx", "rcx", "rdx", "rsi", "r10", "r15", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7"
            :   "intel"
        )
    }
}

#[cfg(all(
    target_arch = "x86_64",
    not(all(target_feature = "avx", target_feature = "bmi2"))
))]
/// Scalar schoolbook multiplication: `dst = a * b` over little-endian
/// multi-word integers, via 64x64 -> 128-bit `mul` with the running
/// carry in r13. Accumulates into `dst` (`add rax, [rsi]`), so callers
/// must pass `dst` zero-filled. `dst.len() == a.len() + b.len()` is
/// asserted.
///
/// NOTE(review): this gate checks `not(all(avx, bmi2))` while the
/// vector path above requires `avx2` — a target with AVX + BMI2 but
/// without AVX2 would get *neither* x86-64 `multiply`. The `avx` vs
/// `avx2` mismatch looks unintentional; confirm.
pub fn multiply(dst: &mut [u64], a: &[u64], b: &[u64]) {
    let (ldst, lx, ly) = (dst.len(), a.len(), b.len());
    assert!(ldst == lx + ly);
    // SAFETY: outer loop 37 walks the `lx` words of `a`; inner loop 38
    // multiplies by each word of `b`, writing `dst[j..j+ly]` and the
    // carry word `dst[j+ly]`; r15 rewinds rsi for the next outer round.
    unsafe {
        let (pdst, px, py) = (dst.as_mut_ptr(), a.as_ptr(), b.as_ptr());
        llvm_asm!(
            "
                lea r15, [8 * r11 - 8]
                xor rcx, rcx
            37:
                xor r13, r13
                xor rbx, rbx
                mov r12, [r8 + 8 * rcx]
                cmp rbx, r11
                je 39f
            38:
                mov rax, r12
                mov rdx, [r9 + 8 * rbx]
                mul rdx
                add rax, r13
                adc rdx, 0h
                add rax, [rsi]
                adc rdx, 0h
                mov [rsi], rax
                mov r13, rdx
                add rsi, 08h
                inc rbx
                cmp rbx, r11
                jne 38b
            39:
                mov [rsi], r13
                inc rcx
                sub rsi, r15
                cmp rcx, r10
                jne 37b
            "
            :
            :   "{rsi}"(pdst), "{r8}"(px), "{r9}"(py), "{r10}"(lx), "{r11}"(ly)
            :   "rax", "rbx", "rcx", "rdx", "rsi", "r12", "r13", "r15"
            :   "intel"
        )
    }
}

#[cfg(not(target_arch = "x86_64"))]
/// Portable schoolbook multiplication: `dst = a * b` over little-endian
/// multi-word integers. `dst` must have length `a.len() + b.len()`
/// (asserted) and, like the asm paths, arrive zero-filled, since
/// partial products are accumulated into it.
pub fn multiply(dst: &mut [u64], a: &[u64], b: &[u64]) {
    let (ldst, lx, ly) = (dst.len(), a.len(), b.len());
    assert!(ldst == lx + ly);
    for j in 0..lx {
        let mut carry = 0u64;
        for i in 0..ly {
            // 64x64 -> 128-bit partial product plus carry plus the
            // previously accumulated word; the sum is at most
            // 2^128 - 1, so it cannot overflow a u128.
            let prod = a[j] as u128 * b[i] as u128 + carry as u128 + dst[j + i] as u128;
            dst[j + i] = prod as u64;
            carry = (prod >> 64) as u64;
        }
        // Store the final carry above this row's last partial product.
        // (The original wrote `dst[ly + i]` with `i` out of scope and
        // mixed u64/u128 operands; it did not compile.)
        dst[j + ly] = carry;
    }
}

/* Montgomery reduction.
 * The length of n must be (lm * 2 + 1), with n[2 * lm] == 0, where lm is
 * the modulus length; the length of dst must be (lm + 1).
 * The reduced result is stored in dst; n is left holding the intermediate
 * value before the final shift and modular correction.
 */
#[cfg(target_arch = "x86_64")]
/// Montgomery reduction (x86-64 asm path). `n` holds the
/// (2*lm + 1)-word value to reduce, `dst` (lm + 1 words) receives the
/// reduced result, and `inv` is the Montgomery inverse factor used to
/// zero each low word. NOTE(review): the exact convention for `inv`
/// (presumably `-modulus^-1 mod 2^64`) is not visible here — confirm.
///
/// Outline: loop 40 runs lm rounds; each computes
/// m = n[i] * inv (mod 2^64) and adds m * modulus onto `n` at word i
/// (loop 41), with loop 42 propagating any leftover carry upward.
/// Loop 44 copies the top lm + 1 words into `dst`; loops 45-50 then
/// repeatedly subtract the modulus until dst < modulus (loop 47 zeroes
/// `dst` on exact equality).
pub fn montgomery_reduce(dst: &mut [u64], n: &mut [u64], modulus: &[u64], inv: u64) {
    let (ldst, ln, lm) = (dst.len(), n.len(), modulus.len());
    assert!(ldst == lm + 1 && ln == 2 * lm + 1);
    // SAFETY: the asm writes `n` and `dst` only within the asserted
    // lengths; the carry propagation at 42 relies on n[2*lm] == 0 so it
    // cannot run past the end of `n` — a caller invariant (see the
    // comment above this function).
    unsafe {
        let (pdst, pn, pm) = (dst.as_mut_ptr(), n.as_mut_ptr(), modulus.as_ptr());
        llvm_asm!(
            "
                lea r15, [8 * r10 - 8]
                xor rcx, rcx
            40:
                mov rax, [r8]
                mul r11
                mov r13, rax
                xor rbx, rbx
                xor r12, r12
                cmp r12, r10
                je 43f
            41:
                mov rax, [r9 + 8 * r12]
                mul r13
                add rax, rbx
                adc rdx, 0h
                add rax, [r8]
                adc rdx, 0h
                mov [r8], rax
                mov rbx, rdx
                inc r12
                add r8, 08h
                cmp r12, r10
                jne 41b
                xor rax, rax
                add [r8], rbx
                jnc 43f
            42:
                add rax, 08h
                add QWORD PTR [r8 + rax], 01h
                jc 42b
            43:
                inc rcx
                sub r8, r15
                cmp rcx, r10
                jne 40b
                xor rcx, rcx
            44:
                mov rbx, [r8 + 8 * rcx]
                mov [rsi + 8 * rcx], rbx
                inc rcx
                cmp rcx, r10
                jnz 44b
                mov rbx, [r8 + 8 * rcx]
            45:
                mov rcx, r10
                mov rax, rbx
                cmp rax, 0h
                ja 48f
            46:
                test rcx, rcx
                je 47f
                dec rcx
                mov rax, [rsi + 8 * rcx]
                cmp rax, [r9 + 8 * rcx]
                je 46b
                ja 48f
                jmp 50f
            47:
                mov QWORD PTR [rsi + 8 * rcx], 0
                inc rcx
                cmp rcx, r10
                jne 47b
                jmp 50f
            48:
                xor rcx, rcx
                xor rdx, rdx
            49:
                mov rax, [rsi + 8 * rdx]
                add cl, 0ffh
                sbb rax, [r9 + 8 * rdx]
                setb cl
                mov [rsi + 8 * rdx], rax
                inc rdx
                cmp rdx, r10
                jne 49b
                sub rbx, rcx
                jmp 45b
            50:

            "
            :
            :   "{rsi}"(pdst), "{r8}"(pn), "{r9}"(pm), "{r10}"(lm), "{r11}"(inv)
            :   "rax", "rbx", "rcx", "rdx", "r8", "r12", "r13", "r15"
            :   "intel"
        )
    }
}

#[cfg(not(target_arch = "x86_64"))]
/// Portable Montgomery reduction. `n` holds the (2*lm + 1)-word value
/// to reduce (caller guarantees n[2*lm] == 0 so carries cannot run off
/// the end), `dst` (lm + 1 words) receives the reduced result, and
/// `inv` is the Montgomery inverse factor used to zero each low word.
/// `n` is left holding the intermediate (unshifted, unreduced) value.
pub fn montgomery_reduce(dst: &mut [u64], n: &mut [u64], modulus: &[u64], inv: u64) {
    let (ldst, ln, lm) = (dst.len(), n.len(), modulus.len());
    assert!(ldst == lm + 1 && ln == 2 * lm + 1);
    for i in 0..lm {
        // m = n[i] * inv (mod 2^64); adding m * modulus zeroes n[i].
        let mut carry = 0u128;
        let invni = n[i].wrapping_mul(inv);
        for j in 0..lm {
            let nij = modulus[j] as u128 * invni as u128 + n[i + j] as u128 + carry;
            n[i + j] = nij as u64;
            carry = nij >> 64;
        }
        // Propagate the leftover carry into the higher words of n.
        let mut k = lm;
        while carry > 0 {
            let nik = carry + n[i + k] as u128;
            n[i + k] = nik as u64;
            carry = nik >> 64;
            k += 1;
        }
    }
    // The reduced (pre-correction) value sits in the top lm + 1 words.
    dst.clone_from_slice(&n[lm..=2 * lm]);
    // Conditional subtraction until dst < modulus; exact equality
    // reduces to zero.
    loop {
        match unsigned_compare(dst, modulus) {
            c if c < 0 => break,
            0 => {
                fill(dst, 0);
                break;
            }
            _ => {
                let mut borrow = 0u64;
                for i in 0..lm {
                    // At most one of the two subtractions can wrap, so
                    // OR the flags; the original `&&` was always false
                    // and dropped every borrow.
                    let (x, cf1) = dst[i].overflowing_sub(borrow);
                    let (x, cf2) = x.overflowing_sub(modulus[i]);
                    dst[i] = x;
                    borrow = (cf1 || cf2) as u64;
                }
                dst[lm] = dst[lm].wrapping_sub(borrow);
            }
        }
    }
}
