use core::sync::atomic::Ordering;

use semx_bitops::make_64bit_mask;

use crate::{
    processor::isb,
    space::{
        addr::Uaddr,
        mm::{pgtabledef::PAGE_PTRS, tlb::MmuGather},
    },
};

#[inline(always)]
/// Invalidate ALL stage-1 TLB entries on the *local* core only
/// (`vmalle1`, non-shareable barriers — no broadcast to other cores).
pub(super) fn arch_local_flush_tlb_all() {
    unsafe {
        // Make any prior page-table stores visible to the walker before
        // the invalidation (non-shareable store barrier).
        core::arch::aarch64::__dsb(core::arch::aarch64::NSHST);
        // NOTE(review): trailing `nop`s look like erratum-workaround
        // padding around TLBI — confirm against the target core's errata.
        core::arch::asm!("tlbi   vmalle1", "nop", "nop");
        // Wait for the invalidation to complete on this PE.
        core::arch::aarch64::__dsb(core::arch::aarch64::NSH);
        // Resynchronize the instruction stream with the new translations.
        isb();
    }
}

// Cap on the number of per-page TLBI operations issued for one range flush;
// past this point `flush_tlb_range` falls back to a whole-ASID flush instead.
// Presumably PAGE_PTRS = entries per page-table page — confirm in pgtabledef.
const MAX_TLBI_OPS: usize = PAGE_PTRS;

/// Pack a virtual address and ASID into the operand format used by
/// address-based TLBI instructions: `addr >> 12` masked to 44 bits in
/// the low part, ASID placed in bits [63:48].
const fn tlbi_vaddr(addr: usize, asid: usize) -> usize {
    let page = (addr >> 12) & (make_64bit_mask(0, 44) as usize);
    page | (asid << 48)
}

#[inline(always)]
/// Invalidate every stage-1 TLB entry tagged with `asid` across the
/// inner-shareable domain (`aside1is` broadcasts to all cores).
fn flush_tlb_mm(asid: usize) {
    // Fold the ASID into TLBI operand format (ASID lives in bits [63:48]).
    let op = tlbi_vaddr(0, asid);
    unsafe {
        // Publish prior page-table stores to the walker first.
        core::arch::aarch64::__dsb(core::arch::aarch64::ISHST);
        core::arch::asm!("tlbi aside1is, {0}", "nop", "nop", in(reg) op);
        // Wait until the broadcast invalidation has completed.
        core::arch::aarch64::__dsb(core::arch::aarch64::ISH);
    }
}

/// Invalidate stage-1 TLB entries for the virtual range `[start, end)`
/// under `asid`, stepping by `stride` bytes per TLBI.
///
/// * `last_level` — when true, only leaf (last-level) entries need
///   invalidation (`vale1is`); otherwise intermediate walk-cache entries
///   are dropped too (`vae1is`).
///
/// Ranges large enough to need more than `MAX_TLBI_OPS` per-page
/// operations fall back to a single whole-ASID flush, which is cheaper
/// than a long burst of broadcast TLBIs.
fn flush_tlb_range(
    asid: usize,
    mut start: usize,
    mut end: usize,
    mut stride: usize,
    last_level: bool,
) {
    if (end - start) >= MAX_TLBI_OPS * stride {
        flush_tlb_mm(asid);
        return;
    }

    // tlbi_vaddr() encodes `va >> 12`, so scale the byte stride to match.
    stride >>= 12;

    start = tlbi_vaddr(start, asid);
    end = tlbi_vaddr(end, asid);
    unsafe {
        // Publish prior page-table stores before invalidating.
        core::arch::aarch64::__dsb(core::arch::aarch64::ISHST);
        // Use a `while` (pre-test) loop: the previous do-while form issued
        // one spurious TLBI when the range was empty (start == end), which
        // slips past the size check above since 0 is below the threshold.
        let mut addr = start;
        while addr < end {
            if last_level {
                core::arch::asm!("tlbi vale1is, {0}", "nop", "nop", in(reg) addr);
            } else {
                core::arch::asm!("tlbi vae1is, {0}", "nop", "nop", in(reg) addr);
            }
            addr += stride;
        }
        // Wait for all broadcast invalidations to complete.
        core::arch::aarch64::__dsb(core::arch::aarch64::ISH);
    }
}

/// Invalidate the single TLB entry (including walk-cache state, hence
/// `vae1is` rather than `vale1is`) covering `uaddr` for `asid`, broadcast
/// across the inner-shareable domain. Used when a page-table page is freed.
fn flush_tlb_pgtable(asid: usize, uaddr: Uaddr) {
    let op = tlbi_vaddr(uaddr.to_value(), asid);
    unsafe {
        core::arch::aarch64::__dsb(core::arch::aarch64::ISHST);
        core::arch::asm!("tlbi vae1is, {0}", "nop", "nop", in(reg) op);
        core::arch::aarch64::__dsb(core::arch::aarch64::ISH);
    }
}

impl MmuGather<'_> {
    /// Flush the TLB for everything this gather has unmapped.
    pub(super) fn arch_tlb_flush(&self) {
        // ASID for this address space; Relaxed is what the surrounding
        // code uses for context reads.
        let asid = self.mm.context.0.load(Ordering::Relaxed) as usize;
        let stride = self.tlb_get_unmap_size();
        // Leaf-only invalidation is only valid when no table pages were
        // freed during the gather.
        let last_level = !self.freed_tables;

        if self.fullmm {
            // Whole-mm teardown: a flush is needed only if table pages
            // were freed; otherwise the ASID is simply retired.
            if !last_level {
                flush_tlb_mm(asid);
            }
        } else {
            flush_tlb_range(
                asid,
                self.start.to_value(),
                self.end.to_value(),
                stride,
                last_level,
            );
        }
    }

    /// Invalidate TLB state for a freed PTE-level table page.
    pub(super) fn arch_pte_free_tlb(&mut self, addr: Uaddr) {
        flush_tlb_pgtable(self.mm.context.0.load(Ordering::Relaxed) as usize, addr);
    }

    /// PMD-level table free: same invalidation as the PTE level.
    pub(super) fn arch_pmd_free_tlb(&mut self, addr: Uaddr) {
        self.arch_pte_free_tlb(addr);
    }

    /// PUD-level table free: same invalidation as the PTE level.
    pub(super) fn arch_pud_free_tlb(&mut self, addr: Uaddr) {
        self.arch_pte_free_tlb(addr);
    }
}
