use core::sync::atomic::{AtomicUsize, Ordering};

use bsp_define::smp::NR_CPUS;
use unsafe_list::{UnsafeListHead, define_unsafe_list_head, init_unsafe_list_head};

use super::{
    GfpFlags,
    error::{KallocError, Result},
    page::{__free_pages_memory, Page},
    slab::slab_init,
};
use crate::{
    irq::irqflags::{local_irq_restore, local_irq_save},
    processor::{nr_cpus, this_processor_id},
    space::{
        addr::{Pfn, Vaddr, mem_setup, memblock_foreach},
        mm::pgtabledef::PAGE_SIZE,
    },
    sync::spinlock::Spinlock,
};

/// Number of buddy free lists: valid orders are 0..MAX_ORDER, so the largest
/// allocatable block is 2^(MAX_ORDER - 1) pages.
pub(crate) const MAX_ORDER: usize = 11;

/// One buddy free list: all free blocks of a single order.
#[derive(Clone, Copy)]
pub(crate) struct FreeArea {
    pub(crate) free_list: UnsafeListHead<Page>, // intrusive list threaded through Page's lru field
    pub(crate) nr_free: usize,                  // number of blocks currently on `free_list`
}

impl FreeArea {
    /// Compile-time constructor; the self-referential list head cannot be
    /// linked in a const context, so that is deferred to `init_free_area`.
    const fn new() -> Self {
        Self { free_list: UnsafeListHead::new(), nr_free: 0 }
    }

    /// Runtime init: make the intrusive list head point at itself (empty
    /// list), threading through `Page`'s `ty.Lru.0.lru` member.
    fn init_free_area(&mut self) {
        let list = &mut self.free_list;
        init_unsafe_list_head!(list, Page, ty.Lru.0.lru);
    }
}

/// Per-cpu cache of order-0 pages (analogue of Linux `struct per_cpu_pages`).
#[derive(Clone, Copy)]
pub(crate) struct PerdataPages {
    pub(crate) count: u32, // pages currently sitting on `lists`
    pub(crate) high: u32,  // high watermark — presumably triggers a drain on free; used outside this file (TODO confirm)
    pub(crate) batch: u32, // chunk size used when refilling from / draining to the free areas

    pub(crate) lists: UnsafeListHead<Page>, // intrusive list of cached free pages
}

impl PerdataPages {
    /// Compile-time constructor; real initialisation happens in
    /// `init_perdata_pages` (list heads cannot be linked in const context).
    const fn new() -> Self {
        Self { count: 0, high: 0, batch: 0, lists: UnsafeListHead::new() }
    }

    /// Set the high watermark and the refill/drain batch size of this cache.
    #[inline]
    fn pcp_update(&mut self, high: u32, batch: u32) {
        self.high = high;
        self.batch = batch;
    }

    /// Derive `high` from `batch` the way Linux's `pageset_set_batch` does:
    /// high = 6 * batch, batch clamped to at least 1.
    #[inline]
    fn pcp_set_batch(&mut self, batch: u32) {
        self.pcp_update(6 * batch, 1.max(batch));
    }

    /// Compute the per-cpu batch size from the number of managed pages:
    /// roughly 0.1% of managed memory, capped at 1 MiB worth of pages,
    /// quartered, then clamped to a (2^n - 1) boundary.
    #[inline]
    fn inner_batchsize(nr_manage: usize) -> usize {
        let mut batch = nr_manage / 1024;
        if batch * PAGE_SIZE > 1024 * 1024 {
            batch = (1024 * 1024) / PAGE_SIZE;
        }
        batch /= 4;
        if batch < 1 {
            batch = 1;
        }

        // Clamp to a (2^n - 1) boundary, rounding DOWN like Linux's
        // `rounddown_pow_of_two(batch + batch / 2) - 1`. The previous code
        // used `next_power_of_two()` (rounds UP), which could nearly double
        // the batch and defeat the 1 MiB cap enforced above.
        let scaled = batch + batch / 2; // >= 1 because batch >= 1 here
        batch = (1usize << (usize::BITS - 1 - scaled.leading_zeros())) - 1;
        batch
    }

    /// Size this pcp cache for `nr_manage` managed pages.
    #[inline]
    fn pcp_set_high_and_batch(&mut self, nr_manage: usize) {
        let batch = PerdataPages::inner_batchsize(nr_manage) as u32;
        self.pcp_set_batch(batch);
    }

    /// Runtime init: wire up the intrusive list head, then size the batches.
    fn init_perdata_pages(&mut self, nr_manage: usize) {
        let list = &mut self.lists;
        init_unsafe_list_head!(list, Page, ty.Lru.0.lru);

        self.pcp_set_high_and_batch(nr_manage);
    }
}

/// Lock-protected core of the page allocator: one pcp cache per possible CPU
/// plus one buddy free area per order.
pub(crate) struct GlobalDataInner {
    pub(crate) pcp: [PerdataPages; NR_CPUS],
    pub(crate) free_area: [FreeArea; MAX_ORDER],
}

impl GlobalDataInner {
    /// Compile-time constructor: every pcp cache and free area starts empty.
    const fn new() -> Self {
        Self {
            pcp: [PerdataPages::new(); NR_CPUS],
            free_area: [FreeArea::new(); MAX_ORDER],
        }
    }

    /// Runtime (non-const) part of initialisation: wire up the intrusive list
    /// heads and size each online CPU's pcp batch from the number of managed
    /// pages.
    fn init_global_inner(&mut self, nr_manage: usize) {
        let online = nr_cpus();
        for pcp in self.pcp.iter_mut().take(online) {
            pcp.init_perdata_pages(nr_manage);
        }
        for area in self.free_area.iter_mut() {
            area.init_free_area();
        }
    }
}

// SAFETY: GlobalDataInner is only reachable through the Spinlock inside
// GlobalData (plus short raw-pointer escapes taken with local interrupts
// disabled). NOTE(review): the raw `UnsafeListHead<Page>` pointers handed out
// by rmqueue_pcplist/drain_pages_area must never be touched by another CPU
// concurrently — confirm that invariant holds for remote-cpu drains.
unsafe impl Send for GlobalDataInner {}
unsafe impl Sync for GlobalDataInner {}

/// Global page-allocator state: the lock-protected buddy/pcp core plus
/// lock-free statistics counters.
pub(crate) struct GlobalData {
    pub(crate) inner: Spinlock<GlobalDataInner>,
    free_pages: AtomicUsize,     // pages currently free (maintained via free_pages_add/sub)
    reserved_pages: AtomicUsize, // pages marked reserved at boot
    total_pages: AtomicUsize,    // every page discovered at boot
}

/// The single system-wide page allocator instance; counters start at zero and
/// are filled in during `kalloc_init`.
pub(crate) static GLOBAL_PAGE_ALLOC: GlobalData = GlobalData {
    inner: Spinlock::new(GlobalDataInner::new()),
    free_pages: AtomicUsize::new(0),
    reserved_pages: AtomicUsize::new(0),
    total_pages: AtomicUsize::new(0),
};

impl GlobalData {
    /// Run runtime initialisation of the inner state under the lock.
    /// `nr_manage` is the number of pages the allocator actually manages.
    fn init_global(&self, nr_manage: usize) {
        let mut lock = self.inner.lock();
        lock.init_global_inner(nr_manage);
    }

    /// Split an order-`high` block down to order `low`, returning each
    /// freed-off upper half to the matching free list.
    ///
    /// `area` must point at `free_area[high]`; the loop steps the pointer
    /// back one element per order, relying on `free_area` being a contiguous
    /// array. `page` (the block head) stays allocated — only the tail halves
    /// are re-inserted.
    fn expand(page: &mut Page, low: usize, mut high: usize, area: *mut FreeArea) {
        let mut size = 1 << high;

        let mut current_area = unsafe { &mut *area };
        while high > low {
            // SAFETY: caller passed `&mut free_area[high]` and `high > low`,
            // so stepping back one FreeArea per iteration stays in the array.
            unsafe {
                current_area = &mut *(current_area as *mut FreeArea).sub(1);
            }
            high -= 1;
            size >>= 1;

            // SAFETY: `page` heads a 2^(high+1)-page block, so `page + size`
            // (the upper buddy half) is a valid Page in the same block.
            unsafe {
                let current_page = &mut *(page as *mut Page).add(size);
                current_page.set_page_order(high);
                current_page.buddy_list_add(&mut current_area.free_list);
            }
            current_area.nr_free += 1;
        }
    }

    // Return one allocated page of `order`, automatically splitting a larger
    // block when necessary (buddy smallest-fit scan). Caller holds the lock.
    fn __rmqueue_smallest(inner: &mut GlobalDataInner, order: usize) -> Option<*mut Page> {
        for current_order in order..MAX_ORDER {
            let area = &mut inner.free_area[current_order];
            let raw_area = area as *mut FreeArea;
            let page = unsafe { area.free_list.list_first_entry_or_null_mut() };
            if let Some(p) = page {
                p.buddy_list_del();
                p.rmv_page_order();
                area.nr_free -= 1;
                GlobalData::expand(p, order, current_order, raw_area);
                return Some(p as *mut Page);
            }
        }
        None
    }

    // Bulk-allocate `count` pages of `order` onto `head`; returns how many
    // were actually obtained (may be short when memory runs out).
    fn rmqueue_bulk(&self, order: usize, count: usize, head: &mut UnsafeListHead<Page>) -> u32 {
        let mut alloced = 0;
        let mut lock = self.inner.lock();
        for _ in 0..count {
            let page_raw = GlobalData::__rmqueue_smallest(&mut lock, order);
            if let Some(page) = page_raw {
                unsafe {
                    let p = &mut *page;
                    p.check_new_page().unwrap();
                    p.buddy_list_add(head);
                    alloced += 1;
                }
            } else {
                break;
            }
        }
        alloced
    }

    // Allocate one page from the per-cpu list; if the list is empty, refill
    // it with a batch of `pcp.batch` order-0 pages from the free areas first.
    // Caller must have local interrupts disabled (pointers come from
    // `rmqueue_pcplist`).
    fn __rmqueue_pcplist(
        &self,
        pcp_raw: *mut PerdataPages,
        head_raw: *mut UnsafeListHead<Page>,
    ) -> Option<*mut Page> {
        let pcp = unsafe { &mut *pcp_raw };
        let head = unsafe { &mut *head_raw };

        unsafe {
            if head.list_empty() {
                pcp.count += self.rmqueue_bulk(0, pcp.batch as usize, head);
                if head.list_empty() {
                    // Refill produced nothing: system is out of order-0 pages.
                    return None;
                }
            }

            let page = head.list_first_entry_or_null_mut().unwrap();
            page.buddy_list_del();
            pcp.count -= 1;
            page.rmv_page_order();
            page.check_new_page().unwrap();
            Some(page as *mut Page)
        }
    }

    // Allocate one page from this CPU's pcp list.
    fn rmqueue_pcplist(&self) -> Option<*mut Page> {
        let pcp;
        let head;

        // For local per-cpu data, disabling interrupts is enough — no need to
        // pay for the lock. The lock below is only held long enough to
        // compute the raw pointers into this CPU's slot.
        let flags = local_irq_save();
        {
            let mut lock = self.inner.lock();
            let this_pcp = &mut lock.pcp[this_processor_id()];
            head = core::ptr::addr_of_mut!(this_pcp.lists);
            pcp = this_pcp as *mut PerdataPages;
        }
        let page = self.__rmqueue_pcplist(pcp, head);
        local_irq_restore(flags);
        page
    }

    /// Allocate a 2^`order` block: order 0 goes through the per-cpu fast
    /// path, larger orders hit the buddy free areas directly under an
    /// irq-saving lock. Returns None when memory is exhausted.
    #[inline(always)]
    pub(crate) fn rmqueue(&self, order: usize) -> Option<*mut Page> {
        if order == 0 {
            return self.rmqueue_pcplist();
        }

        let mut lock = self.inner.lock_irq_save();
        let page = GlobalData::__rmqueue_smallest(&mut lock, order);
        if let Some(p) = page {
            unsafe {
                (*p).check_new_pages(order).unwrap();
            }
            return Some(p);
        }
        None
    }

    // Release `count` cached pages from the given pcp list back to the free
    // areas. NOTE(review): the pcp list is manipulated before the lock is
    // taken — callers must guarantee exclusive access (interrupts disabled on
    // the owning CPU); `drain_pages_area` also calls this for REMOTE cpus
    // with only local interrupts disabled — confirm that is safe.
    pub(crate) fn free_pcppages_bulk(&self, mut count: usize, pcp_raw: *mut PerdataPages) {
        define_unsafe_list_head!(head, Page, ty.Lru.0.lru);

        // `batch_free` mirrors Linux's multi-migratetype escalation loop;
        // with a single pcp list it effectively moves one page per outer
        // iteration (vestigial, kept as-is).
        let mut batch_free = 0;
        let mut prefetch_nr = 0;
        let pcp = unsafe { &mut *pcp_raw };
        while count != 0 {
            batch_free += 1;
            let list = &mut pcp.lists;
            loop {
                // SAFETY: `count` <= pcp.count, which tracks the list length,
                // so the list is non-empty here — TODO confirm invariant.
                let page = unsafe { list.list_last_entry_mut() };
                page.buddy_list_del();
                pcp.count -= 1;

                page.free_pages_check().unwrap();

                // Stage on a local list; buddy merge happens under the lock
                // below.
                page.buddy_list_add_tail(&mut head);

                // Warm the cache line of the page's buddy for the upcoming
                // merge, bounded by `batch` to limit speculative traffic.
                if prefetch_nr < pcp.batch {
                    page.prefetch_buddy();
                }
                prefetch_nr += 1;
                count -= 1;
                batch_free -= 1;
                unsafe {
                    if count == 0 || batch_free == 0 || list.list_empty() {
                        break;
                    }
                }
            }
        }

        // Merge every staged page back into the buddy areas in one lock hold.
        let mut lock = self.inner.lock();
        for p in head.iter_mut() {
            p.__free_one_page(&mut lock, p.to_pfn(), 0);
        }
    }

    /// Drain every cached page of `cpu`'s pcp list back to the free areas;
    /// returns 1 if anything was drained, 0 otherwise.
    pub(crate) fn drain_pages_area(&self, cpu: usize) -> u32 {
        let mut has_page = 0;

        let flags = local_irq_save();
        // Original comment claimed "local cpu data needs no lock, disabling
        // interrupts is enough" — but callers (get_page_from_freelist) pass
        // REMOTE cpu ids, which local irq-off does not protect.
        // NOTE(review): verify remote drains cannot race the owning CPU.
        let raw_pcp;
        {
            let mut lock = self.inner.lock();
            raw_pcp = core::ptr::addr_of_mut!(lock.pcp[cpu]);
        }
        unsafe {
            let pcp = &mut *raw_pcp;
            if pcp.count != 0 {
                has_page = 1;
                self.free_pcppages_bulk(pcp.count as usize, raw_pcp);
            }
        }
        local_irq_restore(flags);
        has_page
    }

    /// Free one 2^`order` block directly into the buddy areas (no pcp cache).
    pub(crate) fn free_one_page(&self, page: &mut Page, pfn: Pfn, order: usize) {
        let mut lock = self.inner.lock();
        page.__free_one_page(&mut lock, pfn, order);
    }
}

/// Allocate 2^`order` pages and return their virtual address.
///
/// On failure, cached pages are reclaimed from every other CPU's pcp list
/// and the allocation is retried; `Enomem` is returned once a full drain
/// pass yields nothing. Returns `Eorder` for orders the buddy system
/// cannot serve.
#[inline]
pub(crate) fn get_page_from_freelist(gfp_mask: GfpFlags, order: usize) -> Result<Vaddr> {
    if order >= MAX_ORDER {
        return Err(KallocError::Eorder);
    }

    loop {
        if let Some(p) = GLOBAL_PAGE_ALLOC.rmqueue(order) {
            unsafe {
                (*p).prep_new_page(order, gfp_mask);
                return Ok((*p).to_virt().unwrap());
            }
        }

        // Allocation failed: try reclaiming pages cached on other CPUs.
        // `drained` must be recomputed on EVERY round — the previous code
        // accumulated it across iterations, so after one successful drain
        // every later failing round saw a non-zero value and `continue`d,
        // spinning forever instead of reporting out-of-memory.
        let mut drained = 0;
        for cpu in 0..nr_cpus() {
            if cpu == this_processor_id() {
                continue;
            }
            drained += GLOBAL_PAGE_ALLOC.drain_pages_area(cpu);
        }
        if drained == 0 {
            return Err(KallocError::Enomem);
        }
    }
}

/// Return 2^`order` pages at `vaddr` to the allocator: single pages go to
/// the per-cpu cache, multi-page blocks go straight to the buddy areas.
#[inline]
pub(crate) fn free_pages_to_freelist(vaddr: Vaddr, order: usize) {
    let page = vaddr.to_page_mut();
    match order {
        0 => page.free_page_to_pcp(),
        _ => page.free_pages_to_area(order),
    }
}

// Decrease the global free-page counter by `nr` (relaxed ordering: this is a
// statistic, not a synchronisation point).
#[inline(always)]
pub(crate) fn free_pages_sub(nr: usize) {
    GLOBAL_PAGE_ALLOC.free_pages.fetch_sub(nr, Ordering::Relaxed);
}

// Increase the global free-page counter by `nr` (relaxed ordering: this is a
// statistic, not a synchronisation point).
#[inline(always)]
pub(crate) fn free_pages_add(nr: usize) {
    GLOBAL_PAGE_ALLOC.free_pages.fetch_add(nr, Ordering::Relaxed);
}

/// Hand the inclusive pfn range [start_pfn, end_pfn] to the buddy allocator,
/// carving it into the largest naturally-aligned power-of-two chunks that fit.
fn free_pages_memory(start_pfn: Pfn, end_pfn: Pfn) {
    let limit = end_pfn + 1; // exclusive upper bound
    let mut cursor = start_pfn;
    while cursor < limit {
        // Start from the alignment-limited order (trailing zeros of the pfn,
        // capped at the maximum order), then shrink until the chunk also
        // fits below `limit`.
        let mut order = (MAX_ORDER - 1).min(cursor.to_value().trailing_zeros() as usize);
        while cursor.to_value() + (1 << order) > limit.to_value() {
            order -= 1;
        }

        let paddr = cursor.to_phys();
        __free_pages_memory(
            paddr.to_page_mut().unwrap() as *mut Page,
            paddr.to_virt().unwrap(),
            order,
        );

        cursor = Pfn::from(cursor.to_value() + (1 << order));
    }
}

// Initialise the memory allocator.
pub(crate) fn kalloc_init() {
    // Phase 1: walk every physical range (free and reserved), initialise its
    // Page structs and account the total / reserved page counts.
    mem_setup(|start, size, is_free| {
        let start_pfn = start.to_pfn().unwrap();
        let end_pfn = (start + size - 1).to_pfn().unwrap();
        for pfn in start_pfn.to_value()..(end_pfn + 1).to_value() {
            let page = Pfn::from(pfn).to_page_mut().unwrap();
            page.init_page(!is_free); // non-free ranges are marked reserved
        }
        let nr = end_pfn + 1 - start_pfn;
        GLOBAL_PAGE_ALLOC.total_pages.fetch_add(nr, Ordering::Relaxed);
        if !is_free {
            GLOBAL_PAGE_ALLOC.reserved_pages.fetch_add(nr, Ordering::Relaxed);
        }
    });

    // Phase 2: wire up all list heads and size the per-cpu batches from the
    // managed page count (total minus reserved).
    GLOBAL_PAGE_ALLOC.init_global(global_nr_total_pages() - global_nr_reserved_pages());

    // Phase 3: release every free memblock range into the buddy allocator.
    memblock_foreach(|start, size, _| {
        let start_pfn = start.to_pfn().unwrap();
        let end_pfn = (start + size - 1).to_pfn().unwrap();
        free_pages_memory(start_pfn, end_pfn);
    });

    // Phase 4: bring up the slab allocator on top of the page allocator.
    slab_init();
}

/// Total number of currently free pages.
pub(super) fn global_nr_free_pages() -> usize {
    GLOBAL_PAGE_ALLOC.free_pages.load(Ordering::Relaxed)
}

/// Total number of reserved pages.
pub(super) fn global_nr_reserved_pages() -> usize {
    GLOBAL_PAGE_ALLOC.reserved_pages.load(Ordering::Relaxed)
}

/// Total number of pages known to the allocator.
pub(super) fn global_nr_total_pages() -> usize {
    GLOBAL_PAGE_ALLOC.total_pages.load(Ordering::Relaxed)
}
