use core::sync::atomic::{AtomicUsize, Ordering};

use unsafe_list::{define_unsafe_list_head, init_unsafe_list_head, UnsafeListHead};

use super::page::Page;
use crate::{
    boot::{page_to_pfn, NR_CPUS, PAGE_SIZE},
    irq::irqflags::{local_irq_restore, local_irq_save},
    processor::this_processor_id,
    sync::spinlock::Spinlock,
};

/// Number of buddy orders: the allocator manages blocks of 2^0 .. 2^(MAX_ORDER-1) pages.
pub(crate) const MAX_ORDER: usize = 11;

/// One order's free list in the buddy allocator: the intrusive list of free
/// blocks of that order plus a count of how many blocks it holds.
pub(crate) struct FreeArea {
    // Intrusive list of free blocks, linked through `Page`'s `ty.Lru.0.lru` field.
    pub(crate) free_list: UnsafeListHead<Page>,
    // Number of free blocks currently on `free_list`.
    pub(crate) nr_free: usize,
}

impl FreeArea {
    /// Const constructor producing an empty area. The list head still needs
    /// `init_free_area` before use — presumably the self-referential head
    /// pointers cannot be established in a `const fn`.
    const fn new() -> Self {
        Self { free_list: UnsafeListHead::new(), nr_free: 0 }
    }

    /// Runtime initialisation of the intrusive list head, wiring it to link
    /// through the `ty.Lru.0.lru` member of `Page`.
    fn init_free_area(&mut self) {
        let list = &mut self.free_list;
        init_unsafe_list_head!(list, Page, ty.Lru.0.lru);
    }
}

/// Per-CPU page cache (analogous to Linux's `per_cpu_pages`): a small stash of
/// order-0 pages so the common single-page allocation avoids the zone lock.
pub(crate) struct PerdataPages {
    // Pages currently cached on `lists`.
    pub(crate) count: u32,
    // High watermark: draining is expected once `count` exceeds this.
    pub(crate) high: u32,
    // How many pages to move to/from the buddy free lists at a time.
    pub(crate) batch: u32,

    // Intrusive list of cached pages, linked through `Page`'s `ty.Lru.0.lru`.
    pub(crate) lists: UnsafeListHead<Page>,
}

impl PerdataPages {
    /// Const constructor; `lists` must still be initialised via
    /// `init_perdata_pages` before use.
    const fn new() -> Self {
        Self { count: 0, high: 0, batch: 0, lists: UnsafeListHead::new() }
    }

    /// Set the high watermark and batch size together.
    #[inline]
    fn pcp_update(&mut self, high: u32, batch: u32) {
        self.high = high;
        self.batch = batch;
    }

    /// Derive both watermarks from a batch size: `high` is six batches, and
    /// `batch` is clamped to at least 1 (`zone_batchsize` can yield 0).
    #[inline]
    fn pcp_set_batch(&mut self, batch: u32) {
        self.pcp_update(6 * batch, 1.max(batch));
    }

    /// Heuristic batch size for a zone with `nr_manage` managed pages:
    /// roughly 1/1024 of the zone, capped at 1 MiB worth of pages, quartered,
    /// floored at 1, then rounded near a power of two minus one.
    ///
    /// Pure computation — it reads no state, so it is an associated function
    /// (the previous `&mut self` receiver was unused).
    #[inline]
    fn zone_batchsize(nr_manage: usize) -> usize {
        let mut batch = nr_manage / 1024;
        if batch * PAGE_SIZE > 1024 * 1024 {
            batch = (1024 * 1024) / PAGE_SIZE;
        }
        batch /= 4;
        if batch < 1 {
            batch = 1;
        }

        // NOTE(review): Linux's zone_batchsize uses
        // `rounddown_pow_of_two(batch + batch/2) - 1`; `next_power_of_two`
        // rounds *up*, producing a larger batch for non-power-of-two inputs —
        // confirm the deviation is intentional.
        batch = (batch + batch / 2).next_power_of_two() - 1;
        batch
    }

    /// Recompute `high`/`batch` from the zone's managed-page count.
    #[inline]
    fn pcp_set_high_and_batch(&mut self, nr_manage: usize) {
        let batch = Self::zone_batchsize(nr_manage) as u32;
        self.pcp_set_batch(batch)
    }

    /// Runtime initialisation: wire up the intrusive list head (linking
    /// through `Page`'s `ty.Lru.0.lru`) and compute the watermarks.
    fn init_perdata_pages(&mut self, nr_manage: usize) {
        let list = &mut self.lists;
        init_unsafe_list_head!(list, Page, ty.Lru.0.lru);

        self.pcp_set_high_and_batch(nr_manage);
    }
}

// Workaround for array-repeat initialisation of non-Copy element types:
// `[expr; N]` in a const context requires a named constant as the repeated value.
const F_ARRAY_REPEAT_VALUE: FreeArea = FreeArea::new();
const P_ARRAY_REPEAT_VALUE: PerdataPages = PerdataPages::new();

/// Lock-protected interior of a `Zone`: one per-CPU page cache per CPU and
/// one free area per buddy order.
pub(crate) struct ZoneInner {
    // Per-CPU order-0 page caches, indexed by processor id.
    pub(crate) pcp: [PerdataPages; NR_CPUS],
    // Buddy free lists, indexed by order (0..MAX_ORDER).
    pub(crate) free_area: [FreeArea; MAX_ORDER],
}

impl ZoneInner {
    /// Const constructor: every per-CPU cache and free area starts in its
    /// const, not-yet-initialised state.
    const fn new() -> Self {
        Self {
            pcp: [P_ARRAY_REPEAT_VALUE; NR_CPUS],
            free_area: [F_ARRAY_REPEAT_VALUE; MAX_ORDER],
        }
    }

    /// Run the post-construction initialisation on every per-CPU page cache
    /// and on every order's free area.
    fn init_zone_inner(&mut self, nr_manage: usize) {
        for pcp in self.pcp.iter_mut() {
            pcp.init_perdata_pages(nr_manage);
        }
        for area in self.free_area.iter_mut() {
            area.init_free_area();
        }
    }
}

// SAFETY: ZoneInner holds raw intrusive-list pointers, so Send/Sync are not
// auto-derived. It is only reachable through `Zone`'s Spinlock (plus local IRQ
// disabling for per-CPU data) — presumably that external synchronisation is
// what makes cross-thread access sound; confirm no path touches it unlocked.
unsafe impl Send for ZoneInner {}
unsafe impl Sync for ZoneInner {}

/// A memory zone: the spinlock-protected buddy/pcp state plus an atomically
/// readable count of managed pages.
pub(crate) struct Zone {
    // Buddy free lists and per-CPU caches, guarded by a spinlock.
    pub(crate) inner: Spinlock<ZoneInner>,
    // Total pages this zone manages; written once in `init_zone`.
    managed_pages: AtomicUsize,
}

impl Zone {
    /// A zone with zero managed pages and fully-const inner state; call
    /// `init_zone` before first use.
    pub(crate) const fn new() -> Self {
        Self {
            inner: Spinlock::new(ZoneInner::new()),
            managed_pages: AtomicUsize::new(0),
        }
    }

    /// Number of pages this zone manages (relaxed load of a counter written
    /// only during initialisation).
    pub(crate) fn zone_managed_pages(&self) -> usize {
        self.managed_pages.load(Ordering::Relaxed)
    }

    /// Initialise the inner lists/watermarks, then publish the managed-page
    /// count.
    pub(crate) fn init_zone(&self, nr_manage: usize) {
        let mut inner = self.inner.lock();
        inner.init_zone_inner(nr_manage);
        self.managed_pages.store(nr_manage, Ordering::Relaxed);
    }
}

impl Zone {
    /// Split an order-`high` block down to order `low`.
    ///
    /// `page` remains the order-`low` block handed to the caller; each peeled
    /// upper half is tagged with its order and pushed on the free list of the
    /// matching area. `area` must point at `free_area[high]` inside the zone's
    /// `free_area` array.
    fn expand(page: &mut Page, low: usize, mut high: usize, area: *mut FreeArea) {
        let mut size = 1 << high;

        let mut current_area = unsafe { &mut *area };
        while high > low {
            // SAFETY: `area` starts at index `high` of `free_area[..]` and is
            // stepped back exactly once per iteration while `high > low`, so
            // the pointer never leaves the array.
            unsafe {
                current_area = &mut *(current_area as *mut FreeArea).sub(1);
            }
            high -= 1;
            size >>= 1;

            // SAFETY: buddy halves are contiguous in the page array, so
            // `page + size` is the upper half of the current block — assumes
            // the backing `Page` array covers the whole block; TODO confirm.
            unsafe {
                let current_page = &mut *(page as *mut Page).add(size);
                current_page.set_page_order(high);
                current_page.buddy_list_add(&mut current_area.free_list);
            }
            current_area.nr_free += 1;
        }
    }

    // Returns one allocated page, automatically splitting a larger block when
    // no block of the requested order is free. Caller must hold the zone lock
    // (it receives the locked `ZoneInner`).
    fn __rmqueue_smallest(zone_inner: &mut ZoneInner, order: usize) -> Option<*mut Page> {
        for current_order in order..MAX_ORDER {
            let area = &mut zone_inner.free_area[current_order];
            let raw_area = area as *mut FreeArea;
            let page = unsafe { area.free_list.list_first_entry_or_null_mut() };
            if let Some(p) = page {
                p.buddy_list_del();
                p.rmv_page_order();
                area.nr_free -= 1;
                // Peel off any excess halves back onto the lower-order lists.
                Zone::expand(p, order, current_order, raw_area);
                return Some(p as *mut Page);
            }
        }
        None
    }

    // Bulk-allocates `count` pages of `order` and links them onto `head`;
    // returns how many pages were actually allocated (fewer than `count`
    // when the zone runs dry).
    // NOTE(review): takes the plain (non-IRQ-saving) lock — on the pcp path
    // the caller already runs with local IRQs disabled; confirm no other
    // caller needs IRQ protection here.
    fn rmqueue_bulk(&self, order: usize, count: usize, head: &mut UnsafeListHead<Page>) -> u32 {
        let mut alloced = 0;
        let mut lock = self.inner.lock();
        for _ in 0..count {
            let page_raw = Zone::__rmqueue_smallest(&mut lock, order);
            if let Some(page) = page_raw {
                unsafe {
                    let p = &mut *page;
                    p.check_new_page().unwrap();
                    p.buddy_list_add(head);
                    alloced += 1;
                }
            } else {
                break;
            }
        }
        alloced
    }

    // Allocates one page from the per-CPU list; if the pcp list is empty it
    // is first refilled with a `batch`-sized bulk allocation from the free
    // areas. Returns None only when the zone itself is exhausted.
    //
    // Contract: `pcp_raw`/`head_raw` must point at the current CPU's
    // PerdataPages, and the caller must keep local IRQs disabled for the
    // whole call so nothing else touches that per-CPU data.
    fn __rmqueue_pcplist(
        &self,
        pcp_raw: *mut PerdataPages,
        head_raw: *mut UnsafeListHead<Page>,
    ) -> Option<*mut Page> {
        let pcp = unsafe { &mut *pcp_raw };
        let head = unsafe { &mut *head_raw };

        unsafe {
            if head.list_empty() {
                // Refill with order-0 pages, one batch at a time.
                pcp.count += self.rmqueue_bulk(0, pcp.batch as usize, head);
                if head.list_empty() {
                    return None;
                }
            }

            // Refill succeeded (or the list was already non-empty), so the
            // unwrap cannot fail here.
            let page = head.list_first_entry_or_null_mut().unwrap();
            page.buddy_list_del();
            pcp.count -= 1;
            page.rmv_page_order();
            page.check_new_page().unwrap();
            Some(page as *mut Page)
        }
    }

    // Allocates one page from this CPU's pcp list.
    fn rmqueue_pcplist(&self) -> Option<*mut Page> {
        let pcp;
        let head;

        // For CPU-local data, disabling interrupts is sufficient protection;
        // no need to waste time on the lock.
        let flags = local_irq_save();
        {
            // Hold the lock only long enough to derive the per-CPU raw
            // pointers; presumably the ZoneInner is never moved, keeping them
            // valid after the guard drops — TODO confirm.
            let mut lock = self.inner.lock();
            let this_pcp = &mut lock.pcp[this_processor_id()];
            head = &mut this_pcp.lists as *mut UnsafeListHead<Page>;
            pcp = this_pcp as *mut PerdataPages;
        }
        let page = self.__rmqueue_pcplist(pcp, head);
        local_irq_restore(flags);
        page
    }

    // Allocates a 2^order block from this zone. Order-0 requests take the
    // per-CPU fast path; larger orders take the zone lock directly.
    #[inline(always)]
    pub(crate) fn rmqueue(&self, order: usize) -> Option<*mut Page> {
        if order == 0 {
            return self.rmqueue_pcplist();
        }

        let mut lock = self.inner.lock_irq_save();
        let page = Zone::__rmqueue_smallest(&mut lock, order);
        if let Some(p) = page {
            unsafe {
                (*p).check_new_pages(order).unwrap();
            }
            return Some(p);
        }
        None
    }

    // Releases `count` cached pages from the pcp list back to the free areas.
    //
    // NOTE(review): the `batch_free` bookkeeping mirrors Linux's
    // multi-migratetype loop; with a single pcp list it drains exactly one
    // page per outer iteration. Also assumes `count <= pcp.count` — otherwise
    // `list_last_entry_mut` would run on an empty list. Confirm callers.
    pub(crate) fn free_pcppages_bulk(&self, mut count: usize, pcp_raw: *mut PerdataPages) {
        // Local staging list so pages can be detached from the pcp list
        // before the zone lock is taken.
        define_unsafe_list_head!(head, Page, ty.Lru.0.lru);

        let mut batch_free = 0;
        let mut prefetch_nr = 0;
        let pcp = unsafe { &mut *pcp_raw };
        while count != 0 {
            batch_free += 1;
            let list = &mut pcp.lists;
            loop {
                // Drain from the tail (coldest entries first).
                let page = unsafe { list.list_last_entry_mut() };
                page.buddy_list_del();
                pcp.count -= 1;

                page.free_pages_check().unwrap();

                page.buddy_list_add_tail(&mut head);

                // Prefetch buddy metadata for at most one batch's worth of
                // pages, warming the cache for the merge loop below.
                if prefetch_nr < pcp.batch {
                    page.prefetch_buddy();
                }
                prefetch_nr += 1;
                count -= 1;
                batch_free -= 1;
                unsafe {
                    if count == 0 || batch_free == 0 || list.list_empty() {
                        break;
                    }
                }
            }
        }

        // Merge the staged pages back into the buddy free lists under the
        // zone lock; everything staged here is order 0.
        let mut lock = self.inner.lock();
        for p in head.iter_mut() {
            p.__free_one_page(&mut lock, page_to_pfn(p), 0);
        }
    }

    /// Releases all cached pages of `cpu`'s pcp list back to the free areas.
    /// Returns 1 if any page was drained, 0 otherwise.
    pub(crate) fn drain_pages_zone(&self, cpu: usize) -> u32 {
        let mut has_page = 0;

        let flags = local_irq_save();
        // Handling CPU-local data needs no lock; disabling interrupts is
        // enough. NOTE(review): that only holds when `cpu` is the current
        // CPU — confirm callers never drain a remote CPU concurrently.
        let raw_pcp;
        {
            let mut lock = self.inner.lock();
            raw_pcp = &mut lock.pcp[cpu] as *mut PerdataPages;
        }
        unsafe {
            let pcp = &mut *raw_pcp;
            if pcp.count != 0 {
                has_page = 1;
                self.free_pcppages_bulk(pcp.count as usize, raw_pcp);
            }
        }
        local_irq_restore(flags);
        has_page
    }

    /// Frees one 2^order block at `pfn` straight back to the buddy free
    /// lists, bypassing the per-CPU cache.
    pub(crate) fn free_one_page(&self, page: &mut Page, pfn: usize, order: usize) {
        let mut lock = self.inner.lock();
        page.__free_one_page(&mut lock, pfn, order);
    }
}
