// use std::{cell::RefCell, rc::Rc, ptr::{null, self}, mem};
use core::{mem, ptr};

use crate::{
    common::list::ListHead,
    common::{synchronization::interface::Mutex, synchronization::NullLock},
    kdebug, kwarn, list_entry,
    mm::slab,
    qemu_println, uart_println,
};
/// Size in bytes of one physical page managed by the buddy system (4 KiB).
pub const BUDDY_PAGE_SIZE: u64 = 0x1000;
/// log2 of `BUDDY_PAGE_SIZE` (2^12 = 0x1000).
pub const BUDDY_PAGE_SIZE_ORDER: u64 = 12;
/// Number of free lists. Valid chunk orders are 0..BUDDY_MAX_ORDER, so the
/// largest chunk spans 2^(BUDDY_MAX_ORDER - 1) pages.
pub const BUDDY_MAX_ORDER: u64 = 14;

/// Per-physical-page metadata tracked by the buddy allocator.
///
/// One `Page` lives inside each `ListHead<Page>` entry of the pool's
/// `page_metadata` array (see `init_buddy`).
pub struct Page {
    // True while this page (as head of a chunk) is handed out, or while it
    // is an absorbed non-head page of a larger chunk.
    pub allocated: bool,
    // Buddy order of the chunk headed by this page; the chunk spans
    // 2^order pages.
    pub order: u64,
    // Back-pointer to the `ListHead` that embeds this `Page`; set to the
    // page's own metadata entry in `init_buddy`.
    pub node: *mut ListHead<Page>,
    // Slab header owning this page when it backs the slab allocator.
    // NOTE(review): semantics inferred from the name/`mm::slab` import —
    // confirm against the slab module.
    pub slab: *mut slab::SlabHeader,
}
// SAFETY(review): `Page` holds raw pointers into the shared page-metadata
// area; this impl assumes all access happens under the pool's lock — confirm.
unsafe impl Send for Page {}
/// One bucket of the buddy system: all free chunks of a single order.
pub struct FreeList {
    // Sentinel head of the intrusive doubly-linked list of free chunks.
    pub free_list: ListHead<Page>,
    // Number of chunks currently linked on `free_list`.
    pub nr_free: u64,
}
// SAFETY(review): the intrusive list contains raw pointers; this impl
// assumes the list is only touched under the owning pool's lock — confirm.
unsafe impl Send for FreeList {}
/// Lock-protected state of one physical memory pool managed by the buddy
/// allocator (see `PhysMemPool`).
pub struct PhysMemPoolInner {
    // Virtual address of the first byte of managed memory.
    pub pool_start_addr: u64,
    // Total managed size in bytes (pool_phys_page_num * BUDDY_PAGE_SIZE).
    pub pool_mem_size: u64,
    // Number of physical pages in the pool.
    pub pool_phys_page_num: u64,
    // Base of the per-page metadata array: one `ListHead<Page>` per page,
    // indexed by page number within the pool.
    pub page_metadata: *mut ListHead<Page>,
    // One free list per chunk order, 0..BUDDY_MAX_ORDER.
    pub free_lists: [FreeList; BUDDY_MAX_ORDER as usize],
}
// SAFETY(review): raw `page_metadata` pointer is shared state; soundness
// relies on exclusive access via the surrounding `NullLock` — confirm.
unsafe impl Send for PhysMemPoolInner {}
/// A physical memory pool: buddy-allocator state behind a lock.
pub struct PhysMemPool {
    // All mutable allocator state; every public entry point takes the lock.
    pub inner: NullLock<PhysMemPoolInner>,
}

// WTF
// impl Default for PhysMemPoolInner {
//     fn default() -> Self {
//         Self {
//             ..Default::default()
//         }
//     }
// }

// fn page_append(pooloutside: &mut PhysMemPool, page: &mut ListHead<Page>) {
//     let order = page.data.order;
//     let inner = pool.get_inner_mut();
//     let free_list = &mut inner.free_lists[order as usize];
//     free_list.free_list.add(&mut *page);
//     free_list.nr_free += 1;
// }
/// Link `page` onto the free list matching its current order and bump that
/// list's chunk counter.
fn page_append(pool: &mut PhysMemPoolInner, page: &mut ListHead<Page>) {
    let bucket_idx = page.data.order as usize;
    let bucket = &mut pool.free_lists[bucket_idx];
    bucket.nr_free += 1;
    bucket.free_list.add(page);
}
/// Unlink `page` from the free list it sits on and decrement the chunk
/// counter of the list for its order.
fn page_del(pool: &mut PhysMemPoolInner, page: &mut ListHead<Page>) {
    let bucket_idx = page.data.order as usize;
    page.delete();
    pool.free_lists[bucket_idx].nr_free -= 1;
}

/// Initialize a buddy-managed physical memory pool.
///
/// `start_page` points at `page_num` (uninitialized) `ListHead<Page>`
/// metadata entries; `start_addr` is the virtual address of the first
/// managed page. Every page is first marked allocated (order 0), then
/// released one by one through `buddy_free_pages`, which populates the
/// free lists and coalesces adjacent buddies into larger chunks.
pub fn init_buddy(
    pooloutside: &mut PhysMemPool,
    start_page: *mut ListHead<Page>,
    start_addr: u64,
    page_num: u64,
) {
    let mut _order: usize;
    let mut _page_idx: usize;
    let mut _page: *mut Page;
    pooloutside.inner.lock(|pool| {
        pool.pool_start_addr = start_addr;
        pool.pool_mem_size = page_num * BUDDY_PAGE_SIZE;
        pool.pool_phys_page_num = page_num;
        pool.page_metadata = start_page;

        unsafe {
            // Initialize the free lists (one empty list per order).
            for order in 0..BUDDY_MAX_ORDER as usize {
                pool.free_lists[order].nr_free = 0;
                // Self-referential sentinel node of the empty free list.
                pool.free_lists[order].free_list.init();
            }
            qemu_println!("buddy:freelist init finished");
            // Zero the whole page_metadata array (per-page metadata).
            ptr::write_bytes(
                start_page as *mut u8,
                0,
                (page_num as usize) * mem::size_of::<ListHead<Page>>(),
            );
            qemu_println!("buddy:page metadata init finished");
            // Mark every page as an allocated order-0 chunk and point its
            // `node` back at its own metadata entry.
            for page_idx in 0..page_num as usize {
                let page = &mut (*pool.page_metadata.wrapping_add(page_idx as usize));
                page.data.allocated = true;
                page.data.order = 0;
                page.data.node = pool.page_metadata.wrapping_add(page_idx as usize);
            }

            // Free each page in turn; buddy_free_pages links it into the
            // free lists and merges it with free buddies where possible.
            for page_idx in 0..page_num as usize {
                let page = pool.page_metadata.wrapping_add(page_idx as usize);
                buddy_free_pages(pool, (*page).data.node.as_mut().unwrap());
            }
            qemu_println!("buddy:page init finished");
        }
    });
}

/// Locate the buddy of `chunk`: the equally-sized neighbor whose address
/// differs only in bit `order + BUDDY_PAGE_SIZE_ORDER`.
///
/// Returns `None` when the computed buddy address falls outside the pool.
fn get_buddy_chunk(pool: &mut PhysMemPoolInner, chunk: &Page) -> Option<*mut ListHead<Page>> {
    let chunk_addr = page_to_virt(pool, chunk) as u64;
    let buddy_addr = chunk_addr ^ (1 << (chunk.order + BUDDY_PAGE_SIZE_ORDER));

    let inside_pool = buddy_addr >= pool.pool_start_addr
        && buddy_addr < pool.pool_start_addr + pool.pool_mem_size;
    if inside_pool {
        // as_mut().unwrap() keeps the original panic-on-null behavior.
        unsafe { Some(virt_to_page(pool, buddy_addr).as_mut().unwrap()) }
    } else {
        None
    }
}

/// Halve `page` (a free chunk) repeatedly until its order drops to `order`,
/// returning each split-off upper half ("buddy") to the matching free list.
/// The chunk itself is detached from its free list and left *off* the lists
/// for the caller (`buddy_get_pages`) to hand out.
fn split_page(pool: &mut PhysMemPoolInner, order: u64, page: &mut Page) {
    if page.allocated {
        uart_println!("Try to split an allocated page");
        qemu_println!("Try to split an allocated page");
    }

    page.allocated = false;
    // Detach the chunk from the free list of its current order.
    page_del(pool, unsafe { page.node.as_mut().unwrap() });

    while page.order > order {
        // Shrink by one order; the buddy at the new order is the other
        // half of the old chunk, which goes back to the free lists.
        page.order -= 1;
        let buddy_page = get_buddy_chunk(pool, &page);

        if let Some(buddy) = buddy_page {
            unsafe {
                (*buddy).data.allocated = false;
                (*buddy).data.order = page.order;
                page_append(pool, buddy.as_mut().unwrap());
            }
        }
        // If the buddy address fell outside the pool there is nothing to
        // hand back; the loop simply continues shrinking.
    }
}

/// Allocate a chunk of 2^`order` contiguous physical pages from the pool.
///
/// Scans the free lists from `order` upward for the first non-empty one,
/// splits the chunk found there down to the requested order, marks the
/// resulting head page allocated and returns it. Returns `None` when no
/// free list up to `BUDDY_MAX_ORDER` holds a chunk, or the list head
/// resolves to a null page.
pub fn buddy_get_pages(pooloutside: &mut PhysMemPool, order: u64) -> Option<&mut Page> {
    pooloutside.inner.lock(|pool| {
        // Find the smallest order >= `order` with at least one free chunk.
        // (`nr_free` is unsigned, so emptiness is exactly `== 0`; the
        // original `<= 0` comparison was an unsigned-comparison lint.)
        let mut current_order = order;
        while current_order < BUDDY_MAX_ORDER
            && pool.free_lists[current_order as usize].nr_free == 0
        {
            current_order += 1;
        }

        if current_order >= BUDDY_MAX_ORDER {
            kwarn!("Try to allocate a buddy chunk greater than BUDDY_MAX_ORDER");
            return None;
        }

        // TODO: the macro may need adjusting!
        // Recover the `Page` embedded in the first node on the free list.
        let page = unsafe {
            list_entry!(
                pool.free_lists[current_order as usize].free_list.next,
                Page,
                node
            )
            .as_mut()
        };

        if let Some(page) = page {
            // Split the (possibly larger) chunk down to the requested
            // order; `split_page` also unlinks it from its free list.
            split_page(pool, order, page);
            page.allocated = true;
            Some(page)
        } else {
            kdebug!("buddy get a NULL page");
            None
        }
    })
}

/// Coalesce the free chunk `page` with its equally-sized free buddy,
/// repeatedly, then put the result on the free list of its final order.
///
/// NOTE(review): the merge keeps the lower-addressed payload as chunk head
/// by `mem::swap`ing the two `Page` values — but that also swaps the `node`
/// back-pointers, so afterwards `p.node` refers to the *other* page's list
/// node while `p` itself still lives in the higher page's metadata slot.
/// Subsequent `get_buddy_chunk(pool, p)` calls derive the chunk address
/// from `p`'s slot address, and the final `page_append` reads the order
/// stored at `p.node`'s payload, which may be stale after the swap.
/// Verify the merge path against `page_to_virt` for orders > 0.
fn merge_page(pool: &mut PhysMemPoolInner, page: &mut ListHead<Page>) {
    let p = &mut page.data;

    if p.allocated {
        // println!("Try to merge an allocated page");
        qemu_println!("Try to merge an allocated page");
    }

    // Take the chunk off its free list while we try to grow it.
    page_del(pool, unsafe { p.node.as_mut().unwrap() });

    while p.order < BUDDY_MAX_ORDER - 1 {
        if let Some(buddy_page) = get_buddy_chunk(pool, p) {
            let buddy = unsafe { &mut buddy_page.as_mut().unwrap().data };
            // No merge possible: buddy is in use, or only partially free
            // (its order differs from ours).
            if buddy.allocated || buddy.order != p.order {
                break;
            }
            // Keep the lower-addressed payload as head of the merged chunk.
            if p as *mut Page as u64 > buddy as *mut Page as u64 {
                mem::swap(p, buddy);
            }

            // Absorb the buddy: mark it allocated so it can no longer be
            // handed out on its own, unlink the node `p` refers to, and
            // grow the chunk by one order.
            buddy.allocated = true;
            page_del(pool, unsafe { p.node.as_mut().unwrap() });
            p.order += 1;
        } else {
            // Buddy address lies outside the pool: cannot merge further.
            break;
        }
    }

    page_append(pool, unsafe { p.node.as_mut().unwrap() });
}

/// Return an allocated chunk to the pool and coalesce it with any free
/// buddies. Freeing an already-free page is logged and ignored.
pub fn buddy_free_pages(pool: &mut PhysMemPoolInner, page: &mut ListHead<Page>) {
    if page.data.allocated {
        page.data.allocated = false;
        // Put the chunk on its free list first; merge_page takes it back
        // off while it tries to grow the chunk, then re-inserts it.
        page_append(pool, page);
        merge_page(pool, page);
    } else {
        uart_println!("Try to free a free page");
        qemu_println!("Try to free a free page");
    }
}

pub fn page_to_virt(pool: &PhysMemPoolInner, page: &Page) -> *mut u8 {
    let addr = (page as *const Page as u64 - pool.page_metadata as u64) * BUDDY_PAGE_SIZE
        + pool.pool_start_addr;
    addr as *mut u8
}

/// Map a virtual address inside the pool to the metadata entry
/// (`ListHead<Page>`) of the page containing it.
pub fn virt_to_page(pool: &PhysMemPoolInner, addr: u64) -> *mut ListHead<Page> {
    let page_idx = (addr - pool.pool_start_addr) as usize / BUDDY_PAGE_SIZE as usize;
    pool.page_metadata.wrapping_add(page_idx)
}

/// Sum the bytes currently sitting on all buddy free lists, logging a
/// per-order breakdown along the way, and return the total.
pub fn get_free_mem_size_from_buddy(pooloutside: &PhysMemPool) -> u64 {
    pooloutside.inner.lock(|pool| {
        let mut total_size: u64 = 0;

        for (order, list) in pool.free_lists.iter().enumerate() {
            // A chunk of this order spans 2^order pages.
            let current_order_size = BUDDY_PAGE_SIZE << order;

            uart_println!(
                "buddy memory chunk order: {}, size: 0x{:x}, num: {}",
                order,
                current_order_size,
                list.nr_free
            );

            total_size += list.nr_free * current_order_size;
        }

        total_size
    })
}
