use crate::block::Block;
use crate::page::Page;
use crate::page_queue::PageQueue;
use crate::segment::Segment;
use crate::segment_queue::SegmentQueue;
use crate::size_class::{PageKind, SizeClass};
use std::alloc::{GlobalAlloc, Layout};
use std::cell::UnsafeCell;
use std::sync::Once;

/// Number of entries in `Heap::pages_free_direct`, the size-indexed fast-path
/// lookup table (indexed by `(size - 1) >> 3` for sizes <= MI_SMALL_SIZE_MAX).
pub const MI_PAGES_DIRECT: usize = 128;
/// Index of the last size-class bin; `Heap::pages` holds `MI_BIN_FULL + 1` queues.
pub const MI_BIN_FULL: usize = 73;
/// Largest block size (bytes) served by the small-allocation fast path.
pub const MI_SMALL_SIZE_MAX: usize = 256;

/// Global allocator facade over a lazily-initialized [`Heap`].
pub struct Allocator {
    // Heap storage; `None` until the first allocation triggers initialization.
    heap: UnsafeCell<Option<Heap>>,
    // Guards the one-time construction of the heap.
    init: Once,
}

impl Allocator {
    /// Creates an allocator whose heap is built lazily on first use.
    /// `const` so it can initialize a `static` / `#[global_allocator]` item.
    pub const fn new() -> Self {
        Self {
            heap: UnsafeCell::new(None),
            init: Once::new(),
        }
    }

    /// Returns the heap, constructing it exactly once on the first call.
    ///
    /// NOTE(review): this hands out `&mut Heap` from a shared `&self`; if two
    /// threads call it concurrently the mutable borrows alias the same heap.
    /// Confirm single-threaded use is intended, or move to per-thread heaps.
    #[inline(always)]
    fn get_heap(&self) -> &mut Heap {
        // First caller wins; thread id 1 is hard-coded for the initial heap.
        self.init.call_once(|| unsafe {
            let _ = (*self.heap.get()).replace(Heap::new(1));
        });
        // SAFETY: `call_once` above guarantees the Option is `Some` by now.
        unsafe { (*self.heap.get()).as_mut().unwrap() }
    }
}

unsafe impl GlobalAlloc for Allocator {
    /// Allocates `layout.size()` bytes; returns null on failure.
    ///
    /// NOTE(review): `layout.align()` is ignored. Blocks are sized in 8-byte
    /// steps, but nothing visible here guarantees the pointer returned by the
    /// heap satisfies alignments larger than the heap's natural one — confirm,
    /// or reject over-aligned layouts explicitly.
    #[inline(always)]
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        self.get_heap()
            .allocate(layout.size())
            .unwrap_or(std::ptr::null_mut())
    }

    /// Returns `ptr` to the heap; `Heap::deallocate` ignores null pointers.
    #[inline(always)]
    unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
        self.get_heap().deallocate(ptr);
    }
}

// NOTE(review): these impls let the allocator be installed as a process-wide
// `#[global_allocator]`, but `get_heap` hands out `&mut Heap` from `&self`
// with no synchronization beyond one-time init — concurrent use from multiple
// threads would alias mutable heap state. Verify the intended threading model.
unsafe impl Sync for Allocator {}
unsafe impl Send for Allocator {}

/// Per-thread heap: size-binned page queues plus segment free lists.
pub struct Heap {
    // Fast-path table: entry `(size - 1) >> 3` points at a page whose free
    // list can serve `size`-byte requests; null means "take the slow path".
    pages_free_direct: [*mut Page; MI_PAGES_DIRECT],
    // One queue of pages per size-class bin (see `Heap::new` for the bins).
    pub(crate) pages: Vec<PageQueue>,
    // Segments with free small pages.
    small_free: SegmentQueue,
    // Segments with free medium pages.
    medium_free: SegmentQueue,
    // Segments with free large pages.
    large_free: SegmentQueue,
    // Owner thread id, recorded into each segment this heap creates.
    thread_id: usize,
}

impl Heap {
    /// Creates a heap owning one `PageQueue` per size-class bin.
    ///
    /// Bins 0..32 cover block sizes 8..=256 in 8-byte steps; bins 32..=73
    /// cover sizes `256 << (bin - 32)`. Bin 31 and bin 32 therefore both use
    /// block size 256 — `get_bin` only yields 32 for sizes above 256.
    pub fn new(thread_id: usize) -> Self {
        let mut pages = Vec::with_capacity(MI_BIN_FULL + 1);
        for i in 0..=MI_BIN_FULL {
            let block_size = if i < 32 { (i + 1) * 8 } else { 256 << (i - 32) };
            pages.push(PageQueue::new(block_size));
        }

        Heap {
            pages_free_direct: [std::ptr::null_mut(); MI_PAGES_DIRECT],
            pages,
            small_free: SegmentQueue::new(PageKind::Small),
            medium_free: SegmentQueue::new(PageKind::Medium),
            large_free: SegmentQueue::new(PageKind::Large),
            thread_id,
        }
    }

    /// Maps a block size (>= 1; 0 would underflow) to its bin index.
    ///
    /// Sizes <= 256 map linearly at 8-byte granularity; larger sizes map by
    /// log2. NOTE(review): for sizes > 256 the log2 mapping is exact only
    /// when `size` is a power of two — presumably `SizeClass` rounds
    /// `block_size` up before this is called; confirm.
    #[inline(always)]
    fn get_bin(&self, size: usize) -> usize {
        if size <= 256 {
            (size - 1) >> 3
        } else {
            32 + (size.ilog2() as usize) - 8
        }
    }

    /// Allocates `size` bytes, trying the direct-page fast path first.
    ///
    /// `size` must be >= 1 (the `GlobalAlloc` contract already forbids
    /// zero-sized layouts; 0 would underflow the table index). Returns
    /// `None` when no page can be obtained.
    #[inline(always)]
    pub fn allocate(&mut self, size: usize) -> Option<*mut u8> {
        if size <= MI_SMALL_SIZE_MAX {
            // Fast path: pop the head of the cached page's free list.
            let idx = (size - 1) >> 3;
            let page_ptr = self.pages_free_direct[idx];
            if !page_ptr.is_null() {
                unsafe {
                    let page = &mut *page_ptr;
                    if !page.free.is_null() {
                        let block = page.free;
                        page.free = (*block).next;
                        page.used += 1;
                        return Some(Block::get_data_ptr(block));
                    }
                }
            }
        }
        self.allocate_slow(size)
    }

    /// Slow path: allocate from the bin's page queue, creating a page
    /// (and, if needed, a segment) when no existing page can serve `size`.
    #[inline(never)]
    fn allocate_slow(&mut self, size: usize) -> Option<*mut u8> {
        let size_class = SizeClass::new(size);
        let bin = self.get_bin(size_class.block_size);

        if bin >= self.pages.len() {
            // Larger than the biggest bin this heap manages.
            return None;
        }

        // Try to allocate from an existing page at the head of the queue.
        if !self.pages[bin].first.is_null() {
            unsafe {
                let page = &mut *self.pages[bin].first;
                if let Some(ptr) = page.malloc() {
                    if size <= MI_SMALL_SIZE_MAX {
                        // Remember this page so the fast path can serve the
                        // next request of the same size directly.
                        let idx = (size - 1) >> 3;
                        self.pages_free_direct[idx] = self.pages[bin].first;
                    }
                    return Some(ptr);
                }
            }
        }

        // Head page exhausted (or queue empty): provision a new page.
        if self.alloc_new_page(bin, size_class) {
            unsafe {
                let page = &mut *self.pages[bin].first;
                let ptr = page.malloc();
                if ptr.is_some() && size <= MI_SMALL_SIZE_MAX {
                    let idx = (size - 1) >> 3;
                    self.pages_free_direct[idx] = self.pages[bin].first;
                }
                return ptr;
            }
        }

        None
    }

    /// Returns the segment free list matching `page_kind`.
    #[inline(always)]
    fn get_segment_queue(&mut self, page_kind: PageKind) -> &mut SegmentQueue {
        match page_kind {
            PageKind::Small => &mut self.small_free,
            PageKind::Medium => &mut self.medium_free,
            PageKind::Large => &mut self.large_free,
        }
    }

    /// Initializes a fresh page for `bin`, pushing it onto the bin's queue
    /// and (for small sizes) into the fast-path table. Reuses a segment with
    /// free capacity when one exists, otherwise allocates a new segment.
    /// Returns `true` on success.
    #[inline(never)]
    fn alloc_new_page(&mut self, bin: usize, size_class: SizeClass) -> bool {
        let thread_id = self.thread_id;
        let segment_queue = self.get_segment_queue(size_class.page_kind);

        let segment = if let Some(segment) = segment_queue.find_free_segment() {
            segment as *mut Segment
        } else {
            // No segment with room: create one and track it in the queue.
            // NOTE(review): segments are leaked intentionally (never freed
            // here) — Box::into_raw transfers ownership to the queue.
            let segment = Box::new(Segment::new(thread_id, size_class.page_kind));
            let segment_ptr = Box::into_raw(segment);
            segment_queue.push_front(segment_ptr);
            segment_ptr
        };

        unsafe {
            if let Some(page_idx) = (*segment).find_free() {
                if !(*segment).init_page(page_idx, size_class) {
                    return false;
                }

                if let Some(page) = (*segment).get_page(page_idx) {
                    let page_ptr = page as *mut Page;
                    self.pages[bin].push_front(page_ptr);

                    if size_class.block_size <= MI_SMALL_SIZE_MAX {
                        let idx = (size_class.block_size - 1) >> 3;
                        self.pages_free_direct[idx] = page_ptr;
                    }

                    return true;
                }
            }
        }
        false
    }

    /// Frees `ptr`; returns `true` if a page accepted it, `false` for null
    /// or an unrecognized pointer.
    #[inline(always)]
    pub fn deallocate(&mut self, ptr: *mut u8) -> bool {
        if ptr.is_null() {
            return false;
        }

        unsafe {
            // Fast path: guess the block size from the first word behind the
            // data pointer. That word is user-owned at this point, so the
            // value is UNTRUSTED: use wrapping arithmetic and validate the
            // range before indexing. (The previous `a - b` form could
            // underflow — a panic in debug builds — and a wrapped/zero size
            // made `(block_size - 1) >> 3` index out of bounds.)
            let block = Block::from_data_ptr(ptr);
            let block_size = ((*block).next as usize).wrapping_sub(ptr as usize);
            if (1..=MI_SMALL_SIZE_MAX).contains(&block_size) {
                let idx = (block_size - 1) >> 3;
                let page_ptr = self.pages_free_direct[idx];
                if !page_ptr.is_null() {
                    let page = &mut *page_ptr;
                    // Only trust the guess if the page really owns `ptr`.
                    if page.contains_ptr(ptr) {
                        return page.deallocate(ptr);
                    }
                }
            }

            // Slow path: scan every page in every queue for the owner.
            for queue in &mut self.pages {
                let mut current = queue.first;
                while !current.is_null() {
                    let page = &mut *current;
                    if page.contains_ptr(ptr) {
                        return page.deallocate(ptr);
                    }
                    current = page.next;
                }
            }
        }
        false
    }
}
