use std::{ptr::addr_of, sync::atomic::Ordering};

use crate::mimalloc::_mi_segment_page_start;

use super::*;

/* -----------------------------------------------------------
  Page helpers
----------------------------------------------------------- */

/// Return the address of the `i`-th block inside `page_area`.
/// Blocks are laid out contiguously, each `block_size` bytes apart.
pub fn mi_page_block_at(page: *mut MiPage, page_area: *mut u8, i: usize) -> *mut MiBlock {
    unsafe { page_area.add(i * (*page).block_size) as *mut MiBlock }
}


/// Enable or disable "delayed free" mode for a page by updating the delayed
/// bits packed into the low bits of the atomic `thread_free` word, using a
/// CAS retry loop so concurrent frees are not lost.
pub fn _mi_page_use_delayed_free(page: *mut MiPage, enable: bool) {
    unsafe {
        let mut tfree = MiThreadFree{value: (*page).thread_free.load(Ordering::Acquire)};
        let mut tfreex;
        loop {
            // build the desired new value from the current snapshot
            tfreex = tfree;
            if enable {
                // NOTE(review): `UesDelayedFree` looks like a typo of
                // `UseDelayedFree` in the enum declared elsewhere — confirm.
                tfreex.set_delayed(MiDelayed::UesDelayedFree as usize);
            } else {
                tfreex.set_delayed(MiDelayed::NoDelayedFree as usize);
            }
            let old_value = tfree.value;
            let new_value = tfreex.value;
            // Done when the delayed bits already hold the desired value, or
            // when the CAS installs the new word successfully.
            if tfreex.get_delayed() == tfree.get_delayed() || (*page).thread_free.compare_exchange(old_value, new_value, Ordering::AcqRel, Ordering::Acquire).is_ok() {
                break;
            }
            // CAS lost a race with a concurrent free: reload and retry.
            tfree = MiThreadFree{value: (*page).thread_free.load(Ordering::Acquire)};
        }
    }
}


/* -----------------------------------------------------------
  Page collect the `local_free` and `thread_free` lists
----------------------------------------------------------- */

/// Collect the local 'thread_free' list using an atomic exchange.
/// Note: The exchange must be done atomically as this is used right after
/// moving to the full list in 'mi_page_collect_ex' and we need to
/// ensure that there was no race where the page became unfull just before the move.
pub fn mi_page_thread_free_collect(page: *mut MiPage) {
    unsafe {
        let mut tfree;
        let mut head: *mut MiBlock;

        // Atomically detach the thread-free list, keeping only the low two
        // "delayed" flag bits in place. Retry until the CAS succeeds so that
        // `head` is exactly the list we detached.
        // BUG FIX: the original ran the list processing below only when the
        // CAS *failed* (against a stale `head`) and skipped it entirely on
        // success; it must run exactly once, after a successful CAS.
        loop {
            tfree = (*page).thread_free.load(Ordering::Acquire);
            head = (tfree & !0b11) as *mut MiBlock;
            // clear the thread free list, preserving the delayed-free flag bits
            let tfreex = tfree & 0b11;
            if (*page).thread_free.compare_exchange(tfree, tfreex, Ordering::AcqRel, Ordering::Acquire).is_ok() {
                break;
            }
        }

        // return if the detached list is empty
        if head.is_null() {return;}

        // find the tail of the detached list and count its blocks
        let mut count = 1;
        let mut tail = head;
        let mut next = mi_block_next(page, tail);
        while !next.is_null() {
            tail = next;
            next = mi_block_next(page, tail);
            count += 1;
        }

        // and prepend the whole list to the free list
        mi_block_set_next(page, tail, (*page).free);
        (*page).free = head;

        // update counts now
        (*page).thread_freed.fetch_sub(count, Ordering::Relaxed);
        (*page).used -= count;
    }
}



/// Collect all freed blocks for a page: merge the thread-local `local_free`
/// list into `free`, then drain the cross-thread `thread_free` list.
pub fn _mi_page_free_collect(page: *mut MiPage) {
    unsafe {
        // free the local free list
        if !(*page).local_free.is_null() {
            if (*page).free.is_null() {
                // usual case: the free list is empty, take over local_free wholesale
                (*page).free = (*page).local_free;
            } else {
                // otherwise append local_free behind the tail of the free list
                let mut tail = (*page).free;
                let mut next = mi_block_next(page, tail);
                while !next.is_null() {
                    tail = next;
                    next = mi_block_next(page, tail);
                }
                mi_block_set_next(page, tail, (*page).local_free);
            }
            // BUG FIX: local_free must be cleared in BOTH cases; previously it
            // was only cleared on the append path, leaving the usual case with
            // the same blocks reachable through both `free` and `local_free`.
            (*page).local_free = std::ptr::null_mut();
        }
        // and the thread free list
        // quick test to avoid an atomic operation
        if (*page).thread_free.load(Ordering::Relaxed) != 0 {
            mi_page_thread_free_collect(page);
        }
    }
}

/* -----------------------------------------------------------
  Users can register a deferred free function called
  when the `free` list is empty. Since the `local_free`
  is separate this is deterministically called after
  a certain number of allocations.
----------------------------------------------------------- */

// User-registered hook invoked from `_mi_deferred_free` on each heartbeat.
// NOTE(review): plain `static mut` — reads and writes are unsynchronized, so
// registration is presumably expected before any concurrent use; confirm.
static mut DEFERRED_FREE : Option<fn(bool, u64)> = None;

/// Tick the heap's heartbeat and invoke the registered deferred-free hook,
/// if one was installed via `mi_register_deferred_free`.
pub fn _mi_deferred_free(heap: *const MiHeap, force: bool) {
    unsafe {
        let tld = (*heap).tld;
        (*tld).heartbeat += 1;
        match DEFERRED_FREE {
            Some(hook) => hook(force, (*tld).heartbeat),
            None => {}
        }
    }
}


/// Install the global deferred-free callback (see `_mi_deferred_free`).
pub fn mi_register_deferred_free(f: fn(bool, u64)) {
    // SAFETY-NOTE(review): writes an unsynchronized `static mut`; callers are
    // presumably expected to register before concurrent allocation — confirm.
    unsafe { DEFERRED_FREE = Some(f); }
}


/* -----------------------------------------------------------
  Page fresh and retire
----------------------------------------------------------- */

/// called from segments when reclaiming abandoned pages
pub fn _mi_page_reclaim(heap: *const MiHeap, page: *mut MiPage) {
    // First gather any blocks freed in the meantime, then requeue the page
    // on this heap's queue for its block size.
    _mi_page_free_collect(page);
    unsafe {
        let queue = mi_page_queue(heap, (*page).block_size) as *mut MiPageQueue;
        mi_page_queue_push(heap as *mut MiHeap, queue, page);
    }
}



/// Allocate a fresh page from a segment
pub fn mi_page_fresh_alloc(heap: *const MiHeap, pq: *const MiPageQueue, block_size: usize) -> *mut MiPage {
    unsafe {
        let tld = (*heap).tld;
        let page = _mi_segment_page_alloc(block_size, &mut (*tld).segments, &mut (*tld).os);
        if page.is_null() {
            // the segment allocator is out of memory
            return std::ptr::null_mut();
        }
        // initialize and publish the page on the given queue
        mi_page_init(heap, page, block_size);
        mi_page_queue_push(heap as *mut MiHeap, pq as *mut MiPageQueue, page);
        page
    }
}

/// Get a fresh page to use
pub unsafe fn mi_page_fresh(heap: *const MiHeap, pq: *const MiPageQueue) -> *mut MiPage {
    // try to reclaim an abandoned page first
    let first_before = (*pq).first;
    let reclaimed = !(*heap).no_reclaim
        && _mi_segment_try_reclaim_abandoned(heap, false, &mut (*(*heap).tld).segments);
    if reclaimed && first_before != (*pq).first {
        // we reclaimed, and we got lucky with a reclaimed page in our queue
        let page = (*pq).first;
        if !page.is_null() {
            return page;
        }
    }
    // otherwise allocate the page
    mi_page_fresh_alloc(heap, pq, (*pq).block_size)
}

/* -----------------------------------------------------------
Do any delayed frees
(put there by other threads if they deallocated in a full page)
----------------------------------------------------------- */

/// Free one block taken off the thread-delayed list: resolve its owning
/// segment and page, then release it through the internal free path.
fn _mi_free_delayed_block(block: *mut MiBlock) {
    let ptr = block as *mut u8;
    let segment = _mi_ptr_segment(ptr);
    _mi_free_block(_mi_segment_page_of(segment, ptr), true, block);
}

/// Free every block on the heap's thread-delayed list.
pub fn _mi_heap_delayed_free(heap: *const MiHeap) {
    unsafe {
        // atomically take over the whole list
        let mut block = (*heap).thread_delayed_free.load(Ordering::Acquire);
        loop {
            if block.is_null() { return; }
            match (*heap).thread_delayed_free.compare_exchange(
                block,
                std::ptr::null_mut(),
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => break,
                // lost a race: retry with the head observed by the CAS
                Err(current) => block = current,
            }
        }

        // and free them all
        while !block.is_null() {
            let next = mi_block_nextx(block);
            // use internal free instead of regular one to keep stats etc correct
            _mi_free_delayed_block(block);
            block = next;
        }
    }
}


/* -----------------------------------------------------------
  Unfull, abandon, free and retire
----------------------------------------------------------- */

/// Move a page out of the heap's "full" queue back to the size-bin queue
/// matching its block size, disabling delayed free for it first.
pub fn _mi_page_unfull(page: *mut MiPage) {
    unsafe {
        _mi_page_use_delayed_free(page, false);
        // nothing to do when the page is not marked full
        if !(*page).flags.contains(MiPageFlags::FULL) {return;}
        
        let heap = (*page).heap;
        let pq_full = &mut (*heap).pages[MI_BIN_FULL];
        (*page).flags.remove(MiPageFlags::FULL); // to get the right queue
        let pq = mi_heap_page_queue_of(heap, page);
        // restore the flag so the move starts from a consistent "full" state;
        // the enqueue is expected to settle the flag for the destination queue
        (*page).flags.insert(MiPageFlags::FULL);
        mi_page_queue_enqueue_from(pq, pq_full, page);
    }
}

/// Move a page onto the heap's "full" queue, turning on delayed free so
/// cross-thread frees go through the heap's delayed list.
pub fn mi_page_to_full(page: *mut MiPage, pq: *const MiPageQueue) {
    unsafe {
        _mi_page_use_delayed_free(page, true);
        // already on the full queue? nothing to move
        if (*page).flags.contains(MiPageFlags::FULL) {
            return;
        }

        let full_queue = &mut (*(*page).heap).pages[MI_BIN_FULL];
        mi_page_queue_enqueue_from(full_queue, pq as *mut MiPageQueue, page);
        // try to collect right away in case another thread freed just before
        // MI_USE_DELAYED_FREE was set
        mi_page_thread_free_collect(page);
    }
}

/// Abandon a page with used blocks at the end of a thread.
/// Note: only call if it is ensured that no references exist from
/// the 'page->heap->thread_delayed_free' into this page.
/// Currently only called through 'mi_heap_collect_ex' which ensures this.
pub fn _mi_page_abandon(page: *mut MiPage, pq: *const MiPageQueue) {
    unsafe {
        // grab the segment tld first — mi_page_queue_remove clears (*page).heap
        let segment_tld = &mut (*(*(*page).heap).tld).segments;
        // remove from our page list, then hand the page to the segment layer
        mi_page_queue_remove(pq as *mut MiPageQueue, page);
        _mi_segment_page_abandon(page, segment_tld);
    }
}




/// Free a page: remove it from its queue and return it to the segment layer.
pub fn _mi_page_free(page: *mut MiPage, pq: *const MiPageQueue, force: bool) {
    unsafe {
        (*page).flags.remove(MiPageFlags::ALIGNED);

        // capture the segment tld before the queue removal clears (*page).heap
        // (no need to do _mi_heap_delayed_free first as all blocks are already free)
        let segments_tld = &mut (*(*(*page).heap).tld).segments;

        // remove from the page list
        mi_page_queue_remove(pq as *mut MiPageQueue, page);

        // and free it
        _mi_segment_page_free(page, force, segments_tld);
    }
}

/// Retire a page with no more used blocks
/// Important to not retire too quickly though as new
/// allocations might coming.
/// Note: called from 'mi_free' and benchmarks often
/// trigger this due to freeing everything and then
/// allocating again so careful when changing this.
pub fn _mi_page_retire(page: *mut MiPage) {
    unsafe {
        (*page).flags.remove(MiPageFlags::ALIGNED);

        // Heuristic: don't retire too often (or we end up retiring and
        // re-allocating most of the time). For now we just keep the page when
        // both neighbours are almost fully used; ideally we would also not
        // retire the last page with free blocks, but that is hard to check
        // efficiently.
        let small_enough = (*page).block_size <= MI_LARGE_SIZE_MAX;
        if small_enough
            && mi_page_mostly_used((*page).prev)
            && mi_page_mostly_used((*page).next)
        {
            // don't retire after all
            return;
        }

        _mi_page_free(page, mi_page_queue_of(page), false);
    }
}





/* -----------------------------------------------------------
  Initialize the initial free list in a page.
  In secure mode we initialize a randomized list by 
  alternating between slices.
----------------------------------------------------------- */


pub fn mi_page_free_list_extend(_: *const MiHeap, page: *mut MiPage, extend: usize) {
    unsafe {
        let mut page_size = 0;
        let page_area = _mi_page_start(_mi_page_segment(page), page, &mut page_size);
        let bsize = (*page).block_size;
        let start = mi_page_block_at(page, page_area, (*page).capacity as usize);
        // initialize a sequential free list
        let end = mi_page_block_at(page, page_area, (*page).capacity as usize + extend - 1);
        let mut block = start;
        for _ in 0..extend {
            let next = block.offset(bsize as isize);
            mi_block_set_next(page, block, next);
            block = next;
        }
        mi_block_set_next(page, end, std::ptr::null_mut());
        (*page).free = start;
        (*page).capacity += extend as u16;
    }
}

/* -----------------------------------------------------------
  Page initialize and extend the capacity
----------------------------------------------------------- */

/// heuristic, one OS page seems to work well
const MI_MAX_EXTEND_SIZE: usize = 4 * 1024;
/// always extend the free list by at least one block
const MI_MIN_EXTEND: usize = 1;



/// Extend the capacity (up to reserved) by initializing a free list
/// We do at most `MI_MAX_EXTEND` to avoid touching too much memory
/// Note: we also experimented with 'bump' allocation on the first
/// allocation but this did not speed up any benchmark (due to an
/// extra test in malloc? or cache effects?)
pub fn mi_page_extend_free(heap: *const MiHeap, page: *mut MiPage) {
    unsafe {
        // nothing to do while free blocks remain or the page is fully initialized
        if !(*page).free.is_null() || (*page).capacity >= (*page).reserved {
            return;
        }

        let mut page_size = 0;
        _mi_page_start(_mi_page_segment(page), page, &mut page_size);
        (*page).owner_flags.remove(MiPageOwnerFlags::RESET);

        // how many blocks are still uninitialized, capped so we touch at most
        // roughly one OS page per extension to reduce page commit
        // (the 'lean' benchmark tests this; going from 1 to 8 raises rss by 50%)
        let remaining = ((*page).reserved - (*page).capacity) as usize;
        let max_extend = std::cmp::max(MI_MAX_EXTEND_SIZE / (*page).block_size, MI_MIN_EXTEND);
        let extend = if remaining > max_extend {
            if max_extend == 0 { 1 } else { max_extend }
        } else {
            remaining
        };

        mi_page_free_list_extend(heap, page, extend);
    }
}


/// Initialize a fresh page
pub fn mi_page_init(heap: *const MiHeap, page: *mut MiPage, block_size: usize) {
    unsafe {
        assert!(!page.is_null());
        let segment = _mi_page_segment(page);
        let mut page_size = 0;
        _mi_segment_page_start(segment, page, &mut page_size);
        (*page).block_size = block_size;
        // how many blocks fit in the usable page area
        // NOTE(review): assumes page_size / block_size fits in u16 — confirm
        (*page).reserved = (page_size / block_size) as u16;
        (*page).cookie = 0x12345678;

        // initialize an initial free list
        mi_page_extend_free(heap, page);
    }
}




/* -----------------------------------------------------------
  Find pages with free blocks
-------------------------------------------------------------*/

/// Find a page with free blocks of 'page->block_size'
pub unsafe fn mi_page_queue_find_free_ex(heap: *const MiHeap, pq: *const MiPageQueue) -> *mut MiPage {
    // `rpage` remembers the most recently seen completely-free page; while
    // scanning, up to 8 such pages are released back to the segment.
    let mut rpage: *mut MiPage = std::ptr::null_mut();
    let mut page = (*pq).first;
    let mut page_free_count = 0;
    while !page.is_null() {
        // remember the next page (the current one may be freed or moved below)
        let next = (*page).next;
        // 0. collect free blocks by us and other threads
        _mi_page_free_collect(page);

        // 1. if the page contains free blocks, we are done
        if mi_page_immediate_available(page) {
            if page_free_count < 8 && mi_page_all_free(page) {
                // completely free page: keep it as the fallback candidate,
                // release the previously remembered one, and keep scanning
                page_free_count += 1;
                if !rpage.is_null() {
                    _mi_page_free(rpage, pq, false);
                }
                rpage = page;
                page = next;
                continue;
            } else {
                // page with free blocks (but not all free): use it
                break;
            }
        }

        // 2. Try to extend the page capacity
        if (*page).capacity < (*page).reserved {
            mi_page_extend_free(heap, page);
            break;
        }

        // 3. If the page is completely full, move it to the 'mi_page_full'
        //   queue so we don't visit long-lived pages too often.
        mi_page_to_full(page, pq);

        page = next;
    } // for each page

    // scan found nothing usable: fall back to the remembered free page
    if page.is_null() {
        page = rpage;
        rpage = std::ptr::null_mut();
    }
    // otherwise release the remembered page — the scan result wins
    if !rpage.is_null() {
        _mi_page_free(rpage, pq, false);
    }

    // still nothing: allocate a fresh page
    if page.is_null() {
        mi_page_fresh(heap, pq)
    } else {
        page
    }
}

/// Find a page with available blocks for `block_size`: try the queue's first
/// page directly before falling back to the full queue scan.
pub unsafe fn mi_find_free_page(heap: *const MiHeap, block_size: usize) -> *mut MiPage {
    let pq = mi_page_queue(heap, block_size);
    let page = (*pq).first;
    if !page.is_null() {
        _mi_page_free_collect(page);
        if mi_page_immediate_available(page) {
            return page; // fast path
        }
    }
    mi_page_queue_find_free_ex(heap, pq)
}



/* -----------------------------------------------------------
  General allocation
----------------------------------------------------------- */

/// A huge page is allocated directly without being in a queue.
pub fn mi_huge_page_alloc(heap: *const MiHeap, size: usize) -> *mut MiPage {
    // round the requested size up to whole machine words
    let block_size = _mi_wsize_from_size(size) * std::mem::size_of::<usize>();
    mi_page_fresh_alloc(heap, mi_page_queue(heap, block_size), block_size)
}




/* -----------------------------------------------------------
  Queue query
----------------------------------------------------------- */

/// Bit-scan-reverse: index of the highest set bit, defined as 0 for x == 0.
#[inline]
pub fn mi_bsr(x: usize) -> usize {
    if x == 0 { 0 } else { WORD_BITS - 1 - x.leading_zeros() as usize }
}

/// Return the bin for a given field size.
/// Returns MI_BIN_HUGE if the size is too large.
/// We use 'wsize' for the size in "machine words size",
/// i.e. byte size == 'wsize * sizeof(void*)'.
#[inline]
pub fn _mi_bin(block_size: usize) -> usize {
    let wsize = _mi_wsize_from_size(block_size);
    if wsize <= 1 {
        // the smallest bin holds one-word blocks
        1
    } else if wsize <= 8 {
        // sizes up to 8 words each get their own exact bin
        wsize
    } else if wsize > MI_LARGE_WSIZE_MAX {
        // too large for the sized bins
        MI_BIN_HUGE
    } else {
        // find the highest bit
        let b = mi_bsr(wsize - 1);
        // and use the top 3 bits to determine the bin 
        // (~16% worst internal fragmentation)
        // - adjust with 3 because we use do not round the first 8 sizes
        //   which each get an exact bin
        (b << 2) + ((wsize >> (b - 3)) & 0x3) - 3
    }
}

/// Remove `page` from queue `pq`: fix neighbour links, the queue's
/// first/last pointers, the heap's direct page map, and finally detach the
/// page from its heap entirely.
pub fn mi_page_queue_remove(pq: *mut MiPageQueue, page: *mut MiPage) {
    unsafe {
        // unlink from the doubly linked list
        if !(*page).prev.is_null() {
            (*(*page).prev).next = (*page).next;
        }
        if !(*page).next.is_null() {
            (*(*page).next).prev = (*page).prev;
        }
        if (*pq).last == page {
            (*pq).last = (*page).prev;
        }
        if (*pq).first == page {
            (*pq).first = (*page).next;
            // update first
            let heap = (*page).heap;
            mi_heap_queue_first_update(heap, pq);
        }
        // detach completely: the page no longer belongs to any heap or queue
        // (note: heap is read for page_count BEFORE being cleared below)
        (*(*page).heap).page_count -= 1;
        (*page).next = std::ptr::null_mut();
        (*page).prev = std::ptr::null_mut();
        (*page).heap = std::ptr::null_mut();
        (*page).flags.remove(MiPageFlags::FULL);
    }
}


/// Push `page` at the front of queue `pq` and attach it to `heap`, updating
/// the heap's direct page map and page count.
pub fn mi_page_queue_push(heap: *mut MiHeap, pq: *mut MiPageQueue, page: *mut MiPage) {
    unsafe {
        // Keep the FULL flag in sync with the destination queue. Previously
        // the flag was only ever inserted, so a page carrying a stale FULL
        // flag kept it when pushed onto a normal queue, making
        // mi_heap_page_queue_of/mi_page_queue_of resolve to the wrong queue.
        if mi_page_queue_is_full(pq) {
            (*page).flags.insert(MiPageFlags::FULL);
        } else {
            (*page).flags.remove(MiPageFlags::FULL);
        }
        (*page).heap = heap;
        // link at the front
        (*page).next = (*pq).first;
        (*page).prev = std::ptr::null_mut();
        if !(*pq).first.is_null() {
            (*(*pq).first).prev = page;
            (*pq).first = page;
        } else {
            (*pq).first = page;
            (*pq).last = page;
        }

        // update direct
        mi_heap_queue_first_update(heap, pq);
        (*heap).page_count += 1;
    }
}


/// Move `page` from queue `from` to the tail of queue `to`, keeping both
/// queues' first/last pointers, the heap's direct page map, and the page's
/// FULL flag consistent.
pub fn mi_page_queue_enqueue_from(to: *mut MiPageQueue, from: *mut MiPageQueue, page: *mut MiPage) {
    unsafe {
        // unlink from `from`
        if !(*page).prev.is_null() { (*(*page).prev).next = (*page).next; }
        if !(*page).next.is_null() { (*(*page).next).prev = (*page).prev; }
        if page == (*from).last { (*from).last = (*page).prev; }
        if page == (*from).first {
            (*from).first = (*page).next;
            // update first
            let heap = (*page).heap;
            mi_heap_queue_first_update(heap, from);
        }

        // append at the tail of `to`
        (*page).prev = (*to).last;
        (*page).next = std::ptr::null_mut();

        if !(*to).last.is_null() {
            (*(*to).last).next = page;
            (*to).last = page;
        } else {
            (*to).first = page;
            (*to).last = page;
            mi_heap_queue_first_update((*page).heap, to);
        }

        // BUG FIX: the FULL flag must track the destination queue in BOTH
        // directions. It was previously only inserted and never removed, so a
        // page moved out of the full queue (e.g. by `_mi_page_unfull`, which
        // re-inserts FULL before calling this and relies on the enqueue to
        // settle the flag) kept a stale FULL flag forever.
        if mi_page_queue_is_full(to) {
            (*page).flags.insert(MiPageFlags::FULL);
        } else {
            (*page).flags.remove(MiPageFlags::FULL);
        }
    }
}



/// The "full" queue is identified by a sentinel block size:
/// MI_LARGE_SIZE_MAX plus two machine words.
#[inline]
pub fn mi_page_queue_is_full(pq: *const MiPageQueue) -> bool {
    let sentinel = MI_LARGE_SIZE_MAX + 2 * std::mem::size_of::<usize>();
    unsafe { (*pq).block_size == sentinel }
}


// The current small page array is for efficiency and for each
// small size (up to 256) it points directly to the page for that
// size without having to compute the bin. This means when the
// current free page queue is updated for a small bin, we need to update a
// range of entries in `_mi_page_small_free`.
#[inline]
pub fn mi_heap_queue_first_update(heap: *const MiHeap, pq: *const MiPageQueue) {
    unsafe {
        let size = (*pq).block_size;
        if size > MI_SMALL_SIZE_MAX {return;}

        let page = (*pq).first;

        // index of this block size (in machine words) in the direct page array
        let idx = _mi_wsize_from_size(size);
        // BUG FIX: write through the heap's array itself. The old code did
        // `let mut page_free = (*heap).pages_free_direct;`, which copies the
        // array, so every update below was applied to the copy and discarded.
        let page_free = &mut (*(heap as *mut MiHeap)).pages_free_direct;

        if page_free[idx] == page {return;} // already set

        // find start slot
        let start = if idx <= 1 {
            0
        } else {
            // find previous size; due to minimal alignment upto 3 previous bins may need to be skipped
            let bin = _mi_bin(size);
            let mut prev = pq.offset(-1);
            while bin == _mi_bin((*prev).block_size) && prev > addr_of!((*heap).pages[0]) {
                prev = prev.offset(-1);
            }
            let s = 1 + _mi_wsize_from_size((*prev).block_size);
            if s > idx { idx } else { s }
        };
        // set the whole size range [start, idx] to the new first page.
        // BUG FIX: the range must INCLUDE `idx` itself (the early-out above
        // tests `page_free[idx]`), so iterate `start..=idx`, not `start..idx`.
        for i in start..=idx {
            page_free[i] = page;
        }
    }
}


/// Return (mutably) the queue in `heap` that `page` belongs to: the
/// dedicated MI_BIN_FULL queue for full pages, otherwise the size bin.
pub fn mi_heap_page_queue_of(heap: *mut MiHeap, page: *const MiPage) -> *mut MiPageQueue {
    unsafe {
        let bin = match (*page).flags.contains(MiPageFlags::FULL) {
            true => MI_BIN_FULL,
            false => _mi_bin((*page).block_size),
        };
        &mut (*heap).pages[bin]
    }
}

/// Return the queue of `page`'s own heap that the page belongs to: the
/// dedicated MI_BIN_FULL queue for full pages, otherwise the size bin.
pub fn mi_page_queue_of(page: *const MiPage) -> *const MiPageQueue {
    unsafe {
        let heap = (*page).heap;
        let bin = match (*page).flags.contains(MiPageFlags::FULL) {
            true => MI_BIN_FULL,
            false => _mi_bin((*page).block_size),
        };
        &(*heap).pages[bin]
    }
}

/// Splice all pages of `append` onto the tail of `pq`, re-homing every
/// appended page to `heap`.
pub fn _mi_page_queue_append(heap: *mut MiHeap, pq: *mut MiPageQueue, append: *mut MiPageQueue) {
    unsafe {
        // nothing to append?
        if (*append).first.is_null() { return; }

        // re-home every appended page to the new heap
        let mut cursor = (*append).first;
        while !cursor.is_null() {
            (*cursor).heap = heap;
            cursor = (*cursor).next;
        }

        if (*pq).last.is_null() {
            // destination queue is empty: take over the appended list afresh
            (*pq).first = (*append).first;
            (*pq).last = (*append).last;
            mi_heap_queue_first_update(heap, pq);
        } else {
            // otherwise link the appended list behind the current tail
            (*(*pq).last).next = (*append).first;
            (*(*append).first).prev = (*pq).last;
            (*pq).last = (*append).last;
        }
    }
}