use ptr::addr_of;

use super::*;
use std::ptr::addr_of_mut;
/* -----------------------------------------------------------
  Helpers
----------------------------------------------------------- */

/// Visit every page in every bin of `heap`, calling `visitor` on each.
/// Returns `false` as soon as a visitor returns `false` (early break),
/// and `false` for a null or empty heap; `true` otherwise.
pub fn mi_heap_visit_pages(heap: *mut MiHeap, visitor: fn(*mut MiHeap, *mut MiPageQueue, *mut MiPage, MiCollect) -> bool, collect: MiCollect) -> bool {
    unsafe {
        if heap.is_null() || (*heap).page_count == 0 {
            return false;
        }
        let expected = (*heap).page_count;
        let mut visited = 0;
        for bin in 0..=MI_BIN_FULL {
            let queue = &mut (*heap).pages[bin];
            let mut cur = queue.first;
            while !cur.is_null() {
                assert!((*cur).heap == heap);
                // Capture the link first: the visitor may free the page.
                let next = (*cur).next;
                visited += 1;
                if !visitor(heap, queue as *mut MiPageQueue, cur, collect) {
                    return false;
                }
                cur = next;
            }
        }
        // Sanity check: we must have seen exactly `page_count` pages.
        assert!(visited == expected);
        true
    }
}

/* -----------------------------------------------------------
  Heap new
----------------------------------------------------------- */

/// Return the calling thread's default heap, making sure the
/// thread-local state has been initialized first.
pub fn mi_heap_get_default() -> *const MiHeap {
    mi_thread_init();
    get_heap_default()
}

/// Return the backing heap behind the current thread's default heap.
pub fn mi_heap_get_backing() -> *const MiHeap {
    unsafe { (*(*mi_heap_get_default()).tld).heap_backing }
}

/// Create a new heap, allocated from the thread's backing heap.
/// Returns a null pointer when the underlying allocation fails.
pub fn mi_heap_new() -> *mut MiHeap {
    let bheap = mi_heap_get_backing();
    let heap = mi_heap_malloc(bheap, core::mem::size_of::<MiHeap>()) as *mut MiHeap;
    // BUG FIX: guard against allocation failure before writing through
    // the pointer; previously a failed allocation was dereferenced.
    if heap.is_null() {
        return heap;
    }
    unsafe {
        // Start from the canonical empty-heap template.
        std::ptr::copy_nonoverlapping(&_MI_HEAP_EMPTY, heap, 1);
        (*heap).tld = (*bheap).tld;
        (*heap).thread_id = mi_thread_id();
        (*heap).cookie = 0x123456789;
        // Never reclaim abandoned pages into this heap: otherwise
        // destroying it would be unsafe.
        (*heap).no_reclaim = true;
        heap
    }
}

pub fn mi_heap_reset_pages(heap: *mut MiHeap) {
    // TODO: copy full empty heap instead?
    unsafe {
        std::ptr::write_bytes(&mut (*heap).pages_free_direct, 0, 1);
        std::ptr::copy_nonoverlapping(&mut (*heap).pages, addr_of!((_MI_HEAP_EMPTY).pages) as *mut [MiPageQueue; MI_BIN_FULL + 1], 1);
        (*heap).page_count = 0;
        (*heap).thread_delayed_free = std::sync::atomic::AtomicPtr::new(std::ptr::null_mut());
    }
}

pub fn mi_heap_free(heap: *mut MiHeap) {
    // don't free the backing heap
    if mi_heap_is_backing(heap) {return;}

    // reset default
    if mi_heap_is_backing(heap) {
        unsafe {
            set_heap_default((*(*heap).tld).heap_backing);
        }
    }
    // and free the used memory
    _mi_free(heap as *mut u8);
}

/* -----------------------------------------------------------
  Heap destroy
----------------------------------------------------------- */


/// Page visitor for heap destruction: pretends all blocks are free and
/// releases the page back to the segment layer. Always returns `true`
/// so the page visit keeps going.
pub fn _mi_heap_page_destroy(heap: *mut MiHeap, _: *mut MiPageQueue, page: *mut MiPage, _: MiCollect) -> bool {
    // ensure no more thread_delayed_free will be added
    _mi_page_use_delayed_free(page, false);
    unsafe {
        // Count every thread-freed block as used-up so the page is "empty".
        (*page).used = (*page).thread_freed.load(Ordering::Relaxed);
        // Hand the page back to its segment.
        _mi_segment_page_free(page, false, &mut (*(*heap).tld).segments);
    }
    // never break out of the visit
    true
}

/// Destroy every page of `heap` (dropping any still-allocated blocks),
/// then reset its queues to the empty state.
pub fn _mi_heap_destroy_pages(heap: *mut MiHeap) {
    mi_heap_visit_pages(heap, _mi_heap_page_destroy, MiCollect::FORCE);
    mi_heap_reset_pages(heap);
}

/// Destroy `heap`, freeing all its pages even if blocks are still
/// allocated in them. A heap that may contain reclaimed pages is only
/// deleted safely instead.
pub fn mi_heap_destroy(heap: *mut MiHeap) {
    assert!(mi_heap_is_initialized(heap));
    if !mi_heap_is_initialized(heap) {
        return;
    }
    unsafe {
        assert!((*heap).no_reclaim);
        if (*heap).no_reclaim {
            // destroy all pages, then release the heap structure itself
            _mi_heap_destroy_pages(heap);
            mi_heap_free(heap);
        } else {
            // may contain reclaimed pages: destroying is unsafe, delete instead
            mi_heap_delete(heap);
        }
    }
}


/* -----------------------------------------------------------
  Safe Heap delete
----------------------------------------------------------- */


/// Transfer all pages from the `from` heap into `heap`, leaving `from`
/// with empty page queues.
pub fn mi_heap_absorb(heap: *mut MiHeap, from: *mut MiHeap) {
    unsafe {
        if from.is_null() || (*from).page_count == 0 {
            return;
        }

        // Move every full page back onto its regular queue first.
        let mut cur = (*from).pages[MI_BIN_FULL].first;
        while !cur.is_null() {
            let next = (*cur).next;
            _mi_page_unfull(cur);
            cur = next;
        }

        // Drain blocks other threads handed back via the delayed-free list.
        _mi_heap_delayed_free(from);

        // Append each of `from`'s queues onto the matching queue in `heap`.
        // This rewrites the pages' heap field, which is fine because all
        // pages were unfull'd above and other threads no longer access
        // that field (see `mi_free_block_mt`).
        for bin in 0..MI_BIN_FULL {
            _mi_page_queue_append(heap, &mut (*heap).pages[bin], &mut (*from).pages[bin]);
        }

        // Leave the `from` heap in a pristine empty state.
        mi_heap_reset_pages(from);
    }
}

/// Safely delete a heap without freeing any still-allocated blocks in it:
/// live pages are either absorbed by the backing heap or abandoned.
pub fn mi_heap_delete(heap: *mut MiHeap) {
    if !mi_heap_is_initialized(heap) {
        return;
    }
    unsafe {
        if mi_heap_is_backing(heap) {
            // the backing heap abandons its pages
            _mi_heap_collect_abandon(heap as *const MiHeap);
        } else {
            // transfer still-used pages to the backing heap
            mi_heap_absorb((*(*heap).tld).heap_backing, heap);
        }

        mi_heap_free(heap);
    }
}


/* -----------------------------------------------------------
  "Collect" pages by migrating `local_free` and `thread_free`
  lists and freeing empty pages. This is done when a thread
  stops (and in that case abandons pages if there are still
  blocks alive)
----------------------------------------------------------- */
/// How aggressively a heap collection should behave. The variant order
/// matters: comparisons like `collect > NORMAL` and `collect >= FORCE`
/// in `mi_heap_collect_ex` rely on NORMAL < FORCE < ABANDON.
#[derive(PartialEq, Clone, Copy, Eq, PartialOrd, Ord)]
pub enum MiCollect {
    /// Regular, opportunistic collection.
    NORMAL,
    /// Forced collection: also frees pages that are not fully free
    /// and collects segment caches.
    FORCE,
    /// Owning thread is terminating: pages that still contain live
    /// blocks are abandoned rather than freed.
    ABANDON,
}

/// Page visitor for heap collection: frees a page once it has become
/// fully free, or abandons it when the owning thread is shutting down.
/// Always returns `true` to continue the visit.
pub fn mi_heap_page_collect(_: *mut MiHeap, pq: *mut MiPageQueue, page: *mut MiPage, collect: MiCollect) -> bool {
    // Pull pending local/thread frees back into the free list first.
    _mi_page_free_collect(page);
    if mi_page_all_free(page) {
        // No live blocks remain: free the page.
        // TODO: should we retire here and be less aggressive?
        let force = collect != MiCollect::NORMAL;
        _mi_page_free(page, pq, force);
    } else if collect == MiCollect::ABANDON {
        // Blocks are still in use but the thread is done: abandon the page.
        _mi_page_abandon(page, pq);
    }
    // don't break
    true
}


/// Core collection routine: migrates free lists, frees empty pages and,
/// depending on `collect`, reclaims abandoned pages, abandons live pages,
/// or collects segment caches.
pub fn mi_heap_collect_ex(heap: *const MiHeap, collect: MiCollect) {
    _mi_deferred_free(heap, collect > MiCollect::NORMAL);
    if !mi_heap_is_initialized(heap) {
        return;
    }

    unsafe {
        // On a normal collect, try to reclaim (some) abandoned pages —
        // unless this heap is marked as never reclaiming.
        if collect == MiCollect::NORMAL && !(*heap).no_reclaim {
            _mi_segment_try_reclaim_abandoned(heap, false, &mut (*(*heap).tld).segments);
        }

        // If abandoning, mark all full pages so they stop adding to
        // the thread-delayed free list.
        if collect == MiCollect::ABANDON {
            let mut p = (*heap).pages[MI_BIN_FULL].first;
            while !p.is_null() {
                _mi_page_use_delayed_free(p, false);
                p = (*p).next;
            }
        }

        // Free thread-delayed blocks. (If abandoning, after this there
        // are no more local references to the pages.)
        _mi_heap_delayed_free(heap);

        // Collect all pages owned by this thread.
        mi_heap_visit_pages(heap as *mut MiHeap, mi_heap_page_collect, collect);

        // Collect segment caches on a forced (or stronger) collect.
        if collect >= MiCollect::FORCE {
            _mi_segment_thread_collect(&mut (*(*heap).tld).segments);
        }
    }
}


/// Abandon all pages of `heap`; used when the owning thread stops.
pub fn _mi_heap_collect_abandon(heap: *const MiHeap) {
    mi_heap_collect_ex(heap, MiCollect::ABANDON);
}

/// Collect `heap`; `force` selects a forced rather than normal collection.
pub fn mi_heap_collect(heap: *const MiHeap, force: bool) {
    let mode = if force { MiCollect::FORCE } else { MiCollect::NORMAL };
    mi_heap_collect_ex(heap, mode);
}

// pub fn mi_collect(force: bool) {
//     let heap = get_heap_default();
//     mi_heap_collect(heap, force);
// }