use core::ptr::null_mut;

use crate::alloc_::_mi_free_delayed_block;
use crate::alloc_::_mi_page_malloc;
use crate::alloc_::mi_page_all_free;
use crate::atomic::mi_atomic_compare_exchange_ptr;
use crate::atomic::mi_atomic_subtract;
use crate::atomic::mi_atomic_yield;
use crate::heap::*;
use crate::init::*;
use crate::internal::*;
use crate::page_queue::*;
use crate::segment::_mi_segment_page_alloc;
use crate::segment::_mi_segment_page_free;
use crate::segment::_mi_segment_page_start;
use crate::segment::_mi_segment_try_reclaim_abandoned;
use crate::stats::mi_stat_counter_increase;
use crate::stats::mi_stat_decrease;
use crate::stats::mi_stat_increase;
use crate::types::*;

// Upper bound, in bytes, on how much free-list is carved out of a page in one
// call to `mi_page_extend_free` — bounds the work done on a single slow-path
// allocation.
pub const MI_MAX_EXTEND_SIZE: usize = 4 * 1024;
// Minimum number of blocks to extend the free list by per call.
pub const MI_MIN_EXTEND: usize = 1;

/// Return a pointer to the `i`-th block of a page, given the start of the
/// page's data area (`page_start`) and the page's `block_size`.
pub unsafe fn mi_page_block_at(
    page: *mut mi_page_t,
    page_start: *mut u8,
    i: usize,
) -> *mut mi_block_t {
    let offset = i * (*page).block_size;
    page_start.add(offset) as *mut mi_block_t
}

/// Generic (slow-path) allocation: ensure the heap is initialized, run any
/// registered deferred-free callback, find or allocate a page with a free
/// block of at least `size` bytes, and allocate from it.
/// Returns null when no page could be obtained.
pub unsafe fn _mi_malloc_generic(mut heap: *mut mi_heap_t, size: usize) -> *mut u8 {
    debug_assert!(!heap.is_null());

    // Lazily initialize the thread/heap on first use.
    if !mi_heap_is_initialized(heap) {
        mi_thread_init();
        heap = mi_get_default_heap();
    }
    debug_assert!(mi_heap_is_initialized(heap));

    // Give a registered deferred-free callback a chance to run.
    _mi_deferred_free(heap, false);

    // Huge requests bypass the size bins and get a dedicated page.
    let page = if size > MI_LARGE_SIZE_MAX as usize {
        mi_huge_page_alloc(heap, size)
    } else {
        mi_find_free_page(heap, size)
    };

    if page.is_null() {
        // Out of memory.
        return null_mut();
    }

    // TODO debug_assert
    _mi_page_malloc(heap, page, size)
}

// Process-wide deferred-free callback, registered via
// `mi_register_deferred_free` and invoked from `_mi_deferred_free` with the
// thread heartbeat. `None` until a callback is registered.
static mut deferred_free: mi_deferred_free_fun = None;

/// Bump the thread-local heartbeat counter and invoke the registered
/// deferred-free callback (if any) with the `force` flag and new heartbeat.
pub unsafe fn _mi_deferred_free(heap: *mut mi_heap_t, force: bool) {
    let tld = &mut *(*heap).tld;
    tld.heartbeat += 1;

    match deferred_free {
        Some(callback) => callback(force, tld.heartbeat),
        None => {}
    }
}

/// Register (or clear, by passing `None`) the process-wide deferred-free
/// callback invoked from `_mi_deferred_free` on every heap heartbeat.
#[allow(dead_code)]
pub unsafe fn mi_register_deferred_free(f: mi_deferred_free_fun) {
    deferred_free = f
}

/// Find a page in `heap`'s queue for `size` with a block immediately
/// available, falling back to the slower queue scan in
/// `mi_page_queue_find_free_ex`.
pub unsafe fn mi_find_free_page(heap: *mut mi_heap_t, size: usize) -> *mut mi_page_t {
    // First flush blocks other threads handed back to this heap.
    _mi_heap_delayed_free(heap);

    let pq = mi_page_queue(heap, size);
    let head = (&mut *pq).first;
    if head.is_null() {
        return mi_page_queue_find_free_ex(heap, pq);
    }

    // Collect freed blocks on the head page and use it when possible.
    _mi_page_free_collect(head);
    // TODO! secure (secure mode would collect a second time here)
    if mi_page_immediate_available(head) {
        head
    } else {
        mi_page_queue_find_free_ex(heap, pq)
    }
}

/// Free all blocks on this heap's `thread_delayed_free` list — blocks freed
/// by other threads and queued for the owning thread to release.
///
/// The list head is atomically swapped to null first, so other threads can
/// keep pushing new blocks while we walk the detached list.
pub unsafe fn _mi_heap_delayed_free(heap: *mut mi_heap_t) {
    let mut block;
    // Atomically detach the whole list: retry until either the list is
    // empty or the compare-exchange replacing the head with 0 succeeds.
    loop {
        block = (&mut *heap).thread_delayed_free;
        let ok = !block.is_null()
            && !mi_atomic_compare_exchange_ptr(
                &mut (&mut *heap).thread_delayed_free as *mut _ as *mut usize,
                0,
                block as usize,
            );
        if !ok {
            break;
        }
    }

    // Walk the detached list and free each block; next-pointers are decoded
    // with the heap cookie via `mi_block_nextx`.
    while !block.is_null() {
        let next = mi_block_nextx((&mut *heap).cookie, block);
        _mi_free_delayed_block(block);
        block = next;
    }
}

/// Allocate a fresh page dedicated to a single huge
/// (> `MI_LARGE_SIZE_MAX`) allocation of `size` bytes.
pub unsafe fn mi_huge_page_alloc(heap: *mut mi_heap_t, size: usize) -> *mut mi_page_t {
    // Round the request up to a whole number of machine words.
    let block_size = _mi_wsize_from_size(size) * size_of::<*const ()>();
    debug_assert_eq!(_mi_bin(block_size), MI_BIN_HUGE as u8);

    let pq = mi_page_queue(heap, block_size);
    debug_assert!(mi_page_queue_is_huge(pq));

    let page = mi_page_fresh_alloc(heap, pq, block_size);
    if page.is_null() {
        return page;
    }
    // TODO! debug_assert
    mi_heap_stat_increase!(heap, huge, block_size);
    page
}

/// Allocate a fresh page of `block_size` blocks from the segment allocator,
/// initialize it, and push it onto queue `pq`. Returns null on failure.
pub unsafe fn mi_page_fresh_alloc(
    heap: *mut mi_heap_t,
    pq: *mut mi_page_queue_t,
    block_size: usize,
) -> *mut mi_page_t {
    let tld = &mut *(*heap).tld;

    let page = _mi_segment_page_alloc(block_size, &mut tld.segments, &mut tld.os);
    if !page.is_null() {
        mi_page_init(heap, page, block_size, &mut tld.stats);
        mi_heap_stat_increase!(heap, pages, 1);
        mi_page_queue_push(heap, pq, page);
    }
    page
}

/// Initialize the bookkeeping fields of a freshly allocated page and extend
/// its free list with an initial batch of blocks.
pub unsafe fn mi_page_init(
    heap: *mut mi_heap_t,
    page: *mut mi_page_t,
    block_size: usize,
    stats: *mut mi_stats_t,
) {
    debug_assert!(!page.is_null());
    let segment = _mi_page_segment(page);
    debug_assert!(!segment.is_null());

    // Query the usable page size from the owning segment.
    let mut page_size = 0;
    _mi_segment_page_start(segment, page, Some(&mut page_size));

    let p = &mut *page;
    p.block_size = block_size;
    // Total number of blocks this page can ever hold.
    p.reserved = (page_size as usize / block_size) as u16;
    // Cookie for encoding free-list pointers; `| 1` forces it odd (non-zero).
    p.cookie = _mi_heap_random(heap) | 1;

    mi_page_extend_free(heap, page, stats);
}

/// Extend the page's free list with fresh blocks from its reserved area:
/// at most `MI_MAX_EXTEND_SIZE` bytes worth (but at least `MI_MIN_EXTEND`
/// blocks) per call, bounded by the remaining reserved capacity.
/// Does nothing while free blocks remain or the page is fully extended.
pub unsafe fn mi_page_extend_free(
    heap: *mut mi_heap_t,
    page: *mut mi_page_t,
    stats: *mut mi_stats_t,
) {
    let page_mut = &mut *page;
    // Nothing to do while the free list is non-empty.
    if !page_mut.free.is_null() {
        return;
    }
    // All reserved blocks have already been carved out.
    if page_mut.capacity >= page_mut.reserved {
        return;
    }

    let mut page_size = 0;
    _mi_page_start(_mi_page_segment(page), page, Some(&mut page_size));
    // Un-reset the page memory if it had been reset.
    if page_mut.is_reset() {
        page_mut.set_is_reset(false);
        mi_stat_decrease(&mut (&mut *stats).reset, page_size);
    }
    mi_stat_increase(&mut (&mut *stats).pages_extended, 1);

    // Extend by at most `max_extend` blocks at a time to bound the work done
    // on a single slow-path allocation. `.max(MI_MIN_EXTEND)` guarantees the
    // bound is at least 1, so the old `max_extend == 0` fallback was dead
    // code and `min` suffices.
    let remaining = page_mut.reserved as usize - page_mut.capacity as usize;
    let max_extend = (MI_MAX_EXTEND_SIZE / page_mut.block_size).max(MI_MIN_EXTEND);
    let extend = remaining.min(max_extend);

    mi_page_free_list_extend(heap, page, extend, stats);
}

/// Carve `extend` new blocks out of the page's unextended area (starting at
/// block index `capacity`), thread them into a singly-linked list, and
/// install it as the page's free list. The caller (`mi_page_extend_free`)
/// ensures `extend >= 1`, `free` is empty, and `capacity + extend` stays
/// within `reserved`.
pub unsafe fn mi_page_free_list_extend(
    _heap: *mut mi_heap_t,
    page: *mut mi_page_t,
    extend: usize,
    stats: *mut mi_stats_t,
) {
    let page_area = _mi_page_start(_mi_page_segment(page), page, None);
    let bsize = (&*page).block_size;
    // First new block, at index `capacity` ...
    let start = mi_page_block_at(page, page_area, (&mut *page).capacity as usize);
    // TODO option_secure
    // ... and the last new block, at index `capacity + extend - 1`.
    let end = mi_page_block_at(page, page_area, (&mut *page).capacity as usize + extend - 1);
    // Link each block to the next; the final iteration points `end` one past
    // the range, which is corrected to null right after the loop.
    let mut block = start;
    for _ in 0..extend {
        let next = (block as *mut u8).add(bsize) as *mut mi_block_t;
        mi_block_set_next(page, block, next);
        block = next;
    }
    mi_block_set_next(page, end, null_mut());
    (&mut *page).free = start;
    (&mut *page).capacity += extend as u16;
    mi_stat_increase(
        &mut (&mut *stats).committed,
        extend * (&mut *page).block_size,
    );
}

/// Called when a page has become completely free: free it back to the
/// segment, unless keeping it around is likely cheaper.
pub unsafe fn _mi_page_retire(page: *mut mi_page_t) {
    (&mut *page).flags.__bindgen_anon_1.has_aligned = false;

    // Heuristic: for non-huge pages, if BOTH queue neighbors are mostly
    // used, a fresh allocation in this size class is likely imminent, so
    // keep the page instead of freeing and re-allocating it.
    // NOTE(review): the previous condition negated the `next` test
    // (`!mi_page_mostly_used(next)`), inverting the heuristic; upstream
    // mimalloc requires both neighbors to be mostly used to retain the page.
    if (&mut *page).block_size <= MI_LARGE_SIZE_MAX as usize {
        if mi_page_mostly_used((&mut *page).prev) && mi_page_mostly_used((&mut *page).next) {
            return; // don't free after all
        }
    }
    _mi_page_free(page, mi_page_queue_of(page), false);
}

/// Remove `page` from queue `pq` and return it to the segment allocator.
pub unsafe fn _mi_page_free(page: *mut mi_page_t, pq: *mut mi_page_queue_t, force: bool) {
    let p = &mut *page;
    p.flags.__bindgen_anon_1.has_aligned = false;

    // Huge pages were counted in the `huge` stat at allocation; undo that.
    if p.block_size > MI_LARGE_SIZE_MAX as usize {
        mi_heap_stat_decrease!(p.heap, huge, p.block_size)
    }

    let segments_tld = &mut (&mut *(&mut *p.heap).tld).segments;
    mi_page_queue_remove(pq, page);
    _mi_segment_page_free(page, force, segments_tld);
}

/// Move a page out of the heap's "full" queue back into its regular
/// size-class queue, switching its frees back to the direct (non-delayed)
/// mode first.
pub unsafe fn _mi_page_unfull(page: *mut mi_page_t) {
    // Stop routing frees through the delayed-free path before moving queues.
    _mi_page_use_delayed_free(page, false);
    let page = &mut *page;
    if !page.flags.__bindgen_anon_1.in_full {
        return;
    }

    let heap = page.heap;
    let pq_full = &mut (&mut *heap).pages[MI_BIN_FULL as usize];

    // Temporarily clear `in_full` so `mi_heap_page_queue_of` resolves the
    // page's regular size-class queue rather than the full queue, then set
    // it back; the enqueue-from presumably clears it for real as part of
    // the transfer (NOTE(review): confirm against mi_page_queue_enqueue_from).
    page.flags.__bindgen_anon_1.in_full = false;
    let pq = mi_heap_page_queue_of(heap, page);
    page.flags.__bindgen_anon_1.in_full = true;
    mi_page_queue_enqueue_from(pq, pq_full, page);
}

/// Atomically switch the page's thread-free mode between
/// `MI_USE_DELAYED_FREE` (`enable == true`) and `MI_NO_DELAYED_FREE`.
///
/// Spins while another thread holds the transient `MI_DELAYED_FREEING`
/// state, and retries the compare-exchange until the mode change lands.
pub unsafe fn _mi_page_use_delayed_free(page: *mut mi_page_t, enable: bool) {
    let page = &mut *page;
    loop {
        // Snapshot the packed thread-free word; `tfreex` is the desired
        // new value with only the delayed-mode bits changed.
        let tfree = page.thread_free;
        let mut tfreex = page.thread_free;
        let delayed = if enable {
            mi_delayed_e_MI_USE_DELAYED_FREE
        } else {
            mi_delayed_e_MI_NO_DELAYED_FREE
        };
        tfreex.__bindgen_anon_1.set_delayed(delayed);

        // Another thread is mid delayed-free; yield and re-read.
        if tfree.__bindgen_anon_1.delayed() == mi_delayed_e_MI_DELAYED_FREEING {
            mi_atomic_yield();
            continue;
        }

        // `ok` here means "retry": the mode actually differs AND the CAS
        // (new = tfreex, expected = tfree) lost to a concurrent update.
        let ok = tfreex.__bindgen_anon_1.delayed() != tfree.__bindgen_anon_1.delayed()
            && !mi_atomic_compare_exchange_ptr(
                (&mut page.thread_free) as *mut _ as *mut usize,
                tfreex.value as _,
                tfree.value as _,
            );
        if !ok {
            break;
        }
    }
}

/// Collect freed blocks back onto the page's free list: splice the
/// thread-local `local_free` list onto `free`, then gather blocks freed by
/// other threads from the atomic `thread_free` list.
pub unsafe fn _mi_page_free_collect(page: *mut mi_page_t) {
    let p = &mut *page;

    if !p.local_free.is_null() {
        if p.free.is_null() {
            // Free list is empty: adopt the local-free list wholesale.
            p.free = p.local_free;
        } else {
            // Otherwise append local_free at the tail of the free list.
            let mut tail = p.free;
            loop {
                let next = mi_block_next(page, tail);
                if next.is_null() {
                    break;
                }
                tail = next;
            }
            mi_block_set_next(page, tail, p.local_free);
        }
        p.local_free = null_mut();
    }

    // Pick up blocks freed concurrently by other threads, if any.
    if p.thread_free.__bindgen_anon_1.head() != 0 {
        mi_page_thread_free_collect(page);
    }
}

/// Atomically detach the page's `thread_free` list (blocks freed by other
/// threads) and prepend it to the page's regular free list, adjusting the
/// `thread_freed` and `used` counters by the number of blocks moved.
pub unsafe fn mi_page_thread_free_collect(page: *mut mi_page_t) {
    let mut head;
    let mut tfree;
    let mut tfreex;

    // Swap the list head to 0 with a compare-exchange, retrying while other
    // threads push concurrently. The head pointer is stored shifted right by
    // MI_TF_PTR_SHIFT inside the packed word.
    loop {
        tfree = (&mut *page).thread_free;
        tfreex = (&mut *page).thread_free;
        head = (tfree.__bindgen_anon_1.head() << MI_TF_PTR_SHIFT) as *mut mi_block_t;
        tfreex.__bindgen_anon_1.set_head(0);
        // `ok` means "retry": the CAS (new = tfreex, expected = tfree) lost.
        let ok = !mi_atomic_compare_exchange_ptr(
            &mut (&mut *page).thread_free as *mut _ as *mut usize,
            tfreex.value,
            tfree.value,
        );
        if !ok {
            break;
        }
    }

    // Nothing was queued by other threads.
    if head.is_null() {
        return;
    }

    // Count the detached blocks and find the tail of the list.
    let mut count = 1;
    let mut tail = head;
    let mut next;
    loop {
        next = mi_block_next(page, tail);
        if next.is_null() {
            break;
        }
        count += 1;
        tail = next;
    }

    // Prepend the detached list to the free list and fix up the counters.
    mi_block_set_next(page, tail, (&mut *page).free);
    (&mut *page).free = head;
    mi_atomic_subtract(&mut (&mut *page).thread_freed, count);
    (&mut *page).used -= count;
}

/// Scan queue `pq` in "next fit" order for a page with an immediately
/// available block: collect freed blocks, retire fully-free pages (up to 8
/// per scan), extend partially-carved pages, and move full pages to the
/// full queue along the way. Allocates a fresh page when the scan comes up
/// empty.
pub unsafe fn mi_page_queue_find_free_ex(
    heap: *mut mi_heap_t,
    pq: *mut mi_page_queue_t,
) -> *mut mi_page_t {
    // Last fully-free page seen; retained as a fallback instead of being
    // freed immediately, to avoid free/alloc thrashing of the same page.
    let mut rpage: *mut mi_page_t = null_mut();
    let mut count = 0;
    let mut page_free_count = 0;
    let mut page = (&mut *pq).first;
    while !page.is_null() {
        let next = (&mut *page).next;
        count += 1;

        // 0. Collect blocks freed by us and by other threads.
        _mi_page_free_collect(page);

        // 1. If the page has a block immediately available, pick it —
        //    unless it is completely free, in which case defer freeing it
        //    and keep scanning (bounded to 8 such pages per search).
        if mi_page_immediate_available(page) {
            if page_free_count < 8 && mi_page_all_free(page) {
                page_free_count += 1;
                if !rpage.is_null() {
                    _mi_page_free(rpage, pq, false);
                }
                rpage = page;
                page = next;
                continue;
            }
            // NOTE(review): the previous version broke out of the loop when
            // the page was NOT immediately available (and fell through to
            // the extend/full handling when it was), so the scan could
            // return a page with no free blocks; the break belongs here,
            // on an available page (matches upstream mimalloc).
            break;
        }

        // 2. No block available: try to extend the page's free list from
        //    its reserved-but-uncarved blocks.
        if (&mut *page).capacity < (&mut *page).reserved {
            mi_page_extend_free(heap, page, &mut (&mut *(&mut *heap).tld).stats);
            break;
        }

        // 3. The page is completely full: park it in the full queue.
        mi_page_to_full(page, pq);
        page = next;
    }

    mi_stat_counter_increase(&mut (&mut *(&mut *heap).tld).stats.searches, count);

    // Fall back to the retained fully-free page if the scan found nothing.
    if page.is_null() {
        page = rpage;
        rpage = null_mut();
    }

    // A retained page we ended up not needing is freed now.
    if !rpage.is_null() {
        _mi_page_free(rpage, pq, false);
    }

    // Still nothing: allocate a fresh page.
    if page.is_null() {
        page = mi_page_fresh(heap, pq);
    }
    page
}

/// Move a page from queue `pq` into the heap's full queue, switching its
/// thread frees to the delayed mode first.
pub unsafe fn mi_page_to_full(page: *mut mi_page_t, pq: *mut mi_page_queue_t) {
    _mi_page_use_delayed_free(page, true);
    // Already parked in the full queue: nothing to do.
    if (&*page).flags.__bindgen_anon_1.in_full {
        return;
    }
    mi_page_queue_enqueue_from(
        &mut (&mut *(&mut *(&mut *page)).heap).pages[MI_BIN_FULL as usize],
        pq,
        page,
    );
    // Collect right away in case another thread freed a block just before
    // the delayed-free mode was switched on.
    mi_page_thread_free_collect(page);
}

/// Get a fresh page for queue `pq`: first try reclaiming an abandoned
/// segment (which may land a usable page at the head of our queue),
/// otherwise allocate a brand-new page. Returns null on failure.
pub unsafe fn mi_page_fresh(heap: *mut mi_heap_t, pq: *mut mi_page_queue_t) -> *mut mi_page_t {
    let first = (&mut *pq).first;
    let reclaimed = !(&mut *heap).no_reclaim
        && _mi_segment_try_reclaim_abandoned(heap, false, &mut (&mut *(&mut *heap).tld).segments);
    if reclaimed && first != (&mut *pq).first {
        // Reclaiming changed the queue head; use it if it has free blocks.
        let head = (&mut *pq).first;
        if !(&mut *head).free.is_null() {
            return head;
        }
    }
    // Otherwise allocate a new page (null on allocation failure).
    mi_page_fresh_alloc(heap, pq, (&mut *pq).block_size)
}

/// Adopt a reclaimed page into `heap`: collect its freed blocks, then push
/// it onto the queue matching its block size.
pub unsafe fn _mi_page_reclaim(heap: *mut mi_heap_t, page: *mut mi_page_t) {
    _mi_page_free_collect(page);
    let queue = mi_page_queue(heap, (&*page).block_size);
    mi_page_queue_push(heap, queue, page);
}
