use crate::alloc_aligned::*;
use crate::atomic::*;
use crate::heap::*;
use crate::internal::*;
use crate::page::*;
use crate::types::*;
use crate::types_extra::*;

/// Allocate `size` bytes from `heap`, taking the small-object fast path
/// when the request is below `MI_SMALL_SIZE_MAX`.
///
/// # Safety
/// `heap` must be non-null and either unclaimed (`thread_id == 0`) or
/// owned by the calling thread.
pub unsafe fn mi_heap_malloc(heap: *mut mi_heap_t, size: usize) -> *mut u8 {
    assert!(!heap.is_null());
    let h = &mut *heap;
    // Heaps are thread-local: only the owning thread may allocate from one.
    let tid = _mi_thread_id();
    assert!(h.thread_id == 0 || h.thread_id == tid);

    if size >= MI_SMALL_SIZE_MAX {
        return _mi_malloc_generic(h, size);
    }
    mi_heap_malloc_small(h, size)
}

/// Small-object allocation: look up the heap's direct free page for this
/// size class and allocate a block from it.
pub unsafe fn mi_heap_malloc_small(heap: *mut mi_heap_t, size: usize) -> *mut u8 {
    assert!(size < MI_SMALL_SIZE_MAX);
    _mi_page_malloc(heap, _mi_heap_get_free_small_page(heap, size), size)
}

/// Pop one block from `page`'s free list; fall back to the generic
/// allocation path when the list is empty.
pub unsafe fn _mi_page_malloc(heap: *mut mi_heap_t, page: *mut mi_page_t, size: usize) -> *mut u8 {
    let pg = &mut *page;
    // block_size == 0 is the "unsized" page marker; otherwise the block
    // must be able to hold the request.
    debug_assert!(pg.block_size == 0 || pg.block_size >= size);

    match pg.free {
        head if head.is_null() => _mi_malloc_generic(heap, size),
        head => {
            // Unlink the head block and hand it out.
            pg.free = mi_block_next(pg, head);
            pg.used += 1;
            head as _
        }
    }
}

/// Free a block that was parked on a heap's delayed-free list: resolve the
/// owning segment and page, then free it as a local (same-thread) free.
pub unsafe fn _mi_free_delayed_block(block: *mut mi_block_t) {
    let seg = _mi_ptr_segment(block as _);
    _mi_free_block(_mi_segment_page_of(seg, block as _), true, block)
}

/// Return the page descriptor inside `segment` that contains pointer `p`.
///
/// Pages occupy fixed power-of-two strides within a segment, so the page
/// index is the byte offset of `p` shifted down by the segment's
/// `page_shift`.
///
/// # Safety
/// `segment` must be valid and `p` must point into it.
pub unsafe fn _mi_segment_page_of(segment: *mut mi_segment_t, p: *mut u8) -> *mut mi_page_t {
    let seg = &mut *segment;
    // Use pointer-width arithmetic rather than casting through i64, which
    // would silently truncate on a target with pointers wider than 64 bits.
    let diff = (p as usize).wrapping_sub(segment as usize) as isize;
    let idx = diff >> seg.page_shift;
    &mut seg.pages[idx as usize]
}

/// Free `block` back to `page`. `local` indicates the caller runs on the
/// thread that owns the page; otherwise the multi-threaded path is taken.
pub unsafe fn _mi_free_block(page: *mut mi_page_t, local: bool, block: *mut mi_block_t) {
    if !local {
        // Freed from a foreign thread: push onto the atomic thread-free list.
        _mi_free_block_mt(page, block);
        return;
    }

    // Owning thread: push onto the non-atomic local free list.
    let pg = &mut *page;
    mi_block_set_next(pg, block, pg.local_free);
    pg.local_free = block;
    pg.used -= 1;

    if mi_page_all_free(pg) {
        // Every block returned: the page can be retired.
        _mi_page_retire(pg);
    } else if pg.flags.__bindgen_anon_1.in_full {
        // The page was full; it now has room again.
        _mi_page_unfull(pg);
    }
}

/// A page is completely free once every used block has been returned,
/// counting blocks freed by other threads (`thread_freed`).
pub unsafe fn mi_page_all_free(page: *mut mi_page_t) -> bool {
    let pg = &*page;
    pg.thread_freed == pg.used
}

pub unsafe fn _mi_free_block_mt(page: *mut mi_page_t, block: *mut mi_block_t) {
    let mut tfree;
    let mut tfreex;
    let mut use_delayed;

    loop {
        tfree = (&mut *page).thread_free;
        tfreex = (&mut *page).thread_free;
        use_delayed = tfree.__bindgen_anon_1.delayed() == mi_delayed_e_MI_USE_DELAYED_FREE;
        if use_delayed {
            tfreex
                .__bindgen_anon_1
                .set_delayed(mi_delayed_e_MI_DELAYED_FREEING);
        } else {
            let next = tfree.__bindgen_anon_1.head() << MI_TF_PTR_SHIFT;
            mi_block_set_next(page, block, next as _);
            tfreex
                .__bindgen_anon_1
                .set_head(block as usize >> MI_TF_PTR_SHIFT);
        }
        let ok = !mi_atomic_compare_exchange_ptr(
            &mut (&mut *page).thread_free as *mut _ as *mut usize,
            tfreex.value,
            tfree.value,
        );
        if !ok {
            break;
        }
    }

    if !use_delayed {
        mi_atomic_increment(&mut (&mut *page).thread_freed);
    } else {
        let heap = (&mut *page).heap;
        if !heap.is_null() {
            let mut dfree;
            loop {
                dfree = (&mut *heap).thread_delayed_free as *mut mi_block_t;
                mi_block_set_nextx((&mut *heap).cookie, block, dfree);
                let ok = !mi_atomic_compare_exchange_ptr(
                    &mut (&mut *heap).thread_delayed_free as *mut _ as *mut usize,
                    block as usize,
                    dfree as usize,
                );
                if !ok {
                    break;
                }
            }
        }

        loop {
            tfree = (&mut *page).thread_free;
            tfreex = (&mut *page).thread_free;
            tfreex
                .__bindgen_anon_1
                .set_delayed(mi_delayed_e_MI_DELAYED_FREEING);
            let ok = !mi_atomic_compare_exchange_ptr(
                &mut (&mut *page).thread_free as *mut _ as *mut usize,
                tfreex.value,
                tfree.value,
            );
            if !ok {
                break;
            }
        }
    }
}

/// Free the allocation at `p`. A null-segment lookup (e.g. `p == null`)
/// is ignored. Pages with no flags set take a fast path that pushes the
/// block straight onto the local or thread free list.
pub unsafe fn mi_free(p: *mut u8) {
    let segment = _mi_ptr_segment(p);
    if segment.is_null() {
        return;
    }
    let page = _mi_segment_page_of(segment, p);
    let local = _mi_thread_id() == (&*segment).thread_id;

    // Any flag set (aligned blocks, in-full, ...) forces the generic path.
    if (&*page).flags.value != 0 {
        mi_free_generic(segment, page, local, p);
        return;
    }

    let block = p as *mut mi_block_t;
    if !local {
        // Freed from a foreign thread.
        _mi_free_block_mt(page, block);
        return;
    }

    // Owning thread, no flags: push onto the local free list directly.
    let pg = &mut *page;
    mi_block_set_next(pg, block, pg.local_free);
    pg.local_free = block;
    pg.used -= 1;
    if mi_page_all_free(pg) {
        _mi_page_retire(pg);
    }
}

/// Slow-path free: recover the true block start when the page contains
/// aligned allocations, then free the block.
pub unsafe fn mi_free_generic(
    segment: *mut mi_segment_t,
    page: *mut mi_page_t,
    local: bool,
    p: *mut u8,
) {
    let has_aligned = (&*page).flags.__bindgen_anon_1.has_aligned;
    let block = match has_aligned {
        // `p` may point past the block header; round it back down.
        true => _mi_page_ptr_unalign(segment, page, p),
        false => p as *mut mi_block_t,
    };
    _mi_free_block(page, local, block);
}

/// Map a (possibly alignment-adjusted) interior pointer `p` back to the
/// start of its block by rounding its page offset down to a multiple of
/// the page's `block_size`.
pub unsafe fn _mi_page_ptr_unalign(
    segment: *mut mi_segment_t,
    page: *mut mi_page_t,
    p: *mut u8,
) -> *mut mi_block_t {
    let offset = p as usize - _mi_page_start(segment, page, None) as usize;
    p.sub(offset % (&*page).block_size) as _
}

/// Reallocate `p` to `newsize`; when `zero` is set, newly exposed bytes are
/// zero-initialized.
///
/// * `p == null` degenerates to a fresh (optionally zeroed) allocation.
/// * The allocation is reused in place when `newsize` fits and is at least
///   half the current usable size (avoids churn on small shrinks).
///
/// Bug fix: the zeroing `memset` now starts at `newp + start` instead of the
/// base of the new allocation; previously the grown tail `[size, newsize)`
/// was left uninitialized while the (about-to-be-copied-over) front was
/// zeroed instead.
pub unsafe fn _mi_realloc_zero(p: *mut u8, newsize: usize, zero: bool) -> *mut u8 {
    if p.is_null() {
        return _mi_heap_malloc_zero(mi_get_default_heap(), newsize, zero);
    }
    let size = mi_usable_size(p);
    // Reuse in place when the new size fits and isn't wastefully small.
    if newsize <= size && newsize >= (size / 2) {
        return p;
    }
    let newp: *mut u8 = mi_malloc(newsize);
    if !newp.is_null() {
        if zero && newsize > size {
            // Zero the grown region; back up one pointer-word so a partially
            // used last word of the old allocation is cleared too. The
            // overlap [start, size) is rewritten by the memcpy below.
            let start = if size >= size_of::<*mut ()>() {
                size - size_of::<*mut ()>()
            } else {
                0
            };
            libc::memset(newp.add(start) as _, 0, newsize - start);
        }
        libc::memcpy(newp as _, p as _, newsize.min(size));
        mi_free(p);
    }
    newp
}

/// Usable size of the allocation at `p` (0 for null). For aligned
/// allocations the bytes in front of `p` within the block are subtracted.
pub unsafe fn mi_usable_size(p: *mut u8) -> usize {
    if p.is_null() {
        return 0;
    }
    let segment = _mi_ptr_segment(p);
    let page = _mi_segment_page_of(segment, p);
    let size = (&*page).block_size;
    if !(&*page).flags.__bindgen_anon_1.has_aligned {
        return size;
    }
    // `p` sits somewhere inside the block; usable space is what follows it.
    let adjust = p as usize - _mi_page_ptr_unalign(segment, page, p) as usize;
    size - adjust
}

/// Allocate `size` bytes from the calling thread's default heap.
pub unsafe fn mi_malloc(size: usize) -> *mut u8 {
    let heap = mi_get_default_heap();
    mi_heap_malloc(heap, size)
}
