use crate::{
    init::_mi_page_empty,
    internal::_mi_wsize_from_size,
    types::{
        mi_heap_t, mi_page_queue_t, mi_page_t, MI_BIN_FULL, MI_BIN_HUGE, MI_LARGE_SIZE_MAX,
        MI_LARGE_WSIZE_MAX,
    },
    types_extra::MI_SMALL_SIZE_MAX,
};
use core::ptr::null_mut;

/// Map an allocation `size` in bytes to its bin index (1..=MI_BIN_HUGE).
///
/// Bins 1..=8 are linear in even word counts; above that, bins are
/// logarithmic with four sub-divisions per power of two; anything past
/// `MI_LARGE_WSIZE_MAX` falls into the single huge bin.
pub unsafe fn _mi_bin(size: usize) -> u8 {
    let wsize = _mi_wsize_from_size(size);
    let bin: u8;
    if wsize <= 1 {
        // Everything up to one word shares the first bin.
        bin = 1;
    } else if wsize <= 8 {
        // Exact linear bins: round the word count up to the next even value.
        bin = ((wsize + 1) & !1) as u8;
    } else if wsize > MI_LARGE_WSIZE_MAX as usize {
        bin = MI_BIN_HUGE as u8;
    } else {
        // Logarithmic bins: top bit position plus the next two bits below it
        // select one of four sub-bins per power of two.
        let w = wsize - 1;
        let b = mi_bsr32(w as u32);
        bin = ((b << 2) + (((w >> (b - 2)) & 0x03) as u8)) - 3;
    }
    debug_assert!(bin > 0 && bin <= MI_BIN_HUGE as u8);
    bin
}

/// Bit-scan-reverse: return the 0-based index of the highest set bit of `x`,
/// using a branch-free De Bruijn multiplication (mimalloc's `mi_bsr32`).
///
/// Behavior for `x == 0` follows the C original (returns 31); callers are
/// expected to pass a non-zero value.
pub unsafe fn mi_bsr32(mut x: u32) -> u8 {
    let debruijn: [u8; 32] = [
        31, 0, 22, 1, 28, 23, 18, 2, 29, 26, 24, 10, 19, 7, 3, 12, 30, 21, 27, 17, 25, 9, 6, 11,
        20, 16, 8, 5, 15, 4, 14, 13,
    ];

    // Smear the highest set bit into every lower position: x becomes 2^(b+1)-1.
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
    // FIX: `x += 1` panics on overflow in debug builds when bit 31 is set
    // (smeared x == u32::MAX). The C original relies on unsigned wraparound —
    // wrapping to 0 correctly maps to debruijn[0] == 31 — so wrap explicitly,
    // matching the wrapping_mul already used below.
    x = x.wrapping_add(1);
    debruijn[(x.wrapping_mul(0x076be629) >> 27) as usize]
}

/// True if `pq` is the pseudo-queue reserved for huge allocations, identified
/// by the sentinel block size `MI_LARGE_SIZE_MAX + sizeof(ptr)`.
pub unsafe fn mi_page_queue_is_huge(pq: *const mi_page_queue_t) -> bool {
    let huge_sentinel = MI_LARGE_SIZE_MAX as usize + size_of::<*const ()>();
    (*pq).block_size == huge_sentinel
}

/// True if `pq` is the pseudo-queue holding full pages, identified by the
/// sentinel block size `MI_LARGE_SIZE_MAX + 2 * sizeof(ptr)`.
pub unsafe fn mi_page_queue_is_full(pq: *const mi_page_queue_t) -> bool {
    let full_sentinel = MI_LARGE_SIZE_MAX as usize + 2 * size_of::<*const ()>();
    (*pq).block_size == full_sentinel
}

/// True if `pq` is any of the special pseudo-queues (huge or full), i.e. its
/// block size exceeds the largest regular size class.
#[allow(dead_code)]
pub unsafe fn mi_page_queue_is_special(pq: *const mi_page_queue_t) -> bool {
    (MI_LARGE_SIZE_MAX as usize) < (*pq).block_size
}

/// Prepend `page` to `queue`, attach it to `heap`, refresh the heap's
/// direct-free table (the head changed), and bump the heap's page count.
pub unsafe fn mi_page_queue_push(
    heap: *mut mi_heap_t,
    queue: *mut mi_page_queue_t,
    page: *mut mi_page_t,
) {
    // TODO! debug_assert
    let q = &mut *queue;
    let p = &mut *page;

    // Record whether the page now lives on the "full" pseudo-queue.
    p.flags.__bindgen_anon_1.in_full = mi_page_queue_is_full(queue);
    p.heap = heap;
    // Link at the front: the old head becomes our successor.
    p.next = q.first;
    p.prev = null_mut();

    if q.first.is_null() {
        // Empty queue: page is both head and tail.
        q.first = page;
        q.last = page;
    } else {
        (&mut *q.first).prev = page;
        q.first = page;
    }

    mi_heap_queue_first_update(heap, queue);
    (&mut *heap).page_count += 1;
}

/// Refresh the `pages_free_direct` entries of `heap` after the first page of
/// queue `pq` changed, so small-size allocation can jump straight to a page.
///
/// Only small block sizes (<= MI_SMALL_SIZE_MAX) have direct entries; larger
/// queues return immediately. An empty queue publishes the shared empty-page
/// sentinel instead of null.
pub unsafe fn mi_heap_queue_first_update(heap: *mut mi_heap_t, pq: *mut mi_page_queue_t) {
    let size = (&mut *pq).block_size;
    if size > MI_SMALL_SIZE_MAX {
        return;
    }
    let mut page = (&mut *pq).first;
    if page.is_null() {
        // Empty queue: direct entries point at the static empty-page sentinel.
        page = &mut _mi_page_empty as _;
    }

    let mut start;
    let idx = _mi_wsize_from_size(size);
    let pages_free = &mut (&mut *heap).pages_free_direct;

    // Already up to date for this word size — nothing to publish.
    if pages_free[idx] == page {
        return;
    }

    if idx <= 1 {
        start = 0;
    } else {
        // Several word sizes can map to the same bin. Walk backwards over the
        // preceding queues that share this bin to find the smallest word size
        // this bin serves. NOTE(review): `pq.sub(1)` assumes `pq` points into
        // the contiguous `heap.pages` array — confirm at call sites.
        let bin = _mi_bin(size);
        let mut prev = pq.sub(1);
        while bin == _mi_bin((&mut *prev).block_size) && prev > &mut (&mut *heap).pages[0] {
            prev = prev.sub(1)
        }
        // First word size after the previous (different) bin's largest size.
        start = 1 + _mi_wsize_from_size((&mut *prev).block_size);
        if start > idx {
            start = idx
        }
    }

    debug_assert!(start <= idx);
    // Redirect every word size served by this bin to the new first page.
    for sz in start..=idx {
        pages_free[sz] = page
    }
}

/// Unlink `page` from `queue`, refresh the heap's direct-free table when the
/// queue head changed, and clear the page's queue-related state.
pub unsafe fn mi_page_queue_remove(queue: *mut mi_page_queue_t, page: *mut mi_page_t) {
    let p = &mut *page;
    let q = &mut *queue;

    // Splice the page out of the doubly-linked list.
    if let Some(prev) = p.prev.as_mut() {
        prev.next = p.next;
    }
    if let Some(next) = p.next.as_mut() {
        next.prev = p.prev;
    }

    // Fix up the queue endpoints if the page was at either end.
    if q.last == page {
        q.last = p.prev;
    }
    if q.first == page {
        q.first = p.next;
        // Head changed: republish pages_free_direct for this size class.
        mi_heap_queue_first_update(p.heap, queue);
    }

    // Decrement the count before the heap pointer is cleared below.
    (&mut *p.heap).page_count -= 1;
    p.next = null_mut();
    p.prev = null_mut();
    p.heap = null_mut();
    p.flags.__bindgen_anon_1.in_full = false;
}

/// Return the queue in `heap` that `page` belongs to: the FULL pseudo-queue
/// when the page is flagged `in_full`, otherwise its size-class bin.
pub unsafe fn mi_heap_page_queue_of(
    heap: *mut mi_heap_t,
    page: *mut mi_page_t,
) -> *mut mi_page_queue_t {
    let bin = match (&*page).flags.__bindgen_anon_1.in_full {
        true => MI_BIN_FULL as usize,
        false => _mi_bin((&*page).block_size) as usize,
    };
    &mut (&mut *heap).pages[bin]
}

/// Move `page` from queue `from` to the tail of queue `to`, keeping both
/// queues and the heap's direct-free table consistent.
///
/// The unlink half mirrors `mi_page_queue_remove`; the insert half appends at
/// `to.last` (unlike `mi_page_queue_push`, which prepends).
pub unsafe fn mi_page_queue_enqueue_from(
    to: *mut mi_page_queue_t,
    from: *mut mi_page_queue_t,
    page: *mut mi_page_t,
) {
    let page_mut = &mut *page;
    // Splice the page out of `from`'s doubly-linked list.
    if !page_mut.prev.is_null() {
        (&mut *page_mut.prev).next = page_mut.next;
    }

    if !page_mut.next.is_null() {
        (&mut *page_mut.next).prev = page_mut.prev;
    }

    let from_mut = &mut *from;
    if page == from_mut.last {
        from_mut.last = page_mut.prev
    }

    if page == from_mut.first {
        from_mut.first = page_mut.next;
        // The head of `from` changed: refresh the heap's pages_free_direct.
        let heap = page_mut.heap;
        mi_heap_queue_first_update(heap, from);
    }

    // Append to the tail of `to`.
    let to_mut = &mut *to;
    page_mut.prev = to_mut.last;
    page_mut.next = null_mut();

    if !to_mut.last.is_null() {
        (&mut *to_mut.last).next = page;
        to_mut.last = page;
    } else {
        // `to` was empty: the page becomes both head and tail, and the new
        // head must be published to pages_free_direct.
        to_mut.first = page;
        to_mut.last = page;
        let heap = page_mut.heap;
        mi_heap_queue_first_update(heap, to);
    }
    // Reflect the destination queue in the page's full-flag.
    page_mut.flags.__bindgen_anon_1.in_full = mi_page_queue_is_full(to)
}

/// Return the queue that `page` belongs to within its own heap: the FULL
/// pseudo-queue when flagged `in_full`, otherwise its size-class bin.
pub unsafe fn mi_page_queue_of(page: *mut mi_page_t) -> *mut mi_page_queue_t {
    let p = &*page;
    let bin = match p.flags.__bindgen_anon_1.in_full {
        true => MI_BIN_FULL as usize,
        false => _mi_bin(p.block_size) as usize,
    };
    &mut (&mut *p.heap).pages[bin]
}
