use crate::{
    alloc_::mi_page_all_free,
    atomic::{mi_atomic_compare_exchange_ptr, mi_atomic_decrement, mi_atomic_increment},
    init::_mi_ptr_cookie,
    internal::{_mi_page_segment, _mi_thread_id},
    os::{_mi_align_up, _mi_os_alloc_aligned, _mi_os_free, _mi_os_page_size, _mi_os_shrink},
    page::_mi_page_reclaim,
    stats::{mi_stat_decrease, mi_stat_increase},
    types::*,
    types_extra::static_mi_segment_t_ptr,
};
use core::ptr::null_mut;

// Alignment granularity (256 KiB) used when rounding up huge-segment sizes.
const MI_PAGE_HUGE_ALIGN: usize = 256 * 1024;

/// Allocate a page able to hold blocks of `block_size` bytes, dispatching on
/// the size class: small pages, large pages, or a dedicated huge segment.
pub unsafe fn _mi_segment_page_alloc(
    block_size: usize,
    tld: *mut mi_segments_tld_t,
    os_tld: *mut mi_os_tld_t,
) -> *mut mi_page_t {
    let small_max = MI_SMALL_PAGE_SIZE as usize / 8;
    let large_max = MI_LARGE_PAGE_SIZE as usize - size_of::<mi_segment_t>();
    if block_size < small_max {
        return mi_segment_small_page_alloc(tld, os_tld);
    }
    if block_size < large_max {
        return mi_segment_large_page_alloc(tld, os_tld);
    }
    mi_segment_huge_page_alloc(block_size, tld, os_tld)
}

/// Hand out a small page, allocating and queueing a fresh small-page segment
/// first when the small-free queue is empty. Returns null on OS failure.
pub unsafe fn mi_segment_small_page_alloc(
    tld: *mut mi_segments_tld_t,
    os_tld: *mut mi_os_tld_t,
) -> *mut mi_page_t {
    if mi_segment_queue_is_empty(&(&*tld).small_free) {
        // No segment with free small pages: create one and make it findable.
        let segment = mi_segment_alloc(
            0,
            mi_page_kind_e_MI_PAGE_SMALL,
            MI_SMALL_PAGE_SHIFT as usize,
            tld,
            os_tld,
        );
        if segment.is_null() {
            return null_mut();
        }
        mi_segment_enqueue(&mut (&mut *tld).small_free, segment);
    }

    // The queue is guaranteed non-empty here.
    let first = (&mut *tld).small_free.first;
    debug_assert!(!first.is_null());
    mi_segment_small_page_alloc_in(first, tld)
}

/// A segment queue is empty exactly when its head pointer is null.
pub unsafe fn mi_segment_queue_is_empty(queue: *const mi_segment_queue_t) -> bool {
    (*queue).first.is_null()
}

/// Allocate a segment holding pages of kind `page_kind` (reusing a cached
/// segment when possible). `required` is non-zero only for huge segments,
/// where it gives the minimum payload size. Returns null when the OS
/// allocation fails.
pub unsafe fn mi_segment_alloc(
    required: usize,
    page_kind: mi_page_kind_e,
    page_shift: usize,
    tld: *mut mi_segments_tld_t,
    os_tld: *mut mi_os_tld_t,
) -> *mut mi_segment_t {
    // How many pages fit in this segment kind.
    let capacity;
    if page_kind == mi_page_kind_e_MI_PAGE_HUGE {
        debug_assert!(page_shift == MI_SEGMENT_SHIFT as usize && required > 0);
        // A huge segment is a single page spanning the whole segment.
        capacity = 1;
    } else {
        debug_assert_eq!(required, 0);
        let page_size = 1 << page_shift;
        capacity = (MI_SEGMENT_SIZE / page_size) as usize;
        debug_assert!(MI_SEGMENT_SIZE % page_size == 0);
        debug_assert!(capacity >= 1 && capacity <= MI_SMALL_PAGES_PER_SEGMENT as usize);
    };

    // Total segment size plus the header ("info") and prefix sizes.
    let mut info_size = 0;
    let mut pre_size = 0;
    let segment_size = mi_segment_size(
        capacity,
        required,
        Some(&mut pre_size),
        Some(&mut info_size),
    );
    debug_assert!(segment_size >= required);

    let _page_size = if page_kind == mi_page_kind_e_MI_PAGE_HUGE {
        segment_size
    } else {
        1 << page_shift
    };

    // Prefer a cached segment; otherwise get fresh segment-aligned memory
    // from the OS and add it to the thread-local size accounting (cached
    // segments are already accounted for).
    let mut segment = mi_segment_cache_find(tld, segment_size);

    if segment.is_null() {
        segment = _mi_os_alloc_aligned(segment_size, MI_SEGMENT_SIZE as usize, os_tld)
            as *mut mi_segment_t;
        if segment.is_null() {
            return null_mut();
        }
        mi_segments_track_size(segment_size as i64, tld);
    }
    // Zero only the info area (page payload memory is left as-is), then
    // initialize the segment header.
    libc::memset(segment as _, 0, info_size);
    (&mut *segment).page_kind = page_kind;
    (&mut *segment).capacity = capacity;
    (&mut *segment).page_shift = page_shift;
    (&mut *segment).segment_size = segment_size;
    (&mut *segment).segment_info_size = pre_size;
    (&mut *segment).thread_id = _mi_thread_id();
    (&mut *segment).cookie = _mi_ptr_cookie(segment as _);

    // Stamp every page descriptor with its index within the segment.
    for i in 0..(&*segment).capacity {
        let page = &mut (&mut *segment).pages as *mut mi_page_s;
        let page = page.add(i);
        (&mut *page).segment_idx = i as u8;
    }
    mi_stat_increase(
        &mut (&mut *(&mut *tld).stats).committed,
        (&mut *segment).segment_info_size,
    );
    segment
}

/// Compute the byte size of a segment with `capacity` pages that must hold at
/// least `required` bytes of payload (`required == 0` for regular segments).
/// On return, `info_size` holds the size of the segment header (including the
/// page-descriptor array) and `pre_size` the full prefix (header + guard
/// space) preceding the first page's usable memory.
pub unsafe fn mi_segment_size(
    capacity: usize,
    required: usize,
    pre_size: Option<&mut usize>,
    info_size: Option<&mut usize>,
) -> usize {
    // Header plus the page-descriptor array (one descriptor is embedded in
    // `mi_segment_t` itself) plus 16 bytes of padding.
    let minsize = size_of::<mi_segment_t>() + ((capacity - 1) * size_of::<mi_page_t>()) + 16;
    let guardsize = 0;

    // TODO! secure option
    // Align the info area to at least 16 bytes. Fix: this was `.min(16)`,
    // which would under-align whenever MI_MAX_ALIGN_SIZE > 16; the intent
    // (as in upstream mimalloc) is max(MI_MAX_ALIGN_SIZE, 16).
    let i_size = _mi_align_up(minsize, MI_MAX_ALIGN_SIZE.max(16) as usize);
    if let Some(x) = info_size {
        *x = i_size;
    }
    if let Some(x) = pre_size {
        *x = i_size + guardsize;
    }
    if required == 0 {
        // Regular (small/large) segments always span a full MI_SEGMENT_SIZE.
        MI_SEGMENT_SIZE as usize
    } else {
        // Huge segments: payload + prefix, rounded up to huge-page alignment.
        _mi_align_up(required + i_size + 2 * guardsize, MI_PAGE_HUGE_ALIGN)
    }
}

/// Adjust the thread-local segment accounting by `segment_size` bytes
/// (negative when a segment is freed) and keep the peak size up to date.
pub unsafe fn mi_segments_track_size(segment_size: i64, tld: *mut mi_segments_tld_t) {
    let tld = &mut *tld;
    let stats = &mut *tld.stats;
    if segment_size < 0 {
        mi_stat_decrease(&mut stats.segments, 1);
    } else {
        mi_stat_increase(&mut stats.segments, 1);
    }

    tld.current_size = (tld.current_size as i64 + segment_size) as usize;
    tld.peak_size = tld.peak_size.max(tld.current_size);
}

/// Return a segment's memory to the OS, first removing it from the
/// thread-local size accounting.
pub unsafe fn mi_segment_os_free(
    segment: *mut mi_segment_t,
    segment_size: usize,
    tld: *mut mi_segments_tld_t,
) {
    mi_segments_track_size(-(segment_size as i64), tld);
    let stats = (&mut *tld).stats;
    _mi_os_free(segment as _, segment_size, stats)
}

/// Look up a cached segment of at least `required` bytes, scanning the cache
/// from the front (smallest entries first).
pub unsafe fn mi_segment_cache_find(
    tld: *mut mi_segments_tld_t,
    required: usize,
) -> *mut mi_segment_t {
    let reverse = false;
    _mi_segment_cache_findx(tld, required, reverse)
}

/// Scan the segment cache for a segment of at least `required` bytes
/// (`required == 0` accepts any segment). With `reverse` the scan starts at
/// the back of the cache. A matching segment is removed from the cache before
/// being returned; an oversized match is either accepted as-is (within 25% of
/// the request), shrunk in place to `required` bytes, or — if shrinking
/// fails — freed to the OS, in which case null is returned.
pub unsafe fn _mi_segment_cache_findx(
    tld: *mut mi_segments_tld_t,
    required: usize,
    reverse: bool,
) -> *mut mi_segment_t {
    debug_assert!(required % _mi_os_page_size() == 0);
    let mut segment = if reverse {
        (&*tld).cache.last
    } else {
        (&*tld).cache.first
    };

    while !segment.is_null() {
        let segment_mut = &mut *segment;
        if segment_mut.segment_size >= required {
            // Candidate found: take it out of the cache accounting and queue.
            (&mut *tld).cache_count -= 1;
            (&mut *tld).cache_size -= segment_mut.segment_size;

            mi_segment_queue_remove(&mut (&mut *tld).cache, segment_mut);

            if required == 0 || segment_mut.segment_size == required {
                // Exact fit, or the caller accepts any size.
                return segment_mut;
            } else if required != MI_SEGMENT_SIZE as usize
                && segment_mut.segment_size - (segment_mut.segment_size / 4) <= required
            {
                // Within 25% of the request: close enough, use as-is.
                return segment_mut;
            } else {
                // TODO! option_secure
                // Too large: try to hand the surplus back to the OS.
                if _mi_os_shrink(segment as _, segment_mut.segment_size, required) {
                    (&mut *tld).current_size -= segment_mut.segment_size;
                    (&mut *tld).current_size += required;
                    segment_mut.segment_size = required;
                    return segment_mut;
                } else {
                    // Shrink failed: free it entirely and report no match.
                    mi_segment_os_free(segment_mut, segment_mut.segment_size, tld);
                    return null_mut();
                }
            }
        }

        segment = if reverse {
            segment_mut.prev
        } else {
            segment_mut.next
        }
    }

    null_mut()
}

/// Unlink `segment` from `queue`: patch its neighbours, fix the queue's
/// head/tail pointers, and clear the segment's own links.
pub unsafe fn mi_segment_queue_remove(queue: *mut mi_segment_queue_t, segment: *mut mi_segment_t) {
    let queue = &mut *queue;
    let segment = &mut *segment;
    let (prev, next) = (segment.prev, segment.next);

    if !prev.is_null() {
        (&mut *prev).next = next;
    }
    if !next.is_null() {
        (&mut *next).prev = prev;
    }
    if queue.first == segment as *mut _ {
        queue.first = next;
    }
    if queue.last == segment as *mut _ {
        queue.last = prev;
    }

    segment.prev = null_mut();
    segment.next = null_mut();
}

/// Start address of `page`'s memory inside `segment`, optionally reporting
/// the usable page size. The first page is shifted past the segment header,
/// so its usable size is correspondingly smaller.
pub unsafe fn _mi_segment_page_start(
    segment: *mut mi_segment_t,
    page: *mut mi_page_t,
    page_size: Option<&mut usize>,
) -> *mut u8 {
    let seg = &mut *segment;
    let pg = &mut *page;

    // A huge segment holds exactly one page spanning the whole segment.
    let huge = seg.page_kind == mi_page_kind_e_MI_PAGE_HUGE;
    let mut psize = if huge { seg.segment_size } else { 1 << seg.page_shift };

    let mut p = (segment as *mut u8).add(pg.segment_idx as usize * psize);
    if pg.segment_idx == 0 {
        // Page 0 shares its slot with the segment info block.
        p = p.add(seg.segment_info_size);
        psize -= seg.segment_info_size;
    }

    if let Some(out) = page_size {
        *out = psize;
    }
    p
}

/// Free `page` and then free, abandon, or re-queue its segment depending on
/// how many pages remain in use.
pub unsafe fn _mi_segment_page_free(
    page: *mut mi_page_t,
    force: bool,
    tld: *mut mi_segments_tld_t,
) {
    let segment = _mi_page_segment(page);
    mi_segment_page_clear(segment, page, (&mut *tld).stats);

    let seg = &mut *segment;
    if seg.used == 0 {
        // Last live page: the whole segment can go.
        mi_segment_free(segment, force, tld);
    } else if seg.used == seg.abandoned {
        // Only abandoned pages remain: abandon the segment itself.
        mi_segment_abandon(segment, tld);
    } else if seg.used + 1 == seg.capacity {
        // The segment just regained a free page: make it findable again.
        mi_segment_enqueue(&mut (&mut *tld).small_free, segment);
    }
}

/// Reset a page descriptor to its pristine (unused) state and update the
/// statistics. Only `segment_idx` and the `is_reset` flag survive the wipe;
/// the owning segment's in-use counter is decremented.
pub unsafe fn mi_segment_page_clear(
    segment: *mut mi_segment_t,
    page: *mut mi_page_t,
    stats: *mut mi_stats_t,
) {
    // Account the page's committed capacity and the page itself as released.
    let inuse = (&mut *page).capacity as usize * (&mut *page).block_size;
    mi_stat_decrease(&mut (&mut *stats).committed, inuse);
    mi_stat_decrease(&mut (&mut *stats).pages, 1);

    // TODO! option_page_reset
    // Zero the whole descriptor, preserving only the index and reset flag.
    let idx = (&mut *page).segment_idx;
    let is_reset = (&mut *page).is_reset();
    *page = core::mem::zeroed();
    (&mut *page).segment_idx = idx;
    (&mut *page).set_segment_in_use(false);
    (&mut *page).set_is_reset(is_reset);
    (&mut *segment).used -= 1;
}

/// Release a segment: dequeue it if needed, undo its statistics, then either
/// cache it for reuse or (when `force`, or the cache refuses it) return its
/// memory to the OS.
pub unsafe fn mi_segment_free(
    segment: *mut mi_segment_t,
    force: bool,
    tld: *mut mi_segments_tld_t,
) {
    // Only small-page segments live on the small_free queue; the original
    // empty `if` branch for other kinds is folded into a single condition
    // (behavior unchanged).
    if mi_segment_is_in_free_queue(segment, tld)
        && (&*segment).page_kind == mi_page_kind_e_MI_PAGE_SMALL
    {
        mi_segment_queue_remove(&mut (&mut *tld).small_free, segment);
    }

    mi_stat_decrease(
        &mut (&mut *(&mut *tld).stats).committed,
        (&*segment).segment_info_size,
    );
    // The segment no longer belongs to this thread.
    (&mut *segment).thread_id = 0;

    // Clear any per-page "reset" flags together with their statistic.
    for i in 0..(&*segment).capacity {
        let page = (&mut (&mut *segment).pages as *mut mi_page_s).add(i);
        let page = &mut *page;
        if page.is_reset() {
            page.set_is_reset(false);
            mi_stat_decrease(&mut (&mut *(&mut *tld).stats).reset, mi_page_size(page));
        }
    }

    // Prefer caching the segment unless the caller forces an OS free.
    if force || !mi_segment_cache_insert(segment, tld) {
        mi_segment_os_free(segment, (&*segment).segment_size, tld);
    }
}

/// Usable byte size of `page`, as computed by `_mi_segment_page_start`.
pub unsafe fn mi_page_size(page: *mut mi_page_t) -> usize {
    let mut psize = 0;
    let segment = _mi_page_segment(page);
    _mi_segment_page_start(segment, page, Some(&mut psize));
    psize
}

/// A segment is on the small-free queue if it has any queue link, or is the
/// queue's head (covering the single-element case where both links are null).
pub unsafe fn mi_segment_is_in_free_queue(
    segment: *mut mi_segment_t,
    tld: *mut mi_segments_tld_t,
) -> bool {
    let seg = &*segment;
    let linked = !seg.next.is_null() || !seg.prev.is_null();
    linked || (&*tld).small_free.first == segment
}

/// Try to keep `segment` in the thread-local cache for later reuse, keeping
/// the cache sorted by ascending segment size. Returns false (caller should
/// free to the OS) when the cache is at capacity.
pub unsafe fn mi_segment_cache_insert(
    segment: *mut mi_segment_t,
    tld: *mut mi_segments_tld_t,
) -> bool {
    if mi_segment_cache_full(tld) {
        return false;
    }

    // Walk to the first cached segment at least as large as ours; inserting
    // before it keeps the cache sorted by size. Fix: this must be a *loop* —
    // the original `if` advanced at most one node, leaving the cache
    // unsorted and breaking the evict-largest scan in mi_segment_cache_evict.
    let mut seg = (&mut *tld).cache.first;
    while !seg.is_null() && (&mut *seg).segment_size < (&mut *segment).segment_size {
        seg = (&mut *seg).next;
    }

    mi_segment_queue_insert_before(&mut (&mut *tld).cache, seg, segment);
    (&mut *tld).cache_count += 1;
    (&mut *tld).cache_size += (&mut *segment).segment_size;
    true
}

// Cache at most this many segments per thread...
const MI_SEGMENT_CACHE_MAX: usize = 32;
// ...and keep the cached bytes below peak_size / MI_SEGMENT_CACHE_FRACTION.
const MI_SEGMENT_CACHE_FRACTION: usize = 8;

/// Whether the cache is at capacity. When it is, opportunistically shrink it
/// by evicting and freeing segments until its total size is strictly below
/// the configured fraction of the peak size.
pub unsafe fn mi_segment_cache_full(tld: *mut mi_segments_tld_t) -> bool {
    let tld = &mut *tld;
    let under_count = tld.cache_count < MI_SEGMENT_CACHE_MAX;
    let under_size = tld.cache_size * MI_SEGMENT_CACHE_FRACTION < tld.peak_size;
    if under_count && under_size {
        return false;
    }

    // Over budget: evict (largest first) until we drop below the threshold.
    while tld.cache_size * MI_SEGMENT_CACHE_FRACTION >= tld.peak_size + 1 {
        let segment = mi_segment_cache_evict(tld);
        if !segment.is_null() {
            mi_segment_os_free(segment, (&*segment).segment_size, tld);
        }
    }
    true
}

/// Evict a segment from the back of the cache (the largest entry, given the
/// cache's ascending-size order); `required == 0` accepts any segment.
pub unsafe fn mi_segment_cache_evict(tld: *mut mi_segments_tld_t) -> *mut mi_segment_t {
    let reverse = true;
    _mi_segment_cache_findx(tld, 0, reverse)
}

/// Link `segment` into `queue` immediately before `elem`; a null `elem`
/// appends at the tail.
pub unsafe fn mi_segment_queue_insert_before(
    queue: *mut mi_segment_queue_t,
    elem: *mut mi_segment_t,
    segment: *mut mi_segment_t,
) {
    let queue = &mut *queue;
    let seg = &mut *segment;

    // Predecessor: elem's prev, or the current tail when appending.
    seg.prev = if elem.is_null() { queue.last } else { (&*elem).prev };
    if seg.prev.is_null() {
        queue.first = segment;
    } else {
        (&mut *seg.prev).next = segment;
    }

    // Successor is elem itself; null means we became the new tail.
    seg.next = elem;
    if seg.next.is_null() {
        queue.last = segment;
    } else {
        (&mut *seg.next).prev = segment;
    }
}

// Process-wide singly-linked list of abandoned segments (threaded through
// `abandoned_next`) plus its length; both are manipulated only via atomics.
static mut abandoned: static_mi_segment_t_ptr = static_mi_segment_t_ptr(null_mut());
static mut abandoned_count: usize = 0;

pub unsafe fn mi_segment_abandon(segment: *mut mi_segment_t, tld: *mut mi_segments_tld_t) {
    if mi_segment_is_in_free_queue(segment, tld) {
        mi_segment_queue_remove(&mut (&mut *tld).small_free, segment);
    }
    (&mut *segment).thread_id = 0;
    loop {
        (&mut *segment).abandoned_next = abandoned.0;
        if mi_atomic_compare_exchange_ptr(
            &mut abandoned.0 as *mut _ as *mut usize,
            segment as usize,
            (&mut *segment).abandoned_next as usize,
        ) {
            break;
        }
    }
    mi_atomic_increment(&mut abandoned_count);
    mi_stat_decrease(&mut (&mut *(&mut *(&mut *tld)).stats).segments_abandoned, 1);
}

/// Append `segment` at the tail of `queue`.
pub unsafe fn mi_segment_enqueue(queue: *mut mi_segment_queue_t, segment: *mut mi_segment_t) {
    let queue = &mut *queue;
    let seg = &mut *segment;

    seg.next = null_mut();
    seg.prev = queue.last;
    if queue.last.is_null() {
        // Queue was empty: segment becomes the head too.
        queue.first = segment;
    } else {
        (&mut *queue.last).next = segment;
    }
    queue.last = segment;
}

/// Claim a free page from `segment`, dequeueing the segment from the
/// small-free queue once its last free page is taken.
/// NOTE(review): assumes the segment has at least one free page — a full
/// segment would make `mi_segment_find_free` return null and this deref
/// would fault; confirm callers guarantee this.
pub unsafe fn mi_segment_small_page_alloc_in(
    segment: *mut mi_segment_t,
    tld: *mut mi_segments_tld_t,
) -> *mut mi_page_t {
    let page = mi_segment_find_free(segment);
    (&mut *page).set_segment_in_use(true);

    let seg = &mut *segment;
    seg.used += 1;
    if seg.used == seg.capacity {
        // No free pages left: stop advertising this segment.
        mi_segment_queue_remove(&mut (&mut *tld).small_free, segment);
    }
    page
}

/// First page in `segment` not currently in use, or null if all are taken.
pub unsafe fn mi_segment_find_free(segment: *mut mi_segment_t) -> *mut mi_page_t {
    let seg = &mut *segment;
    let base = &mut seg.pages as *mut mi_page_s;
    for i in 0..seg.capacity {
        let page = base.add(i);
        if !(&*page).segment_in_use() {
            return page;
        }
    }
    null_mut()
}

/// Allocate a fresh large-page segment and hand out its single page.
pub unsafe fn mi_segment_large_page_alloc(
    tld: *mut mi_segments_tld_t,
    os_tld: *mut mi_os_tld_t,
) -> *mut mi_page_t {
    let segment = mi_segment_alloc(
        0,
        mi_page_kind_e_MI_PAGE_LARGE,
        MI_LARGE_PAGE_SHIFT as usize,
        tld,
        os_tld,
    );
    if segment.is_null() {
        return null_mut();
    }

    // The only page is claimed immediately.
    let seg = &mut *segment;
    seg.used = 1;
    let page: *mut mi_page_t = &mut seg.pages[0];
    (&mut *page).set_segment_in_use(true);
    page
}

/// Allocate a dedicated huge segment of at least `size` bytes and hand out
/// its single page.
pub unsafe fn mi_segment_huge_page_alloc(
    size: usize,
    tld: *mut mi_segments_tld_t,
    os_tld: *mut mi_os_tld_t,
) -> *mut mi_page_t {
    let segment = mi_segment_alloc(
        size,
        mi_page_kind_e_MI_PAGE_HUGE,
        MI_SEGMENT_SHIFT as usize,
        tld,
        os_tld,
    );
    if segment.is_null() {
        return null_mut();
    }

    // The only page is claimed immediately.
    let seg = &mut *segment;
    seg.used = 1;
    let page: *mut mi_page_t = &mut seg.pages[0];
    (&mut *page).set_segment_in_use(true);
    page
}

/// Try to reclaim abandoned segments into `heap`. With `try_full` the whole
/// abandoned list (plus slack) may be scanned; otherwise at most 1/8th of it
/// (minimum 8). Returns true when at least one segment was reclaimed.
pub unsafe fn _mi_segment_try_reclaim_abandoned(
    heap: *mut mi_heap_t,
    try_full: bool,
    tld: *mut mi_segments_tld_t,
) -> bool {
    let mut reclaimed = 0;
    let mut atmost;

    // Bound how many segments this call will process.
    if try_full {
        atmost = abandoned_count + 16;
    } else {
        atmost = abandoned_count / 8;
        if atmost < 8 {
            atmost = 8;
        }
    }

    while atmost > reclaimed {
        // Lock-free pop of the head of the global abandoned list: retry
        // while the CAS loses a race; a null head means the list is empty.
        let mut segment;
        loop {
            segment = abandoned.0;
            let ok = !segment.is_null()
                && !mi_atomic_compare_exchange_ptr(
                    &mut abandoned.0 as *mut _ as _,
                    (&mut *segment).abandoned_next as usize,
                    segment as usize,
                );
            if !ok {
                break;
            }
        }
        if segment.is_null() {
            break;
        }

        // Take ownership for the current thread and restore accounting.
        mi_atomic_decrement(&mut abandoned_count);
        (&mut *segment).thread_id = _mi_thread_id();
        (&mut *segment).abandoned_next = null_mut();
        mi_segments_track_size((&mut *segment).segment_size as i64, tld);
        mi_stat_decrease(&mut (&mut *(&mut *(&mut *tld)).stats).segments_abandoned, 1);

        // Small segments with free pages become findable again.
        if (&mut *segment).page_kind == mi_page_kind_e_MI_PAGE_SMALL && mi_segment_has_free(segment)
        {
            mi_segment_enqueue(&mut (&mut *tld).small_free, segment);
        }

        // Re-attach each live page: clear it if all its blocks are free,
        // otherwise hand it to the reclaiming heap.
        for i in 0..(&mut *segment).capacity {
            let page = &mut (&mut *segment).pages as *mut mi_page_s;
            let page = &mut *page.add(i);
            if (&*page).segment_in_use() {
                (&mut *segment).abandoned -= 1;
                mi_stat_decrease(&mut (&mut *(&mut *(&mut *tld)).stats).pages_abandoned, 1);
                if mi_page_all_free(page) {
                    mi_segment_page_clear(segment, page, (&mut *tld).stats);
                } else {
                    _mi_page_reclaim(heap, page);
                }
            }
        }
        // If clearing emptied the segment, free it; otherwise it counts as
        // reclaimed.
        if (&*segment).used == 0 {
            mi_segment_free(segment, false, tld);
        } else {
            reclaimed += 1;
        }
    }
    reclaimed > 0
}

/// True when the segment still has at least one unused page slot.
pub unsafe fn mi_segment_has_free(segment: *mut mi_segment_t) -> bool {
    let seg = &*segment;
    seg.used < seg.capacity
}
