use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};

use libc_print::libc_println;

use super::*;

// Alignment granularity (256 KiB) used when sizing huge-page segments in `mi_segment_size`.
const MI_PAGE_HUGE_ALIGN: usize = 256 * 1024;


/* -----------------------------------------------------------
   Segment allocation
----------------------------------------------------------- */

/// Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE`.
///
/// Tries the per-thread segment cache first and only falls back to a
/// fresh OS allocation when nothing suitable is cached. Returns a null
/// pointer when the OS allocation fails.
///
/// * `required` — requested payload size; `0` means a standard segment.
/// * `kind` / `page_shift` — determine the per-page size and page count.
pub fn mi_segment_alloc(required: usize, kind: MiPageKind, page_shift: usize, tld: *mut MiSegmentsTld, os: *mut MiOsTld) -> *mut MiSegment {
    // calculate needed size first
    let capacity = if kind == MiPageKind::Huge {
        1 // a huge segment holds exactly one page
    } else {
        MI_SEGMENT_SIZE / (1 << page_shift)
    };
    let mut info_size = 0;
    let mut pre_size = 0;
    let segment_size = mi_segment_size(capacity, required, &mut pre_size, &mut info_size);

    // Try to reuse a cached segment of sufficient size
    let mut segment = mi_segment_cache_find(tld, segment_size);

    // and otherwise allocate it from the OS
    if segment.is_null() {
        segment = _mi_os_alloc_aligned(segment_size, MI_SEGMENT_SIZE, os) as *mut MiSegment;
        if segment.is_null() { return std::ptr::null_mut(); }
        mi_segments_track_size(segment_size, tld, false);
    }

    unsafe {
        // BUG FIX: `write_bytes` counts in elements of the pointee type.
        // The previous code passed the `*mut MiSegment` directly, zeroing
        // `info_size * size_of::<MiSegment>()` bytes and clobbering memory
        // far past the info area. Cast to `*mut u8` so exactly `info_size`
        // BYTES are zeroed.
        std::ptr::write_bytes(segment as *mut u8, 0, info_size);
        (*segment).page_kind = kind;
        (*segment).capacity = capacity;
        (*segment).page_shift = page_shift;
        (*segment).segment_size = segment_size;
        (*segment).segment_info_size = pre_size;
        (*segment).thread_id = mi_thread_id();
        (*segment).cookie = 0x12345678;
        // give every page its index within the segment
        let page = (*segment).pages.as_ptr() as *mut MiPage;
        for i in 0..capacity {
            let cur_page = page.add(i);
            (*cur_page).segment_idx = i as u8;
        }
        segment
    }
}


/// Release a segment: unlink it from the small-free queue, clear the
/// per-page RESET marks, and either cache it or hand it back to the OS.
pub fn mi_segment_free(segment: *mut MiSegment, force: bool, tld: *mut MiSegmentsTld) {
    unsafe {
        // Detach from the free-page queue if the segment is still linked.
        if mi_segment_is_in_free_queue(segment, tld) {
            match (*segment).page_kind {
                MiPageKind::Small => mi_segment_queue_remove(&mut (*tld).small_free, segment),
                _ => libc_println!("mimalloc: expecting small segment"),
            }
        }

        // The segment no longer belongs to any thread.
        (*segment).thread_id = 0;
        // Drop any RESET marks (removing an absent flag is a no-op).
        for idx in 0..(*segment).capacity {
            let page = _mi_segment_page_idx(segment, idx);
            (*page).owner_flags.remove(MiPageOwnerFlags::RESET);
        }

        // Cache the segment when allowed; otherwise return it to the OS.
        if force || !mi_segment_cache_insert(segment, tld) {
            mi_segment_os_free(segment, (*segment).segment_size, tld);
        }
    }
}



/* -----------------------------------------------------------
  Queue of segments containing free pages
----------------------------------------------------------- */

/// True when `queue` holds no segments.
pub fn mi_segment_queue_is_empty(queue: *const MiSegmentQueue) -> bool {
    unsafe { (*queue).first.is_null() }
}

/// Whether `segment` is currently linked into the thread's small-free queue.
/// A non-null `next`/`prev` means it is linked somewhere in the middle; the
/// head check catches a single-element queue where both links are null.
pub fn mi_segment_is_in_free_queue(segment: *mut MiSegment, tld: *const MiSegmentsTld) -> bool {
    unsafe {
        let linked = !(*segment).next.is_null() || !(*segment).prev.is_null();
        linked || (*tld).small_free.first == segment
    }
}

/// Unlink `segment` from the doubly-linked `queue` and clear its links.
pub fn mi_segment_queue_remove(queue: *mut MiSegmentQueue, segment: *mut MiSegment) {
    unsafe {
        let prev = (*segment).prev;
        let next = (*segment).next;
        if !prev.is_null() { (*prev).next = next; }
        if !next.is_null() { (*next).prev = prev; }
        // patch up the queue endpoints if the segment was first/last
        if (*queue).first == segment { (*queue).first = next; }
        if (*queue).last == segment { (*queue).last = prev; }
        (*segment).next = std::ptr::null_mut();
        (*segment).prev = std::ptr::null_mut();
    }
}

/// Append `segment` at the tail of `queue`.
pub fn mi_segment_queue(queue: *mut MiSegmentQueue, segment: *mut MiSegment) {
    unsafe {
        let tail = (*queue).last;
        (*segment).next = std::ptr::null_mut();
        (*segment).prev = tail;
        if tail.is_null() {
            // queue was empty: segment becomes the head as well
            (*queue).first = segment;
        } else {
            (*tail).next = segment;
        }
        (*queue).last = segment;
    }
}

/// Insert `segment` immediately before `elem`; a null `elem` appends at the tail.
pub fn mi_segment_queue_insert_before(queue: *mut MiSegmentQueue, elem: *mut MiSegment, segment: *mut MiSegment) {
    unsafe {
        // predecessor is elem's prev, or the current tail when appending
        let prev = if elem.is_null() { (*queue).last } else { (*elem).prev };
        (*segment).prev = prev;
        (*segment).next = elem;
        if prev.is_null() {
            (*queue).first = segment; // inserting at the front
        } else {
            (*prev).next = segment;
        }
        if elem.is_null() {
            (*queue).last = segment; // appending at the back
        } else {
            (*elem).prev = segment;
        }
    }
}

/// Return the start address of `page`'s data area inside `segment`,
/// optionally reporting the usable page size through `page_size`.
/// The first page starts after the segment info area and is
/// correspondingly smaller.
pub fn _mi_segment_page_start(segment: *const MiSegment, page: *const MiPage, page_size: *mut usize) -> *mut u8 {
    unsafe {
        let idx = (*page).segment_idx as usize;
        // a huge segment consists of a single page spanning the whole segment
        let full = if (*segment).page_kind == MiPageKind::Huge {
            (*segment).segment_size
        } else {
            1 << (*segment).page_shift
        };
        let base = segment as *mut u8;
        let (start, avail) = if idx == 0 {
            // the first page starts after the segment info (and possible guard page)
            let info = (*segment).segment_info_size;
            (base.add(info), full - info)
        } else {
            (base.add(idx * full), full)
        };
        if !page_size.is_null() {
            *page_size = avail;
        }
        start
    }
}

/// Compute the total byte size of a segment with `capacity` pages, and
/// report the info-area size (`info_size`) and the offset at which page
/// data begins (`pre_size`). `required == 0` means a standard segment.
fn mi_segment_size(capacity: usize, required: usize, pre_size: *mut usize, info_size: *mut usize) -> usize {
    // header + trailing page-metadata array (one entry is inline) + slack
    let minsize = std::mem::size_of::<MiSegment>() + (capacity - 1) * std::mem::size_of::<MiPage>() + 16;
    let guardsize = 0; // no guard pages in this build
    // renamed from `isize` to avoid shadowing the primitive integer type
    let info = _mi_align_up(minsize, MI_MAX_ALIGN_SIZE);

    unsafe {
        *info_size = info;
        *pre_size = info + guardsize;
    }
    if required == 0 {
        MI_SEGMENT_SIZE
    } else {
        _mi_align_up(required + info + guardsize * 2, MI_PAGE_HUGE_ALIGN)
    }
}



/* -----------------------------------------------------------
Segment caches
We keep a small segment cache per thread to avoid repeated allocation
and free in the OS if a program allocates memory and then frees
all again repeatedly. (We tried a one-element cache but that
proves to be too small for certain workloads).
----------------------------------------------------------- */

/// Update the thread's segment-size statistics: subtract when `is_sub`,
/// otherwise add, and keep the peak watermark current.
pub fn mi_segments_track_size(segment_size: usize, tld: *mut MiSegmentsTld, is_sub: bool) {
    unsafe {
        let tld = &mut *tld;
        if is_sub {
            tld.current_size -= segment_size;
        } else {
            tld.current_size += segment_size;
        }
        if tld.peak_size < tld.current_size {
            tld.peak_size = tld.current_size;
        }
    }
}

/// Return a segment's memory to the OS and update the size statistics.
pub fn mi_segment_os_free(segment: *mut MiSegment, size: usize, tld: *mut MiSegmentsTld) {
    mi_segments_track_size(size, tld, /*is_sub*/ true);
    _mi_os_free(segment.cast::<u8>(), size);
}

/// The segment cache is limited to be at most 1/8 of the peak size
/// in use (and no more than 32)
const MI_SEGMENT_CACHE_MAX: usize = 32;
// Denominator of the peak-size fraction the cache may occupy (1/8).
const MI_SEGMENT_CACHE_FRACTION: usize = 8;


/// Scan the per-thread segment cache for a segment of at least `required`
/// bytes; `reverse` scans from the back (used by LRU eviction).
///
/// On a hit the segment is removed from the cache and returned, possibly
/// after shrinking it to `required` bytes. Returns null when no suitable
/// segment exists — or when shrinking fails, in which case the candidate
/// is freed back to the OS. `required == 0` accepts any segment.
///
/// # Safety
/// `tld` must be a valid pointer to this thread's segments TLD.
pub unsafe fn _mi_segment_cache_findx(tld: *mut MiSegmentsTld, required: usize, reverse: bool) -> *mut MiSegment {
    let mut segment = if reverse {
        (*tld).cache.last
    } else {
        (*tld).cache.first
    };
    while !segment.is_null() {
        if (*segment).segment_size >= required {
            // Take it out of the cache up front: every path below either
            // returns this segment or frees it, so the accounting is final.
            (*tld).cache_count -= 1;
            (*tld).cache_size -= (*segment).segment_size;
            mi_segment_queue_remove(&mut (*tld).cache, segment);
            // exact size match
            if required == 0 || (*segment).segment_size == required {
                return segment;
            }
            // not more than 25% wasted and on a huge segment? (in that case the segment size does not need to match required)
            else if required != MI_SEGMENT_SIZE && (*segment).segment_size - ((*segment).segment_size / 4) <= required {
                return segment;
            }
            // try to shrink the memory to match exactly
            else {
                if _mi_os_shrink(segment as *mut u8, (*segment).segment_size, required) {
                    // shrink succeeded: adjust current-size stats to the new size
                    (*tld).current_size -= (*segment).segment_size;
                    (*tld).current_size += required;
                    (*segment).segment_size = required;
                    return segment;
                } else {
                    // if that all fails, we give up
                    mi_segment_os_free(segment, (*segment).segment_size, tld);
                    return std::ptr::null_mut();
                }
            }
        }
        // advance in scan direction (queue links are intact: the removal
        // above only happens on paths that return)
        segment = if reverse {
            (*segment).prev
        } else {
            (*segment).next
        };
    }
    std::ptr::null_mut()
}



/// Pop a cached segment of at least `required` bytes, scanning from the
/// front (the cache is kept ordered by ascending size). Null on miss.
pub fn mi_segment_cache_find(tld: *mut MiSegmentsTld, required: usize) -> *mut MiSegment {
    unsafe { _mi_segment_cache_findx(tld, required, /*reverse*/ false) }
}

/// Evict one segment from the cache (LRU: takes the last entry).
// TODO: random eviction instead?
pub fn mi_segment_cache_evict(tld: *mut MiSegmentsTld) -> *mut MiSegment {
    unsafe { _mi_segment_cache_findx(tld, 0, /*reverse*/ true) }
}


/// Returns `true` when the per-thread segment cache is at capacity
/// (by count, or by the 1/`MI_SEGMENT_CACHE_FRACTION`-of-peak budget).
/// As a side effect, shrinks the cache while it exceeds the budget.
pub fn mi_segment_cache_full(tld: *mut MiSegmentsTld) -> bool {
    unsafe {
        if (*tld).cache_count < MI_SEGMENT_CACHE_MAX && (*tld).cache_size * MI_SEGMENT_CACHE_FRACTION < (*tld).peak_size {
            return false;
        }
        // take the opportunity to reduce the segment cache if it is too large (now)
        while (*tld).cache_size * MI_SEGMENT_CACHE_FRACTION >= (*tld).peak_size + 1 {
            let segment = mi_segment_cache_evict(tld);
            if segment.is_null() {
                // BUG FIX: previously a null eviction result left the loop
                // condition unchanged and spun forever. If the cache is
                // empty while the accounting still says "over budget",
                // bail out instead of looping.
                break;
            }
            mi_segment_os_free(segment, (*segment).segment_size, tld);
        }
        true
    }
}


/// Try to place `segment` in the thread's cache; returns `false` when the
/// cache is full (caller should free the segment instead).
pub fn mi_segment_cache_insert(segment: *mut MiSegment, tld: *mut MiSegmentsTld) -> bool {
    unsafe {
        if mi_segment_cache_full(tld) {
            return false;
        }
        // keep the cache ordered by ascending segment size: walk to the
        // first cached segment at least as large as ours
        let mut pos = (*tld).cache.first;
        while !pos.is_null() && (*pos).segment_size < (*segment).segment_size {
            pos = (*pos).next;
        }
        mi_segment_queue_insert_before(&mut (*tld).cache, pos, segment);
        (*tld).cache_count += 1;
        (*tld).cache_size += (*segment).segment_size;
        true
    }
}

/// Called by ending threads to flush every cached segment back to the OS.
pub fn _mi_segment_thread_collect(tld: *mut MiSegmentsTld) {
    unsafe {
        // required == 0 matches any cached segment; drain until empty
        let mut segment = mi_segment_cache_find(tld, 0);
        while !segment.is_null() {
            mi_segment_os_free(segment, (*segment).segment_size, tld);
            segment = mi_segment_cache_find(tld, 0);
        }
    }
}


/* -----------------------------------------------------------
  Free page management inside a segment
----------------------------------------------------------- */

/// Whether the segment still has at least one unused page slot.
pub fn mi_segment_has_free(segment: *const MiSegment) -> bool {
    unsafe {
        // BUG FIX: the previous code was `!(*segment).used < (*segment).capacity`.
        // On `usize`, `!` is BITWISE negation (not logical), so it compared
        // `!used` (e.g. `usize::MAX` when used == 0) against the capacity and
        // was almost always false. The intended test is simply used < capacity.
        (*segment).used < (*segment).capacity
    }
}

/// Return the first page in `segment` not marked in-use, or null when
/// every page is taken.
pub fn mi_segment_find_free(segment: *mut MiSegment) -> *mut MiPage {
    unsafe {
        let base = (*segment).pages.as_ptr() as *mut MiPage;
        let mut idx = 0;
        while idx < (*segment).capacity {
            let candidate = base.add(idx);
            if !(*candidate).owner_flags.contains(MiPageOwnerFlags::SEGMENT_IN_USE) {
                return candidate;
            }
            idx += 1;
        }
        std::ptr::null_mut()
    }
}

/* -----------------------------------------------------------
   Free
----------------------------------------------------------- */

/// Mark `page` as no longer in use: reset its memory to reduce pressure,
/// zero its metadata (preserving the segment index and the RESET flag),
/// and decrement the segment's used count.
pub fn mi_segment_page_clear(segment: *mut MiSegment, page: *mut MiPage) {
    unsafe {
        // bytes that were actually carved into blocks on this page
        let inuse = (*page).capacity as usize * (*page).block_size;
        // reset the page memory to reduce memory pressure
        if !(*page).owner_flags.contains(MiPageOwnerFlags::RESET) {
            let mut psize= 0;
            let start = _mi_segment_page_start(segment, page, &mut psize);
            (*page).owner_flags.insert(MiPageOwnerFlags::RESET);
            if inuse > 0 {
                _mi_os_reset(start, inuse);
            }
        }

        // zero the page data
        // don't clear the index
        let idx = (*page).segment_idx;
        // don't clear the reset flag
        let is_reset = (*page).owner_flags.contains(MiPageOwnerFlags::RESET);
        // count of 1 == exactly one `MiPage` worth of bytes
        std::ptr::write_bytes(page, 0, 1);
        (*page).segment_idx = idx;
        if is_reset {
            (*page).owner_flags.insert(MiPageOwnerFlags::RESET);
        }
        (*segment).used -= 1;
    }
}

/* -----------------------------------------------------------
   Free
----------------------------------------------------------- */

/// Free a page and, depending on the segment's remaining usage, free the
/// whole segment, abandon it, or put it back on the small-free queue.
pub fn _mi_segment_page_free(page: *mut MiPage, force: bool, tld: *mut MiSegmentsTld) {
    unsafe {
        let segment = _mi_page_segment(page);

        // mark it as free now
        mi_segment_page_clear(segment, page);

        let used = (*segment).used;
        if used == 0 {
            // no more used pages; remove from the free list and free the segment
            mi_segment_free(segment, force, tld);
        } else if used == (*segment).abandoned {
            // only abandoned pages; remove from the free list and abandon
            mi_segment_abandon(segment, tld);
        } else if used + 1 == (*segment).capacity {
            // the segment just regained a free page; re-queue it
            // (for now we only support small pages)
            mi_segment_queue(&mut (*tld).small_free, segment);
        }
    }
}



/* -----------------------------------------------------------
   Abandonment
----------------------------------------------------------- */

/// When threads terminate, they can leave segments with
/// live blocks (reached through other threads). Such segments
/// are 'abandoned' and will be reclaimed by other threads to
/// reuse their pages and/or free them eventually.
///
/// Atomics provide interior mutability, so plain `static` suffices here:
/// the previous `static mut` was unnecessary and invites undefined
/// behavior (aliased `&mut` to a global). All existing accesses — loads,
/// stores, CAS — work unchanged on a shared reference.
static ABANDONED_COUNT: AtomicUsize = AtomicUsize::new(0);
static ABANDONED: AtomicPtr<MiSegment> = AtomicPtr::new(std::ptr::null_mut());


/// Abandon a whole segment: unlink it from the thread's free queue and
/// push it onto the global `ABANDONED` list (lock-free CAS push on the
/// head, linked via `abandoned_next`).
pub fn mi_segment_abandon(segment: *mut MiSegment, tld: *mut MiSegmentsTld) {
    unsafe {
        // remove the segment from the free page queue if needed
        if mi_segment_is_in_free_queue(segment, tld) {
            mi_segment_queue_remove(&mut (*tld).small_free, segment);
        }
        // the segment no longer belongs to any thread
        (*segment).thread_id = 0;
        // push onto the abandoned list; on CAS failure reuse the
        // returned current head instead of reloading
        let mut head = ABANDONED.load(Ordering::SeqCst);
        loop {
            (*segment).abandoned_next = head;
            match ABANDONED.compare_exchange(head, segment, Ordering::SeqCst, Ordering::SeqCst) {
                Ok(_) => break,
                Err(current) => head = current,
            }
        }
        ABANDONED_COUNT.fetch_add(1, Ordering::SeqCst);
    }
}

/// Abandon a single page; when it was the last non-abandoned page, the
/// entire segment is abandoned.
pub fn _mi_segment_page_abandon(page: *mut MiPage, tld: *mut MiSegmentsTld) {
    unsafe {
        let segment = _mi_page_segment(page);
        (*segment).abandoned += 1;
        let all_abandoned = (*segment).used == (*segment).abandoned;
        if all_abandoned {
            // all pages are abandoned, abandon the entire segment
            mi_segment_abandon(segment, tld);
        }
    }
}


/// Try to reclaim abandoned segments for the current thread.
///
/// Pops segments off the global `ABANDONED` list (up to a budget derived
/// from `try_all`), re-adopts them for this thread, reclaims their live
/// pages into `heap`, and frees segments that turn out to be empty.
/// Returns `true` when at least one segment was taken off the list.
///
/// # Safety
/// `heap` and `tld` must be valid pointers owned by the calling thread.
pub unsafe fn _mi_segment_try_reclaim_abandoned(heap: *const MiHeap, try_all: bool, tld: *mut MiSegmentsTld) -> bool {
    // budget of segments to examine
    let atmost = if try_all {
        ABANDONED_COUNT.load(Ordering::SeqCst) + 16
    } else {
        let n = ABANDONED_COUNT.load(Ordering::SeqCst) / 8;
        if n < 8 { 8 } else { n }
    };

    let mut reclaimed: usize = 0;
    // for 'atmost' 'reclaimed' abandoned segments
    while atmost > reclaimed {
        // try to claim the head of the abandoned segments
        let mut segment: *mut MiSegment;
        loop {
            segment = ABANDONED.load(Ordering::SeqCst);
            // stop early if no more segments available
            if segment.is_null() { return reclaimed > 0; }
            // BUG FIX: the abandoned list is linked through `abandoned_next`
            // (see `mi_segment_abandon`), not the free-queue `next` pointer;
            // popping via `next` corrupted the abandoned list.
            let next = (*segment).abandoned_next;
            if ABANDONED.compare_exchange(segment, next, Ordering::SeqCst, Ordering::SeqCst).is_ok() {
                break;
            }
        }
        // got one, try to reclaim it
        ABANDONED_COUNT.fetch_sub(1, Ordering::SeqCst);
        (*segment).thread_id = mi_thread_id();
        (*segment).abandoned_next = std::ptr::null_mut();
        mi_segments_track_size((*segment).segment_size, tld, false);
        // add its free pages to the current thread
        if (*segment).page_kind == MiPageKind::Small && mi_segment_has_free(segment) {
            mi_segment_queue(&mut (*tld).small_free, segment);
        }
        // add its abandoned pages to the current thread
        let page = (*segment).pages.as_ptr() as *mut MiPage;
        for i in 0..(*segment).capacity {
            let cur_page = page.add(i);
            if (*cur_page).owner_flags.contains(MiPageOwnerFlags::SEGMENT_IN_USE) {
                (*segment).abandoned -= 1;
                if mi_page_all_free(cur_page) {
                    // if everything free by now, free the page
                    mi_segment_page_clear(segment, cur_page);
                } else {
                    // otherwise reclaim it for this heap
                    // BUG FIX: reclaim the page at index `i` (`cur_page`),
                    // not `page` which always points at pages[0].
                    _mi_page_reclaim(heap, cur_page);
                }
            }
        }
        if (*segment).used == 0 {
            // if everything free by now, free the segment
            mi_segment_free(segment, false, tld);
        } else {
            reclaimed += 1;
        }
    }
    reclaimed > 0
}


/* -----------------------------------------------------------
   Small page allocation
----------------------------------------------------------- */

/// Allocate a small page inside a segment.
///
/// # Safety
/// Requires that the segment has free pages — otherwise
/// `mi_segment_find_free` returns null and the dereference below is UB.
pub unsafe fn mi_segment_small_page_alloc_in(segment: *mut MiSegment, tld: *mut MiSegmentsTld) -> *mut MiPage {
    let page = mi_segment_find_free(segment);
    (*page).owner_flags.insert(MiPageOwnerFlags::SEGMENT_IN_USE);
    (*segment).used += 1;
    let now_full = (*segment).used == (*segment).capacity;
    if now_full {
        // no free pages remain: drop the segment from the free queue
        mi_segment_queue_remove(&mut (*tld).small_free, segment);
    }
    page
}


/// Allocate a small page, creating a fresh small segment first when the
/// free queue is empty. Null when the OS allocation fails.
pub fn mi_segment_small_page_alloc(tld: *mut MiSegmentsTld, os: *mut MiOsTld) -> *mut MiPage {
    unsafe {
        // ensure the small-free queue has at least one segment with free pages
        if mi_segment_queue_is_empty(&(*tld).small_free) {
            let fresh = mi_segment_alloc(0, MiPageKind::Small, MI_SMALL_PAGE_SHIFT, tld, os);
            if fresh.is_null() {
                return std::ptr::null_mut();
            }
            mi_segment_queue(&mut (*tld).small_free, fresh);
        }
        // take a page from the segment at the head of the queue
        mi_segment_small_page_alloc_in((*tld).small_free.first, tld)
    }
}

/* -----------------------------------------------------------
   large page allocation
----------------------------------------------------------- */

/// Allocate a segment holding a single large page and return that page.
pub fn mi_segment_large_page_alloc(tld: *mut MiSegmentsTld, os: *mut MiOsTld) -> *mut MiPage {
    let segment = mi_segment_alloc(0, MiPageKind::Large, MI_LARGE_PAGE_SHIFT, tld, os);
    if segment.is_null() {
        return std::ptr::null_mut();
    }
    unsafe {
        // the single page occupies the whole segment
        (*segment).used = 1;
        let page: *mut MiPage = &mut (*segment).pages[0];
        (*page).owner_flags.insert(MiPageOwnerFlags::SEGMENT_IN_USE);
        page
    }
}

/// Allocate a dedicated huge segment for a block of `size` bytes and
/// return its single page.
pub fn mi_segment_huge_page_alloc(size: usize, tld: *mut MiSegmentsTld, os: *mut MiOsTld) -> *mut MiPage {
    let segment = mi_segment_alloc(size, MiPageKind::Huge, MI_SEGMENT_SHIFT, tld, os);
    if segment.is_null() {
        return std::ptr::null_mut();
    }
    unsafe {
        // the single page occupies the whole segment
        (*segment).used = 1;
        let page: *mut MiPage = &mut (*segment).pages[0];
        (*page).owner_flags.insert(MiPageOwnerFlags::SEGMENT_IN_USE);
        page
    }
}

/* -----------------------------------------------------------
   Page allocation and free
----------------------------------------------------------- */

/// Dispatch a page allocation to the small / large / huge path based on
/// the requested block size.
pub fn _mi_segment_page_alloc(block_size: usize, tld: *mut MiSegmentsTld, os: *mut MiOsTld) -> *mut MiPage {
    let large_limit = MI_LARGE_SIZE_MAX - std::mem::size_of::<MiSegment>();
    if block_size < MI_SMALL_PAGE_SIZE / 8 {
        mi_segment_small_page_alloc(tld, os)
    } else if block_size < large_limit {
        mi_segment_large_page_alloc(tld, os)
    } else {
        mi_segment_huge_page_alloc(block_size, tld, os)
    }
}