use std::ptr::null_mut;
use libc::{__errno_location, MADV_FREE};
use libc_print::libc_println;

use super::{mi_align_down_ptr, mi_align_up_ptr, MiOsTld, _mi_align_down, _mi_align_up, MI_SEGMENT_SIZE};



// Platform-specific constants.
// Default OS page size used for conservative page alignment.
// NOTE(review): assumes 4 KiB pages — confirm for the target platform.
const PAGE_SIZE: usize = 4096;
// mmap flag passed to `_mi_os_alloc`; 0 disables it here.
// NOTE(review): on Linux the real MAP_NORESERVE is 0x4000 — replace per platform.
const MAP_NORESERVE: i32 = 0;
// madvise advice used as fallback when MADV_FREE is unsupported; 4 is the Linux value.
// NOTE(review): verify the value per platform.
const MADV_DONTNEED: i32 = 4;

/// Page-align the region `[addr, addr + size)` conservatively inwards:
/// the start is rounded up and the end rounded down to `PAGE_SIZE`, so
/// the result always lies inside the original range.
///
/// Returns the aligned start and stores the aligned length in `*newsize`
/// (when non-null); returns null (with `*newsize == 0`) when the range is
/// empty, `addr` is null, or no whole page fits inside the range.
///
/// # Safety
/// `addr..addr + size` must lie within a single allocated object, and
/// `newsize`, when non-null, must be valid for writes.
unsafe fn mi_os_page_align_region(addr: *mut u8, size: usize, newsize: *mut usize) -> *mut u8 {
    if !newsize.is_null() {
        *newsize = 0;
    }
    if size == 0 || addr.is_null() {
        return null_mut();
    }

    // Page align conservatively within the range.
    let start = mi_align_up_ptr(addr, PAGE_SIZE);
    let end = mi_align_down_ptr(addr.add(size), PAGE_SIZE);
    // Keep the difference signed: when the range spans less than one full
    // page, `end` falls below `start`; the old `as usize` cast wrapped the
    // negative difference to a huge value, so `diff <= 0` (on usize) never
    // caught it and a bogus length was reported.
    let diff = end.offset_from(start);
    if diff <= 0 {
        return null_mut();
    }
    if !newsize.is_null() {
        *newsize = diff as usize;
    }
    start
}


/// Shrink an OS allocation `p` of `oldsize` bytes down to `newsize` bytes
/// by unmapping the page-aligned tail.
///
/// Returns `true` on success or when nothing needs to change; `false`
/// when `p` is null, `newsize` exceeds `oldsize`, or the tail is not
/// exactly page aligned (in which case nothing is unmapped).
pub fn _mi_os_shrink(p: *mut u8, oldsize: usize, newsize: usize) -> bool {
    if p.is_null() || oldsize < newsize {
        return false;
    }
    if oldsize == newsize {
        return true;
    }

    unsafe {
        // The freed tail must start exactly on a page boundary, or we
        // cannot unmap it precisely.
        let tail = p.add(newsize);
        let mut tail_size = 0usize;
        let aligned = mi_os_page_align_region(tail, oldsize - newsize, &mut tail_size);
        if tail_size == 0 || aligned != tail {
            return false;
        }
        mi_munmap(aligned, tail_size)
    }
}




/* -----------------------------------------------------------
  OS allocation using mmap/munmap
----------------------------------------------------------- */


/// Allocate `size` bytes directly from the OS.
/// Returns null for a zero-sized request or when mmap fails.
pub fn _mi_os_alloc(size: usize) -> *mut u8 {
    match size {
        0 => null_mut(),
        n => mi_mmap(null_mut(), n, MAP_NORESERVE),
    }
}

/// Return `size` bytes at `p` to the OS; the unmap result is deliberately ignored.
pub fn _mi_os_free(p: *mut u8, size: usize) {
    let _ = mi_munmap(p, size);
}

fn mi_munmap(addr: *mut u8, size: usize) -> bool {
    if addr == null_mut() || size == 0 {return true;}
    let err: bool = unsafe { libc::munmap(addr as *mut _, size) } == -1;
    if err {
        false
    } else {
        true
    }
}

/// Map `size` bytes of anonymous read/write memory, optionally hinting at
/// `addr` and OR-ing `extra_flags` into the mmap flags.
/// Returns null on a zero-sized request or on mmap failure.
fn mi_mmap(addr: *mut u8, size: usize, extra_flags: i32) -> *mut u8 {
    if size == 0 {
        return null_mut();
    }
    let flags = libc::MAP_PRIVATE | libc::MAP_ANONYMOUS | extra_flags;
    let prot = libc::PROT_READ | libc::PROT_WRITE;
    // -1 / 0 are the conventional fd / offset for an anonymous mapping.
    let p = unsafe { libc::mmap(addr as *mut _, size, prot, flags, -1, 0) };
    if p == libc::MAP_FAILED {
        null_mut()
    } else {
        p as *mut u8
    }
}



// Signal to the OS that the address range is no longer in use
// but may be used later again. This will release physical memory
// pages and reduce swapping while keeping the memory committed.
// We page align to a conservative area inside the range to reset.
/// Signal to the OS that the address range is no longer in use but may be
/// used again later. This releases physical pages and reduces swapping
/// while keeping the memory committed. The range is page-aligned
/// conservatively inwards before resetting.
///
/// Returns `true` when nothing needed to be done or `madvise` succeeded.
pub fn _mi_os_reset(addr: *mut u8, size: usize) -> bool {
    use std::sync::atomic::{AtomicI32, Ordering};

    // Page align conservatively within the range.
    let mut csize = 0;
    let start = unsafe { mi_os_page_align_region(addr, size, &mut csize) };
    if csize == 0 {
        return true;
    }

    // Process-wide cached advice: start with MADV_FREE and fall back
    // permanently to MADV_DONTNEED on kernels that reject MADV_FREE.
    // An atomic replaces the original `static mut`, whose unsynchronized
    // reads/writes were a data race (and are rejected by recent compilers).
    static ADVICE: AtomicI32 = AtomicI32::new(MADV_FREE);

    let advice = ADVICE.load(Ordering::Relaxed);
    let mut err = unsafe { libc::madvise(start as *mut _, csize, advice) };
    if err != 0 && advice == MADV_FREE && unsafe { *__errno_location() } == libc::EINVAL {
        // MADV_FREE unsupported: remember the fallback and retry.
        ADVICE.store(MADV_DONTNEED, Ordering::Relaxed);
        err = unsafe { libc::madvise(start as *mut _, csize, MADV_DONTNEED) };
    }
    err == 0
}


/// Slow but guaranteed way to allocated aligned memory
/// by over-allocating and then reallocating at a fixed aligned
/// address that should be available then.
/// Slow but guaranteed way to allocate aligned memory: over-allocate by
/// `alignment` bytes, take an aligned pointer inside the mapping, and unmap
/// the unused slack before and after it.
///
/// `trie` is the retry counter (recursion is only needed on Windows);
/// returns null after 3 tries, on a size overflow, or on mmap failure.
pub fn mi_os_alloc_aligned_ensured(size: usize, alignment: usize, trie: usize) -> *mut u8 {
    // Stop recursion (only relevant on Windows retries).
    if trie >= 3 {
        return null_mut();
    }
    // Over-allocate so an aligned pointer must exist inside the mapping.
    // `checked_add` replaces the old `assert!` (which always panicked on
    // overflow, making the following `if alloc_size < size` dead code);
    // overflow now returns null as that dead branch clearly intended.
    let alloc_size = match size.checked_add(alignment) {
        Some(n) => n,
        None => return null_mut(),
    };

    // Allocate a chunk that includes the alignment slack.
    let p = mi_mmap(null_mut(), alloc_size, 0);
    if p.is_null() {
        return null_mut();
    }
    // Create an aligned pointer inside the allocated area.
    let aligned_p = mi_align_up_ptr(p, alignment);
    // Selectively unmap the unused parts around the aligned block.
    let pre_size = unsafe { aligned_p.offset_from(p) } as usize;
    let mid_size = _mi_align_up(size, PAGE_SIZE);
    // Guard the subtraction: when the page-rounded middle plus the leading
    // slack exceeds the allocation, the old unchecked subtraction wrapped
    // to a huge `post_size` and unmapped unrelated memory.
    let post_size = alloc_size.saturating_sub(pre_size.saturating_add(mid_size));
    if pre_size > 0 {
        mi_munmap(p, pre_size);
    }
    if post_size > 0 {
        mi_munmap(unsafe { aligned_p.add(mid_size) }, post_size);
    }

    assert!(aligned_p as usize % alignment == 0);
    aligned_p
}







/// Allocate `size` bytes aligned to `alignment`, preferring the cheap
/// paths: the thread-local pool first, then a plain mmap when the next
/// probable mapping address happens to be aligned, and finally the
/// guaranteed (but slower) over-allocate-and-trim path.
///
/// Updates `tld`'s next-probable-address heuristic on success.
pub fn _mi_os_alloc_aligned(size: usize, alignment: usize, tld: *mut MiOsTld) -> *mut u8 {
    if size == 0 {
        return null_mut();
    }
    // Small alignments are satisfied by any OS allocation.
    if alignment < 1024 {
        return _mi_os_alloc(size);
    }

    let mut p = os_pool_alloc(size, alignment, tld);
    if !p.is_null() {
        return p;
    }

    unsafe {
        // `p` is necessarily null here — the original re-tested
        // `p.is_null()` redundantly. If the kernel's next probable
        // mapping address is aligned, an ordinary mmap may come back
        // aligned for free; try that first.
        if (*tld).mmap_next_probable % alignment == 0 {
            p = mi_mmap(null_mut(), size, 0);
            if p.is_null() {
                return null_mut();
            }
        }

        if p.is_null() || p as usize % alignment != 0 {
            // Not aligned after all: free the block and use the slower
            // but guaranteed way to allocate an aligned block.
            if !p.is_null() {
                mi_munmap(p, size);
            }
            p = mi_os_alloc_aligned_ensured(size, alignment, 0);
        }

        if !p.is_null() {
            // Track the direction the kernel is handing out mappings so
            // the next probable address stays a good guess.
            if (*tld).mmap_previous > p {
                (*tld).mmap_next_probable = _mi_align_down(p as usize - MI_SEGMENT_SIZE, PAGE_SIZE);
            } else {
                (*tld).mmap_next_probable = _mi_align_up(p as usize + MI_SEGMENT_SIZE, PAGE_SIZE);
            }
            (*tld).mmap_previous = p;
        }

        p
    }
}


// The pool only serves segment-aligned requests.
const MI_POOL_ALIGNMENT: usize = MI_SEGMENT_SIZE;
// Each pool refill maps ten segments' worth of memory at once.
const MI_POOL_SIZE: usize = MI_SEGMENT_SIZE * 10;

/// Carve a segment-aligned chunk of at least `size` bytes out of the
/// per-thread pool in `tld`, refilling the pool from the OS when empty.
///
/// Returns null when the alignment is not `MI_POOL_ALIGNMENT`, the rounded
/// size exceeds `MI_POOL_SIZE`, the refill fails, or the remaining pool is
/// too small for the request.
pub fn os_pool_alloc(mut size: usize, alignment: usize, tld: *mut MiOsTld) -> *mut u8 {
    // Only segment-aligned requests are served from the pool.
    if alignment != MI_POOL_ALIGNMENT {
        return null_mut();
    }

    size = _mi_align_up(size, MI_POOL_ALIGNMENT);
    if size > MI_POOL_SIZE {
        return null_mut();
    }

    unsafe {
        let tld = &mut *tld;

        // Refill from the OS when the pool has been exhausted.
        if tld.pool_available == 0 {
            tld.pool = mi_os_alloc_aligned_ensured(MI_POOL_SIZE, MI_POOL_ALIGNMENT, 0);
            if tld.pool.is_null() {
                return null_mut();
            }
            tld.pool_available += MI_POOL_SIZE;
        }

        if size > tld.pool_available {
            return null_mut();
        }

        // Bump-allocate from the front of the pool.
        let chunk = tld.pool;
        tld.pool = tld.pool.add(size);
        tld.pool_available -= size;
        chunk
    }
}