use super::{*,types::*,atomic::*,track::*};
use crate::segment::{mi_segment_page_start,};
use crate::mi_assert;
use crate::page_queue::_mi_bin;

// C-style "unused argument" marker: evaluates `$x` once and discards the
// result, silencing unused-variable warnings (mirrors mimalloc's MI_UNUSED).
#[macro_export]
macro_rules! MI_UNUSED {
    ($x:expr) => {
        let _ = $x;
    };
}

// Debug builds: the argument IS used (by asserts), so expand to nothing.
#[cfg(MI_DEBUG)]
#[macro_export]
macro_rules! MI_UNUSED_RELEASE {
    ($x:expr) => {};
}

// Release builds: asserts compile away, so the argument would otherwise be
// unused — forward to MI_UNUSED to silence the warning.
#[cfg(not(MI_DEBUG))]
#[macro_export]
macro_rules! MI_UNUSED_RELEASE {
    ($x:expr) => {
        MI_UNUSED!($x);
    };
}

// Ported from mimalloc's arena.c
// Cursor state used when scanning an arena's bitmap fields (arena.c port).
// NOTE(review): field semantics inferred from upstream mimalloc — confirm
// against the arena code that constructs this cursor.
#[repr(C)]
pub struct MiArenaFieldCursor {
    pub start: MiArenaId, 
    pub count: i32,
    pub bitmap_idx: usize,
}

// Repetition helpers for static array initializers: mi_initN!(f) expands to
// N comma-separated calls `f(), f(), …` (ported from mimalloc's MI_INIT
// macros). NOTE(review): a bare comma-separated list is not a stand-alone
// Rust expression — these only make sense spliced into a larger initializer;
// confirm the call sites expand them in a valid position.
#[macro_export]
macro_rules! mi_init4 {
    ($x:ident) => {
        $x(), $x(), $x(), $x()
    };
}

#[macro_export]
macro_rules! mi_init8 {
    ($x:ident) => {
        mi_init4!($x), mi_init4!($x)
    };
}

#[macro_export]
macro_rules! mi_init16 {
    ($x:ident) => {
        mi_init8!($x), mi_init8!($x)
    };
}
#[macro_export]
macro_rules! mi_init32 {
    ($x:ident) => {
        mi_init16!($x), mi_init16!($x)
    };
}
#[macro_export]
macro_rules! mi_init64 {
    ($x:ident) => {
        mi_init32!($x), mi_init32!($x)
    };
}
#[macro_export]
macro_rules! mi_init128 {
    ($x:ident) => {
        mi_init64!($x), mi_init64!($x)
    };
}
#[macro_export]
macro_rules! mi_init256 {
    ($x:ident) => {
        mi_init128!($x), mi_init128!($x)
    };
}

use std::cell::RefCell;
use std::ptr::{self, null_mut};
use std::rc::Rc;
use std::sync::atomic::Ordering;
// Zero every byte of a place expression `$x` in-place.
// NOTE(review): pass the variable itself (e.g. `mi_memzero_var!(v)`);
// passing `&mut v` would take `&mut (&mut v)` and zero the temporary
// reference (one pointer's worth of bytes), not `v`.
#[macro_export]
macro_rules! mi_memzero_var {
    ($x:expr) => {
        unsafe {
            ptr::write_bytes(&mut $x as *mut _ as *mut u8, 0, std::mem::size_of_val(&$x));
        }
    };
}

/// Returns true when `x` is zero or a power of two, matching the C idiom
/// `(x & (x-1)) == 0` used by mimalloc (where 0 is deliberately accepted).
///
/// Fix: use `wrapping_sub` so `x == 0` does not underflow-panic in debug
/// builds; the C original relies on wrapping unsigned arithmetic here.
pub fn mi_is_power_of_two(x: usize) -> bool {
    (x & x.wrapping_sub(1)) == 0
}

/// True when pointer `p` is aligned to `alignment` bytes (`alignment` must
/// be non-zero).
pub fn mi_is_aligned(p: *const std::ffi::c_void, alignment: usize) -> bool {
    mi_assert!(alignment != 0);
    let addr = p as usize;
    addr % alignment == 0
}

/// Round `sz` up to the next multiple of `alignment` (non-zero); a
/// power-of-two alignment takes the fast mask path.
pub fn mi_align_up(sz: usize, alignment: usize) -> usize {
    mi_assert!(alignment != 0);
    let mask = alignment - 1;
    match alignment & mask {
        0 => (sz + mask) & !mask, // alignment is a power of two
        _ => ((sz + mask) / alignment) * alignment,
    }
}

/// Round `sz` down to a multiple of `alignment` (non-zero); a power-of-two
/// alignment takes the fast mask path.
pub fn mi_align_down(sz: usize, alignment: usize) -> usize {
    mi_assert!(alignment != 0);
    let mask = alignment - 1;
    match alignment & mask {
        0 => sz & !mask, // alignment is a power of two
        _ => (sz / alignment) * alignment,
    }
}

/// Round a pointer up to `alignment` bytes.
pub fn mi_align_up_ptr(p: *const std::ffi::c_void, alignment: usize) -> *mut std::ffi::c_void {
    let aligned = mi_align_up(p as usize, alignment);
    aligned as *mut std::ffi::c_void
}

/// Round a pointer down to `alignment` bytes.
pub fn mi_align_down_ptr(p: *const std::ffi::c_void, alignment: usize) -> *mut std::ffi::c_void {
    let aligned = mi_align_down(p as usize, alignment);
    aligned as *mut std::ffi::c_void
}

/// Ceiling division `size / divider`. A zero divider (after the debug
/// assert fires) yields `size` unchanged rather than dividing by zero.
pub fn mi_divide_up(size: usize, divider: usize) -> usize {
    mi_assert!(divider != 0);
    match divider {
        0 => size,
        d => (size + d - 1) / d,
    }
}


/// True when all `size` bytes starting at `p` are zero.
/// Caller must guarantee `p` points at `size` readable bytes.
pub fn mi_mem_is_zero(p: *const std::ffi::c_void, size: usize) -> bool {
    let bytes = unsafe { std::slice::from_raw_parts(p as *const u8, size) };
    bytes.iter().all(|&b| b == 0)
}

/// Number of machine words needed to hold `size` bytes (rounded up).
pub fn mi_wsize_from_size(size: usize) -> usize {
    const WORD: usize = std::mem::size_of::<usize>();
    mi_assert!(size <= usize::MAX - WORD);
    (size + WORD - 1) / WORD
}

// Ported from mimalloc's internal header, lines 368-405 (heap helpers).

use std::sync::Once;

// Lazily-created shared "empty heap" sentinel used to mark uninitialized
// thread heaps. NOTE(review): the `static mut` is only sound because all
// writes go through the `Once` below — confirm nothing else mutates it.
pub static mut _MI_HEAP_EMPTY: Option<MiHeap> = None;
static INIT: Once = Once::new();

// Return the shared empty heap, creating it on first use (thread-safe via
// `Once::call_once`).
fn get_empty_heap() -> &'static MiHeap {
    INIT.call_once(|| {
        unsafe {
            _MI_HEAP_EMPTY = Some(MiHeap::new_empty());
        }
    });
    unsafe { _MI_HEAP_EMPTY.as_ref().unwrap() }
}

/// A heap is the "backing" heap when its thread-local data points back at
/// this very heap.
pub fn mi_heap_is_backing(heap: *const MiHeap) -> bool {
    let backing = unsafe { (*(*heap).tld).heap_backings };
    backing == heap as *mut MiHeap
}

/// A heap pointer counts as initialized when it is not the shared empty
/// placeholder heap; before the placeholder exists nothing can be
/// initialized, so report false.
///
/// Fix: compare raw pointers explicitly — the original compared a
/// `*const MiHeap` against a `&MiHeap` without a cast.
pub fn mi_heap_is_initialized(heap: *const MiHeap) -> bool {
    mi_assert!(heap != ptr::null());
    match unsafe { _MI_HEAP_EMPTY.as_ref() } {
        Some(empty) => heap != empty as *const MiHeap,
        None => false,
    }
}

// XOR a pointer with the main-heap cookie (used for corruption checks).
// NOTE(review): this constructs a fresh empty heap on every call instead of
// referencing the process main heap as upstream mimalloc does; if
// `new_empty()` leaves `cookie == 0` the assert below fires — confirm the
// intended source of the cookie.
pub fn mi_ptr_cookie(p: *const std::ffi::c_void) -> usize{
    let _mi_heap_main = MiHeap::new_empty();
    unsafe {
       mi_assert!(_mi_heap_main.cookie != 0);
        (p as usize) ^ _mi_heap_main.cookie
    }
}

/// Direct lookup of the page serving small allocations of `size` bytes
/// (size must include padding and fit the small-size direct table).
pub fn mi_heap_get_free_small_page(heap: *mut MiHeap, size: usize) -> *mut MiPage {
    mi_assert!(size <= MI_SMALL_SIZE_MAX + MI_PADDING_SIZE);
    let idx = mi_wsize_from_size(size);
    mi_assert!(idx < MI_PAGES_DIRECT);
    unsafe { (*heap).pages_free_direct[idx] }
}

/// Segment containing pointer `p` (segments are MI_SEGMENT_SIZE aligned, so
/// masking the address yields the segment base; the `-1` maps an exact
/// segment-end pointer into the preceding segment).
///
/// Fix: use `wrapping_sub` so a null `p` cannot underflow-panic in debug
/// builds before the null checks run — the C original relies on wrapping
/// unsigned arithmetic here.
pub fn mi_ptr_segment(p: *const std::ffi::c_void) -> *mut MiSegment {
    let segment = ((p as usize).wrapping_sub(1) & !MI_SEGMENT_MASK) as *mut MiSegment;

    if std::mem::size_of::<usize>() <= 4 {
        // 32-bit: only a literal null pointer is rejected.
        if p.is_null() {
            return std::ptr::null_mut();
        }
    } else {
        // 64-bit: reject null and sign-bit-set (kernel-space) addresses.
        if segment.is_null() || segment as isize <= 0 {
            return std::ptr::null_mut();
        }
    }
    segment
}

/// Reinterpret a slice pointer as the page it heads; a page always starts
/// at the first slice of its span (offset 0, positive count).
pub fn mi_slice_to_page(s: *const MiSlice) -> *mut MiPage {
    if s.is_null() {
        return std::ptr::null_mut();
    }
    unsafe {
        mi_assert!((*s).slice_offset == 0 && (*s).slice_count > 0);
    }
    s as *mut MiPage
}

/// Reinterpret a page pointer as its heading slice (the inverse of
/// `mi_slice_to_page`).
pub fn mi_page_to_slice(p: *const MiPage) -> *mut MiSlice {
    if p.is_null() {
        return std::ptr::null_mut();
    }
    unsafe {
        mi_assert!((*p).slice_offset == 0 && (*p).slice_count > 0);
    }
    p as *mut MiSlice
}

/// Segment that owns `page` (derived from the page's address; pages live
/// inside their segment's slice area).
pub fn mi_page_segment(page: *const MiPage) -> *mut MiSegment {
    mi_assert!(!page.is_null());
    mi_ptr_segment(page as *const std::ffi::c_void)
}


// Walk back from `slice` by its `slice_offset` (a byte distance) to the
// first slice of the span; the first slice of a span always has offset 0.
pub fn mi_slice_first(slice: *const MiSlice) -> *mut  MiSlice{
    let start = (slice as *const u8).wrapping_sub(
        unsafe {
            // byte distance back to the head of the span
            (*slice).slice_offset as usize
        }
    ) as *mut MiSlice;
   //mi_assert!(start >= mi_ptr_segment(slice as *const std::ffi::c_void).slices);
   mi_assert!(unsafe { (*start).slice_offset == 0 });
   //mi_assert!(unsafe { start.add((*start).slice_count) > slice });

    start
}

// Page that contains address `p` inside `segment`: compute the slice index
// from the byte offset, then walk back to the first slice of that span and
// reinterpret it as the page.
pub fn mi_segment_page_of(segment: *const MiSegment, p: *const std::ffi::c_void) -> *mut MiPage {
   mi_assert!(p > segment as *const _);
    let diff = p as usize - segment as usize;
   mi_assert!(diff > 0 && diff <= MI_SEGMENT_SIZE);
    // each slice covers 2^MI_SEGMENT_SLICE_SHIFT bytes
    let idx = diff >> MI_SEGMENT_SLICE_SHIFT;
   unsafe{
    mi_assert!(idx <= (*segment).slice_entries);
   }
    unsafe{
        let slice0 = &(*segment).slices[idx] as *const MiSlice;
        // a page starts at the first slice of its span (offset 0)
        let slice = mi_slice_first(slice0);  
        mi_assert!((*slice).slice_offset == 0);
        //mi_assert!(slice >= (*segment).slices && slice < (*segment).slices.add((*segment).slice_entries));
        mi_slice_to_page(slice)
    }
}

impl MiPage {
    /// Start address of this page's data area.
    ///
    /// Fix: the original cross-check passed `&mut *null_mut()` as the
    /// out-size argument, which materializes a reference from a null
    /// pointer (undefined behavior); a throwaway local is used instead.
    pub fn mi_page_start(&self) -> *const u8 {
        mi_assert!(!self.page_start.is_null());
        unsafe {
            // cross-check against the segment's computed page start
            let mut _psize: usize = 0;
            mi_assert!(mi_segment_page_start(mi_page_segment(self), self, &mut _psize) == self.page_start);
        }
        self.page_start
    }

    /// Block size of this page in bytes (always positive).
    pub fn mi_page_block_size(&self) -> usize {
        mi_assert!(self.block_size > 0);
        self.block_size
    }

    /// Whether this is a huge page; the per-page flag must agree with the
    /// owning segment's kind (checked exhaustively below).
    pub fn mi_page_is_huge(&self) -> bool {
        unsafe {
            match (*(mi_page_segment(self))).kind {
                MiSegmentKind::MI_SEGMENT_NORMAL => mi_assert!(self.is_huge == 0),
                MiSegmentKind::MI_SEGMENT_HUGE => mi_assert!(self.is_huge != 0),
            }
            self.is_huge != 0
        }
    }

    /// Usable bytes per block: the block size minus the padding reserved
    /// for overflow detection.
    pub fn mi_page_usable_block_size(&self) -> usize {
        self.mi_page_block_size() - MI_PADDING_SIZE
    }
}

/// Page containing an arbitrary (non-null) heap pointer `p`.
pub fn mi_ptr_page(p: *const std::ffi::c_void) -> *mut MiPage {
    mi_assert!(p != ptr::null());
    mi_segment_page_of(mi_ptr_segment(p), p)
}

/// Total size in bytes of a segment: its slice count times the slice size.
pub fn mi_segment_size(segment: *const MiSegment) -> usize {
    let slices = unsafe { (*segment).segment_slices };
    slices * MI_SEGMENT_SLICE_SIZE
}


/// One-past-the-end address of a segment.
pub fn mi_segment_end(segment: *const MiSegment) -> *const u8 {
    let base = segment as *const u8;
    base.wrapping_add(mi_segment_size(segment))
}

impl MiPage {
    // Head of the atomic thread-free list: the xthread_free word with the
    // low two flag bits masked off.
    pub fn mi_page_thread_free(&self) -> *mut MiBlock {
        let xthrea_free: *mut MiBlock = unsafe { std::mem::transmute(mi_atomic_load_relaxed(&(*self).xthread_free) & !3) };
        xthrea_free
    }

    // Delayed-free flag stored in the low two bits of xthread_free.
    // NOTE(review): values 0 and 1 both map to MI_DELAYED_FREEING here
    // (consistent with mi_tf_delayed below), but upstream mimalloc has a
    // distinct state for 0 — confirm MiDelayed intentionally folds them.
    pub fn mi_page_thread_free_flag(&self) -> MiDelayed {
        let flag = unsafe {
            mi_atomic_load_relaxed(&(*self).xthread_free) & 3
        };
        match flag as u8 {
            0 => MiDelayed::MI_DELAYED_FREEING,
            1 => MiDelayed::MI_DELAYED_FREEING,
            2 => MiDelayed::MI_NO_DELAYED_FREE,
            3 => MiDelayed::MI_NEVER_DELAYED_FREE,
            _ => unreachable!(),
        }
    }
}

// Owning heap of a page, read relaxed from the atomic xheap field; None
// when the page has no owner (e.g. abandoned).
// NOTE(review): wrapping a `&mut MiHeap` derived from a raw pointer in
// Rc<RefCell<..>> cannot be checked here — the caller must guarantee the
// pointer is valid and the access exclusive.
pub fn mi_page_heap(mi_page: & MiPage) -> Option<Rc<RefCell<&mut MiHeap>>> {
    let heap_ptr = mi_page.xheap.load(Ordering::Relaxed) as *const MiHeap;
    if heap_ptr.is_null() {
        None
    } else {
        // SAFETY: heap_ptr must be valid here and its use must be legitimate.
        let heap_ref = unsafe { &mut *(heap_ptr as *mut MiHeap) };
        Some(Rc::new(RefCell::new(heap_ref)))
    }
}

/// Publish `heap` as the owner of `page` (release store into the atomic
/// xheap word) and, for a non-null heap, record the heap's tag on the page.
pub fn mi_page_set_heap(page: *mut MiPage, heap: *mut MiHeap) {
    unsafe {
        mi_atomic_store_release(&mut (*page).xheap, heap as usize);
        if !heap.is_null() {
            (*page).heap_tag = (*heap).tag;
        }
    }
}

/// Block-pointer part of a packed thread-free word (low two flag bits
/// cleared).
pub fn mi_tf_block(tf: usize) -> *mut MiBlock {
    let addr = tf & !0x03;
    addr as *mut MiBlock
}

// Delayed-free flag encoded in the low two bits of a thread-free word.
// NOTE(review): 0 and 1 both map to MI_DELAYED_FREEING (consistent with
// MiPage::mi_page_thread_free_flag); upstream mimalloc uses a separate
// state for value 0 — confirm this folding is intentional in this port.
pub fn mi_tf_delayed(tf: usize) -> MiDelayed {
    match tf & 0x03 {
        0 => MiDelayed::MI_DELAYED_FREEING,
        1 => MiDelayed::MI_DELAYED_FREEING,
        2 => MiDelayed::MI_NO_DELAYED_FREE,
        3 => MiDelayed::MI_NEVER_DELAYED_FREE,
        _ => unreachable!(),
    } 
}

/// Pack a block pointer and a delayed-free flag into one word; the flag
/// occupies the low two bits of the (aligned) pointer.
pub fn mi_tf_make(block: *mut MiBlock, delayed: MiDelayed) -> usize {
    let flag = delayed as usize;
    (block as usize) | flag
}

/// Replace the delayed-free flag of a thread-free word, keeping its block
/// pointer.
pub fn mi_tf_set_delayed(tf: usize, delayed: MiDelayed) -> usize {
    let block = mi_tf_block(tf);
    mi_tf_make(block, delayed)
}

/// Replace the block pointer of a thread-free word, keeping its flag.
pub fn mi_tf_set_block(tf: usize, block: *mut MiBlock) -> usize {
    let delayed = mi_tf_delayed(tf);
    mi_tf_make(block, delayed)
}

impl MiPage {
    /// Every block on this page is free (nothing currently in use).
    pub fn mi_page_all_free(&self) -> bool {
        self.used == 0
    }

    /// The page can still hand out a block: either unallocated capacity
    /// remains or blocks sit on the thread-free list.
    pub fn mi_page_has_any_available(&self) -> bool {
        if self.used < self.reserved {
            return true;
        }
        !self.mi_page_thread_free().is_null()
    }
}

/// A block is immediately available on the page's local free list.
pub fn mi_page_immediate_available(page: *const MiPage) -> bool {
    assert!(!page.is_null());
    let free = unsafe { (*page).free };
    !free.is_null()
}

/// More than 7/8 of the page's reserved blocks are in use; a null page
/// counts as fully used.
pub fn mi_page_mostly_used(page: *const MiPage) -> bool {
    if page.is_null() {
        return true;
    }
    unsafe {
        let frac = (*page).reserved / 8;
        (*page).reserved - (*page).used <= frac
    }
}


// Page queue in `heap` serving allocations of `size` bytes, selected via
// the size-class bin. NOTE(review): the `as *const MiPageQueue` cast
// assumes `pages` stores pointer-sized entries — confirm the element type.
pub fn mi_page_queue(heap: *const MiHeap, size: usize) -> *const MiPageQueue {
    unsafe {
        (*heap).pages[_mi_bin(size) as usize] as *const MiPageQueue
    }
}


impl MiPage {
    /// Page currently sits in the heap's "full" queue.
    pub fn mi_page_is_in_full(&self) -> bool {
        // reading a union field requires unsafe
        unsafe { self.flags.x.in_full != 0 }
    }

    /// Mark or unmark the page as belonging to the "full" queue.
    pub fn mi_page_set_in_full(&mut self, in_full: bool) {
        self.flags.x.in_full = in_full as u8;
    }

    /// Some block on this page was handed out at an adjusted (aligned)
    /// address.
    pub fn mi_page_has_aligned(&self) -> bool {
        unsafe { self.flags.x.has_aligned != 0 }
    }
}

/// Set or clear the page-level "has aligned blocks" flag.
pub fn mi_page_set_has_aligned(page: *mut MiPage, has_aligned: bool) {
    let value = has_aligned as u8;
    unsafe {
        (*page).flags.x.has_aligned = value;
    }
}

/* -------------------------------------------------------------------
Encoding/Decoding the free list next pointers
------------------------------------------------------------------- */
/// Two pointers fall inside the same segment.
pub fn mi_is_in_same_segment(p: *const std::ffi::c_void, q: *const std::ffi::c_void) -> bool {
    // mi_ptr_segment is a safe function; no unsafe block is needed here.
    mi_ptr_segment(p) == mi_ptr_segment(q)
}

/// Two pointers fall inside the same page: same segment, and `q` lies
/// within the [start, start+psize) range of the page containing `p`.
pub fn mi_is_in_same_page(p: *const std::ffi::c_void, q: *const std::ffi::c_void) -> bool {
    let segment = mi_ptr_segment(p);
    if mi_ptr_segment(q) != segment {
        return false;
    }
    let page = mi_segment_page_of(segment, p);
    let mut psize: usize = 0;
    let start = unsafe { mi_segment_page_start(segment, page, &mut psize) };
    let start = start as *const std::ffi::c_void;
    let end = unsafe { start.add(psize) };
    start <= q && q < end
}

/// Rotate `x` left by `shift` bits (shift reduced modulo the word width).
///
/// Uses the intrinsic-backed `usize::rotate_left`, which already reduces
/// the shift and handles shift == 0, replacing the manual
/// `(x << s) | (x >> (BITS - s))` form and its special-cased zero shift.
/// The `as u32` truncation is harmless: the word width divides 2^32, so
/// the reduced shift is unchanged.
pub fn mi_rotl(x: usize, shift: usize) -> usize {
    x.rotate_left(shift as u32)
}

/// Rotate `x` right by `shift` bits (shift reduced modulo the word width).
///
/// Uses the intrinsic-backed `usize::rotate_right` (see `mi_rotl` for why
/// the `as u32` truncation is safe).
pub fn mi_rotr(x: usize, shift: usize) -> usize {
    x.rotate_right(shift as u32)
}

/// Decode an encoded free-list pointer using the page keys; the sentinel
/// `null` address decodes to a real null pointer. Inverse of
/// `mi_ptr_encode`.
pub fn mi_ptr_decode(null: *const std::ffi::c_void, x: MiEncoded, keys: &[usize; 2]) -> *mut std::ffi::c_void {
    let decoded = mi_rotr(x.wrapping_sub(keys[0]), keys[0]) ^ keys[1];
    match decoded == null as usize {
        true => std::ptr::null_mut(),
        false => decoded as *mut std::ffi::c_void,
    }
}

/// Encode a free-list pointer with the page keys; a real null pointer is
/// first replaced by the sentinel `null` address.
///
/// Fix: use `wrapping_add` — the C original relies on wrapping unsigned
/// arithmetic and `mi_ptr_decode` undoes it with `wrapping_sub`; a plain
/// `+` can overflow-panic in debug builds for large keys.
pub fn mi_ptr_encode(null: *const std::ffi::c_void, p: *const std::ffi::c_void, keys: &[usize; 2]) -> MiEncoded {
    let x = if p.is_null() { null as usize } else { p as usize };
    mi_rotl(x ^ keys[1], keys[0]).wrapping_add(keys[0])
}

// Raw "next" pointer of a free-list block, decoded with the page keys when
// the MI_ENCODE_FREELIST cfg is enabled. The block's memory is temporarily
// marked defined for the memory tracker while it is read.
pub fn mi_block_nextx(null: *const std::ffi::c_void,  block:*mut MiBlock, keys: &[usize; 2]) -> *mut MiBlock {
    mi_track_mem_defined(block as *mut u8, std::mem::size_of::<MiBlock>()); //track.rs

    let next: *mut MiBlock;

    #[cfg(MI_ENCODE_FREELIST)]
    {
        // encoded free list: decode the stored value with the page keys
        next = mi_ptr_decode(null, unsafe { (*block).next }, keys) as *mut MiBlock;
    }

    #[cfg(not(MI_ENCODE_FREELIST))]
    {
        // plain free list: the stored value is the pointer itself
        MI_UNUSED!(keys);
        MI_UNUSED!(null);
        next = unsafe { (*block).next } as *mut MiBlock;
    }

    mi_track_mem_noaccess(block as *mut u8, std::mem::size_of::<MiBlock>()); //track.rs
    next
}

// Store `next` as the free-list successor of `block`, encoding it with the
// page keys when MI_ENCODE_FREELIST is enabled. The block's memory is
// temporarily marked undefined for the memory tracker while written.
pub fn mi_block_set_nextx(null: *const std::ffi::c_void, block: *mut MiBlock, next: *const MiBlock, keys: &[usize; 2]) {
    mi_track_mem_undefined(block as *mut u8, std::mem::size_of::<MiBlock>()); // track.rs

    #[cfg(MI_ENCODE_FREELIST)]
    {
        // encoded free list: store the encoded pointer value
        unsafe {
            (*block).next = mi_ptr_encode(null, next, keys);
        }
    }

    #[cfg(not(MI_ENCODE_FREELIST))]
    {
        // plain free list: store the pointer value directly
        MI_UNUSED!(keys);
        MI_UNUSED!(null);
        unsafe {
            (*block).next = next as MiEncoded; 
        }
    }

    mi_track_mem_noaccess(block as *mut u8, std::mem::size_of::<MiBlock>()); // track.rs
}

/// Next block in the page's free list after `block`.
///
/// With MI_ENCODE_FREELIST the stored value is decoded with the page keys
/// and validated to stay within the same page (a mismatch indicates a
/// corrupted free list and yields null). Without encoding it is a plain
/// pointer load.
///
/// Fixes to the (normally cfg'd-out) encoded branch: raw pointers have no
/// `is_not_null` method, the keys must be passed by reference, the block
/// size is a method on MiPage, and the arguments need the expected pointer
/// casts.
pub fn mi_block_next(page: *const MiPage, block: *const MiBlock) -> *mut MiBlock {
    #[cfg(MI_ENCODE_FREELIST)]
    {
        let next = mi_block_nextx(
            page as *const std::ffi::c_void,
            block as *mut MiBlock,
            unsafe { &(*page).keys },
        );

        if !next.is_null()
            && !mi_is_in_same_page(block as *const std::ffi::c_void, next as *const std::ffi::c_void)
        {
            _mi_error_message(
                EFAULT,
                "corrupted free list entry of size %zub at {:?}: value 0x{:x}\n",
                unsafe { (*page).mi_page_block_size() },
                block as *const _,
                next as usize,
            ); //options.rs
            return std::ptr::null_mut();
        }
        return next;
    }
    #[cfg(not(MI_ENCODE_FREELIST))]
    {
        MI_UNUSED!(page);
        let null_array: [usize; 2] = [0; 2];
        return mi_block_nextx(page as *const std::ffi::c_void, block as *mut MiBlock, &null_array);
    }
}

/// Store `next` as the free-list successor of `block`, using the page keys
/// when MI_ENCODE_FREELIST is enabled.
///
/// Fix to the (normally cfg'd-out) encoded branch: cast `page` to the
/// `*const c_void` null-sentinel the callee expects and pass the keys by
/// reference.
pub fn mi_block_set_next(page: *const MiPage, block: *mut MiBlock, next: *const MiBlock) {
    #[cfg(MI_ENCODE_FREELIST)]
    {
        mi_block_set_nextx(page as *const std::ffi::c_void, block, next, unsafe { &(*page).keys });
    }
    #[cfg(not(MI_ENCODE_FREELIST))]
    {
        MI_UNUSED!(page);
        let null_array: [usize; 2] = [0; 2];
        mi_block_set_nextx(page as *const std::ffi::c_void , block, next, &null_array);
    }
}

// -------------------------------------------------------------------
// commit mask
// -------------------------------------------------------------------

/// Clear all fields of the commit mask.
pub fn mi_commit_mask_create_empty(cm: &mut MiCommitMask) {
    cm.mask[..MI_COMMIT_MASK_FIELD_COUNT]
        .iter_mut()
        .for_each(|field| *field = 0);
}

/// Set every bit of the commit mask.
pub fn mi_commit_mask_create_full(cm: &mut MiCommitMask) {
    cm.mask[..MI_COMMIT_MASK_FIELD_COUNT]
        .iter_mut()
        .for_each(|field| *field = usize::MAX);
}

/// True when no bit of the commit mask is set.
pub fn mi_commit_mask_is_empty(cm: &MiCommitMask) -> bool {
    cm.mask[..MI_COMMIT_MASK_FIELD_COUNT]
        .iter()
        .all(|&field| field == 0)
}

/// True when every bit of the commit mask is set.
pub fn mi_commit_mask_is_full(cm: &MiCommitMask) -> bool {
    cm.mask[..MI_COMMIT_MASK_FIELD_COUNT]
        .iter()
        .all(|&field| field == usize::MAX)
}

// Iterate over the runs of set bits in a commit mask: repeatedly advance
// `$idx` and store each run length in `$count` until no runs remain.
// NOTE(review): the loop body is empty, so callers cannot inject per-run
// code with this macro as written — confirm the intended usage pattern.
#[macro_export]
macro_rules! mi_commit_mask_foreach {
    ($cm:expr, $idx:ident, $count:ident) => {
        $idx = 0;
        while {
            $count = $cm.next_run(&mut $idx);
            $count > 0
        } {
            // Body of the loop
        }
    };
}

// defined in segment.rs

/* -----------------------------------------------------------
  memory id's
----------------------------------------------------------- */

/// Build a fresh MiMemId of the given kind with all other fields zeroed.
///
/// Fix: pass the struct itself to `mi_memzero_var!` — the original passed
/// `&mut memid`, which zeroed the temporary reference (one pointer's worth
/// of bytes) rather than the struct it points to.
pub fn mi_memid_create(memkind: MiMemKind) -> MiMemId {
    let mut memid: MiMemId = MiMemId::new_empty();
    mi_memzero_var!(memid);
    memid.memkind = memkind;
    memid
}

/// Memory id denoting "no backing memory".
pub fn mi_memid_none() -> MiMemId {
    mi_memid_create(MiMemKind::MI_MEM_NONE)
}

/// Memory id for OS-allocated memory, recording its initial commit/zero
/// state and whether it is pinned (large/huge pages cannot be moved).
pub fn mi_memid_create_os(committed: bool, is_zero: bool, is_large: bool) -> MiMemId {
    let mut id = mi_memid_create(MiMemKind::MI_MEM_OS);
    id.initially_committed = committed;
    id.initially_zero = is_zero;
    id.is_pinned = is_large;
    id
}

// -------------------------------------------------------------------
// Fast "random" shuffle
// -------------------------------------------------------------------
/// Fast pseudo-random bit mixer used to shuffle values: an xorshift-
/// multiply finalizer (wrapping arithmetic, word-size dependent constants).
/// A zero input is remapped to 17 so the shuffle never gets stuck at zero.
pub fn mi_random_shuffle(x: usize) -> usize {
    let mut v = if x == 0 { 17 } else { x };

    #[cfg(target_pointer_width = "64")]
    {
        v ^= v >> 30;
        v = v.wrapping_mul(0xbf58476d1ce4e5b9);
        v ^= v >> 27;
        v = v.wrapping_mul(0x94d049bb133111eb);
        v ^= v >> 31;
    }

    #[cfg(target_pointer_width = "32")]
    {
        v ^= v >> 16;
        v = v.wrapping_mul(0x7feb352d);
        v ^= v >> 15;
        v = v.wrapping_mul(0x846ca68b);
        v ^= v >> 16;
    }

    v
}

// -------------------------------------------------------------------
// Optimize numa node access for the common case (= one node)
// -------------------------------------------------------------------

// _mi_numa_node_count: os.rs

// NUMA node of the current thread: fast-path 0 when only a single node
// exists (the common case), otherwise query the OS.
pub fn mi_os_numa_node(tld: *mut MiOsTld) -> i32 {
    if mi_atomic_load_relaxed(&_mi_numa_node_count) == 1 {
        return 0;
    } else {
        unsafe {
            _mi_os_numa_node_get(tld)  // os.rs
        }
    }
}

// Cached NUMA node count; while the cached atomic is still zero (not yet
// initialized) fall back to querying the OS.
pub fn mi_os_numa_node_count() -> usize {
    let count = mi_atomic_load_relaxed(&_mi_numa_node_count);
    if mi_likely(count > 0) {
        count
    } else {
        unsafe {
            _mi_os_numa_node_count_get()  //os.rs
        }
    }
}

// count bits: trailing or leading zeros
/// Count trailing zero bits of a 32-bit value; returns 32 for zero.
///
/// Replaces the hand-rolled De Bruijn multiply-and-lookup with the
/// intrinsic-backed `u32::trailing_zeros`, which returns the same values
/// (including 32 for an all-zero input).
pub fn mi_ctz32(x: u32) -> usize {
    x.trailing_zeros() as usize
}

/// Count leading zero bits of a 32-bit value; returns 32 for zero.
///
/// Replaces the hand-rolled smear-and-De-Bruijn-lookup with the
/// intrinsic-backed `u32::leading_zeros`, which returns the same values
/// (including 32 for an all-zero input).
pub fn mi_clz32(x: u32) -> usize {
    x.leading_zeros() as usize
}

/// Count leading zero bits of a word; returns the full bit width for zero.
///
/// The original split the word into 32-bit halves per target; the
/// intrinsic-backed `leading_zeros` computes the identical result
/// (including `size_of::<usize>() * 8` for zero) on every word size.
pub fn mi_clz(x: usize) -> usize {
    x.leading_zeros() as usize
}

/// Count trailing zero bits of a word; returns the full bit width for zero.
///
/// The original split the word into 32-bit halves per target; the
/// intrinsic-backed `trailing_zeros` computes the identical result
/// (including `size_of::<usize>() * 8` for zero) on every word size.
pub fn mi_ctz(x: usize) -> usize {
    x.trailing_zeros() as usize
}

/// Index of the highest set bit ("bit scan reverse"); returns
/// MI_INTPTR_BITS when `x` is zero (no bit set).
pub fn mi_bsr(x: usize) -> usize {
    if x == 0 {
        return MI_INTPTR_BITS;
    }
    MI_INTPTR_BITS - 1 - mi_clz(x)
}

// TODO: optimize these memory helpers per platform (currently commented out).
/* 
pub fn mi_memcpy(dst: *mut std::ffi::c_void, src: *const std::ffi::c_void, n: usize) {
    unsafe {
        std::ptr::copy_nonoverlapping(src as *const u8, dst as *mut u8, n);
    }
}

pub fn mi_memcpy_aligned(dst: *mut std::ffi::c_void, src: *const std::ffi::c_void, n: usize) {
    assert!((dst as usize % std::mem::size_of::<usize>() == 0) && (src as usize % std::mem::size_of::<usize>() == 0));
    mi_memcpy(dst, src, n);
}

pub fn mi_memzero_aligned(dst: *mut std::ffi::c_void, n: usize) {
    assert!(dst as usize % std::mem::size_of::<usize>() == 0);
    mi_memzero(dst, n); 
}


*/

