use super::*;


/// Free a block.
///
/// Resolves the owning segment from the pointer, then dispatches to:
/// the fast local free path (owning thread, no special page flags),
/// the atomic multi-threaded free path, or the generic path for
/// aligned blocks / pages with flags set.
pub fn _mi_free(ptr: *mut u8) {
    // optimize: merge null check with the segment masking (below)
    // if ptr.is_null() { return; }

    let segment = _mi_ptr_segment(ptr);
    if segment.is_null() {
        return;
    }
    unsafe {
        // preload, note: putting the thread_id in the page->flags does
        // not improve performance
        let local = mi_thread_id() == (*segment).thread_id;

        let page = _mi_segment_page_of(segment, ptr);

        // adjust if it might be an un-aligned block
        // note: merging both tests (local | value) does not matter for performance
        if (*page).flags.is_empty() {
            let block = ptr as *mut MiBlock;
            if local {
                // owning thread can free a block directly
                // note: moving this write earlier does not matter for performance
                mi_block_set_next(page, block, (*page).local_free);
                (*page).local_free = block;
                (*page).used -= 1;
                if mi_page_all_free(page) {
                    _mi_page_retire(page);
                }
            } else {
                // use atomic operations for a multi-threaded free
                _mi_free_block_mt(page, block);
            }
        } else {
            // aligned blocks, or a full page; use the more generic path
            mi_free_generic(segment, page, local, ptr);
        }
    }
}

/// Allocate `size` bytes from `heap`.
///
/// Small requests (<= `MI_SMALL_SIZE_MAX`) take the dedicated fast
/// path; everything else goes through the generic allocator.
pub fn mi_heap_malloc(heap: *const MiHeap, size: usize) -> *mut u8 {
    if size > MI_SMALL_SIZE_MAX {
        return _mi_malloc_generic(heap, size);
    }
    mi_heap_malloc_small(heap, size)
}
/// Allocate a small block: look up the free-page queue for this size
/// class and pop a block from that page's free list.
pub fn mi_heap_malloc_small(heap: *const MiHeap, size: usize) -> *mut u8 {
    _mi_page_malloc(heap, _mi_heap_get_free_small_page(heap, size), size)
}

/// Generic allocation routine, used when the fast path fails.
///
/// Lazily initializes the thread-local heap if needed, finds (or
/// allocates) a page with free space, and retries the page-level
/// allocation on it.
pub fn _mi_malloc_generic(mut heap: *const MiHeap, size: usize) -> *mut u8 {
    // first use on this thread: initialize and pick up the default heap
    if !mi_heap_is_initialized(heap) {
        mi_thread_init();
        heap = get_heap_default();
    }
    // call potential deferred free routines
    // TODO: _mi_deferred_free(heap, false);

    // find a page with free space; huge requests get a dedicated page
    let page = if size <= MI_LARGE_SIZE_MAX {
        unsafe { mi_find_free_page(heap, size) }
    } else {
        mi_huge_page_alloc(heap, size)
    };

    if page.is_null() {
        ptr::null_mut()
    } else {
        // and try again, this time succeeding! (i.e. this should not recurse)
        _mi_page_malloc(heap, page, size)
    }
}


/// Fast allocation in a page: pop the head of the page's free list.
/// Falls back to the generic allocator only when the list is empty.
pub fn _mi_page_malloc(heap: *const MiHeap, page: *mut MiPage, size: usize) -> *mut u8 {
    unsafe {
        let head = (*page).free;
        if head.is_null() {
            // free list exhausted: take the slow path
            _mi_malloc_generic(heap, size)
        } else {
            // pop from the free list and account for the block
            (*page).free = mi_block_next(page, head);
            (*page).used += 1;
            head as *mut u8
        }
    }
}

/// Free a block using the generic path: recover the real block start
/// if the page holds aligned blocks, then hand off to the regular
/// free routine.
pub fn mi_free_generic(segment: *mut MiSegment, page: *mut MiPage, local: bool, ptr: *mut u8) {
    unsafe {
        let aligned = (*page).flags.contains(MiPageFlags::ALIGNED);
        let block = if aligned {
            // ptr may point into the middle of the block; un-align it
            _mi_page_ptr_unalign(segment, page, ptr)
        } else {
            ptr as *mut MiBlock
        };
        _mi_free_block(page, local, block);
    }
}

/// Multi-threaded free: push `block` onto the page's atomic
/// `thread_free` list, or onto the owning heap's delayed-free list
/// when the page requests delayed freeing (i.e. it is in the full
/// list). The low two bits of `thread_free` encode the `MiDelayed`
/// state; the upper bits hold the list head pointer.
pub fn _mi_free_block_mt(page: *mut MiPage, block: *mut MiBlock) {
    unsafe {
        let mut tfree;
        let mut tfreex;
        let mut use_delayed;

        loop {
            tfree = (*page).thread_free.load(Ordering::SeqCst);
            tfreex = tfree;
            // note: 'UesDelayedFree' is a typo for 'UseDelayedFree' in the enum
            use_delayed = tfreex & 0b11 == MiDelayed::UesDelayedFree as usize;
            if use_delayed {
                // unlikely: this only happens on the first concurrent free in a
                // page that is in the full list; mark that a delayed free is in
                // progress so the heap cannot be deleted under us
                tfreex = tfreex & !0b11 | MiDelayed::DelayedFreeing as usize;
            } else {
                // usual: directly add to page thread_free list
                mi_block_set_next(page, block, (tfreex & !0b11) as *mut MiBlock);
                tfreex = block as usize | tfreex & 0b11;
            }

            if (*page).thread_free.compare_exchange(tfree, tfreex, Ordering::SeqCst, Ordering::SeqCst).is_ok() {
                break;
            }
        }

        if !use_delayed {
            // increment the thread free count and return
            (*page).thread_freed.fetch_add(1, Ordering::SeqCst);
        } else {
            // racy read on 'heap', but ok because MI_DELAYED_FREEING is set
            // (see 'mi_heap_delete' and 'mi_heap_collect_abandon')
            let heap = (*page).heap;
            if !heap.is_null() {
                // add to the delayed free list of this heap. (do this atomically
                // as the lock protects heap memory validity)
                loop {
                    let dfree = (*heap).thread_delayed_free.load(Ordering::SeqCst);
                    mi_block_set_nextx((*heap).cookie, block, dfree);
                    if (*heap).thread_delayed_free.compare_exchange(dfree, block, Ordering::SeqCst, Ordering::SeqCst).is_ok() {
                        break;
                    }
                }
            }

            // and reset the MI_DELAYED_FREEING flag: clear the low two bits
            // back to the no-delayed-free state (0).
            // BUG FIX: the previous code OR'ed DelayedFreeing back in here,
            // which left the page permanently stuck in the "freeing" state
            // instead of resetting it.
            loop {
                tfree = (*page).thread_free.load(Ordering::SeqCst);
                tfreex = tfree & !0b11; // MiDelayed no-delayed-free state
                if (*page).thread_free.compare_exchange(tfree, tfreex, Ordering::SeqCst, Ordering::SeqCst).is_ok() {
                    break;
                }
            }
        }
    }
}

/// Regular free: push `block` onto the page's local free list when
/// called from the owning thread, otherwise defer to the atomic
/// multi-threaded free path.
#[inline]
pub fn _mi_free_block(page: *mut MiPage, local: bool, block: *mut MiBlock) {
    if !local {
        // freed from another thread: use atomic operations
        _mi_free_block_mt(page, block);
        return;
    }
    unsafe {
        // owning thread can push onto the local free list directly
        mi_block_set_next(page, block, (*page).local_free);
        (*page).local_free = block;
        (*page).used -= 1;
        if mi_page_all_free(page) {
            _mi_page_retire(page);
        } else if (*page).flags.contains(MiPageFlags::FULL) {
            // the page was full and now has space again
            _mi_page_unfull(page);
        }
    }
}