use std::sync::atomic::{AtomicPtr, AtomicUsize};
use bitflags::bitflags;

// ------------------------------------------------------
// Platform specific values
// ------------------------------------------------------

// ------------------------------------------------------
// Size of a pointer.
// We assume that `sizeof(void*)==sizeof(intptr_t)`
// and it holds for all platforms we know of.
//
// However, the C standard only requires that:
//  p == (void*)((intptr_t)p))
// but we also need:
//  i == (intptr_t)((void*)i)
// or otherwise one might define an intptr_t type that is larger than a pointer...
// ------------------------------------------------------

// log2 of the pointer size in bytes: 3 on 64-bit targets, 2 on 32-bit targets
pub const MI_INTPTR_SHIFT: usize = std::mem::size_of::<usize>().trailing_zeros() as usize;
// size of a pointer in bytes (`sizeof(usize)`), derived so the two stay consistent
pub const MI_INTPTR_SIZE: usize = 1 << MI_INTPTR_SHIFT;

// ------------------------------------------------------
// Main internal data-structures
// ------------------------------------------------------

/// Main tuning parameters for segment and page sizes.
pub const MI_SMALL_PAGE_SHIFT: usize = 16;  // 64kb
pub const MI_LARGE_PAGE_SHIFT: usize = 6 + MI_SMALL_PAGE_SHIFT; // 4mb
pub const MI_SEGMENT_SHIFT: usize = MI_LARGE_PAGE_SHIFT; // 4mb (a segment is one large page)

/// Derived constants
pub const MI_SEGMENT_SIZE: usize = 1 << MI_SEGMENT_SHIFT; // 4mb
// mask for the offset of an address within its (size-aligned) segment
pub const MI_SEGMENT_MASK: usize = MI_SEGMENT_SIZE - 1;

pub const MI_SMALL_PAGE_SIZE: usize = 1 << MI_SMALL_PAGE_SHIFT; // 64kb
pub const MI_LARGE_PAGE_SIZE: usize = 1 << MI_LARGE_PAGE_SHIFT; // 4mb

pub const MI_SMALL_PAGES_PER_SEGMENT: usize = MI_SEGMENT_SIZE / MI_SMALL_PAGE_SIZE; // 64
pub const MI_LARGE_PAGES_PER_SEGMENT: usize = MI_SEGMENT_SIZE / MI_LARGE_PAGE_SIZE; // 1

// largest block size served from a large page; bigger requests need huge pages
pub const MI_LARGE_SIZE_MAX : usize = MI_LARGE_PAGE_SIZE / 8; // 512kb on 64-bit
// the same limit expressed in machine words
pub const MI_LARGE_WSIZE_MAX: usize = MI_LARGE_SIZE_MAX / std::mem::size_of::<usize>();


/// The maximum of the size classes. (spaced exponentially in 16.7% increments)
pub const MI_BIN_HUGE: usize = 64;

/// 'full' bin is used for full allocated pages
pub const MI_BIN_FULL: usize = MI_BIN_HUGE + 1;


/// The number of direct pages in a heap
pub const MI_SMALL_WSIZE_MAX: usize = 128; // in machine words (1kb on 64-bit)
pub const MI_SMALL_SIZE_MAX: usize = MI_SMALL_WSIZE_MAX * std::mem::size_of::<usize>();

/// Minimum alignment necessary. On most platforms 16 bytes are needed
/// due to SSE registers for example. This must be at least 'MI_INTPTR_SIZE'
pub const MI_MAX_ALIGN_SIZE: usize = 16;

// ------------------------------------------------------
// Mimalloc pages contain allocated blocks
// ------------------------------------------------------

// The free lists use encoded next fields
// (Only actually encodes when MI_ENCODED_FREELIST is defined.)

/// A free block: an intrusive singly-linked list node. The block's own
/// memory holds the `next` pointer while the block sits on a free list.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct MiBlock {
    // next free block in the list (possibly encoded, see note above); null terminates
    pub next: *mut MiBlock,
}

bitflags! {
    /// Per-page flags owned (written) by the segment rather than the heap.
    pub struct MiPageOwnerFlags: u8 {
        // 'true' if the segment allocated this page
        const SEGMENT_IN_USE = 1;
        // 'true' if the page memory was reset (returned to the OS)
        const RESET = 1 << 1;
    }
}

bitflags! {
    /// Per-page state flags (the 'in_full' and 'has_aligned' state, see `MiPage::flags`).
    pub struct MiPageFlags: u8 {
        // page is full (no free blocks); corresponds to the 'in_full' state
        const FULL = 1;
        // page contains blocks handed out with adjusted (aligned) pointers
        const ALIGNED = 1 << 1;
        // NOTE(review): presumably marks a page whose free blocks are zeroed — confirm
        const ZERO_FREE = 1 << 2;
    }
}

/// Delayed-free state of a page's thread-free list; small enough to be
/// packed into the low two bits of `MiThreadFree::value`.
///
/// Derives added: a public enum should be `Debug`, and a two-bit tag is
/// naturally `Copy` and comparable.
// NOTE(review): `UesDelayedFree` looks like a typo for `UseDelayedFree`,
// but renaming a public variant would break callers, so it is kept as-is.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MiDelayed {
    // tag 0: free directly onto the page's 'thread_free' list
    NoDelayedFree,
    // tag 1: use the heap's delayed-free list instead
    UesDelayedFree,
    // tag 2: a delayed free is currently in progress
    DelayedFreeing,
}

/// The thread-free list packed into a single `usize`: the low two bits hold
/// a `MiDelayed` state tag, the remaining bits hold the head of the list
/// (valid because blocks are at least 4-byte aligned).
#[derive(Debug, Clone, Copy)]
pub struct MiThreadFree {
    pub value: usize,
}

impl MiThreadFree {
    /// Returns the delayed-free state stored in the last two (flag) bits.
    ///
    /// BUG FIX: this previously returned `self.value & !0b11`, which is the
    /// head part and made this method identical to `get_head`. The struct's
    /// own comment ("last two bits are flags") and `set_delayed` (which
    /// writes the low two bits) show the state tag lives in the low bits.
    pub fn get_delayed(&self) -> usize {
        self.value & 0b11
    }

    /// Overwrites the two flag bits with `value` (a `MiDelayed` tag, must
    /// fit in two bits), preserving the head bits.
    pub fn set_delayed(&mut self, value: usize) {
        self.value = (self.value & !0b11) | value;
    }

    /// Returns the head of the list with the flag bits masked off.
    pub fn get_head(&self) -> usize {
        self.value & !0b11
    }
}

/// A page contains blocks of one specific size ('block_size').
/// Each page has three list of free blocks:
/// - `free`: list of available free blocks
/// - `local_free`: list of deferred free blocks by this thread
/// - `thread_free`: list of deferred free blocks freed by other threads
/// The 'local_free' and 'thread_free' lists are migrated to the 'free' list
/// when it is exhausted. The separate 'local_free' list is necessary to implement
/// a monotonic heartbeat. The 'thread_free' list is needed for avoiding atomic
/// operations in the common case.
/// 
/// 'used - |thread_free|' == actual blocks that are in use (alive).
/// 'used - |thread_free| + |free| + |local_free| == capacity'.
/// 
/// We don't count 'freed' (as |free|) but use 'used' to reduce the number of
/// memory accesses in the 'mi_page_all_free' function.
/// 
/// 'owned' by the segment.
#[repr(C)]
pub struct MiPage {
    // index of this page in the owning segment's `pages` array
    pub segment_idx: u8,
    // `SEGMENT_IN_USE` / `RESET`, written by the owning segment
    pub owner_flags: MiPageOwnerFlags,

    // 'in_full' and 'has_aligned' flags
    pub flags: MiPageFlags,
    // number of blocks committed
    // NOTE(review): the inherited comment says this "must be first field",
    // but `segment_idx` is first here — confirm nothing relies on its offset.
    pub capacity: u16,
    // Number of blocks reserved in memory
    pub reserved: u16,

    // list of available free blocks ('malloc' allocates from this list)
    pub free: *mut MiBlock,
    // random cookie to encode the free lists
    pub cookie: usize,
    // number of blocks in use (including blocks in 'thread_free')
    pub used: usize,

    // list of deferred free blocks by this thread (migrated to 'free')
    pub local_free: *mut MiBlock,
    // at least this number of blocks are in 'thread_free'
    pub thread_freed: AtomicUsize,
    // list of deferred free blocks freed by other threads
    // NOTE(review): presumably packed as `MiThreadFree` (head | delayed tag) — confirm
    pub thread_free: AtomicUsize,

    // less accessed info
    // size available in each block (always '>0')
    pub block_size: usize,
    // the owning heap
    pub heap: *mut MiHeap,
    // next page owned by the heap with the same 'block_size'
    pub next: *mut MiPage,
    // previous page owned by the heap with the same 'block_size'
    pub prev: *mut MiPage,
}

unsafe impl Sync for MiPage {}

impl MiPage {
    pub const fn default() -> Self {
        Self {
            segment_idx: 0,
            owner_flags: MiPageOwnerFlags::empty(),
            flags: MiPageFlags::empty(),
            capacity: 0,
            reserved: 0,
            free: std::ptr::null_mut(),
            cookie: 0,
            used: 0,
            local_free: std::ptr::null_mut(),
            thread_freed: AtomicUsize::new(0),
            thread_free: AtomicUsize::new(0),
            block_size: 0,
            heap: std::ptr::null_mut(),
            next: std::ptr::null_mut(),
            prev: std::ptr::null_mut(),
        }
    }
}


// ------------------------------------------------------
// Mimalloc segments contain mimalloc pages
// ------------------------------------------------------

/// The kind of pages a segment holds (all pages in a segment share one kind).
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MiPageKind {
    // small blocks go into 64KiB pages inside a segment
    Small,
    // larger blocks go into a single page spanning a whole segment
    Large,
    // huge blocks go into a single page spanning a whole (still segment-aligned) segment
    // NOTE(review): the inherited comment referenced 'MI_LARGE_OBJ_SIZE_MAX' and
    // 'MI_BLOCK_ALIGNMENT_MAX', which do not exist in this file — presumably it
    // means 'MI_LARGE_SIZE_MAX'; confirm the intended threshold.
    Huge,
}

/// A segment: a large, `MI_SEGMENT_SIZE`-sized chunk of memory holding the
/// actual pages; segments are linked into queues (see `MiSegmentQueue`).
#[repr(C)]
pub struct MiSegment {
    // intrusive links for the segment queue this segment sits in
    pub next: *mut MiSegment,
    pub prev: *mut MiSegment,
    // link in the list of abandoned segments
    pub abandoned_next: *mut MiSegment,
    // abandoned pages ('abandoned' <= 'used')
    pub abandoned: usize,
    // count of pages ('used' <= 'capacity')
    pub used: usize,
    // count of available pages ('#free + used')
    pub capacity: usize,
    // for huge pages this may be different from `MI_SEGMENT_SIZE`
    pub segment_size: usize,
    // space we are using from the first page for segment meta-data and possible guard pages
    pub segment_info_size: usize,
    // verify addresses in debug mode: 'mi_ptr_cookie(segment) == segment->cookie'
    pub cookie: usize,

    // layout like this to optimize access in `mi_free`
    // '1 << page_shift' == the page sizes == 'page->block_size * page->reserved'
    // (unless the first page, then '-segment_info_size')
    pub page_shift: usize,
    // unique ID of the thread owning this segment
    pub thread_id: usize,
    // kind of pages: small, large, or huge (this file defines no 'medium' kind)
    pub page_kind: MiPageKind,
    // first page; up to `MI_SMALL_PAGES_PER_SEGMENT` pages follow in memory.
    // NOTE(review): declared `[MiPage; 1]` as a C-style flexible array member;
    // entries past index 0 are presumably reached via raw pointers — confirm.
    pub pages: [MiPage; 1],
}

/// Pages of a certain block size are held in a queue
/// (doubly linked through `MiPage::next`/`MiPage::prev`).
#[derive(Copy, Clone)]
#[repr(C)]
pub struct MiPageQueue {
    // first page in the queue (null when empty)
    pub first: *mut MiPage,
    // last page in the queue (null when empty)
    pub last: *mut MiPage,
    // the block size this queue serves
    pub block_size: usize,
}

impl MiPageQueue {
    pub const fn default() -> Self {
        Self {
            first: std::ptr::null_mut(),
            last:  std::ptr::null_mut(),
            block_size: 0,
        }
    }
}

/// A heap owns a set of pages.
#[repr(C)]
pub struct MiHeap {
    // thread local data
    pub tld: *mut MiTld,
    // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.
    // NOTE(review): sized `MI_SMALL_WSIZE_MAX + 2` — presumably one entry per
    // word-size class plus sentinel slots; confirm the indexing convention.
    pub pages_free_direct: [*mut MiPage; MI_SMALL_WSIZE_MAX + 2],
    // queue of pages for each size class (or "bin"), including the 'full' bin
    // (hence `MI_BIN_FULL + 1` entries)
    pub pages: [MiPageQueue; MI_BIN_FULL + 1],
    // list of blocks freed delayed by other threads (head is atomic)
    pub thread_delayed_free: AtomicPtr<MiBlock>,
    // thread this heap belongs to
    pub thread_id: usize,
    // cookie; presumably used like `MiPage::cookie` to encode/verify pointers — confirm
    pub cookie: usize,
    // random number used for secure allocation
    pub random: usize,
    // total number of pages in the `pages` queues.
    pub page_count: usize,
    // `true` if this heap should not reclaim abandoned pages
    pub no_reclaim: bool,
}
// SAFETY: NOTE(review) — `MiHeap` holds raw pointers; sharing is presumably
// sound because a heap is only mutated by its owning thread, other threads
// touching only `thread_delayed_free` (atomic) — confirm.
unsafe impl Sync for MiHeap {}

impl MiHeap {
    pub const fn default() -> Self {
        Self {
            tld: std::ptr::null_mut(),
            pages_free_direct: [std::ptr::null_mut(); MI_SMALL_WSIZE_MAX + 2],
            pages: [MiPageQueue::default(); MI_BIN_FULL + 1],
            thread_delayed_free: AtomicPtr::new(std::ptr::null_mut()),
            thread_id: 0,
            cookie: 0,
            random: 0,
            page_count: 0,
            no_reclaim: false,
        }
    }
}

// ------------------------------------------------------
// Thread Local data
// ------------------------------------------------------

/// Queue of segments (doubly linked through `MiSegment::next`/`prev`).
#[repr(C)]
pub struct MiSegmentQueue {
    // first segment in the queue (null when empty)
    pub first: *mut MiSegment,
    // last segment in the queue (null when empty)
    pub last: *mut MiSegment,
}

impl MiSegmentQueue {
    pub const fn default() -> Self {
        Self {
            first: std::ptr::null_mut(),
            last: std::ptr::null_mut(),
        }
    }
}

/// Segment thread-local data: per-thread segment queues, size accounting,
/// and a small cache of unused segments.
#[repr(C)]
pub struct MiSegmentsTld {
    // Queue of segments with free small pages
    pub small_free: MiSegmentQueue,
    // current size of all segments
    pub current_size: usize,
    // peak size of all segments
    pub peak_size: usize,
    // number of segments in the cache
    pub cache_count: usize,
    // total size of all segments in the cache
    pub cache_size: usize,
    // (small) cache of segments for small and large pages (to avoid repeated mmap calls)
    pub cache: MiSegmentQueue,
}

impl MiSegmentsTld {
    pub const fn default() -> Self {
        Self {
            small_free: MiSegmentQueue::default(),
            current_size: 0,
            peak_size: 0,
            cache_count: 0,
            cache_size: 0,
            cache: MiSegmentQueue::default(),
        }
    }
}

/// OS thread-local data: per-thread bookkeeping for the low-level
/// memory mapper (mmap hints and a small reuse pool).
#[repr(C)]
pub struct MiOsTld {
    // probable next address start allocated by mmap (to guess which path to take on alignment)
    pub mmap_next_probable: usize,
    // previous address returned by mmap
    pub mmap_previous: *mut u8,
    // pool of segments to reduce mmap calls on some platforms
    pub pool: *mut u8,
    // bytes available in the pool
    pub pool_available: usize,
}

impl MiOsTld {
    /// All-zero / all-null starting state; `const` for static initializers.
    pub const fn default() -> Self {
        MiOsTld {
            pool_available: 0,
            pool: std::ptr::null_mut(),
            mmap_previous: std::ptr::null_mut(),
            mmap_next_probable: 0,
        }
    }
}

/// Thread local data: everything one thread needs to run the allocator.
#[repr(C)]
pub struct MiTld {
    // monotonic heartbeat count
    pub heartbeat: u64,
    // backing heap of this thread (cannot be deleted)
    pub heap_backing: *mut MiHeap,
    // segment tld
    pub segments: MiSegmentsTld,
    // os tld
    pub os: MiOsTld,
}
// SAFETY: NOTE(review) — contains raw pointers and plain counters; this is
// presumably only ever accessed by its owning thread despite the `Sync`
// (likely needed for a `static` initializer) — confirm.
unsafe impl Sync for MiTld {}

impl MiTld {
    pub const fn default() -> Self {
        Self {
            heartbeat: 0,
            heap_backing: std::ptr::null_mut(),
            segments: MiSegmentsTld::default(),
            os: MiOsTld::default(),
        }
    }
}