use std::sync::{atomic::AtomicUsize, Arc, Mutex};

use crate::{constants::MI_PAGES_DIRECT, memid::{mi_arena_id_t, MiMemid}};

/// Word-sized thread identifier (mirrors the C `mi_threadid_t` alias).
pub type mi_threadid_t = usize;

/// Index of the largest ("huge") size-class bin.
pub const MI_BIN_HUGE: usize = 73;
/// Index of the pseudo-bin that queues completely full pages
/// (one past the last real size bin).
pub const MI_BIN_FULL: usize = MI_BIN_HUGE+1;


/// Per-thread heap state. Field names match mimalloc's C `mi_heap_t`, so the
/// per-field comments below describe the C counterparts' roles — confirm them
/// against the Rust allocation paths as the port fills in logic.
///
/// Raw pointers preserve the C ownership model; lifetime management is manual.
pub struct MiHeap {
    // thread-local data this heap belongs to
    pub tld: *mut MiTld,
    // encoded list head of blocks freed from other threads (updated atomically)
    pub thread_delayed_free: AtomicUsize,
    // id of the owning thread
    pub thread_id: mi_threadid_t,
    // arena this heap is associated with
    pub arena_id: mi_arena_id_t,
    // random cookie — presumably for pointer verification in secure mode; TODO confirm
    pub cookie: usize,
    // two keys used to encode free-list pointers
    pub keys: [usize; 2],
    // per-heap random state (generates cookie/keys in the C original)
    pub random: MiRandomCxt,
    // number of pages currently owned by this heap
    pub page_count: usize,
    // smallest / largest bin index that holds a retired page (scan bounds)
    pub page_retired_min: usize,
    pub page_retired_max: usize,
    // next heap in the owning thread's heap list
    pub next: *mut MiHeap,
    // if true, this heap does not reclaim abandoned pages
    pub no_reclaim: bool,
    // heap tag, distinguishing heaps of the same thread (see MiPage::heap_tag)
    pub tag: u8,
    // fast path: small size class -> page with free blocks
    pub pages_free_direct: [*mut MiPage; MI_PAGES_DIRECT],
    // one page queue per size bin, plus the MI_BIN_FULL queue of full pages
    pub pages: [MiPageQueue; MI_BIN_FULL+1],
}

// SAFETY(review): MiHeap holds raw pointers, so Sync is not auto-derived.
// This assertion relies on the allocator's thread-ownership discipline (a heap
// is mutated only by its owning thread) — that is not provable from this file;
// confirm at the use sites that require a shared `&MiHeap` across threads.
unsafe impl Sync for MiHeap {}

/// A page inside a segment, holding blocks of a single size class.
/// Field names match mimalloc's C `mi_page_t`; the per-field comments describe
/// the C counterparts — confirm against the ported logic as it lands.
pub struct MiPage {
    // index of this page inside its segment
    pub segment_idx: u8,
    // true if this page slot is in use within the segment
    pub segment_in_use: bool,
    // true if the page's memory is committed
    pub is_committed: bool,
    // true if the page memory is guaranteed zero-initialized
    pub is_zero_init: bool,
    // true if this is a huge page
    pub is_huge: bool,
    // number of blocks committed/initialized
    pub capacity: u16,
    // number of blocks reserved in memory
    pub reserved: u16,
    // "in full queue" / "has aligned blocks" flags (see MiPageFlags)
    pub flags: MiPageFlags,
    // true if the blocks on the free list are zeroed
    pub free_is_zero: bool,
    // countdown before an empty page is actually retired
    pub retire_expire: u8,
    // list of available free blocks
    pub free: *mut MiBlock,
    // blocks freed by the owning thread, merged into `free` lazily
    pub local_free: *mut MiBlock,
    // number of blocks in use
    pub used: u16,
    // if non-zero, presumably `block_size == 1 << block_size_shift` — TODO confirm
    pub block_size_shift: u8,
    // tag of the owning heap (matches MiHeap::tag)
    pub heap_tag: u8,
    // size of each block in bytes
    pub block_size: usize,
    // start of the usable block area
    pub page_start: *mut u8,
    // encoded list of blocks freed by other threads (atomic)
    pub xthread_free: AtomicUsize,
    // encoded pointer to the owning heap (atomic)
    pub xheap: AtomicUsize,
    // intrusive links in the heap's page queue for this size bin
    pub next: *mut MiPage,
    pub prev: *mut MiPage
}

// SAFETY(review): MiPage holds raw pointers and atomics; only `xthread_free`
// and `xheap` are designed for cross-thread access. Soundness of sharing the
// remaining fields depends on the allocator's ownership protocol — confirm at
// the use sites.
unsafe impl Sync for MiPage {}

/// Page flags: either the combined raw word or the two individual flag bytes.
/// NOTE(review): the C original (`mi_page_flags_t`) is a union providing both
/// views simultaneously; as a Rust enum this is a tagged union, so only one
/// view exists at a time and it carries a discriminant — confirm this matches
/// how the flags are read and written in the ported code.
pub enum MiPageFlags {
    FullAligned(u32),
    X(MiPageFlagsStruct)
}

/// The kind of pages a segment holds, which determines the page size inside
/// the segment. Variant names keep the C `mi_page_kind_t` spelling for easy
/// cross-reference with the original source, so the naming lint is silenced
/// explicitly rather than left to warn on every build.
#[allow(non_camel_case_types)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MiPageKind {
    MI_PAGE_SMALL,    // small blocks go into 64KiB pages inside a segment
    MI_PAGE_MEDIUM,   // medium blocks go into 512KiB pages inside a segment
    MI_PAGE_LARGE,    // larger blocks go into a single page spanning a whole segment
    MI_PAGE_HUGE      // a huge page is a single page in a segment of variable size (but still 2MiB aligned)
                      // used for blocks `> MI_LARGE_OBJ_SIZE_MAX` or an alignment `> MI_BLOCK_ALIGNMENT_MAX`.
}

/// The two individual page flags — the "struct view" carried by the public
/// `MiPageFlags::X` variant.
///
/// Made `pub`: it was declared private while being exposed in a public enum
/// variant, which is a compile error (E0446, private type in public
/// interface). Small `Copy` derives are added since it is two plain bytes.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct MiPageFlagsStruct {
    // non-zero when the page sits in the heap's "full" queue
    pub in_full: u8,
    // non-zero when the page contains aligned blocks
    pub has_aligned: u8,
}

/// Intrusive doubly-linked queue of pages that all serve blocks of
/// `block_size` (one queue per size bin in `MiHeap::pages`); pages are
/// linked through `MiPage::next` / `MiPage::prev`.
pub struct MiPageQueue {
    pub first: *mut MiPage,
    pub last: *mut MiPage,
    // block size served by every page in this queue
    pub block_size: usize,
}
/// Random-number-generator state (the C original, `mi_random_ctx_t`, is a
/// chacha-based context with the same field layout).
/// NOTE(review): the name looks like a typo for `MiRandomCtx`, but renaming
/// would break external users of the type (e.g. via `MiHeap::random`).
pub struct MiRandomCxt {
    // generator input block
    pub input: [u32; 16],
    // generated output block
    pub output: [u32; 16],
    // number of 32-bit words still unconsumed in `output`
    pub output_available: i32,
    // true if seeded from a weak (non-OS) entropy source — TODO confirm
    pub weak: bool,
}

/// A free block; the free-list link lives inside the block memory itself.
pub struct MiBlock {
    // mi_encoded_t next;
    // next free block, stored in encoded form (hence `usize` rather than a
    // raw pointer; the C original XORs it with the heap keys — TODO confirm
    // the Rust encoding once the free-list code is ported)
    pub next: usize,
}

/// Thread-local allocator data (mirrors the C `mi_tld_t`).
pub struct MiTld {
    // monotonic per-thread counter — presumably a deterministic "clock"
    // as in the C original's `heartbeat`; TODO confirm
    pub heartbeat: u128,
    // true while inside a recursive region (re-entrancy guard)
    pub recurse: bool,
    // the initial (backing) heap of this thread
    pub heap_backing: *mut MiHeap,
    // list of all heaps of this thread (linked via MiHeap::next)
    pub heaps: *mut MiHeap,
    // segment-related thread-local state
    pub segments: MiSegmentsTld,
    // OS-related thread-local state
    pub os: MiOSTld,
}

/// Segments thread-local data: per-thread queues and statistics for segment
/// management (mirrors the C `mi_segments_tld_t`).
pub struct MiSegmentsTld {
    pub small_free: MiSegmentQueue,   // queue of segments with free small pages
    pub medium_free: MiSegmentQueue,  // queue of segments with free medium pages
    pub pages_purge: MiPageQueue,  // queue of freed pages that are delay purged
    pub count: usize,        // current number of segments;
    pub peak_count: usize,   // peak number of segments
    pub current_size: usize, // current size of all segments
    pub peak_size: usize,    // peak size of all segments
    pub reclaim_count: usize,// number of reclaimed (abandoned) segments
    pub subproc: *mut MiSubproc,    // sub-process this thread belongs to.
    pub os: *mut MiOSTld          // points to os tld
}

/// OS-specific thread-local data.
pub struct MiOSTld {
    // last-used region index — presumably an allocation hint; TODO confirm
    pub region_idx: usize,
}

/// A sub-process: scopes abandoned-segment bookkeeping to a group of threads
/// instead of the whole process (mirrors the C `mi_subproc_t`).
/// NOTE(review): the `Arc<Mutex<usize>>` fields stand in for the C `mi_lock_t`;
/// the guarded `usize` payload appears unused — the Mutex serves purely as a
/// lock. Confirm once the list-manipulation code is ported.
pub struct MiSubproc {
    pub abandoned_count: AtomicUsize,         // count of abandoned segments for this sub-process
    pub abandoned_os_list_count: AtomicUsize, // count of abandoned segments in the os-list
    pub abandoned_os_lock: Arc<Mutex<usize>>,      // lock for the abandoned os segment list (outside of arena's) (this lock protect list operations)
    pub abandoned_os_visit_lock: Arc<Mutex<usize>>, // ensure only one thread per subproc visits the abandoned os list
    pub abandoned_os_list: *mut MiSegment,       // doubly-linked list of abandoned segments outside of arena's (in OS allocated memory)
    pub abandoned_os_list_tail: *mut MiSegment,  // the tail-end of the list
    pub memid: MiMemid,                  // provenance of this memory block
}

/// A segment: a large OS allocation subdivided into pages of one kind
/// (mirrors the C `mi_segment_t`).
pub struct MiSegment {
    // constant fields
    pub memid: MiMemid,            // memory id to track provenance
    pub allow_decommit: bool,
    pub allow_purge: bool,
    pub segment_size: usize,     // for huge pages this may be different from `MI_SEGMENT_SIZE`
    pub subproc: *mut MiSubproc,          // segment belongs to sub process
    // segment fields
    pub next: *mut MiSegment,             // must be the first (non-constant) segment field  -- see `segment.c:segment_init`
    pub prev: *mut MiSegment,
    pub was_reclaimed: bool,    // true if it was reclaimed (used to limit reclaim-on-free reclamation)
    pub dont_free: bool,        // can be temporarily true to ensure the segment is not freed
    pub abandoned: usize,        // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
    pub abandoned_visits: usize, // count how often this segment is visited for reclaiming (to force reclaim if it is too long)
    pub used: usize,             // count of pages in use (`used <= capacity`)
    pub capacity: usize,         // count of available pages (`#free + used`)
    pub segment_info_size: usize,// space we are using from the first page for segment meta-data and possible guard pages.
    pub cookie: usize,           // verify addresses in secure mode: `_mi_ptr_cookie(segment) == segment->cookie`
    pub abandoned_os_next: *mut MiSegment, // only used for abandoned segments outside arena's, and only if `mi_option_visit_abandoned` is enabled
    pub abandoned_os_prev: *mut MiSegment,
    // layout like this to optimize access in `mi_free`
    // NOTE(review): the C original stores an *atomic* thread id that is
    // rewritten on abandon/reclaim; `Arc<mi_threadid_t>` shares an immutable
    // value instead — confirm no code path needs to update it in place.
    pub thread_id: Arc<mi_threadid_t>,      // unique id of the thread owning this segment
    pub page_shift: usize,       // `1 << page_shift` == the page sizes == `page->block_size * page->reserved` (unless the first page, then `-segment_info_size`).
    pub page_kind: MiPageKind,        // kind of pages: small, medium, large, or huge
    // NOTE(review): declared as a single `MiPage` although the comment (and
    // the C original's trailing array) implies multiple pages per segment —
    // confirm how pages beyond the first are meant to be addressed.
    pub pages: MiPage,         // up to `MI_SMALL_PAGES_PER_SEGMENT` pages

}

/// Intrusive doubly-linked queue of segments, linked through
/// `MiSegment::next` / `MiSegment::prev`.
pub struct MiSegmentQueue {
    pub first: *mut MiSegment,
    pub last: *mut MiSegment,
}