use core::sync::atomic::{AtomicPtr, AtomicUsize};

#[allow(non_camel_case_types)]
type u1 = bool;
#[allow(non_camel_case_types)]
type u7 = u8;
type AtomicUptr = AtomicUsize;

/// A page contains blocks of one specific size (`block_size`).
/// Each page has three list of free blocks:
/// `free` for blocks that can be allocated,
/// `local_free` for freed blocks that are not yet available to `mi_malloc`
/// `thread_free` for freed blocks by other threads
/// The `local_free` and `thread_free` lists are migrated to the `free` list
/// when it is exhausted. The separate `local_free` list is necessary to
/// implement a monotonic heartbeat. The `thread_free` list is needed for
/// avoiding atomic operations in the common case.
///
/// `used - |thread_free|` == actual blocks that are in use (alive)
/// `used - |thread_free| + |free| + |local_free| == capacity`
///
/// We don't count `freed` (as |free|) but use `used` to reduce
/// the number of memory accesses in the `mi_page_all_free` function(s).
///
/// Notes:
/// - Access is optimized for `free.c:mi_free` and `alloc.c:mi_page_alloc`
/// - Using `uint16_t` does not seem to slow things down
/// - The size is 10 words on 64-bit which helps the page index calculations
///   (and 12 words on 32-bit, and encoded free lists add 2 words)
/// - `xthread_free` uses the bottom bits as a delayed-free flags to optimize
///   concurrent frees where only the first concurrent free adds to the owning
///   heap `thread_delayed_free` list (see `free.c:mi_free_block_mt`).
///   The invariant is that no-delayed-free is only set if there is
///   at least one block that will be added, or as already been added, to
///   the owning heap `thread_delayed_free` list. This guarantees that pages
///   will be freed correctly even if only other threads free blocks.
#[repr(C, packed)]
pub struct Page {
    // "owned" by the segment
    /// index in the segment `pages` array, `page == &segment->pages[page->segment_idx]`
    segment_idx: u8,
    /// `true` if the segment allocated this page
    segment_in_use: u1,
    /// `true` if the page virtual memory is committed
    is_committed: u1,
    /// `true` if the page was initially zero initialized
    is_zero_init: u1,
    /// `true` if the page is in a huge segment
    is_huge: u1,

    // layout like this to optimize access in `mi_malloc` and `mi_free`
    /// number of blocks committed, must be the first field, see `segment.c:page_clear`
    capacity: u16,
    /// number of blocks reserved in memory
    reserved: u16,
    /// `in_full` and `has_aligned` flags (8 bits)
    flags: PageFlags,
    /// `true` if the blocks in the free list are zero initialized
    free_is_zero: u1,
    /// expiration count for retired blocks
    retire_expire: u7,

    /// list of available free blocks (`malloc` allocates from this list)
    free: *mut Block,
    /// list of deferred free blocks by this thread (migrates to `free`)
    local_free: *mut Block,
    /// number of blocks in use (including blocks in `thread_free`)
    used: u16,
    /// if not zero, then `(1 << block_size_shift) == block_size` (only used for fast path in `free.c:_mi_page_ptr_unalign`)
    block_size_shift: u8,
    /// tag of the owning heap, used to separate heaps by object type
    heap_tag: u8,
    // padding
    /// size available in each block (always `>0`)
    block_size: usize,
    /// start of the page area containing the blocks
    page_start: *mut u8,

    #[cfg(any(feature = "encode-freelist", feature = "padding"))]
    keys: [usize; 2],

    /// list of deferred free blocks freed by other threads
    xthread_free: AtomicPtr<Block>,
    xheap: AtomicUptr,

    /// next page owned by the heap with the same `block_size`
    next: *mut Page,
    /// previous page owned by the heap with the same `block_size`
    prev: *mut Page,

    #[cfg(target_pointer_width = "32")]
    padding: *mut u8,
}

/// Pages of a certain block size are held in a queue.
pub struct PageQueue {
    first: *mut Page,
    last: *mut Page,
    block_size: usize,
}
