use core::sync::atomic::AtomicPtr;

use crate::page::{Page, PageQueue};

/// Pointer-sized unsigned integer, kept with its C-style name (`uintptr_t`)
/// so this port stays visually close to the original C layout.
#[allow(non_camel_case_types)]
type uptr = usize;

/// Sampled guard-page configuration for a heap (only compiled in with the
/// `guarded` feature). `#[repr(C)]` keeps the field order/layout fixed for
/// C interop — do not reorder fields.
#[repr(C)]
#[cfg(feature = "guarded")]
struct Guarded {
    /// Minimal size for guarded objects.
    size_min: usize,
    /// Maximal size for guarded objects.
    size_max: usize,
    /// Sample rate (set to 0 to disable guarded pages).
    sample_rate: usize,
    /// Starting sample count.
    sample_seed: usize,
    /// Current sample count (counting down to 0).
    sample_count: usize,
}

/// A heap owns a set of pages.
/// A heap owns a set of pages.
///
/// `#[repr(C)]` pins the field order/layout for C interop — do not reorder
/// fields. Raw pointers and the `AtomicPtr` list mean this type carries no
/// ownership/thread-safety guarantees by itself; invariants are maintained
/// by the surrounding allocator code.
#[repr(C)]
pub struct Heap {
    /// Thread-local data this heap is associated with.
    /// NOTE(review): `Tld` is declared elsewhere — confirm lifetime/ownership there.
    tld: *mut Tld,
    /// Head of the lock-free list of blocks freed by other threads,
    /// to be processed by the owning thread (encoded via `keys` — see below).
    thread_delayed_free: AtomicPtr<Block>,
    /// Thread id this heap belongs to.
    thread_id: ThreadId,
    /// Arena id if the heap belongs to a specific arena (or 0).
    arena_id: ArenaId,
    /// Random cookie to verify pointers.
    cookie: uptr,
    /// Two random keys used to encode the `thread_delayed_free` list.
    keys: [uptr; 2],
    /// Random number context used for secure allocation.
    random: RandomCtx,
    /// Total number of pages in the `pages` queues.
    page_count: usize,
    /// Smallest retired index (retired pages are fully free, but still in the page queues).
    page_retired_min: usize,
    /// Largest retired index into the `pages` array.
    page_retired_max: usize,
    /// List of heaps per thread.
    next: *mut Heap,
    /// `true` if this heap should not reclaim abandoned pages.
    no_reclaim: bool,
    /// Custom tag, can be used for separating heaps based on the object types.
    tag: u8,
    /// Guard-page sampling state (only with the `guarded` feature).
    #[cfg(feature = "guarded")]
    guarded: Guarded,
    /// Optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.
    pages_free_direct: [*mut Page; MI_PAGES_DIRECT],
    /// Queue of pages for each size class (or "bin").
    pages: [*mut PageQueue; MI_BIN_FULL + 1],
}
