//! Define the main evaluation stack of the Nickel abstract machine and related operations.
//!
//! See [eval](../eval/index.html).
use super::{
    Closure,
    cache::{Cache, CacheIndex},
    value::NickelValue,
};

use crate::{
    position::PosIdx,
    term::{BinaryOp, BindingType, NAryOp, StrChunk, UnaryOp},
};

use std::{mem, ptr};

/// The different kinds of items that the stack can store. Used in [Stack] as a marker that
/// determines the shape of the data beneath each marker.
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
// CAUTION: Do not change the representation `repr(u8)`
//
// While some of the code is agnostic with respect to the size and representation of the marker,
// some parts (like `Stack::top_marker`) rely on the fact that it is `repr(u8)`. Also, I can't
// see why we would need more than 255 different types of stack items. Any additional metadata for
// an item can and should be encoded in the item itself, not in the marker.
#[repr(u8)]
pub(crate) enum Marker {
    /// A pair of expressions to be checked for equality.
    Eq,
    /// An argument of an application.
    Arg,
    /// A tracked argument. Behaves the same as a standard argument, but is given directly as a
    /// cache index (thunk), such that it can be shared with other parts of the program.
    ///
    /// In particular, contract arguments are tracked, in order to report the actual, evaluated
    /// offending term in case of blame.
    TrackedArg,
    /// An update index (update frame), which is a pointer to a mutable memory cell to be updated
    /// (thunk).
    UpdateIndex,
    /// The continuation of a unary primitive operation.
    Op1Cont,
    /// The continuation of a binary primitive operation before the evaluation of the first
    /// argument.
    Op2FirstCont,
    /// The continuation of a binary primitive operation after the evaluation of the first argument.
    Op2SecondCont,
    /// The continuation of a n-ary primitive operation.
    OpNCont,
    /// A string chunk.
    ///
    /// Generated by [crate::term::UnaryOp::ChunksConcat] when evaluating a string with
    /// interpolated expressions. Such strings are represented by a list of chunks to evaluate and
    /// concatenate, a chunk being either an interpolated expression or a string literal.
    ///
    /// The shared environment is stored in the `str_acc` special register of the VM.
    StrChunk,
    /// A string accumulator.
    ///
    /// When evaluating a sequence of chunks, the `str_acc` register of the VM stores the chunks
    /// evaluated and concatenated into one string up to this point. If interpolated strings are
    /// nested, we need to save this accumulator on the stack, as a new string chunk evaluation can
    /// happen in the middle of the outer accumulation.
    StrAcc,
}

impl Marker {
    /// Returns the byte size of the stack item associated with this marker, i.e.
    /// `size_of::<T>()` for the `T` such that `T::marker() == self`.
    fn item_size<C: Cache>(&self) -> usize {
        match *self {
            Self::Eq => mem::size_of::<EqItem>(),
            Self::Arg => mem::size_of::<ArgItem>(),
            Self::TrackedArg => mem::size_of::<TrackedArgItem>(),
            Self::UpdateIndex => mem::size_of::<UpdateIndexItem<C>>(),
            Self::Op1Cont => mem::size_of::<Op1ContItem>(),
            Self::Op2FirstCont => mem::size_of::<Op2FirstContItem>(),
            Self::Op2SecondCont => mem::size_of::<Op2SecondContItem>(),
            Self::OpNCont => mem::size_of::<OpNContItem>(),
            Self::StrChunk => mem::size_of::<StrChunkItem>(),
            Self::StrAcc => mem::size_of::<StrAccItem>(),
        }
    }
}

/// A trait for data that can be pushed onto the stack, tying each payload type to its [Marker].
///
/// The stack copies implementors byte-wise into its backing storage and rematerializes them at
/// pop time, so each payload type must correspond to exactly one marker value.
trait StackItem {
    fn marker() -> Marker;
}

/// The payload of a [Marker::Eq] stack item.
#[derive(Clone, Debug, PartialEq)]
pub(crate) struct EqItem {
    /// The left-hand side of the equality.
    pub(crate) arg1: Closure,
    /// The right-hand side of the equality.
    pub(crate) arg2: Closure,
}

/// The payload of a [Marker::Arg] stack item.
#[derive(Clone, Debug, PartialEq)]
pub(crate) struct ArgItem {
    /// The argument itself, as a closure.
    pub(crate) arg: Closure,
    /// The original position of the argument, before it's been evaluated.
    pub(crate) orig_arg_pos: PosIdx,
}

/// The payload of a [Marker::TrackedArg] stack item.
#[derive(Clone, Debug, PartialEq)]
pub(crate) struct TrackedArgItem {
    /// The cache index (thunk) holding the argument, so that it can be shared.
    pub(crate) idx: CacheIndex,
    /// The original position of the argument, before it's been evaluated.
    pub(crate) orig_arg_pos: PosIdx,
}

/// The payload of a [Marker::UpdateIndex] stack item: a pointer to a mutable memory cell (thunk)
/// to be updated.
#[derive(Clone, Debug, PartialEq)]
pub(crate) struct UpdateIndexItem<C: Cache>(C::UpdateIndex);

/// Auxiliary data shared by all primitive operator continuation items.
#[derive(Clone, Debug, Copy, PartialEq)]
pub(crate) struct PrimopAppInfo {
    /// The callstack size at the point just before the operator evaluation started. Used to
    /// truncate the callstack after the evaluation is done.
    pub(crate) call_stack_size: u32,
    /// The position of the primitive operation application.
    pub(crate) pos_idx: PosIdx,
}

/// The payload of a [Marker::Op1Cont] stack item.
#[derive(Clone, Debug, PartialEq)]
pub(crate) struct Op1ContItem {
    /// The unary operator to apply once its argument is evaluated.
    pub(crate) op: UnaryOp,
    /// Position and callstack data of the enclosing primop application.
    pub(crate) app_info: PrimopAppInfo,
    /// The original position of the argument, before it's been evaluated.
    pub(crate) orig_pos_arg: PosIdx,
}

/// The payload of a [Marker::Op2FirstCont] stack item.
#[derive(Clone, Debug, PartialEq)]
pub(crate) struct Op2FirstContItem {
    /// The binary operator to apply once both arguments are evaluated.
    pub(crate) op: BinaryOp,
    /// Position and callstack data of the enclosing primop application.
    pub(crate) app_info: PrimopAppInfo,
    /// The second argument, not evaluated yet.
    pub(crate) arg2: Closure,
    /// The original position of the first argument, before it's been evaluated.
    pub(crate) orig_pos_arg1: PosIdx,
}

/// The payload of a [Marker::Op2SecondCont] stack item.
#[derive(Clone, Debug, PartialEq)]
pub(crate) struct Op2SecondContItem {
    /// The binary operator to apply once the second argument is evaluated.
    pub(crate) op: BinaryOp,
    /// Position and callstack data of the enclosing primop application.
    pub(crate) app_info: PrimopAppInfo,
    /// The first argument, already evaluated.
    pub(crate) arg1_evaled: Closure,
    /// The original position of the first argument, before it's been evaluated.
    pub(crate) orig_pos_arg1: PosIdx,
    /// The original position of the second argument, before it's been evaluated.
    pub(crate) orig_pos_arg2: PosIdx,
}

/// The payload of a [Marker::OpNCont] stack item.
#[derive(Clone, Debug, PartialEq)]
pub(crate) struct OpNContItem {
    /// The n-ary operator to apply once all arguments are evaluated.
    pub(crate) op: NAryOp,
    /// Position and callstack data of the enclosing primop application.
    pub(crate) app_info: PrimopAppInfo,
    /// Arguments that have already been evaluated, together with their original position.
    pub(crate) evaluated: Vec<(Closure, PosIdx)>,
    /// The stack (thus reversed) of arguments yet to be evaluated.
    pub(crate) pending: Vec<Closure>,
    /// The original position of the argument being currently evaluated by the VM (note that this
    /// argument is neither in [Self::evaluated] nor in [Self::pending], but only in the VM state).
    pub(crate) current_pos_idx: PosIdx,
}

/// The payload of a [Marker::StrChunk] stack item.
#[derive(Clone, Debug, PartialEq)]
pub(crate) struct StrChunkItem {
    /// The chunk to evaluate: either a string literal or an interpolated expression.
    pub(crate) chunk: StrChunk<NickelValue>,
}

/// The payload of a [Marker::StrAcc] stack item: a string accumulator which maintains state while
/// the virtual machine is evaluating a sequence of string chunks to a single string.
#[derive(Default, PartialEq)]
pub(crate) struct StrAccItem {
    /// The current result.
    pub(crate) acc: String,
    /// The common environment of chunks.
    pub(crate) env: super::Environment,
    /// The indentation level of the chunk currently being evaluated.
    pub(crate) curr_indent: u32,
    /// The position of the original (unevaluated) expression of the chunk currently being
    /// evaluated.
    pub(crate) curr_pos: PosIdx,
}

impl StackItem for EqItem {
    fn marker() -> Marker {
        Marker::Eq
    }
}

impl StackItem for ArgItem {
    fn marker() -> Marker {
        Marker::Arg
    }
}

impl StackItem for TrackedArgItem {
    fn marker() -> Marker {
        Marker::TrackedArg
    }
}

impl<C: Cache> StackItem for UpdateIndexItem<C> {
    fn marker() -> Marker {
        Marker::UpdateIndex
    }
}

impl StackItem for Op1ContItem {
    fn marker() -> Marker {
        Marker::Op1Cont
    }
}

impl StackItem for Op2FirstContItem {
    fn marker() -> Marker {
        Marker::Op2FirstCont
    }
}

impl StackItem for Op2SecondContItem {
    fn marker() -> Marker {
        Marker::Op2SecondCont
    }
}

impl StackItem for OpNContItem {
    fn marker() -> Marker {
        Marker::OpNCont
    }
}

impl StackItem for StrChunkItem {
    fn marker() -> Marker {
        Marker::StrChunk
    }
}

impl StackItem for StrAccItem {
    fn marker() -> Marker {
        Marker::StrAcc
    }
}

impl Marker {
    /// Returns `true` for markers denoting an application argument, tracked or not.
    #[cfg(test)]
    pub(crate) fn is_arg(self) -> bool {
        self == Marker::Arg || self == Marker::TrackedArg
    }

    /// Returns `true` for the update frame (thunk) marker.
    pub(crate) fn is_idx(self) -> bool {
        self == Marker::UpdateIndex
    }

    /// Returns `true` for markers denoting a primitive operator continuation.
    pub(crate) fn is_cont(self) -> bool {
        self == Marker::Op1Cont
            || self == Marker::Op2FirstCont
            || self == Marker::Op2SecondCont
            || self == Marker::OpNCont
    }
}

/// A possible continuation of interest when a sealed term is currently being evaluated.
/// See [Stack::peek_sealed_cont].
pub(crate) enum SealedCont {
    /// The next operation on the stack is an [Op2SecondContItem] where the operator is
    /// [BinaryOp::Unseal].
    Unseal,
    /// The next operation on the stack is an [Op1ContItem] where the operator is
    /// [UnaryOp::Seq].
    Seq,
    /// All other cases (including an empty stack).
    Other,
}

/// The Nickel evaluation stack.
///
/// # Design
///
/// Because the Nickel virtual machine isn't yet a full bytecode VM (see RFC007), we currently need
/// to store non-trivial data structures on the stack. Ideally, in a bytecode VM, the kind of
/// things that can be stored on the stack is limited, and if possible they're all one-word long
/// (mostly values). This is much harder to realize in the current tree-walking interpreter.
///
/// We used to store a single type `Marker` which was a big enum of everything that could land in
/// the stack, having the stack itself just being a `Vec<Marker>`. Given how enums are represented
/// in Rust, this consumed a huge amount of space for any stack element (more than 100 bytes). As
/// mentioned above, it's really hard to bring this enum back to a reasonable size; unless we would
/// allocate a lot of things on the heap, but that would have poor performance and somehow defeats
/// the purpose of having a fast-to-access stack.
///
/// Instead, we now implement the stack as an untyped sequence of bytes (`Vec<u8>`). When we push
/// an arbitrary data structure to the stack, we first push the data, and then use the last byte
/// to store a [Marker]. When popping, we look at this trailing marker byte, which lets us know how
/// to interpret the rest of the data at the top of the stack, and how many bytes we need to pop.
///
/// # Safety
///
/// We can entirely ignore alignment constraints, because the stack is designed with the following
/// invariant: no reference to a stack item is ever materialized directly in the backing vector.
/// For the Rust compiler, [Self::data] is just a stream of bytes. Whenever we need to pop, read or
/// write data, we always first re-materialize a stack item on the native stack (that is, in a
/// local variable), which is properly aligned.
pub(crate) struct Stack<C: Cache> {
    /// The backing storage: a raw byte stream of items, each followed by its one-byte marker.
    data: Vec<u8>,
    /// Zero-sized member tying the stack to a cache implementation (for `C::UpdateIndex` items).
    phantom: std::marker::PhantomData<C>,
}

impl<C: Cache> Default for Stack<C> {
    /// Builds an empty stack whose backing vector has the default (zero) initial capacity of
    /// [Vec]. See [Stack::new] for a stack with a more generous pre-allocation.
    fn default() -> Self {
        Self {
            data: Vec::new(),
            phantom: std::marker::PhantomData,
        }
    }
}

impl<C: Cache> Stack<C> {
    /// The default capacity of the eval stack. Currently one page (4KB).
    pub(crate) const DEFAULT_CAPACITY: usize = 4 * 1024;

    /// Creates a stack with a default capacity of [Self::DEFAULT_CAPACITY]. Use
    /// [Self::with_capacity] for a different value, or [Default::default] for a stack with the
    /// same default capacity as [Vec].
    pub(crate) fn new() -> Self {
        Self::with_capacity(Self::DEFAULT_CAPACITY)
    }

    /// Creates a stack with the specified capacity in bytes. Uses [Vec::with_capacity] to build
    /// the underlying vector.
    pub(crate) fn with_capacity(capacity: usize) -> Self {
        Stack {
            data: Vec::with_capacity(capacity),
            phantom: std::marker::PhantomData,
        }
    }

    /// Pushes an item onto the stack. Reserves sufficient space in the backing storage, writes
    /// `item` as a sequence of bytes, and finally writes the corresponding marker.
    fn push<T: StackItem>(&mut self, item: T) {
        let size_value = mem::size_of::<T>();
        let kind = T::marker();

        let prev_len = self.data.len();
        self.data.reserve(size_value + mem::size_of::<Marker>());

        // Note: the pointer is taken *after* `reserve`, which may reallocate the backing buffer.
        let storage = self.data.as_mut_ptr();

        unsafe {
            // Safety: we reserved enough space in `data` with the call to `reserve` above so that
            // the backing allocation of data contains `data_slot` and `marker_slot`.
            let data_slot = storage.add(self.data.len());
            let marker_slot = data_slot.add(size_value);

            // Safety:
            // - `data_slot` is valid for write (located in the backing allocation of a mutably
            //    borrowed Vec)
            // - `&item` is valid for read (as a local variable)
            // - `data_slot` and `item` can't overlap, as `Vec` guarantees unique ownership of its
            //   allocation in the heap, while `item` is on the stack
            ptr::copy_nonoverlapping(&item as *const T as *const u8, data_slot, size_value);
            // Safety: `marker_slot` is valid for write (located in the backing allocation of a
            // mutably borrowed Vec)
            ptr::write(marker_slot, kind as u8);

            // Safety: we've reserved enough capacity with `reserve()` above, and we wrote
            // initialized data in the new slots with `ptr::write`, so we can force a new length.
            self.data
                .set_len(prev_len + size_value + mem::size_of::<Marker>());
        }

        // Since we've copied the value onto the eval stack, we will materialize it again at pop
        // time: we mustn't run any clean up code now. In some sense, we've moved the value from
        // the Rust stack into the eval stack, albeit as a bunch of untyped bytes instead of a
        // proper `T` value.
        let _ = mem::ManuallyDrop::new(item);
    }

    /// Tries to pop an element as `T`, or returns `None` if the stack is empty or if the top
    /// marker isn't equal to `T::marker()`.
    fn pop<T: StackItem>(&mut self) -> Option<T> {
        let marker = self.top_marker()?;

        if marker != T::marker() {
            return None;
        }

        // Safety: we've checked that the stack is non-empty and that the top element is a marker
        // which is equal to `T::marker()`
        unsafe { Some(self.pop_unchecked()) }
    }

    /// Peeks the top marker of the stack, or returns `None` if the stack is empty.
    pub(crate) fn top_marker(&self) -> Option<Marker> {
        // Safety (transmute): by the stack invariant, the last byte of `data` is always a marker
        // written from a valid `Marker` value, and `Marker` is `repr(u8)`.
        self.data
            .last()
            .map(|marker_untyped| unsafe { mem::transmute::<u8, Marker>(*marker_untyped) })
    }

    /// Returns an iterator over the markers, starting from the top of the stack and going down.
    fn markers(&self) -> StackMarkerIter<'_, C> {
        StackMarkerIter {
            stack: self,
            cursor: self.data.len(),
        }
    }

    /// Counts the number of consecutive markers satisfying `pred` from the top of the stack.
    #[cfg(test)]
    fn count(&self, mut pred: impl FnMut(Marker) -> bool) -> usize {
        self.markers().take_while(|marker| pred(*marker)).count()
    }

    /// Pops the top item as `T`, without checking that the tag corresponds.
    ///
    /// # Safety
    ///
    /// The top element of the stack must be of type `T`. That is, the stack must be non-empty
    /// and [Self::top_marker] must return `T::marker`.
    unsafe fn pop_unchecked<T: StackItem>(&mut self) -> T {
        // Safety: safety pre-conditions of this function
        let item: T = unsafe { mem::ManuallyDrop::into_inner(self.read_unchecked()) };
        // underflow: read_unchecked computes `marker_offset - item_size` which is `len -
        // marker_size - item_size` with checked operations (and panics upon failure), so we know
        // this computation can't underflow at this point
        self.data
            .truncate(self.data.len() - mem::size_of::<Marker>() - mem::size_of::<T>());

        item
    }

    /// Reads the top item as `T`, without checking that the tag corresponds.
    ///
    /// # Safety
    ///
    /// The top element of the stack must be of type `T`. That is, the stack must be non-empty
    /// and [Self::top_marker] must return `T::marker`.
    ///
    /// - Once the item is read, it is *not* removed from the stack. Thus the result must **not**
    ///   be manually dropped (e.g. [std::mem::ManuallyDrop::drop] or
    ///   [std::mem::ManuallyDrop::into_inner]).
    /// - You must not move out of any data contained in the return value.
    /// - The top value must not be materialized a second time, either using [Self::read_unchecked]
    ///   or any of the `pop` variants, while the return value of this method is still alive. That
    ///   will violate non-aliasing requirements of many heap-allocated data (this is only ok if
    ///   `T` is [Copy]).
    unsafe fn read_unchecked<T: StackItem>(&self) -> mem::ManuallyDrop<T> {
        let item_size = mem::size_of::<T>();
        let data_offset = self
            .data
            .len()
            .checked_sub(mem::size_of::<Marker>())
            .expect("unexpected missing marker on the eval stack")
            .checked_sub(item_size)
            .expect("unexpected missing item data on the eval stack after reading a marker");

        let mut item_slot = mem::MaybeUninit::<T>::uninit();

        unsafe {
            // Safety:
            // - self.data.as_ptr() is valid for read (backing allocation of a Vec)
            // - `item_slot` is valid for write (uninitialized slot on the stack)
            // - they can't alias (stack slot vs heap-allocated, mutably borrowed vector
            //   allocation)
            ptr::copy_nonoverlapping(
                self.data.as_ptr().add(data_offset),
                item_slot.as_mut_ptr() as *mut u8,
                item_size,
            );

            // Safety: we've written back a `T` value in `item_slot`, as it was originally pushed
            // onto the stack, so `item_slot` is initialized and is a valid state for `T`.
            mem::ManuallyDrop::new(item_slot.assume_init())
        }
    }

    /// Pops the top element of the stack and materializes it on the Rust stack to properly drop
    /// it, whatever element it is. Does nothing if the stack is empty.
    fn drop_top(&mut self) {
        let Some(top_marker) = self.top_marker() else {
            return;
        };

        // Safety: in each branch, we materialize an object whose type corresponds to the marker.
        unsafe {
            match top_marker {
                Marker::Eq => {
                    self.pop_unchecked::<EqItem>();
                }
                Marker::Arg => {
                    self.pop_unchecked::<ArgItem>();
                }
                Marker::TrackedArg => {
                    self.pop_unchecked::<TrackedArgItem>();
                }
                Marker::UpdateIndex => {
                    self.pop_unchecked::<UpdateIndexItem<C>>();
                }
                Marker::Op1Cont => {
                    self.pop_unchecked::<Op1ContItem>();
                }
                Marker::Op2FirstCont => {
                    self.pop_unchecked::<Op2FirstContItem>();
                }
                Marker::Op2SecondCont => {
                    self.pop_unchecked::<Op2SecondContItem>();
                }
                Marker::OpNCont => {
                    self.pop_unchecked::<OpNContItem>();
                }
                Marker::StrChunk => {
                    self.pop_unchecked::<StrChunkItem>();
                }
                Marker::StrAcc => {
                    self.pop_unchecked::<StrAccItem>();
                }
            }
        }
    }

    /// Pops (and drops) all items in the stack and resets the state of the [Cache] elements it encounters.
    pub(crate) fn unwind(&mut self, cache: &mut C) {
        while !self.data.is_empty() {
            if let Some(Marker::UpdateIndex) = self.top_marker() {
                // Safety: we checked in the outer if that the top marker is `UpdateIndex`.
                let UpdateIndexItem::<C>(mut uidx) = unsafe { self.pop_unchecked() };
                cache.reset_index_state(&mut uidx);
            } else {
                self.drop_top();
            }
        }
    }

    /// Count the number of arguments at the top of the stack.
    #[cfg(test)]
    pub(crate) fn count_args(&self) -> usize {
        self.count(Marker::is_arg)
    }

    /// Pushes a (non-tracked) application argument on the stack, along with its original position.
    pub(crate) fn push_arg(&mut self, arg: Closure, orig_arg_pos: PosIdx) {
        self.push(ArgItem { arg, orig_arg_pos })
    }

    /// Pushes a tracked argument (a cache index) on the stack, along with its original position.
    pub(crate) fn push_tracked_arg(&mut self, idx: CacheIndex, orig_arg_pos: PosIdx) {
        self.push(TrackedArgItem { idx, orig_arg_pos })
    }

    /// Pushes an update frame on the stack.
    pub(crate) fn push_update_index(&mut self, uidx: C::UpdateIndex) {
        self.push(UpdateIndexItem::<C>(uidx))
    }

    /// Pushes a unary operator continuation on the stack.
    pub(crate) fn push_op1_cont(&mut self, op_cont: Op1ContItem) {
        self.push(op_cont);
    }

    /// Pushes a binary operator first continuation on the stack.
    pub(crate) fn push_op2_first_cont(&mut self, op_cont: Op2FirstContItem) {
        self.push(op_cont);
    }

    /// Pushes a binary operator second continuation on the stack.
    pub(crate) fn push_op2_second_cont(&mut self, op_cont: Op2SecondContItem) {
        self.push(op_cont);
    }

    /// Pushes a n-ary operator continuation on the stack.
    pub(crate) fn push_opn_cont(&mut self, op_cont: OpNContItem) {
        self.push(op_cont);
    }

    /// Push a sequence of equalities on the stack.
    pub(crate) fn push_eqs<I>(&mut self, it: I)
    where
        I: Iterator<Item = (Closure, Closure)>,
    {
        // `+ 1` accounts for the one-byte marker written after each item (`Marker` is `repr(u8)`).
        self.data
            .reserve(it.size_hint().0 * (mem::size_of::<EqItem>() + 1));

        for (arg1, arg2) in it {
            self.push(EqItem { arg1, arg2 });
        }
    }

    /// Push a sequence of string chunks on the stack.
    pub(crate) fn push_str_chunks<I>(&mut self, it: I)
    where
        I: Iterator<Item = StrChunk<NickelValue>>,
    {
        // `+ 1` accounts for the one-byte marker written after each item (`Marker` is `repr(u8)`).
        self.data
            .reserve(it.size_hint().0 * (mem::size_of::<StrChunkItem>() + 1));

        for chunk in it {
            self.push(StrChunkItem { chunk });
        }
    }

    /// Push a string accumulator on the stack.
    pub(crate) fn push_str_acc(&mut self, str_acc: StrAccItem) {
        self.push(str_acc);
    }

    /// Try to pop an argument from the top of the stack. If `None` is returned, the top element
    /// was not an argument and the stack is left unchanged.
    ///
    /// If the argument is tracked, it is automatically converted into an owned closure.
    pub(crate) fn pop_arg(&mut self, cache: &C) -> Option<(Closure, PosIdx)> {
        let marker = self.top_marker()?;

        match marker {
            Marker::Arg => {
                // Safety: we just read a `Marker::Arg` marker at the top of the stack.
                let ArgItem { arg, orig_arg_pos } = unsafe { self.pop_unchecked() };
                Some((arg, orig_arg_pos))
            }
            Marker::TrackedArg => {
                // Safety: we just read a `Marker::TrackedArg` marker at the top of the stack.
                let TrackedArgItem { idx, orig_arg_pos } = unsafe { self.pop_unchecked() };
                Some((cache.get(idx), orig_arg_pos))
            }
            _ => None,
        }
    }

    /// Try to pop an argument from the top of the stack and return it as an index. If `None` is
    /// returned, the top element was not an argument and the stack is left unchanged.
    ///
    /// If the argument is tracked, its index is returned directly; otherwise, the closure is
    /// first inserted into the cache and the fresh index is returned.
    pub(crate) fn pop_arg_as_idx(&mut self, cache: &mut C) -> Option<(CacheIndex, PosIdx)> {
        let marker = self.top_marker()?;

        match marker {
            Marker::Arg => {
                // Safety: we just read a `Marker::Arg` marker at the top of the stack.
                let ArgItem { arg, orig_arg_pos } = unsafe { self.pop_unchecked() };
                let idx = cache.add(arg, BindingType::Normal);
                Some((idx, orig_arg_pos))
            }
            Marker::TrackedArg => {
                // Safety: we just read a `Marker::TrackedArg` marker at the top of the stack.
                let TrackedArgItem { idx, orig_arg_pos } = unsafe { self.pop_unchecked() };
                Some((idx, orig_arg_pos))
            }
            _ => None,
        }
    }

    /// Try to pop an index from the top of the stack. If `None` is returned, the top element was
    /// not an index and the stack is left unchanged.
    pub(crate) fn pop_update_index(&mut self) -> Option<C::UpdateIndex> {
        self.pop::<UpdateIndexItem<C>>()
            .map(|UpdateIndexItem(uidx)| uidx)
    }

    /// Try to pop a unary operator continuation from the top of the stack. If `None` is returned, the
    /// top element was not an operator continuation and the stack is left unchanged.
    pub(crate) fn pop_op1_cont(&mut self) -> Option<Op1ContItem> {
        self.pop()
    }

    /// Try to pop a binary operator first continuation from the top of the stack. If `None` is
    /// returned, the top element was not an operator continuation and the stack is left unchanged.
    pub(crate) fn pop_op2_first_cont(&mut self) -> Option<Op2FirstContItem> {
        self.pop()
    }

    /// Try to pop a binary operator second continuation from the top of the stack. If `None` is
    /// returned, the top element was not an operator continuation and the stack is left unchanged.
    pub(crate) fn pop_op2_second_cont(&mut self) -> Option<Op2SecondContItem> {
        self.pop()
    }

    /// Try to pop a n-ary operator continuation from the top of the stack. If `None` is returned,
    /// the top element was not an operator continuation and the stack is left unchanged.
    pub(crate) fn pop_opn_cont(&mut self) -> Option<OpNContItem> {
        self.pop()
    }

    /// Helper for an ad-hoc check that needs to be performed by the evaluation of a sealed term,
    /// to decide what to do next. It used to be implemented using a generic `peek_op_cont` method,
    /// but such a generic method is now impossible to implement with the stack as a byte stream
    /// implementation. Instead, we moved this check inside the stack.
    ///
    /// This method looks at the top element of the stack and returns a corresponding [SealedCont].
    pub(crate) fn peek_sealed_cont(&self) -> SealedCont {
        match self.top_marker() {
            Some(Marker::Op1Cont) => {
                // Safety: we checked that the marker corresponds to what we read
                let item: mem::ManuallyDrop<Op1ContItem> = unsafe { self.read_unchecked() };

                if let UnaryOp::Seq = item.op {
                    SealedCont::Seq
                } else {
                    SealedCont::Other
                }
            }
            Some(Marker::Op2SecondCont) => {
                // Safety: we checked that the marker corresponds to what we read
                let item: mem::ManuallyDrop<Op2SecondContItem> = unsafe { self.read_unchecked() };

                if let BinaryOp::Unseal = item.op {
                    SealedCont::Unseal
                } else {
                    SealedCont::Other
                }
            }
            _ => SealedCont::Other,
        }
    }

    /// Try to pop an equality from the top of the stack. If `None` is returned, the top element
    /// was not an equality and the stack is left unchanged.
    pub(crate) fn pop_eq(&mut self) -> Option<EqItem> {
        self.pop()
    }

    /// Try to pop a string chunk from the top of the stack. If `None` is returned, the top element
    /// was not a string chunk and the stack is left unchanged.
    pub(crate) fn pop_str_chunk(&mut self) -> Option<StrChunk<NickelValue>> {
        self.pop::<StrChunkItem>()
            .map(|StrChunkItem { chunk }| chunk)
    }

    /// Try to pop a string accumulator from the top of the stack. If `None` is returned, the top
    /// element was not a string accumulator and the stack is left unchanged.
    pub(crate) fn pop_str_acc(&mut self) -> Option<StrAccItem> {
        self.pop()
    }

    /// Check if the top element is a [CacheIndex].
    pub(crate) fn is_top_idx(&self) -> bool {
        self.top_marker().is_some_and(Marker::is_idx)
    }

    /// Check if the top element is an operation continuation.
    pub(crate) fn is_top_cont(&self) -> bool {
        self.top_marker().is_some_and(Marker::is_cont)
    }

    /// Discard all the consecutive equalities from the top of the stack. This drops the
    /// continuation of the equality being currently evaluated.
    pub(crate) fn clear_eqs(&mut self) {
        while self.pop_eq().is_some() {}
    }
}

impl<C: Cache> Drop for Stack<C> {
    /// Materializes and drops the remaining items one by one, so that their own destructors run
    /// (the raw byte buffer alone wouldn't free heap data owned by the items).
    fn drop(&mut self) {
        // `top_marker()` returns `Some` exactly as long as `data` is non-empty.
        while self.top_marker().is_some() {
            self.drop_top();
        }
    }
}

/// An iterator over the markers in the stack.
pub(crate) struct StackMarkerIter<'a, C: Cache> {
    /// The stack being iterated over.
    stack: &'a Stack<C>,
    /// The cursor, pointing to one index past the current marker (that is, it's `stack.data.len()`
    /// at the beginning of iteration and `0` at the end).
    cursor: usize,
}

impl<C: Cache> Iterator for StackMarkerIter<'_, C> {
    type Item = Marker;

    /// Decodes the marker just below the cursor and moves the cursor past the corresponding item.
    fn next(&mut self) -> Option<Self::Item> {
        if self.cursor == 0 {
            return None;
        }

        // Safety: we maintain as an invariant that `cursor` either is `0`, or `cursor - 1` points
        // to a byte of the stack which is the marker of a stack item. This is true when creating
        // the iterator (cursor should point to one byte past the last element) and maintained
        // throughout iteration.
        //
        // We rely on `Marker` being `repr(u8)`.
        //
        // underflow: we checked that `cursor != 0` above, so `cursor >= 1`.
        let marker = unsafe { mem::transmute::<u8, Marker>(self.stack.data[self.cursor - 1]) };

        // Skip over both the marker byte and the item payload it describes.
        self.cursor = self
            .cursor
            .checked_sub(mem::size_of::<Marker>() + marker.item_size::<C>())
            .expect(
                "invalid eval stack layout during marker iteration: underflow while updating the cursor",
            );

        Some(marker)
    }
}

impl<C: Cache> std::fmt::Debug for Stack<C> {
    /// Renders the stack as one marker per line, from the top of the stack downwards.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        writeln!(f, "--- STACK ---")?;
        self.markers()
            .try_for_each(|marker| writeln!(f, "| {marker:?}"))?;
        writeln!(f, "---  END  ---")
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::eval::cache::CacheImpl;
    use crate::term::UnaryOp;
    use assert_matches::assert_matches;

    impl Stack<CacheImpl> {
        /// Count the number of indices at the top of the stack.
        pub fn count_thunks(&self) -> usize {
            Stack::count(self, Marker::is_idx)
        }

        /// Count the number of operation continuations at the top of the stack.
        pub fn count_conts(&self) -> usize {
            Stack::count(self, Marker::is_cont)
        }
    }

    /// Builds a cheap, comparable closure (the constant `true`) for use in tests.
    fn some_closure() -> Closure {
        NickelValue::bool_true().into()
    }

    /// Builds a default unary operator continuation that tests customize via struct update.
    fn some_cont_item() -> Op1ContItem {
        Op1ContItem {
            op: UnaryOp::Typeof,
            app_info: PrimopAppInfo {
                call_stack_size: 0,
                pos_idx: PosIdx::NONE,
            },
            orig_pos_arg: PosIdx::NONE,
        }
    }

    #[test]
    fn marker_differentiates() {
        assert!(ArgItem::marker().is_arg());
        assert!(UpdateIndexItem::<CacheImpl>::marker().is_idx());
        assert!(Op1ContItem::marker().is_cont());
    }

    #[test]
    fn pushing_and_popping_args() {
        let mut s = Stack::default();
        assert_eq!(0, s.count_args());

        s.push_arg(some_closure(), PosIdx::NONE);
        s.push_arg(some_closure(), PosIdx::NONE);
        assert_eq!(2, s.count_args());
        assert_eq!(
            some_closure(),
            s.pop_arg(&CacheImpl::new()).expect("Already checked").0
        );
        assert_eq!(1, s.count_args());
    }

    #[test]
    fn pushing_and_popping_thunks() {
        let mut s = Stack::default();
        assert_eq!(0, s.count_thunks());

        let mut eval_cache = CacheImpl::new();

        let mut idx = eval_cache.add(some_closure(), BindingType::Normal);
        s.push_update_index(eval_cache.make_update_index(&mut idx).unwrap());
        idx = eval_cache.add(some_closure(), BindingType::Normal);
        s.push_update_index(eval_cache.make_update_index(&mut idx).unwrap());

        assert_eq!(2, s.count_thunks());
        s.pop_update_index().expect("Already checked");
        assert_eq!(1, s.count_thunks());
    }

    // Checks that a thunk being updated (blackholed) can't get a second update index until the
    // first update has been performed.
    #[test]
    fn thunk_blackhole() {
        let mut eval_cache = CacheImpl::new();
        let mut idx = eval_cache.add(some_closure(), BindingType::Normal);
        let idx_upd = eval_cache.make_update_index(&mut idx);
        assert_matches!(idx_upd, Ok(..));
        assert_matches!(eval_cache.make_update_index(&mut idx), Err(..));
        eval_cache.update(some_closure(), idx_upd.unwrap());
        assert_matches!(eval_cache.make_update_index(&mut idx), Ok(..));
    }

    #[test]
    fn pushing_and_popping_conts() {
        let mut s = Stack::default();
        assert_eq!(0, s.count_conts());

        s.push_op1_cont(Op1ContItem {
            app_info: PrimopAppInfo {
                call_stack_size: 3,
                pos_idx: PosIdx::NONE,
            },
            ..some_cont_item()
        });
        s.push_op1_cont(Op1ContItem {
            app_info: PrimopAppInfo {
                call_stack_size: 4,
                pos_idx: PosIdx::NONE,
            },
            ..some_cont_item()
        });

        assert_eq!(2, s.count_conts());
        // The stack is LIFO: the continuation pushed last (call_stack_size 4) pops first.
        assert_eq!(
            Some(Op1ContItem {
                app_info: PrimopAppInfo {
                    call_stack_size: 4,
                    pos_idx: PosIdx::NONE
                },
                ..some_cont_item()
            }),
            s.pop_op1_cont()
        );
        assert_eq!(1, s.count_conts());
    }
}
