//! ## Map Backed by POSIX Shared Memory Object
//!
//! This module defines a map backed by a POSIX shared memory object, which allows single
//! writer and multiple readers to access the map concurrently without any atomic
//! operations.
//!
//! ### Structure in memory
//!
//! The memory layout of the memory region is painstakingly designed and placed at the
//! beginning of the shared memory object.
//!
//! ```txt
//! +-------------------+--> offset_base = BASE
//! |      Metadata     |
//! +-------------------+--> offset_slots = (offset_base + whole sizeof Metadata)
//! |       Slots       |                      .align_to(alignof Slots)
//! +-------------------+--> offset_entries = (offset_slots + nslots * sizeof Slot)
//! |            next   |
//! | Entry1 -----------+
//! |          key-vals |
//! +-------------------+--> ...
//! |            next   |
//! | Entry2 -----------+
//! |          key-vals |
//! +-------------------+--> ...
//! |       ....        |
//! +-------------------+--> top
//! |                   |
//! |     ( empty )     |
//! |                   |
//! +-------------------+--> limit (local/global)
//! ```
//!
//! where the metadata contains the magic number, top, global limit, and the number of
//! slots of the map.
//! - The magic number can be specified via [`Config`], and is used to validate the map.
//! - The top and global limit are used to manage the memory usage of the map. Readers
//!   should memorize the global limit (as local limit) when opening the map, and check
//!   if the map needs enlargement when accessing the map.
//! - In the slots, each slot contains the offset of the first entry in the slot.
//!
//! ### Configuration
//!
//! The configuration of the shared memory map is defined by the [`Config`]. It contains
//! the magic number, the prefix of the shared memory object, the data type of the map,
//! and the hasher type.
//! - The magic number is used to validate the map;
//! - The prefix is used to generate the name of the POSIX shared memory object;
//! - The data type of the map is used to define the key-value of the map entry;
//! - The hasher type is used to hash the key.
//!
//! ### Reader-Writer Model
//!
//! The shared memory map supports two states: [`Owned`] and [`Borrowed`], corresponding
//! to the owner (writer) and the borrower (reader). The owner can insert new entries
//! into the map, while the borrower can only lookup entries in the map.
//!
//! Insertion can result in the enlargement of the map (an increase of the global limit),
//! and the reader can detect it by checking whether its local limit is less than the
//! global limit.
//!
//! The map is designed to be append-only, thus entry deletion is not supported.
//!
//! ### Insertion, Lookup And Modification
//!
//! The shared memory map provides the owner with the ability to insert new entries or
//! modify existing entries in the map, using the following functions:
//! - [`SharedMemMap::<Owned>::insert`]: insert a new entry into the map. The function
//!   might return an [`Error::MemNotEnough`] if there is not enough space for the new
//!   entry, and the owner should enlarge the map manually.
//! - [`SharedMemMap::<Owned>::lookup_mut`]: lookup an entry in the map with a mutable
//!   reference.
//!
//! And both the owner and the borrower can lookup entries in the map, using the
//! following functions:
//! - [`SharedMemMap::<Owned>::lookup`]: lookup an entry in the map, with no (local)
//!   limit.
//! - [`SharedMemMap::<Borrowed>::lookup`]: lookup an entry in the map, with a local
//!   limit. The function might return an [`Error::EnlargementNeeded`] if the entry being
//!   looked up is not within the limit.
//!
//! To use [`SharedMemMap::<Owned>::lookup_mut`] to modify an entry, the value type must
//! implement the [`MutableVal`] trait.
//!
//! ### Iteration
//!
//! The shared memory map provides [`SharedMemMap::slots`] to iterate over the slots in
//! the map.
//!
//! Note that the caller should enlarge the map manually (enlargement is not automatic)
//! if needed.

use std::{
    array, ffi,
    hash::{Hash, Hasher},
    marker::PhantomData,
    mem, ptr, slice,
};

use crate::{
    error::{Error, Result},
    utils::{offset::Offset, retlibc},
};

mod private {
    use super::*;

    pub trait Sealed {}
    impl Sealed for Owned {}
    impl Sealed for Borrowed {}
}

/// Configuration for the shared memory map.
pub trait Config<const N: usize>: 'static {
    /// The magic number of the shared memory map.
    const MAGIC: u32;

    /// The prefix of the shared memory object, used to generate the name of the POSIX
    /// shared memory object.
    const PREFIX: &str;

    /// The key type of the shared memory map.
    type Key: Key;

    /// The value type of the shared memory map.
    type Val<'a>: Val<'a, N>;

    /// The hasher type of the shared memory map.
    type Hasher: Default + Hasher;

    /// Hash the key with a fresh instance of [`Config::Hasher`].
    ///
    /// A default implementation is provided.
    fn hash(key: &<Self::Key as Key>::Ref<'_>) -> u64 {
        let mut state = Self::Hasher::default();
        Hash::hash(key, &mut state);
        state.finish()
    }
}

/// The state of the shared memory object.
///
/// Note that this trait is sealed and cannot be implemented outside of this module.
pub trait State: private::Sealed {
    /// Return the limit of the shared memory object.
    ///
    /// - `Some(usize)`: the shared memory object is borrowed, and this is the local
    ///   limit memorized when it was (re)opened.
    /// - `None`: the shared memory object is owned; the owner reads the global limit
    ///   directly instead.
    fn local_limit(&self) -> Option<usize>;
}

/// The shared memory object is owned.
///
/// The owner is the single writer: it creates the object, may insert entries and
/// enlarge the map, and unlinks the object on drop.
pub struct Owned;

impl State for Owned {
    fn local_limit(&self) -> Option<usize> {
        // The owner always consults the global limit, so there is no local one.
        None
    }
}

/// The shared memory object is borrowed.
///
/// The borrower is a reader; the wrapped `usize` is its local limit, i.e. the mapped
/// size memorized when the object was (re)opened.
pub struct Borrowed(usize);

impl State for Borrowed {
    fn local_limit(&self) -> Option<usize> {
        Some(self.0)
    }
}

/// A shared memory map backed by a POSIX shared memory object.
///
/// See the module-level documentation for the memory layout and the reader-writer
/// model.
pub struct SharedMemMap<const N: usize, C: Config<N>, S: State> {
    // Points at the metadata at the base of the mapped region; `'static` because the
    // mapping stays valid until `Drop` unmaps it.
    inner: &'static mut SharedMemMapInner,
    // Name passed to `shm_open`/`shm_unlink`, built by `evaluate_shm_name`.
    shm_name: ffi::CString,
    // `Owned` (writer) or `Borrowed` (reader carrying its local limit).
    state: S,
    config: PhantomData<C>,
}

impl<const N: usize, C> SharedMemMap<N, C, Owned>
where
    C: Config<N>,
{
    /// Create a new [`SharedMemMap`] with the given name, number of slots, number of
    /// pages.
    ///
    /// A handle to the [`SharedMemMap`] is returned on success, dropping which will
    /// unlink the shared memory object and unmap it from the address space.
    ///
    /// Returns [`Error::MemNotEnough`] if `npages` pages cannot hold the metadata and
    /// the slot array.
    pub fn new(name: &str, nslots: usize, npages: usize) -> Result<Self> {
        // Evaluate the limit.
        //
        // NOTE(review): the page size is hard-coded as 4096 — confirm this matches the
        // target platform's page size.
        let required = SharedMemMapInner::evaluate_memory(nslots);
        let limit = npages * 4096;
        if required > limit {
            return Err(Error::MemNotEnough {
                required,
                free: limit,
            });
        }

        // Evaluate the name.
        let shm_name = SharedMemMapInner::evaluate_shm_name::<N, C>(name)?;

        // Create a new shared memory map.
        let map = SharedMemMapInner::new(&shm_name, limit)?;

        // Initialize the map. Plain (non-volatile) writes are fine here: no reader can
        // observe the map before this constructor returns.
        //
        // NOTE(review): `top` starts at `required`, which is aligned for `Offset` but
        // not necessarily for `Entry` — confirm the first entry's alignment.
        map.magic = C::MAGIC;
        map.nslots = nslots;
        map.top = required;
        map.global_limit = limit;
        map.slots_mut().fill(Offset::null());

        Ok(Self {
            inner: map,
            shm_name,
            state: Owned,
            config: PhantomData,
        })
    }

    /// Insert a new entry into the map.
    ///
    /// If the key is already present this is a no-op: the traversal below stops at the
    /// matching entry without updating its value. Use
    /// [`SharedMemMap::<Owned>::lookup_mut`] to modify an existing value.
    ///
    /// Returns [`Error::MemNotEnough`] if there is not enough space for a new entry;
    /// the caller should [`enlarge`](Self::enlarge) the map manually and retry.
    pub fn insert<'a>(
        &'a mut self,
        key: &<C::Key as Key>::Ref<'a>,
        val: &C::Val<'a>,
    ) -> Result<()> {
        let base = self.inner.base();

        // Hash the key to get the index.
        let index = C::hash(key) as usize % self.inner.nslots;

        let offset = self.inner.slots()[index];
        let mut prev = if offset.is_null() {
            // Create a new entry and initialize it.
            let offset = self.new_entry(key, val)?;

            // The slot is empty, so insert the offset directly.
            self.inner.slots_mut()[index] = offset;
            return Ok(());
        } else {
            // The slot is not empty, so get the first entry in the slot.
            //
            // Safety: the base address and offset are guaranteed to be valid.
            unsafe {
                Entry::<N, C::Key, C::Val<'a>>::from_raw_mut(self.inner.base().cast_mut(), offset)
            }
        };

        // Traverse the linked list to find the entry, and insert the offset.
        while &prev.key.as_ref() != key {
            if prev.next.is_null() {
                // Reached the tail without a match: create a new entry and link it.
                prev.next = self.new_entry(key, val)?;
                break;
            }

            // Safety: the base address is guaranteed to be valid.
            let next =
                unsafe { &mut *(base.byte_add(prev.next.0) as *mut Entry<N, C::Key, C::Val<'a>>) };

            prev = next;
        }

        Ok(())
    }

    /// Lookup an entry in the map with no (local) limit.
    ///
    /// Returns `None` if the key is not present.
    pub fn lookup<'a>(&'a self, key: &<C::Key as Key>::Ref<'a>) -> Option<C::Val<'a>> {
        // Hash the key to get the index.
        let index = C::hash(key) as usize % self.inner.nslots;

        // Lookup the entry in the map.
        let mut entry = self.entry(index)?;

        // Traverse the linked list to find the entry; `?` bails out with `None` when
        // the chain ends.
        while &entry.key.as_ref() != key {
            // Safety: the base address is guaranteed to be valid.
            entry = unsafe { entry.next_entry(self.inner.base()) }?;
        }

        Some(entry.value())
    }

    /// Enlarge the map with a given number of pages.
    ///
    /// This grows the object and the mapping, and bumps the global limit so readers
    /// can detect the enlargement (see [`SharedMemMap::<Borrowed>::enlarge`]).
    pub fn enlarge(&mut self, npages: usize) -> Result<()> {
        // Safety: the old map is not accessed after this function.
        self.inner = unsafe { SharedMemMapInner::enlarge(&self.shm_name, self.inner, npages) }?;

        Ok(())
    }
}

impl<const N: usize, C> SharedMemMap<N, C, Owned>
where
    C: Config<N>,
    for<'a> C::Val<'a>: MutableVal<'a, N>,
{
    /// Lookup an entry in the map with a mutable reference.
    ///
    /// Returns `None` if the key is not present. Only available when the value type
    /// implements [`MutableVal`].
    pub fn lookup_mut<'a>(
        &'a mut self,
        key: &<C::Key as Key>::Ref<'a>,
    ) -> Option<<C::Val<'a> as MutableVal<'a, N>>::Mut> {
        let base = self.inner.base();

        // Hash the key to get the index.
        let index = C::hash(key) as usize % self.inner.nslots;

        // Lookup the entry in the map.
        let mut entry = self.entry(index)?;

        // Traverse the linked list to find the entry; `?` bails out with `None` when
        // the chain ends.
        while &entry.key.as_ref() != key {
            // Safety: the base address is guaranteed to be valid.
            entry = unsafe { entry.next_entry(base) }?;
        }

        // Re-create the found entry as mutable from its offset.
        //
        // Safety: the base address and offset are valid.
        let entry = unsafe {
            Entry::<N, C::Key, C::Val<'a>>::from_raw_mut(base.cast_mut(), entry.offset(base))
        };

        Some(entry.value_mut())
    }
}

impl<const N: usize, C> SharedMemMap<N, C, Borrowed>
where
    C: Config<N>,
{
    /// Open an existing [`SharedMemMap`] with the given name.
    ///
    /// A handle to the [`SharedMemMap`] is returned on success, dropping which will
    /// unmap it from the address space.
    ///
    /// Returns [`Error::InvalidMagic`] if the magic number in the mapped metadata
    /// does not match [`Config::MAGIC`].
    pub fn open(name: &str) -> Result<Self> {
        // Evaluate the name.
        let shm_name = SharedMemMapInner::evaluate_shm_name::<N, C>(name)?;

        // Open an existing shared memory map. The mapped size becomes the local limit.
        let (map, local_limit) = SharedMemMapInner::open(&shm_name)?;

        let map = Self {
            inner: map,
            shm_name,
            state: Borrowed(local_limit),
            config: PhantomData,
        };

        // Validate the map.
        map.validate()?;

        Ok(map)
    }

    /// Lookup an entry in the map with a local limit.
    ///
    /// - `Ok(Some(val))`: the entry was found within the local limit.
    /// - `Ok(None)`: the key is not present.
    /// - `Err(Error::EnlargementNeeded)`: the chain leads past the local limit; call
    ///   [`enlarge`](Self::enlarge) and retry.
    pub fn lookup<'a>(&'a self, key: &<C::Key as Key>::Ref<'a>) -> Result<Option<C::Val<'a>>> {
        let base = self.inner.base();

        // As the shared memory object is borrowed, the limit is local.
        let limit = self.state.0;

        // Hash the key to get the index.
        let index = C::hash(key) as usize % self.inner.nslots;

        // Lookup the entry in the map.
        //
        // Note that the offset from the slot is not guaranteed to be within the limit,
        // but `Entry::within` will not access the memory before ensuring the entry is
        // valid (i.e., within the limit).
        let mut entry = match self.entry(index) {
            Some(entry) => entry,
            None => return Ok(None),
        };

        // Check if the entry is within the limit.
        if !entry.within(base, limit) {
            return Err(Error::EnlargementNeeded);
        }

        // Traverse the linked list to find the entry, during which the limit is checked.
        while &entry.key.as_ref() != key {
            if !entry.next.within(limit) {
                return Err(Error::EnlargementNeeded);
            }

            // Safety: the base address is guaranteed to be valid.
            entry = match unsafe { entry.next_entry(base) } {
                Some(entry) => entry,
                None => return Ok(None),
            };

            if !entry.within(base, limit) {
                return Err(Error::EnlargementNeeded);
            }
        }

        Ok(Some(entry.value()))
    }

    /// Enlarge the map if needed.
    ///
    /// Remaps the object at its current (possibly grown) size and refreshes the local
    /// limit; a no-op when no enlargement is needed.
    pub fn enlarge(&mut self) -> Result<()> {
        if self.need_enlargement() {
            // Safety:
            // 1. the map and size are guaranteed to be valid by design.
            // 2. the old map is not accessed after this function.
            unsafe {
                (self.inner, self.state.0) =
                    SharedMemMapInner::reopen(&self.shm_name, self.inner, self.state.0)?;
            }
        }
        Ok(())
    }

    /// Get the local limit of the map.
    #[inline]
    pub fn local_limit(&self) -> usize {
        self.state.0
    }

    /// Check if the map needs enlargement.
    ///
    /// Compares the memorized local limit against the (volatile-read) global limit.
    #[inline]
    pub fn need_enlargement(&self) -> bool {
        self.local_limit() < self.inner.global_limit()
    }
}

impl<const N: usize, C, S> SharedMemMap<N, C, S>
where
    C: Config<N>,
    S: State,
{
    /// Get an iterator over the slots in the map.
    ///
    /// Each non-empty slot yields an iterator over its `(key, value)` pairs.
    #[inline]
    pub fn slots(
        &self,
    ) -> impl Iterator<Item = impl Iterator<Item = (<C::Key as Key>::Ref<'_>, C::Val<'_>)>> {
        self.inner
            .slots()
            .iter()
            .filter(|offset| !offset.is_null())
            .map(|&offset| SlotIter {
                map: self.inner,
                limit: self.limit(),
                next: offset,
                marker: PhantomData::<(C::Key, C::Val<'_>)>,
            })
    }

    /// Get the usage of the map.
    ///
    /// This is the current `top`, i.e. the number of bytes consumed so far.
    #[inline]
    pub fn usage(&self) -> usize {
        self.inner.top()
    }

    /// Get the capacity of the map.
    ///
    /// This is the global limit, i.e. the total size of the shared memory object.
    #[inline]
    pub fn capacity(&self) -> usize {
        self.inner.global_limit()
    }
}

impl<const N: usize, C, S> SharedMemMap<N, C, S>
where
    C: Config<N>,
    S: State,
{
    /// Get the limit of the map.
    ///
    /// The borrower uses its memorized local limit; the owner falls back to the
    /// global limit.
    #[inline]
    fn limit(&self) -> usize {
        self.state
            .local_limit()
            .unwrap_or(self.inner.global_limit())
    }

    /// Validate the map.
    ///
    /// Checks the magic number in the mapped metadata against [`Config::MAGIC`].
    #[inline]
    fn validate(&self) -> Result<()> {
        if self.inner.magic == C::MAGIC {
            Ok(())
        } else {
            Err(Error::InvalidMagic {
                actual: self.inner.magic,
                expected: C::MAGIC,
            })
        }
    }

    /// Create a new entry in the map.
    ///
    /// Bump-allocates the entry at `top`, initializes it with the key-value, advances
    /// `top` (aligned up for `Entry`), and returns the new entry's offset from the
    /// base. Returns [`Error::MemNotEnough`] if the entry does not fit below the
    /// global limit.
    fn new_entry<'a>(
        &mut self,
        key: &<C::Key as Key>::Ref<'a>,
        val: &C::Val<'a>,
    ) -> Result<Offset> {
        // Check if there is enough space for the new entry.
        //
        // NOTE(review): `global_limit - top` would underflow (and panic in debug
        // builds) if an earlier aligned bump ever pushed `top` past the limit —
        // confirm that cannot happen.
        let required = Entry::<N, C::Key, C::Val<'a>>::whole_size_with_val(val);
        let free = self.inner.global_limit() - self.inner.top();
        if required > free {
            return Err(Error::MemNotEnough { required, free });
        }

        // Allocate the new entry.
        //
        // Safety: the top is guaranteed to be valid.
        let entry = unsafe {
            &mut *(self.inner.base().byte_add(self.inner.top())
                as *mut Entry<N, C::Key, C::Val<'a>>)
        };
        self.inner.set_top(
            (self.inner.top() + required)
                .next_multiple_of(mem::align_of::<Entry<N, C::Key, C::Val<'a>>>()),
        );

        // Initialize the entry.
        entry.initialize(key, val);

        Ok(entry.offset(self.inner.base()))
    }

    /// Get the entry at the given index.
    ///
    /// Returns `None` if the index is out of range or the slot is empty.
    #[inline]
    fn entry<'a>(&self, index: usize) -> Option<&Entry<'a, N, C::Key, C::Val<'a>>> {
        let offset = self.inner.slots().get(index).copied()?;

        if offset.is_null() {
            None
        } else {
            // Safety: the base and the slots are guaranteed to be valid.
            Some(unsafe { Entry::from_raw(self.inner.base(), offset) })
        }
    }
}

impl<const N: usize, C, S> Drop for SharedMemMap<N, C, S>
where
    C: Config<N>,
    S: State,
{
    fn drop(&mut self) {
        // The borrower unmaps its memorized local limit; the owner additionally
        // unlinks the object and unmaps the full global limit.
        let limit = match self.state.local_limit() {
            Some(limit) => limit,
            None => {
                // Safety: the name is valid as it is created by `evaluate_shm_name`.
                unsafe { libc::shm_unlink(self.shm_name.as_ptr()) };

                // NOTE: The content in shared memory object can still be accessed, until
                // the memory region is unmapped.
                //
                // See https://man7.org/linux/man-pages/man3/shm_unlink.3p.html
                self.inner.global_limit()
            }
        };

        // Safety: the base address and the limit are guaranteed to be valid by design.
        unsafe { libc::munmap(self.inner.base().cast_mut().cast(), limit) };
    }
}

/// An iterator over the entries in the slot.
///
/// Follows the singly-linked entry chain starting at `next`, stopping at a null
/// offset or at the first entry beyond `limit`.
struct SlotIter<'a, const N: usize, K: Key, V: Val<'a, N>> {
    /// The map the slot belongs to; used to obtain the base address.
    map: &'a SharedMemMapInner,
    /// The (local or global) limit the iterator must stay within.
    limit: usize,
    /// Offset of the next entry to yield; null when exhausted.
    next: Offset,
    marker: PhantomData<(K, V)>,
}

impl<'a, const N: usize, K, V> Iterator for SlotIter<'a, N, K, V>
where
    K: 'a + Key,
    V: Val<'a, N>,
{
    type Item = (K::Ref<'a>, V);

    fn next(&mut self) -> Option<Self::Item> {
        // A null offset marks the end of the chain.
        if self.next.is_null() {
            return None;
        }

        // Safety: the base address and the limit are guaranteed to be valid; the
        // entry's memory is not read before the `within` check below.
        let entry = unsafe { Entry::<N, K, V>::from_raw(self.map.base(), self.next) };
        if !entry.within(self.map.base(), self.limit) {
            return None;
        }

        // Advance the cursor before yielding the current entry.
        self.next = entry.next;
        Some((entry.key(), entry.value()))
    }
}

/// Inner structure of [`SharedMemMap`].
///
/// This is the metadata placed at the very beginning of the shared memory object;
/// the slot array and the entries follow it in the same mapping (see the module-level
/// layout diagram).
#[repr(C)]
struct SharedMemMapInner {
    /// Magic number used to validate the map on open.
    magic: u32,
    /// Offset of the first free byte; entries are bump-allocated here.
    top: usize,
    /// Total size of the shared memory object; accessed via volatile read/write.
    global_limit: usize,
    /// Number of slots in the slot array.
    nslots: usize,
}

impl SharedMemMapInner {
    /// Create a new [`SharedMemMapInner`] and map it into the address space.
    ///
    /// The object is created read-write, sized to `size` bytes, and mapped shared.
    /// The caller is expected to initialize the returned metadata.
    ///
    /// NOTE(review): `O_CREAT` without `O_EXCL` silently reuses an existing object of
    /// the same name — confirm this is intended.
    fn new(shm_name: &ffi::CStr, size: usize) -> Result<&'static mut Self> {
        // Create a new shared memory object.
        //
        // Safety: The name is a valid C string and the flags are valid.
        let fd = unsafe {
            retlibc::check!(libc::shm_open(
                shm_name.as_ptr(),
                libc::O_RDWR | libc::O_CREAT,
                0o600,
            ))
        };

        // Resize the shared memory object.
        //
        // Safety:
        // 1. the file descriptor and size are valid.
        // 2. the allocated resources are reclaimed on failure.
        unsafe {
            retlibc::check!(libc::ftruncate(fd, size as _); else {
                libc::close(fd);
                libc::shm_unlink(shm_name.as_ptr());
            });
        }

        // Map the shared memory object into the address space of the calling process.
        //
        // Safety:
        // 1. the file descriptor and size are valid.
        // 2. the flags are valid.
        // 3. the allocated resources are reclaimed on failure.
        let shm = unsafe {
            retlibc::check!(
                libc::mmap(
                    ptr::null_mut(),
                    size,
                    libc::PROT_READ | libc::PROT_WRITE,
                    libc::MAP_SHARED,
                    fd,
                    0,
                );
                else {
                    libc::close(fd);
                    libc::shm_unlink(shm_name.as_ptr());
                }
            )
        };

        // Close the file descriptor; the mapping stays valid without it.
        //
        // Safety:
        // 1. the file descriptor is valid.
        // 2. the allocated resources are reclaimed on failure.
        unsafe {
            retlibc::check!(libc::close(fd); else {
                libc::munmap(shm, size);
                libc::shm_unlink(shm_name.as_ptr());
            });
        }

        // Safety: the shared memory object is valid.
        let map = unsafe { &mut *(shm as *mut Self) };

        Ok(map)
    }

    /// Enlarge an existing [`SharedMemMapInner`] with a given number of pages and map
    /// it into the address space.
    ///
    /// The global limit is bumped to the new size so readers can detect the
    /// enlargement.
    ///
    /// ## Safety
    ///
    /// The caller must ensure that the pointer to the old table is valid.
    unsafe fn enlarge(
        shm_name: &ffi::CStr,
        old_map: *mut Self,
        npages: usize,
    ) -> Result<&'static mut Self> {
        // Get the size of the shared memory object.
        //
        // Safety: the old table is guaranteed to be valid by the caller.
        let old_size = unsafe { (*old_map).global_limit() };

        // NOTE(review): the page size is hard-coded as 4096 here as well — keep in
        // sync with `SharedMemMap::<Owned>::new`.
        let new_size = old_size + npages * 4096;

        // Open an existing shared memory object.
        //
        // Safety: The name is a valid C string and the flags are valid.
        let fd = unsafe { retlibc::check!(libc::shm_open(shm_name.as_ptr(), libc::O_RDWR, 0o600)) };

        // Resize the shared memory object.
        //
        // Safety:
        // 1. the file descriptor and size are valid.
        // 2. the allocated resources are reclaimed on failure.
        unsafe {
            retlibc::check!(libc::ftruncate(fd, new_size as _); else {
                libc::close(fd);
            });
        }

        // Remap the shared memory object into the address space of the calling process.
        //
        // Safety:
        // 1. the old map, size, and flags are valid.
        // 2. the allocated resources are reclaimed on failure.
        // 3. the shared memory object size is recovered on failure.
        let shm = unsafe {
            retlibc::check!(
                libc::mremap(
                    old_map as *mut _ as _,
                    old_size,
                    new_size,
                    libc::MREMAP_MAYMOVE,
                );
                else {
                    libc::ftruncate(fd, old_size as _);
                    libc::close(fd);
                }
            )
        };

        // Close the file descriptor.
        //
        // Safety:
        // 1. the file descriptor is valid.
        // 2. the allocated resources are reclaimed on failure.
        //
        // NOTE(review): unlike `reopen`, this failure path also `shm_unlink`s the
        // object, which would destroy it for existing readers — confirm intended.
        unsafe {
            retlibc::check!(libc::close(fd); else {
                libc::munmap(shm, new_size);
                libc::shm_unlink(shm_name.as_ptr());
            });
        }

        // Safety: the shared memory object is valid.
        let new_map = unsafe { &mut *(shm as *mut Self) };

        // Update the global limit (volatile write) so readers can detect the growth.
        new_map.set_global_limit(new_size);

        Ok(new_map)
    }

    /// Open an existing [`SharedMemMapInner`] and map it into the address space. The
    /// returned pair contains a mutable reference to the map and the local limit.
    fn open(shm_name: &ffi::CStr) -> Result<(&'static mut Self, usize)> {
        // Open the existing shared memory object read-only.
        //
        // Safety: The name is a valid C string and the flags are valid.
        let fd =
            unsafe { retlibc::check!(libc::shm_open(shm_name.as_ptr(), libc::O_RDONLY, 0o600)) };

        // Get the size of the shared memory object.
        //
        // Safety:
        // 1. the file descriptor is valid.
        // 2. the allocated resources are reclaimed on failure.
        let size = unsafe {
            let mut stat = mem::zeroed();
            retlibc::check!(libc::fstat(fd, &mut stat); else {
                libc::close(fd);
            });
            stat.st_size as usize
        };

        // Map the shared memory object into the address space of the calling process.
        //
        // Safety:
        // 1. the file descriptor and size are valid.
        // 2. the flags are valid.
        // 3. the allocated resources are reclaimed on failure.
        let shm = unsafe {
            retlibc::check!(
                libc::mmap(
                    ptr::null_mut(),
                    size,
                    libc::PROT_READ,
                    libc::MAP_SHARED,
                    fd,
                    0,
                );
                else {
                    libc::close(fd);
                }
            )
        };

        // Close the file descriptor.
        //
        // Safety:
        // 1. the file descriptor is valid.
        // 2. the allocated resources are reclaimed on failure.
        unsafe {
            retlibc::check!(libc::close(fd); else {
                libc::munmap(shm, size);
            });
        }

        // Safety: the shared memory object is valid.
        //
        // NOTE(review): the mapping is `PROT_READ` yet a `&'static mut Self` is
        // created — writing through it would fault; confirm the borrowed side never
        // calls `&mut self` methods that actually write.
        let map = unsafe { &mut *(shm as *mut Self) };

        Ok((map, size))
    }

    /// Reopen an existing [`SharedMemMapInner`] and map it into the address space. The
    /// returned pair contains a mutable reference to the map and the new size.
    ///
    /// ## Safety
    ///
    /// 1. The caller must ensure that the pointer to the old map and the old size are
    ///    valid.
    /// 2. The caller must ensure that the old map is not accessed after this function.
    unsafe fn reopen(
        shm_name: &ffi::CStr,
        old_map: *mut Self,
        old_size: usize,
    ) -> Result<(&'static mut Self, usize)> {
        // Open an existing shared memory object.
        //
        // Safety: The name is a valid C string and the flags are valid.
        let fd = unsafe { retlibc::check!(libc::shm_open(shm_name.as_ptr(), libc::O_RDWR, 0o600)) };

        // Get the size of the shared memory object.
        //
        // Safety:
        // 1. the file descriptor is valid.
        // 2. the allocated resources are reclaimed on failure.
        let new_size = unsafe {
            let mut stat = mem::zeroed();
            retlibc::check!(libc::fstat(fd, &mut stat); else {
                libc::close(fd);
            });
            stat.st_size as usize
        };

        // Remap the shared memory object into the address space of the calling process.
        //
        // Safety:
        // 1. the old map, size, and flags are valid.
        // 2. the allocated resources are reclaimed on failure.
        let shm = unsafe {
            retlibc::check!(
                libc::mremap(
                    old_map as *mut _ as _,
                    old_size,
                    new_size,
                    libc::MREMAP_MAYMOVE,
                );
                else {
                    libc::close(fd);
                }
            )
        };

        // Close the file descriptor.
        //
        // Safety:
        // 1. the file descriptor is valid.
        // 2. the allocated resources are reclaimed on failure.
        unsafe {
            retlibc::check!(libc::close(fd); else {
                libc::munmap(shm, new_size);
            });
        }

        // Safety: the shared memory object is valid.
        let new_map = unsafe { &mut *(shm as *mut Self) };

        Ok((new_map, new_size))
    }
}

impl SharedMemMapInner {
    /// Get the base address of the map.
    ///
    /// The metadata sits at the very beginning of the mapping, so `self` is the base.
    #[inline]
    fn base(&self) -> *const u8 {
        self as *const _ as _
    }

    /// Get the slots of the map.
    #[inline]
    fn slots(&self) -> &[Offset] {
        // Safety: the slots are guaranteed to be valid.
        unsafe { slice::from_raw_parts(self.slots_ptr(), self.nslots) }
    }

    /// Get the mutable slots of the map.
    #[inline]
    fn slots_mut(&mut self) -> &mut [Offset] {
        // Safety: the slots are guaranteed to be valid, and `&mut self` gives this
        // side of the mapping exclusive access.
        unsafe { slice::from_raw_parts_mut(self.slots_ptr() as *mut _, self.nslots) }
    }

    /// Get the pointer to the slots of the map.
    ///
    /// The slot array starts right after the metadata, aligned up for `Offset` (see
    /// the module-level layout diagram and `evaluate_memory`).
    #[inline]
    fn slots_ptr(&self) -> *const Offset {
        (self.base() as usize + mem::size_of::<Self>()).next_multiple_of(mem::align_of::<Offset>())
            as *const Offset
    }

    /// Get the global limit of the map via volatile read.
    ///
    /// NOTE(review): volatile is not atomic; this relies on the single-writer design
    /// and aligned word-sized accesses being indivisible on the target — confirm.
    #[inline]
    fn global_limit(&self) -> usize {
        // Safety: the global limit is guaranteed to be valid.
        unsafe { ptr::read_volatile(&self.global_limit) }
    }

    /// Set the global limit of the map via volatile write.
    #[inline]
    fn set_global_limit(&mut self, limit: usize) {
        // Safety: the global limit is guaranteed to be valid.
        unsafe { ptr::write_volatile(&mut self.global_limit, limit) }
    }

    /// Get the top of the map via volatile read.
    #[inline]
    fn top(&self) -> usize {
        // Safety: the top is guaranteed to be valid.
        unsafe { ptr::read_volatile(&self.top) }
    }

    /// Set the top of the map via volatile write.
    #[inline]
    fn set_top(&mut self, top: usize) {
        // Safety: the top is guaranteed to be valid.
        unsafe { ptr::write_volatile(&mut self.top, top) }
    }
}

impl SharedMemMapInner {
    /// Evaluate the required memory size for the shared memory object.
    fn evaluate_memory(nslots: usize) -> usize {
        mem::size_of::<Self>().next_multiple_of(mem::align_of::<Offset>())
            + nslots * mem::size_of::<Offset>()
    }

    /// Evaluate the name of the shared memory object and return a C string on success.
    fn evaluate_shm_name<const N: usize, C: Config<N>>(name: &str) -> Result<ffi::CString> {
        let name = format!("/{}{}", C::PREFIX, name);
        ffi::CString::new(name.clone()).map_err(|_| Error::InvalidName { name })
    }
}

/// The key of a shared memory map entry.
///
/// Entries store an owned `Key`, while lookups take and return the cheaper `Ref`
/// form to avoid cloning on the read path.
pub trait Key: Clone + Hash + PartialEq + Eq {
    /// The reference type of the key.
    type Ref<'a>: 'a + Clone + Hash + PartialEq + Eq;

    /// Get the reference to the key.
    fn as_ref(&self) -> Self::Ref<'_>;

    /// Clone the key from the reference.
    fn from_ref(key: &Self::Ref<'_>) -> Self;
}

/// The value of a shared memory map entry.
///
/// A value is composed of metadata and content.
pub trait Val<'a, const N: usize>: 'a {
    /// The metadata type of the value.
    ///
    /// The metadata is stored inline in the entry header.
    type Metadata: Clone;

    /// The content type of the value.
    ///
    /// The content is stored as `N` slices of this type following the entry header.
    type Content;

    /// Get the metadata of the value.
    fn metadata(&self) -> &Self::Metadata;

    /// Get the content of the value.
    fn content(&self) -> [&[Self::Content]; N];

    /// Parse the metadata and content to create a new value.
    fn parse(metadata: &'a Self::Metadata, content: &[&'a [Self::Content]; N]) -> Self;
}

/// The mutable value of a shared memory map entry.
///
/// This trait can optionally be implemented for the value type, when we need to call
/// [`SharedMemMap::<Owned>::lookup_mut`].
pub trait MutableVal<'a, const N: usize>: Val<'a, N> {
    /// The mutable type of the value, e.g. `&mut [u8]` for `&[u8]`.
    type Mut;

    /// Parse the metadata and mutable content to create a new mutable value.
    fn parse_mut(
        metadata: &'a mut Self::Metadata,
        content: [&'a mut [Self::Content]; N],
    ) -> Self::Mut;
}

/// The entry in the shared memory map.
///
/// The `N` content slices of the value are laid out after this header; `vsize`
/// records their element counts so they can be reconstructed on read.
#[repr(C)]
struct Entry<'a, const N: usize, K: Key, V: Val<'a, N>> {
    /// Offset of the next entry in the same slot chain; null terminates the chain.
    next: Offset,
    /// The owned copy of the key.
    key: K,
    /// The metadata of the value, stored inline.
    meta: V::Metadata,
    /// The element count of each of the `N` content slices.
    vsize: [usize; N],
}

impl<'a, const N: usize, K, V> Entry<'a, N, K, V>
where
    K: Key,
    V: Val<'a, N>,
{
    /// Get the key from the entry.
    fn key(&self) -> K::Ref<'_> {
        self.key.as_ref()
    }

    /// Get the values from the entry.
    ///
    /// Reconstructs the `N` content slices that follow the entry header and hands
    /// them, together with the inline metadata, to [`Val::parse`].
    fn value(&'a self) -> V {
        let mut offset = self.content_offset();
        let mut values: [&[V::Content]; N] = [&[]; N];
        for (i, &size) in self.vsize.iter().enumerate() {
            // Safety: the value is guaranteed to be valid.
            values[i] = unsafe { slice::from_raw_parts(offset as _, size) };
            // Advance past this slice and realign for the next one. Since
            // `size_of` is always a multiple of `align_of`, the realignment is
            // effectively a no-op, kept for symmetry with `initialize`.
            offset = (offset + size * mem::size_of::<V::Content>())
                .next_multiple_of(mem::align_of::<V::Content>());
        }
        V::parse(&self.meta, &values)
    }

    /// Get the offset of the content from the base address.
    ///
    /// Despite the name, this returns the *absolute address* where the content
    /// starts: the entry's own address plus its header size, rounded up to the
    /// alignment of `V::Content`.
    #[inline]
    fn content_offset(&self) -> usize {
        (self.base() as usize + mem::size_of::<Self>())
            .next_multiple_of(mem::align_of::<V::Content>())
    }

    /// Get the size of the entry with the given key-value.
    ///
    /// This is the header size padded to the content alignment, plus the byte
    /// size of all `N` content slices — i.e. the total shared memory an entry
    /// holding `val` occupies.
    #[inline]
    fn whole_size_with_val(val: &V) -> usize {
        mem::size_of::<Self>().next_multiple_of(mem::align_of::<V::Content>())
            + val
                .content()
                .iter()
                .map(|v| mem::size_of_val(*v))
                .sum::<usize>()
    }
}

impl<'a, const N: usize, K, V> Entry<'a, N, K, V>
where
    K: Key,
    V: MutableVal<'a, N>,
{
    /// Get the mutable values from the entry.
    ///
    /// Mirrors [`Entry::value`], but reconstructs the content as mutable slices
    /// and hands them to [`MutableVal::parse_mut`]. Taking `&'a mut self`
    /// guarantees exclusive access to the content for the lifetime of the result.
    fn value_mut(&'a mut self) -> V::Mut {
        let mut offset = self.content_offset();
        let mut values = array::from_fn::<&mut [V::Content], N, _>(|_| &mut []);
        for (i, &size) in self.vsize.iter().enumerate() {
            // Safety: the value is guaranteed to be valid.
            values[i] = unsafe { slice::from_raw_parts_mut(offset as _, size) };
            // Advance past this slice and realign for the next one.
            offset = (offset + size * mem::size_of::<V::Content>())
                .next_multiple_of(mem::align_of::<V::Content>());
        }
        V::parse_mut(&mut self.meta, values)
    }
}

impl<'a, const N: usize, K, V> Entry<'a, N, K, V>
where
    K: Key,
    V: Val<'a, N>,
{
    /// Initialize the entry with the given key-value.
    ///
    /// Writes the header fields (null next-pointer, an owned copy of the key, a
    /// clone of the metadata), then copies each of the `N` content slices into
    /// the shared memory region following the header, recording the element
    /// count of each slice in `vsize`. The caller must have reserved
    /// `whole_size_with_val(val)` bytes for this entry beforehand.
    fn initialize(&mut self, key: &K::Ref<'_>, val: &V) {
        self.next = Offset::null();
        self.key = K::from_ref(key);
        self.meta = val.metadata().clone();

        let mut offset = self.content_offset();
        for (i, value) in val.content().iter().enumerate() {
            self.vsize[i] = value.len();
            // Safety: the value is guaranteed to be valid.
            unsafe { ptr::copy_nonoverlapping(value.as_ptr(), offset as _, self.vsize[i]) };
            // Advance past the copied slice and realign for the next one.
            offset = (offset + self.vsize[i] * mem::size_of::<V::Content>())
                .next_multiple_of(mem::align_of::<V::Content>());
        }
    }
}

impl<'a, const N: usize, K, V> Entry<'a, N, K, V>
where
    K: Key,
    V: Val<'a, N>,
{
    /// Check if the entry is within the given limit.
    ///
    /// This function does not access any field of the entry before ensuring that the
    /// content offset is within the limit. So even if `self` is not valid, this function
    /// will not cause any undefined behavior.
    #[inline]
    fn within(&self, base: *const u8, limit: usize) -> bool {
        // `content_offset` only uses the entry's *address* plus compile-time
        // sizes, so it is safe to compute before anything is validated. If the
        // content offset fits below the bound, the header (including `vsize`)
        // fits too, making the short-circuited read of `vsize` sound.
        let content_offset = self.content_offset();
        let bound = base as usize + limit;
        content_offset <= bound
            && content_offset
                + self
                    .vsize
                    .iter()
                    .map(|v| v * mem::size_of::<V::Content>())
                    .sum::<usize>()
                <= bound
    }

    /// Get the base address of the entry.
    #[inline]
    fn base(&self) -> *const u8 {
        self as *const _ as _
    }

    /// Get the offset from the base address.
    ///
    /// `base` must be the start of the shared memory object and must not exceed
    /// the entry's own address, otherwise the subtraction underflows.
    #[inline]
    fn offset(&self, base: *const u8) -> Offset {
        Offset::new(self as *const _ as usize - base as usize)
    }

    /// Get the next entry based on the base address.
    ///
    /// Returns `None` when this entry is the last one in its slot chain.
    ///
    /// ## Safety
    ///
    /// The caller must ensure that the `base` is valid.
    #[inline]
    unsafe fn next_entry(&self, base: *const u8) -> Option<&Self> {
        if self.next.is_null() {
            None
        } else {
            // Safety: the base address and the offset are guaranteed to be valid.
            Some(unsafe { &*(base.byte_add(self.next.0) as *const Self) })
        }
    }

    /// Resolving a raw address to a reference to the entry.
    ///
    /// ## Safety
    ///
    /// The caller must ensure that the `base` and `offset` are valid.
    /// 1. The base address is the start address of the shared memory object.
    /// 2. The offset is the offset from the base address to the entry, so it should not
    ///    be null and within the shared memory object limit.
    #[inline]
    unsafe fn from_raw(base: *const u8, offset: Offset) -> &'a Self {
        // Safety: the base address and the offset are guaranteed to be valid.
        unsafe { &*(base.byte_add(offset.0) as *const Self) }
    }

    /// Resolving a raw address to a mutable reference to the entry.
    ///
    /// ## Safety
    ///
    /// The caller must ensure that the `base` and `offset` are valid.
    /// 1. The base address is the start address of the shared memory object.
    /// 2. The offset is the offset from the base address to the entry, so it should not
    ///    be null and within the shared memory object limit.
    #[inline]
    unsafe fn from_raw_mut(base: *mut u8, offset: Offset) -> &'a mut Self {
        // Safety: the base address and the offset are guaranteed to be valid.
        unsafe { &mut *(base.byte_add(offset.0) as *mut Self) }
    }
}

#[cfg(test)]
mod test {
    use std::hash::DefaultHasher;

    use coverage_helper::test;

    use super::*;

    // Trivial key implementation for tests: `usize` is `Copy`, so the reference
    // type can simply be the key itself.
    #[cfg_attr(all(coverage_nightly, test), coverage(off))]
    impl Key for usize {
        type Ref<'a> = usize;

        fn as_ref(&self) -> Self::Ref<'_> {
            *self
        }

        fn from_ref(key: &Self::Ref<'_>) -> Self {
            *key
        }
    }

    // Minimal value implementation for tests: a single byte slice with no metadata.
    #[cfg_attr(all(coverage_nightly, test), coverage(off))]
    impl<'a> Val<'a, 1> for &'a [u8] {
        type Metadata = ();

        type Content = u8;

        fn metadata(&self) -> &Self::Metadata {
            // `&()` is promoted to a `'static` reference, so this borrow is fine.
            &()
        }

        fn content(&self) -> [&[Self::Content]; 1] {
            [*self]
        }

        fn parse(_: &'a Self::Metadata, [slice]: &[&'a [Self::Content]; 1]) -> Self {
            slice
        }
    }

    // Mutable counterpart of the byte-slice value, used by `update_existing_data`.
    #[cfg_attr(all(coverage_nightly, test), coverage(off))]
    impl<'a> MutableVal<'a, 1> for &'a [u8] {
        type Mut = &'a mut [u8];

        fn parse_mut(
            _: &'a mut Self::Metadata,
            [slice]: [&'a mut [Self::Content]; 1],
        ) -> Self::Mut {
            slice
        }
    }

    // Map configuration used by most tests: `usize` keys, byte-slice values.
    struct TestConfig;
    impl Config<1> for TestConfig {
        const PREFIX: &'static str = "cs2s-map-unittests";

        const MAGIC: u32 = 0x12345678;

        type Key = usize;

        type Val<'a> = &'a [u8];

        type Hasher = DefaultHasher;
    }

    // Key-value pairs shared by the byte-slice tests.
    const DATA_PAIRS: &[(usize, &[u8])] = &[
        (0, &[1, 2, 3]),
        (1, &[4, 5, 6]),
        (2, &[7, 8, 9]),
        (3, &[10, 11]),
        (4, &[12, 13, 14, 15]),
    ];

    // A value with metadata and two large content slices, exercising alignment
    // and multi-slice (`N = 2`) handling.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct TestLargeStruct<'a>(usize, &'a [[u32; 64]], &'a [[u32; 64]]);

    #[cfg_attr(all(coverage_nightly, test), coverage(off))]
    impl<'a> Val<'a, 2> for TestLargeStruct<'a> {
        type Metadata = usize;

        type Content = [u32; 64];

        fn metadata(&self) -> &Self::Metadata {
            &self.0
        }

        fn content(&self) -> [&[Self::Content]; 2] {
            [self.1, self.2]
        }

        fn parse(
            metadata: &'a Self::Metadata,
            [slice1, slice2]: &[&'a [Self::Content]; 2],
        ) -> Self {
            TestLargeStruct(*metadata, slice1, slice2)
        }
    }

    // Map configuration for the large, two-slice value type.
    struct TestLargeConfig;
    impl Config<2> for TestLargeConfig {
        const PREFIX: &'static str = "cs2s-map-unittests-large";

        const MAGIC: u32 = 0x87654321;

        type Key = usize;

        type Val<'a> = TestLargeStruct<'a>;

        type Hasher = DefaultHasher;
    }

    // Key-value pairs shared by the large-value tests.
    const LARGE_DATA_PAIRS: &[(usize, TestLargeStruct)] = &[
        (
            0,
            TestLargeStruct(
                0xff,
                &[[0x55; 64], [0xff; 64], [0xcc; 64]],
                &[[0x77; 64], [0x88; 64], [0x99; 64]],
            ),
        ),
        (
            1,
            TestLargeStruct(
                0x11,
                &[[0x22; 64], [0x33; 64], [0x44; 64]],
                &[[0x55; 64], [0x66; 64]],
            ),
        ),
        (
            2,
            TestLargeStruct(
                0x22,
                &[[0x33; 64], [0x44; 64], [0x55; 64]],
                &[[0x66; 64], [0x77; 64], [0x88; 64]],
            ),
        ),
    ];

    /// Convenient macro for looking up an entry in the map.
    macro_rules! owner_lookup {
        ($handle:expr, $pair:expr) => {
            $handle.lookup(&$pair.0)
        };
    }

    /// Convenient macro for looking up an entry in the map from a borrowed map.
    ///
    /// Expands to the same call as `owner_lookup!`, but the borrowed map's
    /// `lookup` returns a `Result` (it can report `EnlargementNeeded`).
    macro_rules! borrowed_lookup {
        ($handle:expr, $pair:expr) => {
            $handle.lookup(&$pair.0)
        };
    }

    /// Convenient macro for asserting the lookup result.
    macro_rules! owner_lookup_assert {
        ($handle:expr, [$(
            $pair:expr
        ),+ $(,)?]) => {
            $(
                assert_eq!(owner_lookup!($handle, $pair), Some($pair.1));
            )+
        };
    }

    /// Convenient macro for asserting the lookup result from a borrowed map.
    macro_rules! borrowed_lookup_assert {
        ($handle:expr, [$(
            $pair:expr
        ),+ $(,)?]) => {
            $(
                assert_eq!(borrowed_lookup!($handle, $pair).unwrap(), Some($pair.1));
            )+
        };
    }

    /// Convenient macro for inserting an entry into the map.
    macro_rules! owner_insert_assert {
        ($handle:expr, [$(
            $pair:expr
        ),+ $(,)?]) => {
            $(
                assert!($handle.insert(&$pair.0, &$pair.1).is_ok());
                owner_lookup_assert!($handle, [$pair]);
            )+
        };
    }

    #[test]
    fn basic_ops() {
        const NAME: &str = "test-basic-ops";

        // Create a new map.
        let mut map = SharedMemMap::<1, TestConfig, Owned>::new(NAME, 2, 1).unwrap();

        // Should not find any entry in the map.
        assert!(owner_lookup!(map, DATA_PAIRS[0]).is_none());

        // Insert three entries into the map and check if they are found.
        owner_insert_assert!(map, [&DATA_PAIRS[0], &DATA_PAIRS[1], &DATA_PAIRS[2]]);

        // Confirm that all the inserted entries are found.
        owner_lookup_assert!(map, [&DATA_PAIRS[0], DATA_PAIRS[1], &DATA_PAIRS[2],]);

        // Insert a more entry into the map to test collision handling.
        owner_insert_assert!(map, [&DATA_PAIRS[3]]);

        // Check if the entries can still be found.
        owner_lookup_assert!(
            map,
            [DATA_PAIRS[0], DATA_PAIRS[1], DATA_PAIRS[2], DATA_PAIRS[3]]
        );
    }

    #[test]
    fn large_value() {
        const NAME: &str = "test-large-value";

        // Create a new map.
        let mut map = SharedMemMap::<2, TestLargeConfig, Owned>::new(NAME, 2, 16).unwrap();

        // Insert a large entry into the map.
        owner_insert_assert!(
            map,
            [
                &LARGE_DATA_PAIRS[0],
                &LARGE_DATA_PAIRS[1],
                &LARGE_DATA_PAIRS[2],
            ]
        );

        // Check if the large entry can be found.
        owner_lookup_assert!(
            map,
            [
                LARGE_DATA_PAIRS[0],
                LARGE_DATA_PAIRS[1],
                LARGE_DATA_PAIRS[2],
            ]
        );
    }

    #[test]
    fn collision() {
        const NAME: &str = "test-collision";

        // Create a new map with a single slot so every insertion collides.
        let mut map = SharedMemMap::<1, TestConfig, Owned>::new(NAME, 1, 1).unwrap();

        for (i, pair) in DATA_PAIRS.iter().enumerate() {
            // Insert a new entry into the map.
            owner_insert_assert!(map, [pair]);

            // Check if the inserted entry can be found.
            for pair in DATA_PAIRS.iter().take(i + 1) {
                owner_lookup_assert!(map, [pair]);
            }

            // Check if the other entries cannot be found.
            for pair in DATA_PAIRS.iter().skip(i + 1) {
                assert!(owner_lookup!(map, pair).is_none());
            }
        }
    }

    #[test]
    fn sharing() {
        const NAME: &str = "test-sharing";

        // Create a new map.
        let mut map = SharedMemMap::<1, TestConfig, Owned>::new(NAME, 1, 2).unwrap();

        // Open the map as borrowed.
        let ro_map = SharedMemMap::<1, TestConfig, Borrowed>::open(NAME).unwrap();

        // Insert some entries into the map.
        owner_insert_assert!(map, [&DATA_PAIRS[0], &DATA_PAIRS[1]]);

        // Check if the entries can be found from the borrowed map.
        borrowed_lookup_assert!(ro_map, [&DATA_PAIRS[0], &DATA_PAIRS[1]]);
    }

    #[test]
    fn space_exhaustion() {
        const NAME: &str = "test-space-exhaustion";

        // Create a new map.
        let mut map = SharedMemMap::<1, TestConfig, Owned>::new(NAME, 256, 1).unwrap();

        // Insert entries into the map until the space is exhausted.
        for i in 0.. {
            let pair = (i, [0; 64].as_ref());
            if let Err(Error::MemNotEnough { required, free }) = map.insert(&pair.0, &pair.1) {
                assert_eq!(map.capacity() - map.usage(), free);
                assert!(required > free);
                break;
            } else {
                owner_lookup_assert!(map, [pair]);
            }
        }
    }

    #[test]
    fn space_enlargement() {
        const NAME: &str = "test-space-enlargement";

        // Create a new map.
        let mut map = SharedMemMap::<1, TestConfig, Owned>::new(NAME, 256, 1).unwrap();

        // Open the map as borrowed.
        let mut ro_map = SharedMemMap::<1, TestConfig, Borrowed>::open(NAME).unwrap();

        let mut enlargement_count = 0;

        for i in 0.. {
            let pair = (i, [0; 64].as_ref());
            if let Err(Error::MemNotEnough { .. }) = map.insert(&pair.0, &pair.1) {
                enlargement_count += 1;
                map.enlarge(1).unwrap();
                owner_insert_assert!(map, [&pair]);
                assert!(ro_map.need_enlargement());
                assert_eq!(
                    borrowed_lookup!(ro_map, pair),
                    Err(Error::EnlargementNeeded)
                );
            }
            assert!(ro_map.usage() <= ro_map.capacity());
            ro_map.enlarge().unwrap();
            borrowed_lookup_assert!(ro_map, [pair]);

            if enlargement_count >= 100 {
                break;
            }
        }
    }

    #[test]
    fn entry_boundary() {
        const NAME: &str = "test-entry-boundary";
        const STRUCT: TestLargeStruct<'_> = TestLargeStruct(0xff, &[[0x55; 64], [0xcc; 64]], &[]);

        // Create a new map.
        let mut map = SharedMemMap::<2, TestLargeConfig, Owned>::new(NAME, 1, 1).unwrap();

        // Open the map as borrowed.
        let mut ro_map = SharedMemMap::<2, TestLargeConfig, Borrowed>::open(NAME).unwrap();

        // Manually set the top of the map to make it greater than the global limit.
        map.inner.set_top(map.inner.global_limit() + 8);

        // Enlarge the map and insert an entry.
        map.enlarge(1).unwrap();
        owner_insert_assert!(map, [(0, STRUCT)]);

        // The borrowed map should need enlargement.
        assert!(ro_map.need_enlargement());

        // The entry should not be found from the borrowed map due to the boundary.
        assert!(matches!(ro_map.lookup(&0), Err(Error::EnlargementNeeded)));

        // Enlarge the borrowed map and check if the entry can be found.
        ro_map.enlarge().unwrap();
        let val = ro_map.lookup(&0).unwrap().unwrap();
        assert_eq!(val, STRUCT);

        // Manually set the top of the map to the boundary of the entry.
        map.inner.set_top(
            map.inner.global_limit() - mem::size_of::<Entry<'_, 2, usize, TestLargeStruct>>() - 8,
        );

        // Enlarge the map and insert an entry.
        map.enlarge(1).unwrap();
        owner_insert_assert!(map, [(1, STRUCT)]);

        // The borrowed map should need enlargement.
        assert!(ro_map.need_enlargement());

        // The entry should not be found from the borrowed map due to the boundary.
        assert!(matches!(ro_map.lookup(&1), Err(Error::EnlargementNeeded)));

        // Enlarge the borrowed map and check if the entry can be found.
        ro_map.enlarge().unwrap();
        let val = ro_map.lookup(&1).unwrap().unwrap();
        assert_eq!(val, STRUCT);
    }

    #[test]
    fn insert_existing_data() {
        const NAME: &str = "test-insert-existing-data";

        // Create a new map.
        let mut map = SharedMemMap::<1, TestConfig, Owned>::new(NAME, 1, 1).unwrap();

        // Insert an entry into the map.
        let usage = map.usage();
        owner_insert_assert!(map, [&DATA_PAIRS[0]]);
        assert!(map.usage() > usage);

        // Insert the same entry again; usage must not grow for a duplicate key.
        let usage = map.usage();
        owner_insert_assert!(map, [&DATA_PAIRS[0]]);
        assert_eq!(map.usage(), usage);
    }

    #[test]
    fn update_existing_data() {
        const NAME: &str = "test-update-existing-data";

        // Create a new map.
        let mut map = SharedMemMap::<1, TestConfig, Owned>::new(NAME, 1, 1).unwrap();

        // Check that we cannot get the mutable entry of the non-existing key.
        assert!(map.lookup_mut(&DATA_PAIRS[0].0).is_none());

        // Insert an entry into the map.
        owner_insert_assert!(
            map,
            [&DATA_PAIRS[0], &DATA_PAIRS[1], &(0xff, [0, 1, 2].as_ref())]
        );

        // Check that we cannot get the mutable entry of the non-existing key, even when
        // collision happens.
        assert!(map.lookup_mut(&DATA_PAIRS[2].0).is_none());

        // Get the mutable entry and update the content.
        let content = map.lookup_mut(&0xff).unwrap();

        assert!(content.len() == 3);
        assert_ne!(content[0], 0xff);
        content[0] = 0xff;

        // Check if the content is updated.
        assert_eq!(map.lookup(&0xff), Some([0xff, 1, 2].as_ref()),);
    }

    #[test]
    fn iterator() {
        const NAME: &str = "test-iterator";

        // Create a new map.
        let mut map = SharedMemMap::<1, TestConfig, Owned>::new(NAME, 2, 16).unwrap();

        // Open the map as borrowed.
        let ro_map = SharedMemMap::<1, TestConfig, Borrowed>::open(NAME).unwrap();

        // Insert some entries into the map.
        for i in 0..128 {
            let pair = (i, [0; 64].as_ref());
            owner_insert_assert!(map, [&pair]);
        }

        // Iterate over the entries and check if they are found.
        for i in 0..128 {
            let pair = (i, [0; 64].as_ref());
            borrowed_lookup_assert!(ro_map, [&pair]);
            assert!(ro_map.slots().flatten().any(|entry| entry == pair));
        }
    }

    #[test]
    fn iterator_out_of_limit() {
        const NAME: &str = "test-iterator-out-of-limit";

        // Create a new map.
        let mut map = SharedMemMap::<1, TestConfig, Owned>::new(NAME, 2, 1).unwrap();

        // Open the map as borrowed.
        let mut ro_map = SharedMemMap::<1, TestConfig, Borrowed>::open(NAME).unwrap();

        let mut enlargement_count = 0;

        // Insert entries into the map until the space is exhausted.
        for i in 0.. {
            let pair = (i, [0; 64].as_ref());
            if let Err(Error::MemNotEnough { .. }) = map.insert(&pair.0, &pair.1) {
                enlargement_count += 1;

                // Enlarge the map and insert the entry again.
                map.enlarge(1).unwrap();
                owner_insert_assert!(map, [&pair]);

                // Iterate over the entries from the borrowed map and check if the
                // inserted entry cannot be found.
                assert!(!ro_map.slots().flatten().any(|entry| entry == pair));

                // Enlarge the borrowed map and check if the inserted entry can be found.
                assert!(ro_map.need_enlargement());
                ro_map.enlarge().unwrap();
                assert!(ro_map.slots().flatten().any(|entry| entry == pair));
            } else {
                // Iterate over the entries from the borrowed map and check if the
                // inserted entry can be found.
                assert!(ro_map.slots().flatten().any(|entry| entry == pair));
            }

            if enlargement_count >= 100 {
                break;
            }
        }
    }

    #[test]
    fn size_too_small() {
        assert!(matches!(
            SharedMemMap::<1, TestConfig, Owned>::new("test-size-too-small", 1, 0),
            Err(Error::MemNotEnough { .. }),
        ));

        assert!(matches!(
            SharedMemMap::<1, TestConfig, Owned>::new("test-size-too-small", 512, 1),
            Err(Error::MemNotEnough { .. }),
        ));

        assert!(matches!(
            SharedMemMap::<1, TestConfig, Owned>::new("test-size-too-small", 1024, 2),
            Err(Error::MemNotEnough { .. }),
        ));
    }

    #[test]
    fn invalid_name() {
        // A name with an interior nul byte cannot become a C string.
        assert!(matches!(
            SharedMemMap::<1, TestConfig, Owned>::new("test-invalid\0-name", 1, 1),
            Err(Error::InvalidName { .. }),
        ));

        assert!(matches!(
            SharedMemMap::<1, TestConfig, Borrowed>::open("test-invalid\0-name"),
            Err(Error::InvalidName { .. }),
        ));

        // A name with a slash is rejected by the OS with EINVAL.
        assert!(matches!(
            SharedMemMap::<1, TestConfig, Owned>::new("test/invalid-name", 1, 1),
            Err(Error::LibcError {
                errno: libc::EINVAL,
            }),
        ));

        assert!(matches!(
            SharedMemMap::<1, TestConfig, Borrowed>::open("test/invalid-name"),
            Err(Error::LibcError {
                errno: libc::EINVAL,
            }),
        ));
    }

    #[test]
    fn nonexistent_name() {
        assert!(matches!(
            SharedMemMap::<1, TestConfig, Borrowed>::open("test-nonexistent-name"),
            Err(Error::LibcError {
                errno: libc::ENOENT
            }),
        ));
    }

    #[test]
    fn config() {
        const NAME: &str = "test-config";

        // Create a new map.
        let map = SharedMemMap::<1, TestConfig, Owned>::new(NAME, 1, 1).unwrap();

        // Open the map.
        let ro_map = SharedMemMap::<1, TestConfig, Borrowed>::open(NAME).unwrap();

        ro_map.validate().unwrap();

        // Corrupt the magic number and confirm validation reports the mismatch.
        map.inner.magic = !TestConfig::MAGIC;
        assert!(matches!(
            ro_map.validate(),
            Err(Error::InvalidMagic {
                actual,
                expected: TestConfig::MAGIC,
            }) if actual == !TestConfig::MAGIC,
        ));

        // Keep `map` alive so the shared memory object is not torn down early.
        std::hint::black_box(map);
    }
}
