// SPDX-License-Identifier: Mulan PSL v2
/*
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This software is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *         http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
 */

use std::{
    io,
    os::fd::{AsFd, AsRawFd, BorrowedFd},
    ptr,
};

use rustix::{io::Errno, mm};
use tracing::trace;

use super::mem;

pub mod linear {
    use super::*;

    /// A low-level RAII wrapper for a linear memory mapping created with `mmap`.
    ///
    /// This internal struct is responsible for managing the lifecycle of the
    /// virtual memory region and holding the underlying file descriptor. The
    /// mapping is `READ | WRITE` and `SHARED`, so writes through it reach the
    /// backing file. Dropping the wrapper unmaps the region, then drops `file`.
    #[derive(Debug)]
    pub struct LinearMmap<F: AsFd> {
        /// The underlying file; kept alive for as long as the mapping exists.
        file: F,
        /// Raw pointer to the base of the mapped virtual memory.
        ptr: ptr::NonNull<u8>,
        /// The total length of the entire virtual mapping in bytes.
        /// Always non-zero: `mmap` rejects `len == 0` requests.
        len: usize,
    }

    impl<F: AsFd> LinearMmap<F> {
        /// Maps `len` bytes of `file`, starting at byte `offset`, into memory.
        ///
        /// # Errors
        ///
        /// Returns `EINVAL` when `len == 0`; otherwise forwards any error from
        /// the underlying `mmap(2)` call (`rustix::io::Errno` converts into
        /// `io::Error` via `?`/`.into()`). `offset` must meet the kernel's
        /// page-alignment requirement or `mmap` itself fails with `EINVAL`.
        pub fn mmap(file: F, len: usize, offset: u64) -> io::Result<Self> {
            if len == 0 {
                return Err(Errno::INVAL.into());
            }

            trace!(
                "[Mmap] Mapping linear region, fd={}, len={}, offset={}...",
                file.as_fd().as_raw_fd(),
                len,
                offset
            );

            // Map the memory. SHARED makes writes visible to other mappings of
            // the file; POPULATE asks the kernel to prefault the pages up front.
            // SAFETY:
            // 1. We pass `ptr::null_mut()` to let the OS choose the address.
            // 2. `len` is checked to be non-zero above.
            // 3. `file.as_fd()` provides a valid borrowed file descriptor.
            // 4. We check the return value for error (MAP_FAILED) via `Errno`.
            let ptr = unsafe {
                let addr = mm::mmap(
                    ptr::null_mut(),
                    len,
                    mm::ProtFlags::READ | mm::ProtFlags::WRITE,
                    mm::MapFlags::SHARED | mm::MapFlags::POPULATE,
                    file.as_fd(),
                    offset,
                )?;
                ptr::NonNull::new(addr.cast::<u8>()).ok_or(Errno::NOMEM)?
            };

            // Performance Hints: MADV_DONTFORK keeps the mapping out of forked
            // children. Best-effort only — failure is deliberately ignored via
            // `.ok()` because the mapping is already fully valid at this point.
            // SAFETY: `ptr` is a valid pointer returned by `mmap` and `len` matches the mapping size.
            unsafe {
                mm::madvise(ptr.as_ptr().cast(), len, mm::Advice::LinuxDontFork).ok();
            }

            Ok(Self { file, ptr, len })
        }

        /// Returns a reference to the underlying file object.
        #[inline(always)]
        pub fn file(&self) -> &F {
            &self.file
        }

        /// Returns a raw pointer to the base of the mapped virtual memory.
        ///
        /// The pointer stays valid until `self` is dropped; synchronizing
        /// access through it is the caller's responsibility.
        #[inline(always)]
        pub fn base_ptr(&self) -> *mut u8 {
            self.ptr.as_ptr()
        }

        /// Returns the length of the mapping in bytes.
        #[inline(always)]
        pub fn len(&self) -> usize {
            self.len
        }

        /// Returns true if the mapping has a length of 0.
        /// (Never true for a constructed value: `mmap` rejects `len == 0`.)
        #[inline(always)]
        pub fn is_empty(&self) -> bool {
            self.len == 0
        }
    }

    // Allow borrowing the underlying FD from the Mmap object.
    impl<F: AsFd> AsFd for LinearMmap<F> {
        #[inline]
        fn as_fd(&self) -> BorrowedFd<'_> {
            self.file.as_fd()
        }
    }

    // SAFETY: Mmap owns the underlying OS resource (virtual memory region + fd);
    // nothing about the mapping is tied to the creating thread, so the wrapper
    // may move between threads whenever `F` itself is `Send`.
    unsafe impl<F: AsFd + Send> Send for LinearMmap<F> {}

    // SAFETY: The memory is accessible from multiple threads; all `&self`
    // methods only read plain fields. Callers dereferencing `base_ptr()`
    // must provide their own synchronization.
    unsafe impl<F: AsFd + Sync> Sync for LinearMmap<F> {}

    impl<F: AsFd> Drop for LinearMmap<F> {
        fn drop(&mut self) {
            if !self.is_empty() {
                // SAFETY: `ptr`/`len` describe exactly the region returned by
                // `mmap` in the constructor, so unmapping it here is sound.
                // The `file` will be closed automatically when `self.file` is dropped.
                let _ = unsafe { mm::munmap(self.base_ptr().cast(), self.len) };
            }
        }
    }
}

pub use linear::LinearMmap;

pub mod mirrored {
    use super::*;

    /// A low-level RAII wrapper for a mirrored memory mapping created with `mmap`.
    ///
    /// This internal struct is responsible for managing the lifecycle of the
    /// virtual memory region and holding the underlying file descriptor.
    ///
    /// # Memory Layout
    ///
    /// The virtual memory layout consists of three contiguous regions:
    ///
    /// ```text
    /// [ Header (A) |  Data (B)  | Mirror (B') ]
    /// ```
    ///
    /// *   **A**: The header region containing metadata.
    /// *   **B**: The main data region.
    /// *   **B'**: The mirror region, which maps to the same physical memory as **B**.
    #[derive(Debug)]
    pub struct MirroredMmap<F: AsFd> {
        /// The underlying file; kept alive for as long as the mapping exists.
        file: F,
        /// Raw pointer to the base of the mapped virtual memory.
        ptr: ptr::NonNull<u8>,
        /// The total length of the entire virtual mapping in bytes
        /// (`header_len + 2 * data_len`).
        len: usize,
        /// The size of the backed file (header + data).
        file_len: usize,
        /// The length of the header region.
        header_len: usize,
        /// The length of the main data region (also the length of the mirror).
        data_len: usize,
    }

    impl<F: AsFd> MirroredMmap<F> {
        /// Creates a new Mirrored Memory Mapping.
        ///
        /// The backing `file` is expected to hold at least
        /// `header_len + data_len` bytes; the file bytes at offset `header_len`
        /// back both the data region (B) and the mirror (B').
        ///
        /// # Errors
        ///
        /// * `EINVAL` — `header_len` or `data_len` is not page-aligned, or
        ///   `data_len` is zero.
        /// * `EOVERFLOW` — the combined lengths overflow `usize`.
        /// * Any error reported by the underlying `mmap(2)` calls.
        pub fn mmap(file: F, header_len: usize, data_len: usize) -> io::Result<Self> {
            if !mem::is_page_aligned(header_len) || !mem::is_page_aligned(data_len) {
                return Err(Errno::INVAL.into());
            }
            // A zero-length data region cannot be mirrored. Without this check
            // the request would only fail much later, at the zero-length mirror
            // `mmap` below, with a less obvious kernel EINVAL. Rejecting it up
            // front is consistent with `LinearMmap::mmap`'s `len == 0` check.
            if data_len == 0 {
                return Err(Errno::INVAL.into());
            }

            let file_len = header_len.checked_add(data_len).ok_or(Errno::OVERFLOW)?;
            let len = file_len.checked_add(data_len).ok_or(Errno::OVERFLOW)?;

            trace!(
                "[Mmap] Reserving virtual address, fd={}, len={}...",
                file.as_fd().as_raw_fd(),
                len
            );

            // Reserve Virtual Address Space (Anonymous)
            // We create a hole in the address space where we will later place our fixed mappings.
            // SAFETY:
            // 1. NULL let OS pick address.
            // 2. PRIVATE | NORESERVE ensures we just reserve the VMA range without committing physical memory.
            let ptr = ptr::NonNull::new(unsafe {
                mm::mmap_anonymous(
                    ptr::null_mut(),
                    len,
                    mm::ProtFlags::empty(),
                    mm::MapFlags::PRIVATE | mm::MapFlags::NORESERVE,
                )?
            } as *mut u8)
            .ok_or(Errno::NOMEM)?;

            // Construct the struct immediately: if any of the fixed mappings
            // below fail, dropping this value unmaps the whole reservation, so
            // no address-space leak is possible on the error paths.
            let mmap = MirroredMmap {
                file,
                ptr,
                len,
                file_len,
                header_len,
                data_len,
            };

            let base_ptr = mmap.ptr.as_ptr();
            let fd = mmap.file.as_fd();

            // Map Header Region & Data Region (A + B)
            trace!(
                "[Mmap] Mapping header & data region, fd={}, len={}...",
                fd.as_raw_fd(),
                file_len
            );

            // SAFETY:
            // 1. `target` (base_ptr) is guaranteed to be the start of the region we just reserved anonymously.
            // 2. MAP_FIXED atomically replaces the anonymous mapping with the file mapping.
            // 3. `file_len` is < `len`, so we stay within bounds.
            unsafe {
                let target = base_ptr.cast();
                let mapped = mm::mmap(
                    target,
                    file_len,
                    mm::ProtFlags::READ | mm::ProtFlags::WRITE,
                    mm::MapFlags::SHARED | mm::MapFlags::FIXED | mm::MapFlags::POPULATE,
                    fd,
                    0,
                )?;
                if mapped != target {
                    return Err(Errno::ADDRNOTAVAIL.into());
                }
            }

            // Map Mirror Region (B')
            trace!(
                "[Mmap] Mapping mirror region, fd={}, len={}...",
                fd.as_raw_fd(),
                data_len
            );

            // SAFETY:
            // 1. `target` (base_ptr + file_len) is within the bounds of the reserved region (`len`).
            // 2. MAP_FIXED replaces the remaining part of the anonymous reservation.
            // 3. The file offset `header_len` correctly aligns to the data region in the file.
            unsafe {
                let target = base_ptr.add(file_len).cast();
                let mapped = mm::mmap(
                    target,
                    data_len,
                    mm::ProtFlags::READ | mm::ProtFlags::WRITE,
                    mm::MapFlags::SHARED | mm::MapFlags::FIXED | mm::MapFlags::POPULATE,
                    fd,
                    header_len as u64,
                )?;
                if mapped != target {
                    return Err(Errno::ADDRNOTAVAIL.into());
                }
            }

            // Performance Hints (best-effort; failures ignored via `.ok()`).
            // `data_len * 2` cannot overflow: `header_len + 2 * data_len == len`
            // was computed with `checked_add` above.
            // SAFETY: All pointers and lengths are within the valid range managed by this struct.
            unsafe {
                mm::madvise(base_ptr.cast(), len, mm::Advice::LinuxDontFork).ok();
                mm::madvise(base_ptr.cast(), header_len, mm::Advice::Random).ok();
                mm::madvise(
                    base_ptr.add(header_len).cast(),
                    data_len * 2,
                    mm::Advice::Sequential,
                )
                .ok();
            }

            Ok(mmap)
        }

        /// Returns a reference to the underlying file object.
        #[inline(always)]
        pub fn file(&self) -> &F {
            &self.file
        }

        /// Returns a raw pointer to the base of the mapped virtual memory.
        #[inline(always)]
        pub fn base_ptr(&self) -> *mut u8 {
            self.ptr.as_ptr()
        }

        /// Returns the total length of the virtual address space reservation.
        #[inline(always)]
        pub fn len(&self) -> usize {
            self.len
        }

        /// Returns true if the mapping has a length of 0.
        /// (Never true for a constructed value: `data_len == 0` is rejected.)
        #[inline(always)]
        pub fn is_empty(&self) -> bool {
            self.len == 0
        }

        /// Returns the length of the backed file content (A + B).
        #[inline(always)]
        pub fn file_len(&self) -> usize {
            self.file_len
        }

        /// Returns a raw pointer to the start of the header region (A).
        #[inline(always)]
        pub fn header_ptr(&self) -> *mut u8 {
            self.base_ptr()
        }

        /// Returns the length of the header region (A).
        #[inline(always)]
        pub fn header_len(&self) -> usize {
            self.header_len
        }

        /// Returns a raw pointer to the start of the data region (B).
        #[inline(always)]
        pub fn data_ptr(&self) -> *mut u8 {
            // SAFETY: `header_len` is guaranteed to be within the allocation bounds.
            unsafe { self.base_ptr().add(self.header_len) }
        }

        /// Returns the length of the usable data region (B).
        #[inline(always)]
        pub fn data_len(&self) -> usize {
            self.data_len
        }

        /// Returns a raw pointer to the start of the mirror region (B').
        /// Bytes at `mirror_ptr()[i]` alias `data_ptr()[i]` for `i < data_len`.
        #[inline(always)]
        pub fn mirror_ptr(&self) -> *mut u8 {
            // SAFETY: `file_len` is guaranteed to be within the allocation bounds.
            unsafe { self.base_ptr().add(self.file_len) }
        }
    }

    // Allow borrowing the underlying FD from the MirroredMmap object.
    impl<F: AsFd> AsFd for MirroredMmap<F> {
        #[inline]
        fn as_fd(&self) -> BorrowedFd<'_> {
            self.file.as_fd()
        }
    }

    // SAFETY: Mmap owns the underlying OS resource (virtual memory region + fd);
    // nothing is tied to the creating thread, so moving the wrapper is sound
    // whenever `F` itself is `Send`.
    unsafe impl<F: AsFd + Send> Send for MirroredMmap<F> {}

    // SAFETY: The memory is accessible from multiple threads; all `&self`
    // methods only read plain fields. Callers dereferencing the raw pointers
    // must provide their own synchronization.
    unsafe impl<F: AsFd + Sync> Sync for MirroredMmap<F> {}

    impl<F: AsFd> Drop for MirroredMmap<F> {
        fn drop(&mut self) {
            if !self.is_empty() {
                // SAFETY: `ptr`/`len` describe exactly the region reserved in
                // the constructor; one munmap removes all three sub-mappings.
                // The `file` will be closed automatically when `self.file` is dropped.
                let _ = unsafe { mm::munmap(self.base_ptr().cast(), self.len) };
            }
        }
    }
}

pub use mirrored::MirroredMmap;

#[cfg(test)]
mod tests {
    use std::os::fd::OwnedFd;

    use rustix::fs;

    use super::*;

    /// Builds an anonymous in-memory file of `size` bytes via memfd.
    fn create_memfd(name: &str, size: usize) -> OwnedFd {
        let memfd = fs::memfd_create(name, fs::MemfdFlags::empty()).expect("memfd_create failed");
        fs::ftruncate(&memfd, size as u64).expect("ftruncate failed");
        memfd
    }

    #[test]
    fn test_linear_mmap_basic() {
        let len = mem::page_size() * 2;
        let backing = create_memfd("test_linear", len);

        // Creation succeeds and reports the requested geometry.
        let map = LinearMmap::mmap(backing, len, 0).expect("mmap failed");
        assert!(!map.is_empty());
        assert_eq!(map.len(), len);

        // Round-trip bytes through the first and last addresses of the mapping.
        unsafe {
            let base = map.base_ptr();
            base.write(0xAA);
            base.add(len - 1).write(0xBB);

            assert_eq!(base.read(), 0xAA);
            assert_eq!(base.add(len - 1).read(), 0xBB);
        }
    }

    #[test]
    fn test_linear_mmap_invalid_len() {
        let backing = create_memfd("test_invalid", 4096);
        // Zero-length mappings are rejected up front with InvalidInput.
        let err = LinearMmap::mmap(backing, 0, 0).unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::InvalidInput);
    }

    #[test]
    fn test_mirrored_mmap_magic_property() {
        let page = mem::page_size();
        let (header_len, data_len) = (page, page * 2); // 1-page header, 2-page data
        let total_file_len = header_len + data_len;

        let backing = create_memfd("test_mirrored", total_file_len);
        let map = MirroredMmap::mmap(backing, header_len, data_len).expect("mirrored mmap failed");

        // Geometry sanity checks.
        assert_eq!(map.header_len(), header_len);
        assert_eq!(map.data_len(), data_len);
        assert_eq!(map.file_len(), total_file_len);

        unsafe {
            let data = map.data_ptr();
            let mirror = map.mirror_ptr();

            // Stores through B (first and last byte) ...
            data.write(0x11);
            data.add(data_len - 1).write(0x22);

            // ... must surface through B' at the same offsets.
            assert_eq!(mirror.read(), 0x11, "Mirror should reflect data write");
            assert_eq!(
                mirror.add(data_len - 1).read(),
                0x22,
                "Mirror end should reflect data end"
            );

            // And the reverse direction: a store through B' surfaces in B.
            mirror.add(5).write(0xCC);
            assert_eq!(
                data.add(5).read(),
                0xCC,
                "Data should reflect mirror write"
            );

            // The two views must be adjacent in virtual memory so a read can
            // run off the end of B straight into B' — the whole point of a
            // magic ring buffer.
            assert_eq!(
                data.add(data_len),
                mirror,
                "Mirror must immediately follow Data in virtual memory"
            );
        }
    }

    #[test]
    fn test_mirrored_mmap_alignment_check() {
        let page = mem::page_size();

        let backing = create_memfd("test_align", page * 4);
        let dup = || backing.as_fd().try_clone_to_owned().unwrap();

        // A misaligned header length is rejected with InvalidInput.
        let res = MirroredMmap::mmap(dup(), page - 1, page);
        assert!(res.is_err());
        assert_eq!(res.unwrap_err().kind(), io::ErrorKind::InvalidInput);

        // A misaligned data length is likewise rejected.
        let res = MirroredMmap::mmap(dup(), page, page + 1);
        assert!(res.is_err());
    }
}
