// SPDX-License-Identifier: Mulan PSL v2
/*
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This software is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *         http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
 */

use std::{
    fmt::{Debug, Display},
    hint, mem,
    ops::{Deref, DerefMut},
    ptr, slice,
    sync::{
        atomic::{fence, AtomicU64, AtomicUsize, Ordering},
        Arc,
    },
    time::{Duration, Instant},
};

use linux_futex::{Futex, Shared, TimedWaitError, WaitError};
use tracing::trace;

use crate::sys::cache::CacheAligned;

use super::{Error, Shmem};

/// A magic number used to identify a shared memory segment as a valid ShmemChannel.
///
/// NOTE(review): the value decodes to ASCII "SHMECCHN", not "SHMEMCHN" as
/// previously claimed (byte 5 is 0x43 'C', not 0x4D 'M'). The value is kept
/// as-is for compatibility with segments initialized by existing builds.
const SHM_CHANNEL_MAGIC: u64 = 0x53484D454343484E;

/// The number of spin-loop iterations before blocking on a futex.
const SPIN_WAIT_ITERATIONS: u32 = 819200;

/// The number of spin iterations between timeout and status checks.
const SPIN_CHECK_ITERATIONS: u32 = 81920;

/// Futex state: the associated party is idle.
const FUTEX_IDLE: u32 = 0;

/// Futex state: the associated party is about to block and wait.
const FUTEX_WAITING: u32 = 1;

/// The header structure of the shared memory channel.
///
/// This struct lives at the start of the shared segment and is accessed by
/// two independent processes, so its layout is part of the wire format:
/// `#[repr(C)]` pins the field order, and any change to the fields breaks
/// compatibility with peers built from older code.
///
/// `head`, `tail`, and the two futexes are each cache-aligned to keep the
/// reader-owned and writer-owned cursors on separate cache lines (avoiding
/// false sharing between the two sides).
#[repr(C)]
struct ChannelHeader {
    /// Magic number to verify the buffer is initialized and valid.
    /// Zero means "uninitialized"; `SHM_CHANNEL_MAGIC` means "live".
    magic: AtomicU64,
    /// Cursor for the reader side (monotonically increasing; wraps via `wrapping_add`).
    head: CacheAligned<AtomicUsize>,
    /// Cursor for the writer side (monotonically increasing; wraps via `wrapping_add`).
    tail: CacheAligned<AtomicUsize>,
    /// Futex for the reader side (set to `FUTEX_WAITING` while the reader sleeps).
    reader: CacheAligned<Futex<Shared>>,
    /// Futex for the writer side (set to `FUTEX_WAITING` while the writer sleeps).
    writer: CacheAligned<Futex<Shared>>,
}

/// A high-performance, single-producer, single-consumer (SPSC) queue over shared memory.
///
/// This implementation relies on the underlying `Shmem` to use a mirrored memory mapping
/// technique. This allows the ring buffer to be treated as a contiguous block of memory,
/// eliminating the need for software to handle wrap-around logic.
#[derive(Debug)]
pub struct Channel {
    /// Owns the mapping; dropped last so the raw pointers below stay valid.
    shmem: Shmem,
    /// Points at the `ChannelHeader` at the start of the shared segment.
    header: *mut ChannelHeader,
    /// Start of the (mirrored) ring-buffer data region.
    buffer: *mut u8,
    mask: usize, // Pre-calculated mask (capacity - 1) for fast bitwise modulo.
}

impl Channel {
    /// Wraps the shared memory segment in a channel.
    ///
    /// Under a file lock: if the magic word is zero, this process initializes
    /// the header (owner path); otherwise it validates the magic and performs
    /// futex recovery so a crashed peer cannot leave the other side deadlocked
    /// (connector path).
    ///
    /// # Errors
    ///
    /// - `InvalidShmemSize` if the header region cannot hold a `ChannelHeader`.
    /// - `InvalidCapacity` if the data region size is not a power of two.
    /// - `LockFailed` if the initialization file lock cannot be acquired.
    /// - `InvalidMagic` if the segment contains an unrecognized header.
    fn initialize(shmem: Shmem) -> Result<Self, Error> {
        let mmap = shmem.mmap();
        let header_ptr = mmap.header_ptr().cast::<ChannelHeader>();
        let buffer_ptr = mmap.data_ptr();

        let header_len = mem::size_of::<ChannelHeader>();
        // NOTE(review): despite the name, `buffer_len` is the size of the
        // mapping's *header region*, not of the data buffer.
        let buffer_len = mmap.header_len();
        let capacity = mmap.data_len();

        if buffer_len < header_len {
            return Err(Error::InvalidShmemSize {
                required: header_len,
                capacity: buffer_len,
            });
        }

        // Enforce power-of-two capacity for bitwise masking optimization.
        if !capacity.is_power_of_two() {
            return Err(Error::InvalidCapacity { capacity });
        }

        // Use a file lock for safe, atomic initialization.
        {
            let _guard = shmem.flock().map_err(Error::LockFailed)?;

            // SAFETY: The `Shmem` object ensures the pointer is valid and
            // the file lock prevents data races during initialization.
            let header = unsafe { &*header_ptr };

            let magic_value = header.magic.load(Ordering::Acquire);
            if magic_value == 0 {
                // Owner process: initialize the header.
                trace!("[Shmem] Channel '{}': Initializing...", shmem);
                // SAFETY: The `header_ptr` is valid, and the file lock guarantees exclusive access.
                //
                // NOTE(review): the shared reference `header` created above is
                // still live across this raw write to the same location;
                // consider re-deriving the reference after the write — confirm
                // this is sound under Rust's aliasing rules (e.g. with Miri).
                unsafe {
                    ptr::write(
                        header_ptr,
                        ChannelHeader {
                            magic: AtomicU64::new(0),
                            head: CacheAligned::new(AtomicUsize::new(0)),
                            tail: CacheAligned::new(AtomicUsize::new(0)),
                            reader: CacheAligned::new(Futex::new(FUTEX_IDLE)),
                            writer: CacheAligned::new(Futex::new(FUTEX_IDLE)),
                        },
                    );
                }
                // Release-publish the fully-written header to connecting peers.
                header.magic.store(SHM_CHANNEL_MAGIC, Ordering::Release);
                trace!("[Shmem] Channel '{}': Initialized.", shmem);
            } else {
                trace!("[Shmem] Channel '{}': Connecting...", shmem);
                if magic_value != SHM_CHANNEL_MAGIC {
                    return Err(Error::InvalidMagic);
                }

                // Recovery:
                // Force reset both futexes to IDLE (0) and wake potential waiters.
                //
                // 1. If the peer crashed leaving a '1' (WAITING), the reset fixes the deadlock.
                // 2. If the peer is sleeping (valid '1'), the reset + wake will cause a
                //    spurious wakeup. The peer will wake up, loop around, see the condition
                //    is not met, and go back to sleep (safely setting it back to '1').
                //
                // Use SeqCst to ensure the store (reset) is globally visible before the wake.
                header.reader.value.store(FUTEX_IDLE, Ordering::SeqCst);
                header.writer.value.store(FUTEX_IDLE, Ordering::SeqCst);

                header.reader.wake(i32::MAX);
                header.writer.wake(i32::MAX);

                trace!("[Shmem] Channel '{}': Connected.", shmem);
            }
        }

        Ok(Self {
            shmem,
            header: header_ptr,
            buffer: buffer_ptr,
            mask: capacity - 1,
        })
    }

    /// Returns a shared reference to the header living in shared memory.
    #[inline]
    fn header(&self) -> &ChannelHeader {
        // SAFETY: The `header` pointer is guaranteed to be valid for the lifetime of `Channel`,
        // because `self.shmem` keeps the mapping alive.
        unsafe { &*self.header }
    }
}

// SAFETY: `Channel` is safe to send across threads as its raw pointers point to a
// process-wide shared memory region, not thread-local data.
unsafe impl Send for Channel {}

// SAFETY: `Channel` is safe to share across threads because all modifications to the
// underlying shared state are handled through `Sync` primitives (Atomics and Futexes).
unsafe impl Sync for Channel {}

impl Display for Channel {
    /// Displays the channel as the name of its backing shared memory segment.
    ///
    /// Delegating to `Display::fmt` (rather than `write!`) forwards any
    /// caller-supplied format flags (width, alignment, ...) to the name.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        Display::fmt(self.shmem.name(), f)
    }
}

impl Drop for Channel {
    /// Tears the channel down on the owning side.
    ///
    /// Only the segment owner closes the channel: the magic word is cleared
    /// first (with Release semantics, so peers observe it), and then both
    /// futexes are woken so any blocked peer re-checks the magic and returns
    /// `Error::ConnectionClosed` instead of sleeping forever.
    fn drop(&mut self) {
        if self.shmem.is_owner() {
            let header = self.header();

            // Invalidate the segment, then release every sleeper on both sides.
            header.magic.store(0, Ordering::Release);
            header.reader.wake(i32::MAX);
            header.writer.wake(i32::MAX);
        }
    }
}

/// The consuming half of the channel: reads bytes published by the [`Writer`].
#[derive(Debug)]
pub struct Reader(Arc<Channel>);

impl Reader {
    /// Non-blocking read attempt.
    ///
    /// Returns a [`ReadGuard`] over every byte currently readable, or `None`
    /// when the ring is empty. The head cursor is only advanced when the
    /// guard has bytes marked via `consume` and is dropped.
    #[inline]
    fn try_read(&self) -> Option<ReadGuard<'_>> {
        let channel = &self.0;
        let header = channel.header();

        // Acquire on `tail` pairs with the writer's Release store in
        // `WriteGuard::drop`, making the written bytes visible before we read.
        let tail = header.tail.load(Ordering::Acquire);
        let head = header.head.load(Ordering::Relaxed);

        // Capacity check is implicitly handled by mask logic below and wrapping_sub behavior
        let readable = tail.wrapping_sub(head);
        if readable == 0 {
            return None;
        }

        // SAFETY: The `offset` is calculated with `capacity`, ensuring the pointer is a valid address.
        let ptr = unsafe { channel.buffer.byte_add(head & channel.mask) };

        // The `readable` length is safe because the underlying mirrored mmap
        // ensures the virtual memory is contiguous, even if it wraps in physical memory.
        trace!(
            "[Shmem] Channel '{}': {} bytes readable.",
            channel,
            readable
        );
        Some(ReadGuard {
            channel,
            ptr,
            len: readable,
            consumed: 0,
        })
    }

    /// Spin phase: busy-polls for data up to `SPIN_WAIT_ITERATIONS` before the
    /// caller falls back to futex blocking.
    ///
    /// Returns `Ok(None)` when the spin budget is exhausted without data.
    /// Timeout and channel-closed checks run only every
    /// `SPIN_CHECK_ITERATIONS` to keep the hot loop cheap.
    #[inline]
    fn read_spin(&self, deadline: Option<Instant>) -> Result<Option<ReadGuard<'_>>, Error> {
        let channel = &self.0;
        let header = channel.header();
        let mut spin_count = 0;

        while spin_count < SPIN_WAIT_ITERATIONS {
            if let Some(guard) = self.try_read() {
                return Ok(Some(guard));
            }

            // Periodically check for timeout and connection status during the spin loop.
            if spin_count % SPIN_CHECK_ITERATIONS == 0 {
                // Check if the channel has been closed by the owner.
                if header.magic.load(Ordering::Acquire) != SHM_CHANNEL_MAGIC {
                    trace!("[Shmem] Channel '{}': Closed.", channel);
                    return Err(Error::ConnectionClosed);
                }

                // Check for timeout.
                if let Some(d) = deadline {
                    if Instant::now() >= d {
                        return Err(Error::Timeout);
                    }
                }
            }

            hint::spin_loop();
            spin_count += 1;
        }

        Ok(None)
    }

    /// Blocking phase: sleeps on the reader futex until data arrives, the
    /// deadline expires, the wait is interrupted, or the channel is closed.
    ///
    /// The statement order here is load-bearing: the WAITING flag must be
    /// published (SeqCst) *before* the final `try_read` re-check, otherwise a
    /// wakeup from the writer could be lost.
    #[cold]
    fn read_blocking(&self, deadline: Option<Instant>) -> Result<ReadGuard<'_>, Error> {
        let channel = &self.0;
        let header = channel.header();
        let futex = &header.reader;

        loop {
            // First, check if we can read.
            if let Some(guard) = self.try_read() {
                return Ok(guard);
            }

            // Check if the channel has been closed by the owner.
            if header.magic.load(Ordering::Acquire) != SHM_CHANNEL_MAGIC {
                trace!("[Shmem] Channel '{}': Closed.", channel);
                return Err(Error::ConnectionClosed);
            }

            // Check for timeout.
            let remaining = if let Some(d) = deadline {
                let rem = d.saturating_duration_since(Instant::now());
                if rem.is_zero() {
                    return Err(Error::Timeout);
                }
                Some(rem)
            } else {
                None
            };

            // Try to set our state to WAITING.
            // Check with a Relaxed load first to avoid expensive CAS cache line invalidation.
            if futex.value.load(Ordering::Relaxed) != FUTEX_WAITING {
                // Use `compare_exchange_weak` as it's more efficient inside a loop.
                //
                // MUST use `SeqCst` for success: This is part of the Store-Load barrier pattern.
                // We must ensure the "I am waiting" flag is visible BEFORE we check for data
                // availability again.
                if futex
                    .value
                    .compare_exchange_weak(
                        FUTEX_IDLE,
                        FUTEX_WAITING,
                        Ordering::SeqCst,
                        Ordering::Relaxed,
                    )
                    .is_err()
                {
                    hint::spin_loop();
                    continue;
                }
            }

            // --- Lost Wakeup Prevention ---
            // After successfully setting the state to WAITING, we MUST re-check if data has
            // become available. This handles the race condition where the writer adds data
            // and tries to wake us *after* our `try_read()` check but *before* we
            // set the futex state to WAITING.
            if self.try_read().is_some() {
                // Data became available. We must not sleep.
                // Reset the state and loop again to grab the read guard.
                futex.value.store(FUTEX_IDLE, Ordering::Relaxed);
                continue;
            }

            // It's confirmed there's no data, and our state is WAITING. We can now safely sleep.
            trace!("[Shmem] Channel '{}': Waiting for readable...", channel);

            // Perform the wait. A `ValueChanged` error (writer already reset the
            // futex) is treated like a normal wakeup.
            let is_interrupted = match remaining {
                Some(t) => matches!(
                    futex.wait_for(FUTEX_WAITING, t),
                    Err(TimedWaitError::Interrupted)
                ),
                None => matches!(futex.wait(FUTEX_WAITING), Err(WaitError::Interrupted)),
            };

            // After waking up (or timing out/interrupted), reset the futex state.
            // Relaxed is fine because the next loop iteration will synchronize via `try_read`.
            futex.value.store(FUTEX_IDLE, Ordering::Relaxed);

            if is_interrupted {
                return Err(Error::Interrupted);
            }
        }
    }

    /// Shared driver for `read`/`read_timeout`: spin first, then block.
    #[inline]
    fn read_internal(&self, timeout: Option<Duration>) -> Result<ReadGuard<'_>, Error> {
        let deadline = timeout.map(|t| Instant::now() + t);

        if let Some(guard) = self.read_spin(deadline)? {
            return Ok(guard);
        }

        self.read_blocking(deadline)
    }

    /// Waits (indefinitely) until at least one byte is readable.
    ///
    /// # Errors
    ///
    /// `ConnectionClosed` if the owner tore the channel down, or `Interrupted`
    /// if the futex wait was interrupted by a signal.
    pub fn read(&self) -> Result<ReadGuard<'_>, Error> {
        self.read_internal(None)
    }

    /// Like [`Self::read`], but fails with `Error::Timeout` if no data
    /// arrives within `timeout`.
    pub fn read_timeout(&self, timeout: Duration) -> Result<ReadGuard<'_>, Error> {
        self.read_internal(Some(timeout))
    }
}

/// A guard over the currently readable region of the ring buffer.
///
/// Dereferences to the readable byte slice. Dropping the guard publishes the
/// number of bytes previously marked via `consume` back to the writer.
#[derive(Debug)]
pub struct ReadGuard<'a> {
    /// The channel this guard reads from.
    channel: &'a Channel,
    /// Start of the readable region inside the mirrored mapping.
    ptr: *mut u8,
    /// Number of readable bytes behind `ptr`.
    len: usize,
    /// Bytes the caller marked as processed; applied to `head` on drop.
    consumed: usize,
}

impl ReadGuard<'_> {
    pub fn consume(mut self, bytes: usize) -> Result<(), Error> {
        if bytes > self.len {
            return Err(Error::ReadOverflow {
                attempted: bytes,
                capacity: self.len,
            });
        }

        self.consumed = bytes;
        Ok(())
    }
}

impl Deref for ReadGuard<'_> {
    type Target = [u8];

    /// Exposes the readable region as a byte slice.
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        // SAFETY: The channel's logic ensures this memory is valid
        // and not concurrently written to for the guard's lifetime.
        unsafe { slice::from_raw_parts(self.ptr, self.len) }
    }
}

/// # Safety
///
/// This implementation allows mutable access to the read buffer, which is a
/// departure from the standard SPSC queue model. The caller MUST adhere to
/// the following rules:
///
/// 1. Any mutations are for in-place data processing only.
/// 2. The mutations are temporary. The data in this buffer slice will be
///    considered invalid and will be overwritten by the writer as soon
///    as this `ReadGuard` is dropped and the space is reclaimed.
/// 3. This mechanism MUST NOT be used for communication from the reader
///    back to the writer, as the writer never reads this data back.
///
/// Failure to adhere to these rules can lead to data corruption or logic
/// errors in the application-level protocol.
impl<'a> DerefMut for ReadGuard<'a> {
    /// Exposes the readable region as a mutable byte slice (for in-place
    /// processing only — see the safety rules above).
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: The channel's logic ensures this memory is valid and
        // not concurrently written to for the guard's lifetime.
        unsafe { slice::from_raw_parts_mut(self.ptr, self.len) }
    }
}

impl Drop for ReadGuard<'_> {
    /// Publishes the consumed byte count and wakes the writer if it is
    /// blocked waiting for free space. A guard with nothing consumed is a
    /// no-op, so a dropped-without-`consume` guard leaves the ring untouched.
    fn drop(&mut self) {
        if self.consumed == 0 {
            return;
        }

        let header = self.channel.header();
        trace!(
            "[Shmem] Channel '{}': Read {} bytes.",
            self.channel,
            self.consumed
        );

        // Load current head (Relaxed is fine, we are the only writer to head).
        let old_head = header.head.load(Ordering::Relaxed);

        // Store new head with Release semantics to publish data consumption.
        header
            .head
            .store(old_head.wrapping_add(self.consumed), Ordering::Release);

        // Full fence ensures the store is visible before we check the futex.
        // This is the Store-Load half of the handshake with the writer's
        // WAITING flag; see `Writer::write_blocking`.
        fence(Ordering::SeqCst);

        let futex = &header.writer;

        // Check with Relaxed load first. Because `fence(SeqCst)` acts as a full fence,
        // a subsequent Relaxed load is sufficiently ordered relative to the `head` update.
        // The CAS ensures only one side actually issues the (syscall) wake.
        if futex.value.load(Ordering::Relaxed) == FUTEX_WAITING
            && futex
                .value
                .compare_exchange(
                    FUTEX_WAITING,
                    FUTEX_IDLE,
                    Ordering::Relaxed,
                    Ordering::Relaxed,
                )
                .is_ok()
        {
            trace!("[Shmem] Channel '{}': Notify writer.", self.channel);
            futex.wake(1);
        }
    }
}

/// The producing half of the channel: writes bytes for the [`Reader`] to consume.
#[derive(Debug)]
pub struct Writer(Arc<Channel>);

impl Writer {
    /// Non-blocking write attempt.
    ///
    /// Returns a [`WriteGuard`] over every byte currently writable, or `None`
    /// when the ring is full. The tail cursor is only advanced when the guard
    /// has bytes marked via `submit` and is dropped.
    #[inline]
    fn try_write(&self) -> Option<WriteGuard<'_>> {
        let channel = &self.0;
        let header = channel.header();

        // Acquire on `head` pairs with the reader's Release store in
        // `ReadGuard::drop`, so freed space is observed before we reuse it.
        let head = header.head.load(Ordering::Acquire);
        let tail = header.tail.load(Ordering::Relaxed);
        let capacity = channel.mask + 1;

        let writable = capacity.saturating_sub(tail.wrapping_sub(head));
        if writable == 0 {
            return None;
        }

        // SAFETY: The `offset` is calculated with `capacity`, ensuring the pointer is a valid address.
        let ptr = unsafe { channel.buffer.byte_add(tail & channel.mask) };

        // The `writable` length is safe due to the mirrored mmap,
        // which provides a contiguous virtual address space.
        trace!(
            "[Shmem] Channel '{}': {} bytes writable.",
            channel,
            writable
        );
        Some(WriteGuard {
            channel,
            ptr,
            len: writable,
            submitted: 0,
        })
    }

    /// Spin phase: busy-polls for free space up to `SPIN_WAIT_ITERATIONS`
    /// before the caller falls back to futex blocking.
    ///
    /// Returns `Ok(None)` when the spin budget is exhausted without space.
    /// Timeout and channel-closed checks run only every
    /// `SPIN_CHECK_ITERATIONS` to keep the hot loop cheap.
    #[inline]
    fn write_spin(&self, deadline: Option<Instant>) -> Result<Option<WriteGuard<'_>>, Error> {
        let channel = &self.0;
        let header = channel.header();
        let mut spin_count = 0;

        while spin_count < SPIN_WAIT_ITERATIONS {
            if let Some(guard) = self.try_write() {
                return Ok(Some(guard));
            }

            // Periodically check for timeout and connection status during the spin loop.
            if spin_count % SPIN_CHECK_ITERATIONS == 0 {
                // Check if the channel has been closed by the owner.
                if header.magic.load(Ordering::Acquire) != SHM_CHANNEL_MAGIC {
                    trace!("[Shmem] Channel '{}': Closed.", channel);
                    return Err(Error::ConnectionClosed);
                }

                // Check for timeout.
                if let Some(d) = deadline {
                    if Instant::now() >= d {
                        return Err(Error::Timeout);
                    }
                }
            }

            hint::spin_loop();
            spin_count += 1;
        }

        Ok(None)
    }

    /// Blocking phase: sleeps on the writer futex until space frees up, the
    /// deadline expires, the wait is interrupted, or the channel is closed.
    ///
    /// The statement order here is load-bearing: the WAITING flag must be
    /// published (SeqCst) *before* the final `try_write` re-check, otherwise a
    /// wakeup from the reader could be lost.
    #[cold]
    fn write_blocking(&self, deadline: Option<Instant>) -> Result<WriteGuard<'_>, Error> {
        let channel = &self.0;
        let header = channel.header();
        let futex = &header.writer;

        loop {
            // First, check if we can write.
            if let Some(guard) = self.try_write() {
                return Ok(guard);
            }

            // Check if the channel has been closed by the owner.
            if header.magic.load(Ordering::Acquire) != SHM_CHANNEL_MAGIC {
                trace!("[Shmem] Channel '{}': Closed.", channel);
                return Err(Error::ConnectionClosed);
            }

            // Check for timeout.
            let remaining = if let Some(d) = deadline {
                let rem = d.saturating_duration_since(Instant::now());
                if rem.is_zero() {
                    return Err(Error::Timeout);
                }
                Some(rem)
            } else {
                None
            };

            // Try to set our state to WAITING.
            // Check with a Relaxed load first to avoid expensive CAS cache line invalidation.
            if futex.value.load(Ordering::Relaxed) != FUTEX_WAITING {
                // Use `compare_exchange_weak` as it's more efficient inside a loop.
                //
                // MUST use `SeqCst` for success: This is part of the Store-Load barrier pattern.
                // We must ensure the "I am waiting" flag is visible BEFORE we check for data
                // availability again.
                if futex
                    .value
                    .compare_exchange_weak(
                        FUTEX_IDLE,
                        FUTEX_WAITING,
                        Ordering::SeqCst,
                        Ordering::Relaxed,
                    )
                    .is_err()
                {
                    hint::spin_loop();
                    continue;
                }
            }

            // --- Lost Wakeup Prevention ---
            // After successfully setting the state to WAITING, we MUST re-check if space has
            // become available. This handles the race condition where the reader frees up
            // space and tries to wake us *after* our `try_write()` check but *before* we
            // set the futex state to WAITING.
            if self.try_write().is_some() {
                // Space became available. We must not sleep.
                // Reset the state and loop again to grab the write guard.
                futex.value.store(FUTEX_IDLE, Ordering::Relaxed);
                continue;
            }

            // It's confirmed there's no space, and our state is WAITING. We can now safely sleep.
            trace!("[Shmem] Channel '{}': Waiting for writable...", channel);

            // Perform the wait. A `ValueChanged` error (reader already reset the
            // futex) is treated like a normal wakeup.
            let is_interrupted = match remaining {
                Some(t) => matches!(
                    futex.wait_for(FUTEX_WAITING, t),
                    Err(TimedWaitError::Interrupted)
                ),
                None => matches!(futex.wait(FUTEX_WAITING), Err(WaitError::Interrupted)),
            };

            // After waking up (or timing out), reset the futex state.
            // Relaxed is fine because the next loop iteration will synchronize via `try_write`.
            futex.value.store(FUTEX_IDLE, Ordering::Relaxed);

            if is_interrupted {
                return Err(Error::Interrupted);
            }
        }
    }

    /// Shared driver for `write`/`write_timeout`: spin first, then block.
    #[inline]
    fn write_internal(&self, timeout: Option<Duration>) -> Result<WriteGuard<'_>, Error> {
        let deadline = timeout.map(|t| Instant::now() + t);

        if let Some(guard) = self.write_spin(deadline)? {
            return Ok(guard);
        }

        self.write_blocking(deadline)
    }

    /// Waits (indefinitely) until at least one byte is writable.
    ///
    /// # Errors
    ///
    /// `ConnectionClosed` if the owner tore the channel down, or `Interrupted`
    /// if the futex wait was interrupted by a signal.
    pub fn write(&self) -> Result<WriteGuard<'_>, Error> {
        self.write_internal(None)
    }

    /// Like [`Self::write`], but fails with `Error::Timeout` if no space
    /// frees up within `timeout`.
    pub fn write_timeout(&self, timeout: Duration) -> Result<WriteGuard<'_>, Error> {
        self.write_internal(Some(timeout))
    }
}

/// A guard over the currently writable region of the ring buffer.
///
/// Dereferences to the writable byte slice. Dropping the guard publishes the
/// number of bytes previously marked via `submit` to the reader.
#[derive(Debug)]
pub struct WriteGuard<'a> {
    /// The channel this guard writes into.
    channel: &'a Channel,
    /// Start of the writable region inside the mirrored mapping.
    ptr: *mut u8,
    /// Number of writable bytes behind `ptr`.
    len: usize,
    /// Bytes the caller marked as written; applied to `tail` on drop.
    submitted: usize,
}

impl WriteGuard<'_> {
    pub fn submit(mut self, bytes: usize) -> Result<(), Error> {
        if bytes > self.len {
            return Err(Error::WriteOverflow {
                attempted: bytes,
                capacity: self.len,
            });
        }

        self.submitted = bytes;
        Ok(())
    }
}

impl Deref for WriteGuard<'_> {
    type Target = [u8];

    /// Exposes the writable region as a byte slice.
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        // SAFETY: The channel's logic ensures this memory is valid
        // and not concurrently accessed for the guard's lifetime.
        unsafe { slice::from_raw_parts(self.ptr, self.len) }
    }
}

impl DerefMut for WriteGuard<'_> {
    /// Exposes the writable region as a mutable byte slice for the caller to
    /// fill before calling `submit`.
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: The channel's logic ensures this memory is valid
        // and not concurrently accessed for the guard's lifetime.
        unsafe { slice::from_raw_parts_mut(self.ptr, self.len) }
    }
}

impl Drop for WriteGuard<'_> {
    /// Publishes the submitted byte count and wakes the reader if it is
    /// blocked waiting for data. A guard with nothing submitted is a no-op,
    /// so a dropped-without-`submit` guard leaves the ring untouched.
    fn drop(&mut self) {
        if self.submitted == 0 {
            return;
        }

        let header = self.channel.header();
        trace!(
            "[Shmem] Channel '{}': Wrote {} bytes.",
            self.channel,
            self.submitted
        );

        // Load current tail (Relaxed is fine, we are the only writer to tail).
        let old_tail = header.tail.load(Ordering::Relaxed);

        // Store new tail with Release semantics to publish data.
        header
            .tail
            .store(old_tail.wrapping_add(self.submitted), Ordering::Release);

        // Full fence ensures the store is visible before we check the futex.
        // This is the Store-Load half of the handshake with the reader's
        // WAITING flag; see `Reader::read_blocking`.
        fence(Ordering::SeqCst);

        let futex = &header.reader;

        // Check with Relaxed load first. Because `fence(SeqCst)` acts as a full fence,
        // a subsequent Relaxed load is sufficiently ordered relative to the `tail` update.
        // The CAS ensures only one side actually issues the (syscall) wake.
        if futex.value.load(Ordering::Relaxed) == FUTEX_WAITING
            && futex
                .value
                .compare_exchange(
                    FUTEX_WAITING,
                    FUTEX_IDLE,
                    Ordering::Relaxed,
                    Ordering::Relaxed,
                )
                .is_ok()
        {
            trace!("[Shmem] Channel '{}': Notify reader.", self.channel);
            futex.wake(1);
        }
    }
}

/// Returns the number of bytes that must be reserved at the start of the
/// shared memory segment for the channel header, ahead of the ring buffer.
///
/// Made `const` so callers can use it in constant contexts (e.g. when sizing
/// the segment at compile time); `#[must_use]` because ignoring the result is
/// always a bug. Both changes are backward-compatible for existing callers.
#[inline]
#[must_use]
pub const fn header_size() -> usize {
    mem::size_of::<ChannelHeader>()
}

/// Builds an SPSC reader/writer pair over the given shared memory segment.
///
/// Both halves share ownership of the underlying [`Channel`]; errors from
/// channel initialization are propagated unchanged.
#[inline]
pub fn channel(shmem: Shmem) -> Result<(Reader, Writer), Error> {
    let shared = Arc::new(Channel::initialize(shmem)?);
    Ok((Reader(Arc::clone(&shared)), Writer(shared)))
}
