// SPDX-License-Identifier: Mulan PSL v2
/*
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This software is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *         http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
 */

use std::{
    fmt::{Debug, Display},
    hint, io, mem,
    ops::{Deref, DerefMut},
    ptr, slice,
    sync::{
        atomic::{AtomicU64, AtomicUsize, Ordering},
        Arc,
    },
    time::{Duration, Instant},
};

use linux_futex::{Futex, Shared};
use rustix::io::Errno;
use tracing::trace;

use super::{error::ShmemTransportError, sys::shmem::Shmem};
use crate::ipc::transport::{ReadBuf, WriteBuf};

/// A magic number used to identify a shared memory segment as a valid ShmemChannel.
///
/// NOTE(review): the constant's bytes decode to ASCII "SHMECCHN"
/// (0x53 'S', 0x48 'H', 0x4D 'M', 0x45 'E', 0x43 'C', 0x43 'C', 0x48 'H', 0x4E 'N'),
/// not "SHMEMCHN" as previously claimed — likely an original typo in the value.
/// The value itself is kept unchanged for compatibility with peers already
/// using it.
const SHM_CHANNEL_MAGIC: u64 = 0x53484D454343484E; // ASCII "SHMECCHN".

/// The number of spin-loop iterations before blocking on a futex.
const SPIN_WAIT_ITERATIONS: u32 = 1024;

/// Futex state: the associated party is idle.
const FUTEX_IDLE: u32 = 0;

/// Futex state: the associated party is about to block and wait.
const FUTEX_WAITING: u32 = 1;

/// Reader-side cursor.
///
/// Aligned to 64 bytes so that, on architectures with 64-byte cache lines,
/// it occupies its own line and does not false-share with [`WriterCursor`].
#[repr(C, align(64))]
struct ReaderCursor {
    /// Total number of bytes read since the beginning.
    head: AtomicUsize,
    /// Futex to block the reader when the buffer is empty.
    futex: Futex<Shared>,
}

/// Writer-side cursor.
///
/// Aligned to 64 bytes so that, on architectures with 64-byte cache lines,
/// it occupies its own line and does not false-share with [`ReaderCursor`].
#[repr(C, align(64))]
struct WriterCursor {
    /// Total number of bytes written since the beginning.
    tail: AtomicUsize,
    /// Futex to block the writer when the buffer is full.
    futex: Futex<Shared>,
}

/// The header structure of the shared memory channel.
///
/// `repr(C)` pins the field layout so that independently built processes
/// mapping the same segment agree on every offset.
#[repr(C)]
struct ChannelHeader {
    /// Magic number to verify the buffer is initialized and valid.
    /// Cleared to 0 by the owner on teardown to signal "closed".
    magic: AtomicU64,
    /// Cursor for the reader side.
    reader: ReaderCursor,
    /// Cursor for the writer side.
    writer: WriterCursor,
}

/// A high-performance, single-producer, single-consumer (SPSC) queue over shared memory.
///
/// This implementation relies on the underlying `Shmem` to use a mirrored memory mapping
/// technique. This allows the ring buffer to be treated as a contiguous block of memory,
/// eliminating the need for software to handle wrap-around logic.
#[derive(Debug)]
pub struct ShmemChannel {
    /// The underlying mapping; keeps `header`/`buffer` pointers valid for
    /// the lifetime of this value.
    shmem: Shmem,
    /// Points at the [`ChannelHeader`] in the segment's header area.
    header: *mut ChannelHeader,
    /// Points at the start of the (mirrored) data ring.
    buffer: *mut u8,
}

impl ShmemChannel {
    /// Wraps `shmem` as a channel.
    ///
    /// The first process to attach (observing a zero magic) initializes the
    /// header; later processes validate the magic instead. A file lock on the
    /// segment serializes this handshake.
    ///
    /// # Errors
    ///
    /// * `EOVERFLOW` if the header area is smaller than [`ChannelHeader`].
    /// * `EPROTO` if a non-zero magic does not match [`SHM_CHANNEL_MAGIC`].
    /// * Any error returned while taking the file lock.
    fn new(shmem: Shmem) -> io::Result<Self> {
        let header_ptr = shmem.header_ptr().cast::<ChannelHeader>();
        let buffer_ptr = shmem.data_ptr();

        if shmem.header_len() < mem::size_of::<ChannelHeader>() {
            return Err(Errno::OVERFLOW.into());
        }

        // Use a file lock for safe, atomic initialization.
        {
            let _guard = shmem.flock()?;

            // All header accesses below go through the raw pointer. The
            // previous code held a `&ChannelHeader` across the `ptr::write`
            // and then stored the magic through that reference — writing
            // through the raw pointer invalidates a live shared reference
            // under Rust's aliasing rules (UB flagged by Miri), so no
            // reference is formed until initialization is complete.
            //
            // SAFETY: The `Shmem` object ensures the pointer is valid and
            // the file lock prevents data races during initialization.
            let magic_value = unsafe { (*header_ptr).magic.load(Ordering::Acquire) };
            if magic_value == 0 {
                // Owner process: initialize the header.
                trace!("[Shmem] Channel '{}': Initializing...", shmem);
                // SAFETY: The `header_ptr` is valid, and the file lock guarantees exclusive access.
                unsafe {
                    ptr::write(
                        header_ptr,
                        ChannelHeader {
                            // Written as 0 first; the real magic is published
                            // below with `Release` so a connecting peer never
                            // observes a half-initialized header.
                            magic: AtomicU64::new(0),
                            reader: ReaderCursor {
                                head: AtomicUsize::new(0),
                                futex: Futex::new(FUTEX_IDLE),
                            },
                            writer: WriterCursor {
                                tail: AtomicUsize::new(0),
                                futex: Futex::new(FUTEX_IDLE),
                            },
                        },
                    );
                    (*header_ptr).magic.store(SHM_CHANNEL_MAGIC, Ordering::Release);
                }
                trace!("[Shmem] Channel '{}': Initialized.", shmem);
            } else {
                // Connecting process: verify magic number.
                trace!("[Shmem] Channel '{}': Connecting...", shmem);
                if magic_value != SHM_CHANNEL_MAGIC {
                    return Err(Errno::PROTO.into());
                }
                trace!("[Shmem] Channel '{}': Connected.", shmem);
            }
        }

        Ok(Self {
            shmem,
            header: header_ptr,
            buffer: buffer_ptr,
        })
    }

    /// Shared view of the channel header.
    #[inline]
    fn header(&self) -> &ChannelHeader {
        // SAFETY: The `header` pointer is guaranteed to be valid for the lifetime of `ShmemChannel`.
        unsafe { &*self.header }
    }

    /// Ring capacity in bytes (the size of the data region).
    #[inline]
    fn capacity(&self) -> usize {
        self.shmem.data_len()
    }
}

// SAFETY: `ShmemChannel` is safe to send across threads as its raw pointers point to a
// process-wide shared memory region (owned by `shmem`), not thread-local data.
unsafe impl Send for ShmemChannel {}

// SAFETY: `ShmemChannel` is safe to share across threads because all modifications to the
// underlying shared state are handled through `Sync` primitives (Atomics and Futexes).
unsafe impl Sync for ShmemChannel {}

impl Display for ShmemChannel {
    /// A channel displays as the name of its backing shared-memory segment.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Delegate via `Display::fmt` (rather than `write!`) so the caller's
        // formatter flags (width, alignment, ...) are forwarded.
        let segment_name = self.shmem.name();
        Display::fmt(segment_name, f)
    }
}

impl Drop for ShmemChannel {
    fn drop(&mut self) {
        // Only the owning side tears the channel down; connectors simply
        // detach and leave the shared state untouched.
        if self.shmem.is_owner() {
            let hdr = self.header();

            // Clearing the magic marks the channel as closed for every peer...
            hdr.magic.store(0, Ordering::Release);

            // ...and waking all sleepers lets blocked readers/writers observe
            // the closure promptly instead of waiting forever.
            hdr.reader.futex.wake(i32::MAX);
            hdr.writer.futex.wake(i32::MAX);
        }
    }
}

/// The consuming (reader) half of a shared-memory channel.
#[derive(Debug)]
pub struct ShmemReader {
    /// Channel state shared with the corresponding `ShmemWriter`.
    channel: Arc<ShmemChannel>,
}

impl ShmemReader {
    /// Non-blocking read attempt.
    ///
    /// Returns a guard covering every byte currently readable, or `None`
    /// when the ring is empty.
    #[inline]
    fn try_read(&self) -> Option<ShmemReadGuard<'_>> {
        let channel = &self.channel;

        // `Acquire` on `tail` pairs with the writer's `Release` advance,
        // making the written bytes visible before we read them. `head` is
        // only ever advanced by this (single) reader, so `Relaxed` suffices.
        let tail = channel.header().writer.tail.load(Ordering::Acquire);
        let head = channel.header().reader.head.load(Ordering::Relaxed);
        let capacity = channel.capacity();

        // Cursors are monotonically increasing byte counts; their wrapping
        // difference is the number of unread bytes.
        let readable = tail.wrapping_sub(head);
        if readable == 0 {
            return None;
        }

        // SAFETY: The `offset` is calculated with `capacity`, ensuring the pointer is a valid address.
        let ptr = unsafe { channel.buffer.byte_add(head % capacity) };

        // The `readable` length is safe because the underlying mirrored mmap
        // ensures the virtual memory is contiguous, even if it wraps in physical memory.
        trace!(
            "[Shmem] Channel '{}': {} bytes readable.",
            channel,
            readable
        );
        Some(ShmemReadGuard {
            channel,
            ptr,
            len: readable,
            consumed: 0,
        })
    }

    /// Blocking read: spins briefly, then parks on the reader futex until
    /// data arrives, the channel is closed, or `timeout` (if any) expires.
    ///
    /// # Errors
    ///
    /// * `ShmemTransportError::ConnectionClosed` if the owner invalidated the
    ///   channel's magic number.
    /// * `ShmemTransportError::ReadTimeout` if `timeout` elapsed.
    fn read_internal(
        &self,
        timeout: Option<Duration>,
    ) -> Result<ShmemReadGuard<'_>, ShmemTransportError> {
        let channel = &self.channel;
        let header = channel.header();

        // Fast path: Spin wait for a short period.
        // (The timeout and closed-channel checks are deliberately skipped
        // here; the spin phase is bounded by SPIN_WAIT_ITERATIONS.)
        for _ in 0..SPIN_WAIT_ITERATIONS {
            if let Some(guard) = self.try_read() {
                return Ok(guard);
            }
            hint::spin_loop();
        }

        // Slow path: Block using a futex.
        let start_time = Instant::now();
        loop {
            // First, check if we can read.
            if let Some(guard) = self.try_read() {
                return Ok(guard);
            }

            // Check if the channel has been closed by the owner.
            if header.magic.load(Ordering::Acquire) != SHM_CHANNEL_MAGIC {
                trace!("[Shmem] Channel '{}': Closed.", channel);
                return Err(ShmemTransportError::ConnectionClosed);
            }

            // Check for timeout.
            let remaining = timeout.map(|t| t.saturating_sub(start_time.elapsed()));
            if matches!(remaining, Some(value) if value.is_zero()) {
                return Err(ShmemTransportError::ReadTimeout);
            }

            // Try to set our state to WAITING.
            // Use `compare_exchange_weak` as it's more efficient inside a loop.
            // Success ordering is `Acquire` to prevent reordering with the `try_read` check below.
            // Failure ordering is `Relaxed` as we just loop again.
            let futex = &header.reader.futex;
            if futex
                .value
                .compare_exchange_weak(
                    FUTEX_IDLE,
                    FUTEX_WAITING,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                )
                .is_ok()
            {
                // --- Lost Wakeup Prevention ---
                // After successfully setting the state to WAITING, we MUST re-check if data has
                // become available. This handles the race condition where the writer adds data
                // and tries to wake us *after* our `try_read()` check but *before* we
                // set the futex state to WAITING.
                if self.try_read().is_some() {
                    // Data became available. We must not sleep.
                    // Reset the state and loop again to grab the read guard.
                    futex.value.store(FUTEX_IDLE, Ordering::Relaxed);
                    continue;
                }

                // It's confirmed there's no data, and our state is WAITING. We can now safely sleep.
                // The kernel re-checks that the futex word still equals
                // FUTEX_WAITING before sleeping, closing the remaining race.
                trace!("[Shmem] Channel '{}': Waiting for readable...", channel);
                match remaining {
                    Some(t) => {
                        let _ = futex.wait_for(FUTEX_WAITING, t);
                    }
                    None => {
                        let _ = futex.wait(FUTEX_WAITING);
                    }
                }

                // After waking up (or timing out), reset the futex state.
                // Relaxed is fine because the next loop iteration will synchronize via `try_read`.
                futex.value.store(FUTEX_IDLE, Ordering::Relaxed);
            }
            // If compare_exchange_weak fails (real or spurious), we do nothing and just loop again,
            // which is the correct behavior.
        }
    }

    /// Blocks until data is readable, the channel closes, or an error occurs.
    #[inline]
    pub fn read(&self) -> Result<ShmemReadGuard<'_>, ShmemTransportError> {
        self.read_internal(None)
    }

    /// Like [`ShmemReader::read`], but gives up after `timeout`.
    #[inline]
    pub fn read_timeout(
        &self,
        timeout: Duration,
    ) -> Result<ShmemReadGuard<'_>, ShmemTransportError> {
        self.read_internal(Some(timeout))
    }
}

/// A guard over the currently readable region of the channel.
///
/// Derefs to the readable bytes; on drop, any bytes marked via
/// `ReadBuf::consume` are released back to the writer.
#[derive(Debug)]
pub struct ShmemReadGuard<'a> {
    /// Channel the region belongs to.
    channel: &'a ShmemChannel,
    /// Start of the readable region inside the mirrored mapping.
    ptr: *mut u8,
    /// Number of readable bytes starting at `ptr`.
    len: usize,
    /// Bytes the caller marked as consumed; committed on drop.
    consumed: usize,
}

impl<'a> ReadBuf<'a> for ShmemReadGuard<'a> {
    type Error = ShmemTransportError;

    /// Marks `bytes` as consumed; the space is reclaimed when this guard is
    /// dropped. Fails if `bytes` exceeds the readable length.
    fn consume(mut self, bytes: usize) -> Result<(), Self::Error> {
        if bytes <= self.len {
            self.consumed = bytes;
            return Ok(());
        }

        Err(ShmemTransportError::ReadOverflow {
            attempted: bytes,
            capacity: self.len,
        })
    }
}

impl<'a> Deref for ShmemReadGuard<'a> {
    type Target = [u8];

    /// Views the readable region as a byte slice.
    fn deref(&self) -> &Self::Target {
        let (data, len) = (self.ptr as *const u8, self.len);
        // SAFETY: `data..data + len` lies inside the mirrored mapping, and
        // the channel protocol keeps the writer out of this region for the
        // guard's lifetime.
        unsafe { slice::from_raw_parts(data, len) }
    }
}

/// # Safety
///
/// Mutable access to a *read* buffer departs from the standard SPSC queue
/// model; callers must treat the slice as scratch space only:
///
/// 1. Mutations may serve in-place processing of the received data only.
/// 2. Mutations are transient: once this `ShmemReadGuard` is dropped and the
///    space is reclaimed, the writer will overwrite the region.
/// 3. This MUST NOT be used as a back-channel from reader to writer — the
///    writer never reads these bytes again.
///
/// Breaking these rules can corrupt data or derail the application-level
/// protocol.
impl<'a> DerefMut for ShmemReadGuard<'a> {
    /// Views the readable region as a mutable byte slice (see Safety above).
    fn deref_mut(&mut self) -> &mut Self::Target {
        let (data, len) = (self.ptr, self.len);
        // SAFETY: `data..data + len` lies inside the mirrored mapping, and
        // the channel protocol keeps the writer out of this region for the
        // guard's lifetime.
        unsafe { slice::from_raw_parts_mut(data, len) }
    }
}

impl Drop for ShmemReadGuard<'_> {
    /// Commits the consumed bytes back to the ring and wakes the writer if
    /// it announced that it is blocked waiting for free space.
    fn drop(&mut self) {
        // Nothing consumed: ring state is unchanged, no wakeup needed.
        if self.consumed == 0 {
            return;
        }

        let header = self.channel.header();
        trace!(
            "[Shmem] Channel '{}': Read {} bytes.",
            self.channel,
            self.consumed
        );

        // `Release` pairs with the writer's `Acquire` load of `head` in
        // `try_write`, publishing the freed space.
        header
            .reader
            .head
            .fetch_add(self.consumed, Ordering::Release);

        // Wake the writer only if it is (about to start) waiting; swapping
        // WAITING -> IDLE here also prevents redundant wake syscalls.
        let futex = &header.writer.futex;
        if futex
            .value
            .compare_exchange(
                FUTEX_WAITING,
                FUTEX_IDLE,
                Ordering::AcqRel,
                Ordering::Relaxed,
            )
            .is_ok()
        {
            trace!("[Shmem] Channel '{}': Notify writer.", self.channel);
            futex.wake(1);
        }
    }
}

/// The producing (writer) half of a shared-memory channel.
#[derive(Debug)]
pub struct ShmemWriter {
    /// Channel state shared with the corresponding `ShmemReader`.
    channel: Arc<ShmemChannel>,
}

impl ShmemWriter {
    /// Non-blocking write attempt.
    ///
    /// Returns a guard covering every currently writable byte, or `None`
    /// when the ring is full.
    #[inline]
    fn try_write(&self) -> Option<ShmemWriteGuard<'_>> {
        let channel = &self.channel;

        // `Acquire` on `head` pairs with the reader's `Release` advance,
        // so freed space is visible before we reuse it. `tail` is advanced
        // only by this (single) writer, so `Relaxed` suffices.
        let head = channel.header().reader.head.load(Ordering::Acquire);
        let tail = channel.header().writer.tail.load(Ordering::Relaxed);
        let capacity = channel.capacity();

        // The available writable space is `capacity - used_space`.
        // To distinguish a full buffer from an empty one (where head == tail),
        // we leave one byte unused. Thus, `used_space = tail - head`, and
        // `writable = capacity - (tail - head) - 1`
        //
        // NOTE(review): with monotonically increasing cursors, full
        // (tail - head == capacity) and empty (== 0) are already
        // distinguishable, so the reserved byte looks conservative rather
        // than required — kept as-is since both sides share this invariant.
        let writable = capacity
            .saturating_sub(tail.wrapping_sub(head))
            .saturating_sub(1);
        if writable == 0 {
            return None;
        }

        // SAFETY: The `offset` is calculated with `capacity`, ensuring the pointer is a valid address.
        let ptr = unsafe { channel.buffer.byte_add(tail % capacity) };

        // The `writable` length is safe due to the mirrored mmap,
        // which provides a contiguous virtual address space.
        trace!(
            "[Shmem] Channel '{}': {} bytes writable.",
            channel,
            writable
        );
        Some(ShmemWriteGuard {
            channel,
            ptr,
            len: writable,
            submitted: 0,
        })
    }

    /// Blocking write: spins briefly, then parks on the writer futex until
    /// space frees up, the channel is closed, or `timeout` (if any) expires.
    ///
    /// # Errors
    ///
    /// * `ShmemTransportError::ConnectionClosed` if the owner invalidated the
    ///   channel's magic number.
    /// * `ShmemTransportError::WriteTimeout` if `timeout` elapsed.
    fn write_internal(
        &self,
        timeout: Option<Duration>,
    ) -> Result<ShmemWriteGuard<'_>, ShmemTransportError> {
        let channel = &self.channel;
        let header = channel.header();

        // Fast path: Spin wait for a short period.
        // (The timeout and closed-channel checks are deliberately skipped
        // here; the spin phase is bounded by SPIN_WAIT_ITERATIONS.)
        for _ in 0..SPIN_WAIT_ITERATIONS {
            if let Some(guard) = self.try_write() {
                return Ok(guard);
            }
            hint::spin_loop();
        }

        // Slow path: Block using a futex.
        let start_time = Instant::now();
        loop {
            // First, check if we can write.
            if let Some(guard) = self.try_write() {
                return Ok(guard);
            }

            // Check if the channel has been closed by the owner.
            if header.magic.load(Ordering::Acquire) != SHM_CHANNEL_MAGIC {
                trace!("[Shmem] Channel '{}': Closed.", channel);
                return Err(ShmemTransportError::ConnectionClosed);
            }

            // Check for timeout.
            let remaining = timeout.map(|t| t.saturating_sub(start_time.elapsed()));
            if matches!(remaining, Some(value) if value.is_zero()) {
                return Err(ShmemTransportError::WriteTimeout);
            }

            // Try to set our state to WAITING.
            // Use `compare_exchange_weak` as it's more efficient inside a loop.
            // Success ordering is `Acquire` to prevent reordering with the `try_write` check below.
            // Failure ordering is `Relaxed` as we just loop again.
            let futex = &header.writer.futex;
            if futex
                .value
                .compare_exchange_weak(
                    FUTEX_IDLE,
                    FUTEX_WAITING,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                )
                .is_ok()
            {
                // --- Lost Wakeup Prevention ---
                // After successfully setting the state to WAITING, we MUST re-check if space has
                // become available. This handles the race condition where the reader frees up
                // space and tries to wake us *after* our `try_write()` check but *before* we
                // set the futex state to WAITING.
                if self.try_write().is_some() {
                    // Space became available. We must not sleep.
                    // Reset the state and loop again to grab the write guard.
                    futex.value.store(FUTEX_IDLE, Ordering::Relaxed);
                    continue;
                }

                // It's confirmed there's no space, and our state is WAITING. We can now safely sleep.
                // The kernel re-checks that the futex word still equals
                // FUTEX_WAITING before sleeping, closing the remaining race.
                trace!("[Shmem] Channel '{}': Waiting for writable...", channel);
                match remaining {
                    Some(t) => {
                        let _ = futex.wait_for(FUTEX_WAITING, t);
                    }
                    None => {
                        let _ = futex.wait(FUTEX_WAITING);
                    }
                }

                // After waking up (or timing out), reset the futex state.
                // Relaxed is fine because the next loop iteration will synchronize via `try_write`.
                futex.value.store(FUTEX_IDLE, Ordering::Relaxed);
            }
            // If compare_exchange_weak fails (real or spurious), we do nothing and just loop again,
            // which is the correct behavior.
        }
    }

    /// Blocks until space is writable, the channel closes, or an error occurs.
    #[inline]
    pub fn write(&self) -> Result<ShmemWriteGuard<'_>, ShmemTransportError> {
        self.write_internal(None)
    }

    /// Like [`ShmemWriter::write`], but gives up after `timeout`.
    #[inline]
    pub fn write_timeout(
        &self,
        timeout: Duration,
    ) -> Result<ShmemWriteGuard<'_>, ShmemTransportError> {
        self.write_internal(Some(timeout))
    }
}

/// A guard over the currently writable region of the channel.
///
/// Derefs to the writable bytes; on drop, any bytes marked via
/// `WriteBuf::submit` are published to the reader.
#[derive(Debug)]
pub struct ShmemWriteGuard<'a> {
    /// Channel the region belongs to.
    channel: &'a ShmemChannel,
    /// Start of the writable region inside the mirrored mapping.
    ptr: *mut u8,
    /// Number of writable bytes starting at `ptr`.
    len: usize,
    /// Bytes the caller marked as written; committed on drop.
    submitted: usize,
}

impl<'a> WriteBuf<'a> for ShmemWriteGuard<'a> {
    type Error = ShmemTransportError;

    /// Records how many bytes the caller filled in; they are published to
    /// the reader when this guard is dropped. Fails if `bytes` exceeds the
    /// writable length.
    fn submit(mut self, bytes: usize) -> Result<(), Self::Error> {
        if bytes <= self.len {
            self.submitted = bytes;
            return Ok(());
        }

        Err(ShmemTransportError::WriteOverflow {
            attempted: bytes,
            capacity: self.len,
        })
    }
}

impl<'a> Deref for ShmemWriteGuard<'a> {
    type Target = [u8];

    /// Views the writable region as a byte slice.
    fn deref(&self) -> &Self::Target {
        let (data, len) = (self.ptr as *const u8, self.len);
        // SAFETY: `data..data + len` lies inside the mirrored mapping, and
        // the channel protocol keeps the reader out of this region for the
        // guard's lifetime.
        unsafe { slice::from_raw_parts(data, len) }
    }
}

impl<'a> DerefMut for ShmemWriteGuard<'a> {
    /// Views the writable region as a mutable byte slice for the caller to
    /// fill before calling `submit`.
    fn deref_mut(&mut self) -> &mut Self::Target {
        let (data, len) = (self.ptr, self.len);
        // SAFETY: `data..data + len` lies inside the mirrored mapping, and
        // the channel protocol keeps the reader out of this region for the
        // guard's lifetime.
        unsafe { slice::from_raw_parts_mut(data, len) }
    }
}

impl Drop for ShmemWriteGuard<'_> {
    /// Publishes the submitted bytes to the ring and wakes the reader if it
    /// announced that it is blocked waiting for data.
    fn drop(&mut self) {
        // Nothing submitted: ring state is unchanged, no wakeup needed.
        if self.submitted == 0 {
            return;
        }

        let header = self.channel.header();
        trace!(
            "[Shmem] Channel '{}': Wrote {} bytes.",
            self.channel,
            self.submitted
        );

        // `Release` pairs with the reader's `Acquire` load of `tail` in
        // `try_read`, publishing the written bytes.
        header
            .writer
            .tail
            .fetch_add(self.submitted, Ordering::Release);

        // Wake the reader only if it is (about to start) waiting; swapping
        // WAITING -> IDLE here also prevents redundant wake syscalls.
        let futex = &header.reader.futex;
        if futex
            .value
            .compare_exchange(
                FUTEX_WAITING,
                FUTEX_IDLE,
                Ordering::AcqRel,
                Ordering::Relaxed,
            )
            .is_ok()
        {
            trace!("[Shmem] Channel '{}': Notify reader.", self.channel);
            futex.wake(1);
        }
    }
}

/// Returns the number of bytes the channel reserves for its header region.
///
/// Callers sizing a shared memory segment must provide at least this much
/// header space in addition to the desired data capacity.
///
/// Made `const` so callers can use it in constant contexts; this is
/// backward-compatible with all existing call sites.
#[inline]
#[must_use]
pub const fn header_size() -> usize {
    mem::size_of::<ChannelHeader>()
}

/// Builds a connected reader/writer pair over `shmem`.
///
/// # Errors
///
/// Propagates any failure from channel construction/validation.
#[inline]
pub fn channel(shmem: Shmem) -> io::Result<(ShmemReader, ShmemWriter)> {
    let shared = Arc::new(ShmemChannel::new(shmem)?);

    // Both halves hold an `Arc` to the same channel state.
    let writer = ShmemWriter {
        channel: Arc::clone(&shared),
    };
    let reader = ShmemReader { channel: shared };

    Ok((reader, writer))
}
