// SPDX-License-Identifier: Mulan PSL v2
/*
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This software is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *         http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
 */

//! A shared-memory (SHMEM) implementation of the `Transport` trait.
//!
//! This module provides a high-performance, inter-process communication mechanism
//! built on shared memory ring buffers. It uses one channel for each direction of
//! communication. Synchronization is managed by `parking_lot` primitives placed
//! within the shared memory segment itself.

use std::{
    ops::{Deref, DerefMut},
    ptr, slice,
    sync::atomic::{AtomicU8, Ordering},
    time::{Duration, Instant},
};

use parking_lot::{Condvar, Mutex, MutexGuard};
use shared_memory::{Shmem, ShmemConf, ShmemError};
use thiserror::Error;

use super::{Endpoint, ReadGuard, Transport, WriteGuard};

const C2S_SUFFIX: &str = "_c2s";
const S2C_SUFFIX: &str = "_s2c";

/// An error that can occur during shared memory transport operations.
#[derive(Error, Debug)]
pub enum ShmemTransportError {
    /// An error originating from the underlying `shared_memory` crate, transparently forwarded.
    #[error(transparent)]
    SharedMemory(#[from] ShmemError),

    /// An error indicating the requested message size exceeds the buffer's total capacity.
    #[error("message too large for buffer (required: {required}, usable capacity: {capacity})")]
    MessageTooLarge { required: usize, capacity: usize },

    /// An error indicating connection state is unknown.
    #[error("invalid connection state: {0}")]
    InvalidConnectionState(u8),

    /// An error indicating connection is not ready.
    #[error("connection not ready")]
    ConnectionNotReady,

    /// An error indicating connection closed.
    #[error("connection closed")]
    ConnectionClosed,

    /// An error indicating connection timeout.
    #[error("connection timeout")]
    ConnectionTimeout,
}

/// A dedicated struct for ring buffer cursors.
/// Represents the head and tail pointers of the ring buffer.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)]
struct Cursors {
    head: usize,
    tail: usize,
    wrap: Option<usize>,
}

#[repr(u8)]
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
enum ChannelState {
    #[default]
    Uninited = 0,
    Ready = 1,
    Closed = 2,
}

impl TryFrom<u8> for ChannelState {
    type Error = ShmemTransportError;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        match value {
            0 => Ok(ChannelState::Uninited),
            1 => Ok(ChannelState::Ready),
            2 => Ok(ChannelState::Closed),
            _ => Err(ShmemTransportError::InvalidConnectionState(value)),
        }
    }
}

/// A control block located at the start of the shared segment for synchronization.
#[repr(C)]
#[derive(Debug)]
pub(crate) struct ControlBlock {
    cursors: Mutex<Cursors>,
    state: AtomicU8,
    is_readable: Condvar,
    is_writable: Condvar,
}

impl ControlBlock {
    pub(crate) const fn size() -> usize {
        std::mem::size_of::<Self>()
    }
}

/// Represents a single direction of communication over a shared memory segment.
struct ShmemChannel {
    shmem: Shmem,
    control: *mut ControlBlock,
    data: *mut u8,
}

// SAFETY: All access to raw pointers is synchronized by the `parking_lot::Mutex`.
unsafe impl Send for ShmemChannel {}
unsafe impl Sync for ShmemChannel {}

impl ShmemChannel {
    #[inline]
    fn control(&self) -> &ControlBlock {
        unsafe { &*self.control }
    }

    #[inline]
    fn cursors(&self) -> MutexGuard<'_, Cursors> {
        self.control().cursors.lock()
    }

    #[inline]
    fn get_state(&self) -> Result<ChannelState, ShmemTransportError> {
        ChannelState::try_from(self.control().state.load(Ordering::Acquire))
    }

    #[inline]
    fn set_state(&self, state: ChannelState) {
        self.control().state.store(state as u8, Ordering::Release);
    }

    #[inline]
    fn wait_readable(&self, cursors_guard: &mut MutexGuard<'_, Cursors>) {
        self.control().is_readable.wait(cursors_guard);
    }

    #[inline]
    fn notify_readable(&self) {
        self.control().is_readable.notify_one();
    }

    #[inline]
    fn notify_all_readable(&self) {
        self.control().is_readable.notify_all();
    }

    #[inline]
    fn wait_writable(&self, cursors_guard: &mut MutexGuard<'_, Cursors>) {
        self.control().is_writable.wait(cursors_guard);
    }

    #[inline]
    fn notify_writable(&self) {
        self.control().is_writable.notify_one();
    }

    #[inline]
    fn notify_all_writable(&self) {
        self.control().is_writable.notify_all();
    }
}

impl ShmemChannel {
    fn initialize(shmem: Shmem) -> Self {
        let shmem_addr = shmem.as_ptr();
        let control = shmem_addr as *mut ControlBlock;
        let data = unsafe { shmem_addr.add(ControlBlock::size()) };

        if shmem.is_owner() {
            unsafe {
                ptr::write_bytes(control, 0, 1);
                ptr::write(&mut (*control).cursors, Mutex::new(Cursors::default()));
                ptr::write(&mut (*control).is_readable, Condvar::new());
                ptr::write(&mut (*control).is_writable, Condvar::new());
            }
        }

        Self {
            shmem,
            control,
            data,
        }
    }

    fn create(path: &str, size: usize) -> Result<Self, ShmemTransportError> {
        let shmem = ShmemConf::new().size(size).os_id(path).create()?;

        let channel = Self::initialize(shmem);
        channel.set_state(ChannelState::Ready);

        Ok(channel)
    }

    fn open(path: &str) -> Result<Self, ShmemTransportError> {
        let shmem = ShmemConf::new().os_id(path).open()?;

        Ok(Self::initialize(shmem))
    }

    #[inline]
    fn capacity(&self) -> usize {
        self.shmem.len() - ControlBlock::size()
    }

    #[inline]
    fn data(&self) -> *mut u8 {
        self.data
    }
}

/// The concrete `ReadGuard` for the shared memory transport.
pub struct ShmemReadGuard<'a> {
    cursors: MutexGuard<'a, Cursors>,
    channel: &'a ShmemChannel,
    data: &'a [u8],
}

impl<'a> Deref for ShmemReadGuard<'a> {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.data
    }
}

impl<'a> ReadGuard<'a> for ShmemReadGuard<'a> {
    fn advance(mut self, data_len: usize) {
        assert!(
            data_len <= self.len(),
            "cannot consume more bytes than are available in the guard"
        );

        if data_len > 0 {
            // `read` has already handled all complex state transitions (like snapping `tail`
            // to 0 and resetting `wrap`). `consume`'s only job is to advance the
            // `tail` pointer from its current position.
            self.cursors.tail += data_len;

            self.channel.notify_writable();
        }
    }
}

/// The concrete `WriteGuard` for the shared memory transport.
pub struct ShmemWriteGuard<'a> {
    cursors: MutexGuard<'a, Cursors>,
    channel: &'a ShmemChannel,
    is_wrapped: bool,
    data: &'a mut [u8],
}

impl<'a> Deref for ShmemWriteGuard<'a> {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.data
    }
}

impl<'a> DerefMut for ShmemWriteGuard<'a> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.data
    }
}

impl<'a> WriteGuard<'a> for ShmemWriteGuard<'a> {
    fn submit(mut self) {
        if self.is_wrapped {
            // This was a wrap-around write.
            // Set `wrap` to the old `head` and jump the new `head` to the start.
            self.cursors.wrap = Some(self.cursors.head);
            self.cursors.head = self.data.len();
        } else {
            // This was a normal, contiguous write. `head` is simply advanced.
            self.cursors.head += self.data.len();
        }

        self.channel.notify_readable();
    }
}

/// A `Transport` implementation that uses two shared memory channels for bidirectional communication.
pub struct ShmemEndpoint {
    read_channel: ShmemChannel,
    write_channel: ShmemChannel,
}

impl Endpoint for ShmemEndpoint {
    type Error = ShmemTransportError;
    type ReadGuard<'a> = ShmemReadGuard<'a>;
    type WriteGuard<'a> = ShmemWriteGuard<'a>;

    fn read(&mut self) -> Result<Self::ReadGuard<'_>, Self::Error> {
        if self.read_channel.get_state()? == ChannelState::Closed {
            return Err(ShmemTransportError::ConnectionClosed);
        }

        let mut cursors = self.read_channel.cursors();

        // Block until there is data to read.
        while cursors.wrap.is_none() && cursors.head == cursors.tail {
            if self.read_channel.get_state()? == ChannelState::Closed {
                return Err(ShmemTransportError::ConnectionClosed);
            }
            self.read_channel.wait_readable(&mut cursors);
        }

        let data_ptr = self.read_channel.data();
        let data;

        if let Some(wrap) = cursors.wrap {
            // Case 1: WRAPPED state
            // Data is in two disjoint segments because the writer wrapped around.
            // `wrap` acts as a barrier to prevent reading stale data at the buffer's end.
            //
            // Indices:  0                                                    Capacity
            //           |_______________________________________________________|
            // Buffer:   |  VALID DATA #2  |    FREE   |  VALID DATA #1  | STALE |
            // Pointers: ^                 ^           ^                 ^
            //           0                head        tail              wrap
            //
            if cursors.tail < wrap {
                // We are currently processing the first segment (`VALID DATA #1`).
                // Return the slice from the current `tail` up to the `wrap` barrier.
                data = unsafe {
                    slice::from_raw_parts(
                        data_ptr.add(cursors.tail),
                        wrap.saturating_sub(cursors.tail),
                    )
                };
            } else {
                // The first segment has been fully consumed (`tail` has reached `wrap`).
                // It's time to start over from 0 (`VALID DATA #2`).
                data = unsafe { slice::from_raw_parts(data_ptr, cursors.head) };
                // CRITICAL: ATOMIC STATE RESET
                // Before returning the second segment, we atomically reset the buffer's state
                // back to NORMAL. The logical start of data is now index 0, and the wrap
                // marker is removed. This ensures the next operation sees a clean, contiguous buffer.
                cursors.tail = 0;
                cursors.wrap = None;
            }
        } else {
            // Case 2: NORMAL state
            // The data is in a single, contiguous block.
            //
            // Indices:  0                                                    Capacity
            //           |_______________________________________________________|
            // Buffer:   |        FREE        |     VALID DATA     |     FREE    |
            // Pointers:                      ^                    ^
            //                               tail                 head
            //
            data = unsafe {
                slice::from_raw_parts(
                    data_ptr.add(cursors.tail),
                    cursors.head.saturating_sub(cursors.tail),
                )
            };
        }

        Ok(ShmemReadGuard {
            cursors,
            channel: &self.read_channel,
            data,
        })
    }

    fn write(&mut self, data_len: usize) -> Result<Self::WriteGuard<'_>, Self::Error> {
        if self.write_channel.get_state()? == ChannelState::Closed {
            return Err(ShmemTransportError::ConnectionClosed);
        }

        let capacity = self.write_channel.capacity();
        let usable_capacity = capacity.saturating_sub(1);
        if data_len > usable_capacity {
            return Err(ShmemTransportError::MessageTooLarge {
                required: data_len,
                capacity: usable_capacity,
            });
        }

        let mut cursors = self.write_channel.cursors();

        let data_ptr = self.write_channel.data();
        let data;
        let is_wrapped;

        loop {
            if self.write_channel.get_state()? == ChannelState::Closed {
                return Err(ShmemTransportError::ConnectionClosed);
            }

            let (head, tail) = (cursors.head, cursors.tail);
            if head < tail {
                // Case 1: WRAPPED state
                // Data is already in two parts. The only contiguous free space is between `head` and `tail`.
                //
                // Indices:  0                                                    Capacity
                //           |_______________________________________________________|
                // Buffer:   |     DATA #2     |   middle_space   |      DATA #1     |
                // Pointers:                   ^                  ^
                //                            head               tail
                //
                let middle_space = (tail - head).saturating_sub(1);
                if middle_space >= data_len {
                    data = unsafe { slice::from_raw_parts_mut(data_ptr.add(head), data_len) };
                    is_wrapped = false;
                    break;
                }
            } else {
                // Case 2: NORMAL or EMPTY state
                // The data is in a single contiguous block, or the buffer is empty.
                // We have two potential strategies: write at the end, or wrap around to the start.
                //
                // Indices:  0                                                    Capacity
                //           |_______________________________________________________|
                // Buffer:   |   front_space   |     VALID DATA     |   back_space   |
                // Pointers:                   ^                    ^
                //                            tail                 head
                //
                // Try to fit the data contiguously at the end.
                let back_space = capacity.saturating_sub(head);
                if back_space >= data_len {
                    data = unsafe { slice::from_raw_parts_mut(data_ptr.add(head), data_len) };
                    is_wrapped = false;
                    break;
                }
                // If the end is full, try to WRAP AROUND and write at the beginning.
                // We need a 1 byte safety gap to distinguish a full buffer from an empty one.
                let front_space = tail.saturating_sub(1);
                if front_space >= data_len {
                    data = unsafe { slice::from_raw_parts_mut(data_ptr, data_len) };
                    is_wrapped = true;
                    break;
                }
            }

            self.write_channel.wait_writable(&mut cursors);
        }

        Ok(ShmemWriteGuard {
            cursors,
            channel: &self.write_channel,
            is_wrapped,
            data,
        })
    }
}

impl Drop for ShmemEndpoint {
    fn drop(&mut self) {
        if self.read_channel.shmem.is_owner() {
            self.read_channel.set_state(ChannelState::Closed);
            self.read_channel.notify_all_readable();
        }
        if self.write_channel.shmem.is_owner() {
            self.write_channel.set_state(ChannelState::Closed);
            self.write_channel.notify_all_writable();
        }
    }
}

#[derive(Debug, Clone)]
pub struct ShmemTransport {
    path: String,
    buff_size: usize,
    connect_timeout: Duration,
}

impl Transport for ShmemTransport {
    type Endpoint = ShmemEndpoint;
    type Error = ShmemTransportError;

    fn create(&self) -> Result<Self::Endpoint, Self::Error> {
        let read_channel =
            ShmemChannel::create(&format!("{}{}", self.path, C2S_SUFFIX), self.buff_size)?;
        let write_channel =
            ShmemChannel::create(&format!("{}{}", self.path, S2C_SUFFIX), self.buff_size)?;

        Ok(ShmemEndpoint {
            read_channel,
            write_channel,
        })
    }

    fn connect(&self) -> Result<Self::Endpoint, Self::Error> {
        const RETRY_DELAY: Duration = Duration::from_millis(10);
        let start = Instant::now();

        loop {
            if start.elapsed() >= self.connect_timeout {
                return Err(ShmemTransportError::ConnectionTimeout);
            }

            let connect_result = (|| {
                let read_channel = ShmemChannel::open(&format!("{}{}", self.path, S2C_SUFFIX))?;
                let write_channel = ShmemChannel::open(&format!("{}{}", self.path, C2S_SUFFIX))?;

                let read_state = read_channel.get_state().unwrap_or_default();
                let write_state = write_channel.get_state().unwrap_or_default();
                if read_state != ChannelState::Ready || write_state != ChannelState::Ready {
                    return Err(ShmemTransportError::ConnectionNotReady);
                }

                Ok(ShmemEndpoint {
                    read_channel,
                    write_channel,
                })
            })();

            match connect_result {
                Ok(connection) => return Ok(connection),
                Err(
                    ShmemTransportError::SharedMemory(ShmemError::MapOpenFailed(_))
                    | ShmemTransportError::ConnectionNotReady,
                ) => {
                    std::thread::sleep(RETRY_DELAY);
                    continue;
                }
                Err(e) => return Err(e),
            }
        }
    }
}

#[derive(Debug, Clone)]
pub struct ShmemTransportBuilder {
    path: String,
    buff_size: usize,
    connect_timeout: Duration,
}

impl ShmemTransportBuilder {
    pub fn new<S: AsRef<str>>(path: S) -> Self {
        const DEFAULT_BUFF_SIZE: usize = 4096;
        const DEFAULT_CONNECT_TIMEOUT: Duration = Duration::from_millis(100);

        Self {
            path: path.as_ref().to_owned(),
            buff_size: DEFAULT_BUFF_SIZE,
            connect_timeout: DEFAULT_CONNECT_TIMEOUT,
        }
    }

    pub fn buff_size(mut self, value: usize) -> Self {
        self.buff_size = value;
        self
    }

    pub fn connect_timeout(mut self, value: Duration) -> Self {
        self.connect_timeout = value;
        self
    }

    pub fn build(self) -> ShmemTransport {
        ShmemTransport {
            path: self.path,
            buff_size: self.buff_size,
            connect_timeout: self.connect_timeout,
        }
    }
}
