use std::sync::{Mutex, RwLock, Arc, atomic::{AtomicUsize, AtomicBool}};
use std::collections::{HashMap, BTreeMap, VecDeque};
use std::ptr::NonNull;
use std::sync::atomic::Ordering;
use std::time::Instant;
use log::{info, warn};

// IPC error type shared by every IPC primitive in this module.
// All variants are unit-like, so `Copy` is free and lets callers avoid
// the `.clone()` calls sprinkled through the statistics paths.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub enum IPCError {
    ChannelNotFound,
    ChannelAlreadyExists,
    BufferEmpty,
    BufferOverflow,
    OutOfMemory,
    InvalidParameter,
    PermissionDenied,
    ConnectionFailed,
    OperationFailed,
    Timeout,
    NotImplemented,
    Busy,
}

// System-memory allocator interface.
pub trait MemoryAllocator {
    /// Allocates `size` bytes with the given `alignment`, returning a pointer
    /// into the managed region or an `IPCError` on failure.
    fn allocate(&mut self, size: u64, alignment: u64) -> Result<NonNull<u8>, IPCError>;
    /// Releases a previously allocated block; `size` should match the original
    /// request (implementations may use it only for bookkeeping).
    fn free(&mut self, ptr: NonNull<u8>, size: u64) -> Result<(), IPCError>;
    /// Returns a snapshot of the allocator's usage statistics.
    fn get_stats(&self) -> MemoryStats;
}

// GPU-memory allocator interface (thread-safe: `Send + Sync` bound).
pub trait GPUMemoryAllocator: Send + Sync {
    /// Allocates GPU memory; returns the resource handle and its GPU virtual
    /// address on success.
    fn allocate_gpu_memory(&mut self, size: u64, alignment: u64) -> Result<(GpuResourceId, u64), IPCError>;
    /// Releases the GPU allocation identified by `resource_id`.
    fn free_gpu_memory(&mut self, resource_id: GpuResourceId) -> Result<(), IPCError>;
    /// Returns a snapshot of GPU memory usage.
    fn get_gpu_memory_stats(&self) -> GPUMemoryStats;
}

// System-memory usage statistics reported by `MemoryAllocator::get_stats`.
// Public data-only type: derive the standard traits so callers can log,
// copy and compare snapshots.
#[derive(Debug, Clone, Copy, PartialEq, Default)]
pub struct MemoryStats {
    /// Bytes currently handed out by the allocator.
    pub allocated: u64,
    /// Bytes still available in the managed region.
    pub available: u64,
    /// Fragmentation ratio (0.0 = none); exact meaning is allocator-defined.
    pub fragmentation: f64,
}

// GPU-memory usage statistics reported by `get_gpu_memory_stats`.
// All-integer data type: derive the standard traits for logging/comparison.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct GPUMemoryStats {
    /// Total GPU memory visible to the allocator, in bytes.
    pub total: u64,
    /// Bytes currently in use.
    pub used: u64,
    /// Bytes currently free.
    pub free: u64,
    /// Dedicated VRAM portion, in bytes.
    pub dedicated_vram: u64,
    /// System memory shared with the GPU, in bytes.
    pub shared_system_memory: u64,
}

// Opaque identifier (allocation handle) for a GPU-side resource.
pub type GpuResourceId = u64;

// GPU内存块
pub struct GPUMemoryBlock {
    pub resource_id: GpuResourceId,
    pub size: u64,
    pub virtual_address: u64,
    pub physical_address: u64,
    pub alignment: u64,
    pub is_mapped: bool,
    pub usage_flags: u32,
}

// Radix-tree memory-pool manager; implements the `MemoryAllocator` trait
// with simplified bump-style bookkeeping over a fixed region.
pub struct RadixTreeMemoryManager {
    // Start address of the managed region.
    base_address: u64,
    // Total region size in bytes.
    size: u64,
    // Bytes handed out so far.
    allocated: u64,
}

impl RadixTreeMemoryManager {
    /// Creates a manager over the region `[base_address, base_address + size)`
    /// with nothing allocated yet.
    pub fn new(base_address: u64, size: u64) -> Self {
        Self {
            base_address,
            size,
            allocated: 0,
        }
    }
}

impl MemoryAllocator for RadixTreeMemoryManager {
    /// Bump-allocates `size` bytes, honoring `alignment` (must be a non-zero
    /// power of two) and rejecting requests that would exceed the region.
    ///
    /// Fixes over the previous version: alignment was silently ignored, the
    /// capacity of the region was never checked (so `allocated` could grow
    /// past `size` and make `get_stats` underflow), and address arithmetic
    /// could overflow.
    fn allocate(&mut self, size: u64, alignment: u64) -> Result<NonNull<u8>, IPCError> {
        if size == 0 || alignment == 0 || !alignment.is_power_of_two() {
            return Err(IPCError::InvalidParameter);
        }
        // Next free address, rounded up to the requested alignment.
        let current = self
            .base_address
            .checked_add(self.allocated)
            .ok_or(IPCError::OutOfMemory)?;
        let aligned = current
            .checked_add(alignment - 1)
            .ok_or(IPCError::OutOfMemory)?
            & !(alignment - 1);
        // Offset of the end of this allocation relative to the region base.
        let end_offset = (aligned - self.base_address)
            .checked_add(size)
            .ok_or(IPCError::OutOfMemory)?;
        if end_offset > self.size {
            return Err(IPCError::OutOfMemory);
        }
        let ptr = NonNull::new(aligned as *mut u8).ok_or(IPCError::OutOfMemory)?;
        self.allocated = end_offset;
        Ok(ptr)
    }
    
    /// Simplified free: blocks are not returned to the pool; only the usage
    /// counter is adjusted (never below zero).
    fn free(&mut self, _ptr: NonNull<u8>, size: u64) -> Result<(), IPCError> {
        self.allocated = self.allocated.saturating_sub(size);
        Ok(())
    }
    
    /// Returns a snapshot of the bump allocator's counters.
    fn get_stats(&self) -> MemoryStats {
        MemoryStats {
            allocated: self.allocated,
            // saturating_sub guards against any transient over-allocation.
            available: self.size.saturating_sub(self.allocated),
            fragmentation: 0.0,
        }
    }
}

// Hybrid memory manager combining system RAM and GPU memory.
pub struct HybridMemoryManager {
    // Handle to the graphics service used for GPU-side allocations.
    graphics_service: Arc<Mutex<crate::gfx_service::GraphicsService>>,
    // Fallback allocator for plain system memory.
    system_memory_manager: RadixTreeMemoryManager,
    // When true, allocations are attempted on the GPU first.
    prefer_gpu_memory: bool,
}

impl HybridMemoryManager {
    /// Creates a hybrid manager over the given system-memory region, with
    /// GPU-first allocation enabled by default.
    pub fn new(
        graphics_service: Arc<Mutex<crate::gfx_service::GraphicsService>>,
        system_memory_base: u64,
        system_memory_size: u64
    ) -> Self {
        HybridMemoryManager {
            graphics_service,
            system_memory_manager: RadixTreeMemoryManager::new(system_memory_base, system_memory_size),
            prefer_gpu_memory: true,
        }
    }
    
    /// Allocates memory, deciding between GPU and system memory.
    ///
    /// Returns `(is_gpu_memory, resource_id, pointer)`; `resource_id` is 0 for
    /// system-memory allocations. `usage` is accepted but not consulted here.
    /// NOTE(review): if the graphics-service lock is contended (`try_lock`
    /// fails), the request silently falls back to system memory — confirm
    /// that is the intended policy.
    pub fn allocate(&mut self, size: u64, alignment: u64, usage: u32) -> Result<(bool, GpuResourceId, Option<NonNull<u8>>), IPCError> {
        // Decide the memory type based on the preference flag.
        if self.prefer_gpu_memory {
            // Try GPU memory first.
            if let Ok(mut graphics_service) = self.graphics_service.try_lock() {
                if let Ok((resource_id, gpu_virtual_address)) = 
                    graphics_service.allocate_gpu_memory(size, alignment) {
                    return Ok((true, resource_id, NonNull::new(gpu_virtual_address as *mut u8)));
                }
            }
            // GPU allocation failed (or lock was busy): fall through to RAM.
        }
        
        // Allocate from system memory.
        self.system_memory_manager.allocate(size, alignment)
            .map(|ptr| (false, 0, Some(ptr)))
    }
    
    /// Frees a previous allocation.
    ///
    /// NOTE(review): GPU frees return `Busy` when the service lock is
    /// contended, and system frees pass size 0 (so the underlying usage
    /// counter never shrinks); a `None` system pointer is silently accepted.
    /// All three look like simplifications to revisit.
    pub fn free(&mut self, is_gpu_memory: bool, resource_id: GpuResourceId, ptr: Option<NonNull<u8>>) -> Result<(), IPCError> {
        if is_gpu_memory {
            // Free GPU memory.
            if let Ok(mut graphics_service) = self.graphics_service.try_lock() {
                return graphics_service.free_gpu_memory(resource_id);
            }
            return Err(IPCError::Busy);
        } else {
            // Free system memory (size unknown at this point — simplified).
            if let Some(ptr) = ptr {
                return self.system_memory_manager.free(ptr, 0); // simplified implementation
            }
        }
        Ok(())
    }
    
    /// Sets whether GPU memory is preferred for new allocations.
    pub fn set_prefer_gpu_memory(&mut self, prefer: bool) {
        self.prefer_gpu_memory = prefer;
    }
}

// Capability token representing permission to access a system resource.
// The hand-written `Clone` impl was a field-by-field copy, so it is replaced
// with the idiomatic derive (behavior unchanged).
#[derive(Clone)]
pub struct Capability {
    // Whether the capability has been acquired.
    acquired: bool,
}

impl Capability {
    /// Returns true when the capability has been acquired.
    pub fn is_acquired(&self) -> bool {
        self.acquired
    }
}

// Common interface implemented by every IPC channel endpoint.
pub trait IPCChannel: Send {
    /// Sends `data` over the channel.
    fn send(&mut self, data: &[u8]) -> Result<(), IPCError>;
    /// Receives into `buffer`; returns the number of bytes copied.
    fn receive(&mut self, buffer: &mut [u8]) -> Result<usize, IPCError>;
    /// Unique id of the underlying channel.
    fn channel_id(&self) -> u64;
    /// Human-readable channel name.
    fn name(&self) -> &str;
}

// Factory/registry interface for a family of named IPC channels.
pub trait MessagePort {
    /// Creates a new named channel; fails if the name is taken.
    fn create_channel(&mut self, name: &str) -> Result<u64, IPCError>;
    /// Connects to an existing channel by name.
    fn connect_channel(&self, name: &str) -> Result<Box<dyn IPCChannel>, IPCError>;
    /// Drops one reference to a channel; the last close removes it.
    fn close_channel(&mut self, channel_id: u64) -> Result<(), IPCError>;
    /// Lists the names of all currently registered channels.
    fn list_channels(&self) -> Vec<String>;
}

// A single queued message.
struct Message {
    // Payload length in bytes (mirrors `data.len()`; kept explicitly).
    length: usize,
    // Owned payload bytes.
    data: Vec<u8>,
}

// Byte ring buffer over an externally owned memory region. One byte of
// capacity is reserved to distinguish "full" from "empty".
// NOTE(review): positions are plain SeqCst loads/stores with no CAS, so this
// is only safe for a single producer and single consumer — confirm usage.
struct RingBuffer {
    // Base of the backing region (not owned or freed by this struct).
    start: NonNull<u8>,
    // Region size in bytes.
    size: usize,
    // Next read offset.
    read_pos: AtomicUsize,
    // Next write offset.
    write_pos: AtomicUsize,
}

impl RingBuffer {
    /// Creates a ring buffer over `size` bytes starting at `start`.
    /// One byte of capacity is reserved as the full/empty marker.
    /// NOTE(review): `size` must be non-zero — the wrap arithmetic below
    /// computes `% self.size`.
    pub fn new(start: NonNull<u8>, size: usize) -> Self {
        RingBuffer {
            start,
            size,
            read_pos: AtomicUsize::new(0),
            write_pos: AtomicUsize::new(0),
        }
    }
    
    /// Writes all of `data` into the buffer or fails with `BufferOverflow`.
    ///
    /// Fix: the previous version never updated its local `write_pos` inside
    /// the copy loop, so a write that wrapped around the end of the buffer
    /// copied its second chunk to the wrong offset with the wrong length.
    pub fn write(&self, data: &[u8]) -> Result<(), IPCError> {
        let mut write_pos = self.write_pos.load(Ordering::SeqCst);
        let read_pos = self.read_pos.load(Ordering::SeqCst);
        
        // Usable space, keeping one byte free as the full marker.
        let available = if write_pos >= read_pos {
            self.size - (write_pos - read_pos)
        } else {
            read_pos - write_pos
        } - 1;
        
        if data.len() > available {
            return Err(IPCError::BufferOverflow);
        }
        
        // Copy in up to two chunks (tail of the region, then the head).
        let mut written = 0;
        while written < data.len() {
            let chunk = (self.size - write_pos).min(data.len() - written);
            // SAFETY: write_pos + chunk <= self.size, the destination lies
            // inside the region owned by this buffer, and the source slice
            // cannot overlap it.
            unsafe {
                std::ptr::copy_nonoverlapping(
                    data[written..(written + chunk)].as_ptr(),
                    self.start.as_ptr().add(write_pos),
                    chunk,
                );
            }
            
            written += chunk;
            write_pos = (write_pos + chunk) % self.size;
            self.write_pos.store(write_pos, Ordering::SeqCst);
        }
        
        Ok(())
    }
    
    /// Reads up to `buffer.len()` bytes; `BufferEmpty` when nothing is queued.
    ///
    /// Fix: same stale-position bug as `write` — the local `read_pos` is now
    /// advanced per chunk so wrap-around reads come from the right offsets.
    pub fn read(&self, buffer: &mut [u8]) -> Result<usize, IPCError> {
        let write_pos = self.write_pos.load(Ordering::SeqCst);
        let mut read_pos = self.read_pos.load(Ordering::SeqCst);
        
        // Bytes currently queued.
        let available = if write_pos >= read_pos {
            write_pos - read_pos
        } else {
            self.size - (read_pos - write_pos)
        };
        
        if available == 0 {
            return Err(IPCError::BufferEmpty);
        }
        
        // Copy out in up to two chunks (tail of the region, then the head).
        let read_size = buffer.len().min(available);
        let mut read = 0;
        while read < read_size {
            let chunk = (self.size - read_pos).min(read_size - read);
            // SAFETY: read_pos + chunk <= self.size, the source lies inside
            // the region owned by this buffer, and the destination slice
            // cannot overlap it.
            unsafe {
                std::ptr::copy_nonoverlapping(
                    self.start.as_ptr().add(read_pos),
                    buffer[read..(read + chunk)].as_mut_ptr(),
                    chunk,
                );
            }
            
            read += chunk;
            read_pos = (read_pos + chunk) % self.size;
            self.read_pos.store(read_pos, Ordering::SeqCst);
        }
        
        Ok(read_size)
    }
    
    /// Number of bytes that can currently be written.
    pub fn available_write(&self) -> usize {
        let write_pos = self.write_pos.load(Ordering::SeqCst);
        let read_pos = self.read_pos.load(Ordering::SeqCst);
        
        if write_pos >= read_pos {
            self.size - (write_pos - read_pos)
        } else {
            read_pos - write_pos
        } - 1
    }
    
    /// Number of bytes that can currently be read.
    pub fn available_read(&self) -> usize {
        let write_pos = self.write_pos.load(Ordering::SeqCst);
        let read_pos = self.read_pos.load(Ordering::SeqCst);
        
        if write_pos >= read_pos {
            write_pos - read_pos
        } else {
            self.size - (read_pos - write_pos)
        }
    }
}

// Aggregate IPC statistics. Public data type: derive `Debug`/`Clone` so
// callers can log and snapshot it (`Default` stays a manual impl below).
#[derive(Debug, Clone)]
pub struct IPCStats {
    /// Total channels ever created.
    pub channels_created: u64,
    /// Total channels ever closed.
    pub channels_closed: u64,
    /// Channels currently open.
    pub active_channels: u64,
    /// Messages sent across all channels.
    pub messages_sent: u64,
    /// Messages received across all channels.
    pub messages_received: u64,
    /// Payload bytes sent.
    pub bytes_sent: u64,
    /// Payload bytes received.
    pub bytes_received: u64,
    /// Moving average of send latency, nanoseconds.
    pub avg_send_time_ns: u64,
    /// Moving average of receive latency, nanoseconds.
    pub avg_receive_time_ns: u64,
    /// Per-error occurrence counts.
    pub errors: HashMap<IPCError, u64>,
}

impl Default for IPCStats {
    // All counters start at zero with an empty error map.
    fn default() -> Self {
        Self {
            channels_created: 0,
            channels_closed: 0,
            active_channels: 0,
            messages_sent: 0,
            messages_received: 0,
            bytes_sent: 0,
            bytes_received: 0,
            avg_send_time_ns: 0,
            avg_receive_time_ns: 0,
            errors: HashMap::new(),
        }
    }
}

impl IPCStats {
    /// Creates zeroed statistics; equivalent to `Self::default()`.
    pub fn new() -> Self {
        Self::default()
    }
    
    /// Records a channel creation.
    pub fn increment_channels_created(&mut self) {
        self.channels_created += 1;
        self.active_channels += 1;
    }
    
    /// Records a channel close; the active count never drops below zero.
    pub fn increment_channels_closed(&mut self) {
        self.channels_closed += 1;
        self.active_channels = self.active_channels.saturating_sub(1);
    }
    
    /// Records one sent message of `bytes` payload bytes.
    pub fn increment_messages_sent(&mut self, bytes: usize) {
        self.messages_sent += 1;
        self.bytes_sent += bytes as u64;
    }
    
    /// Records one received message of `bytes` payload bytes.
    pub fn increment_messages_received(&mut self, bytes: usize) {
        self.messages_received += 1;
        self.bytes_received += bytes as u64;
    }
    
    /// Bumps the occurrence counter for `error`.
    pub fn increment_error(&mut self, error: IPCError) {
        *self.errors.entry(error).or_default() += 1;
    }
    
    /// Folds `time_ns` into the send-latency moving average (weight 9:1).
    pub fn update_avg_send_time(&mut self, time_ns: u64) {
        self.avg_send_time_ns = (self.avg_send_time_ns * 9 + time_ns) / 10;
    }
    
    /// Folds `time_ns` into the receive-latency moving average (weight 9:1).
    pub fn update_avg_receive_time(&mut self, time_ns: u64) {
        self.avg_receive_time_ns = (self.avg_receive_time_ns * 9 + time_ns) / 10;
    }
    
    /// Resets every counter back to its default.
    pub fn reset(&mut self) {
        *self = Self::default();
    }
}

// Descriptor of a shared-memory region.
struct SharedMemoryRegion {
    // Base pointer of the region (lifetime managed by the registry).
    start: NonNull<u8>,
    // Region size in bytes.
    size: usize,
    // Bookkeeping allocator for sub-allocations inside the region.
    memory_manager: RadixTreeMemoryManager,
}

impl Clone for SharedMemoryRegion {
    // Manual Clone: the pointer and size are copied, but a *fresh* manager is
    // rebuilt over the same region.
    // NOTE(review): the clone therefore forgets any allocations recorded by
    // the original's manager — confirm this reset is intended.
    fn clone(&self) -> Self {
        Self {
            start: self.start,
            size: self.size,
            memory_manager: RadixTreeMemoryManager::new(
                self.start.as_ptr() as u64, 
                self.size as u64
            ),
        }
    }
}

// Registry entry for one shared-memory channel.
struct SharedMemoryChannel {
    // Unique channel id.
    channel_id: u64,
    // Human-readable channel name.
    name: String,
    // Backing shared-memory region.
    shmem_region: SharedMemoryRegion,
    // Live references (creator + connectors); last close removes the entry.
    ref_count: AtomicUsize,
}

// Shared-memory IPC endpoint handed out by `connect_channel`.
struct SharedMemoryIPC {
    channel_id: u64,
    name: String,
    // Clone of the channel's region descriptor.
    shmem_region: SharedMemoryRegion,
    // Ring buffer over the first half of the region (outgoing data).
    send_buffer: RingBuffer,
    // Ring buffer over the second half of the region (incoming data).
    recv_buffer: RingBuffer,
    // Keeps the registry alive for this endpoint's lifetime.
    shmem_registry: Arc<SharedMemoryRegistry>,
}

impl IPCChannel for SharedMemoryIPC {
    // Writes into this endpoint's send ring (first half of the region).
    fn send(&mut self, data: &[u8]) -> Result<(), IPCError> {
        self.send_buffer.write(data)
    }
    
    // Reads from this endpoint's receive ring (second half of the region).
    fn receive(&mut self, buffer: &mut [u8]) -> Result<usize, IPCError> {
        self.recv_buffer.read(buffer)
    }
    
    fn channel_id(&self) -> u64 {
        self.channel_id
    }
    
    fn name(&self) -> &str {
        &self.name
    }
}

// Registry entry for one message-queue channel.
struct MessageQueueChannel {
    // Unique channel id.
    channel_id: u64,
    // Human-readable channel name.
    name: String,
    // FIFO of pending messages.
    messages: VecDeque<Message>,
    // Live references; last close removes the entry.
    ref_count: AtomicUsize,
}

// Shared state backing all message-queue channels.
struct SharedQueues {
    // Channels keyed by id.
    channels: Mutex<BTreeMap<u64, MessageQueueChannel>>,
    // Name → id lookup table.
    name_to_id: Mutex<BTreeMap<String, u64>>,
    // Monotonic id source for new channels.
    next_channel_id: AtomicUsize,
}

// Message-queue IPC endpoint handed out by `connect_channel`.
struct MessageQueueIPC {
    channel_id: u64,
    name: String,
    // Shared queue state (holds the actual channel FIFOs).
    queues: Arc<SharedQueues>,
    // Endpoint-local queue, drained before the shared queue on receive.
    local_queue: VecDeque<Message>,
}

impl IPCChannel for MessageQueueIPC {
    /// Enqueues a copy of `data` on the shared channel queue.
    fn send(&mut self, data: &[u8]) -> Result<(), IPCError> {
        let mut channels = self.queues.channels.lock().unwrap();
        let channel = channels
            .get_mut(&self.channel_id)
            .ok_or(IPCError::ChannelNotFound)?;
        
        channel.messages.push_back(Message {
            length: data.len(),
            data: data.into(),
        });
        Ok(())
    }
    
    /// Dequeues the next message — preferring the endpoint-local queue — and
    /// copies up to `buffer.len()` bytes into `buffer`. Any payload beyond
    /// the buffer size is dropped with the message.
    fn receive(&mut self, buffer: &mut [u8]) -> Result<usize, IPCError> {
        let message = match self.local_queue.pop_front() {
            Some(m) => m,
            None => {
                // Fall back to the shared channel queue.
                let mut channels = self.queues.channels.lock().unwrap();
                channels
                    .get_mut(&self.channel_id)
                    .ok_or(IPCError::ChannelNotFound)?
                    .messages
                    .pop_front()
                    .ok_or(IPCError::BufferEmpty)?
            }
        };
        let copy_len = message.length.min(buffer.len());
        buffer[..copy_len].copy_from_slice(&message.data[..copy_len]);
        Ok(copy_len)
    }
    
    fn channel_id(&self) -> u64 {
        self.channel_id
    }
    
    fn name(&self) -> &str {
        &self.name
    }
}

// Registry of all shared-memory channels.
struct SharedMemoryRegistry {
    // Channels keyed by id.
    channels: RwLock<BTreeMap<u64, SharedMemoryChannel>>,
    // Name → id lookup table.
    name_to_id: RwLock<BTreeMap<String, u64>>,
    // Monotonic id source for new channels.
    next_channel_id: AtomicUsize,
}

// Port for creating/connecting shared-memory channels.
struct SharedMemoryIPCPort {
    registry: Arc<SharedMemoryRegistry>,
    // Capability authorizing memory operations.
    // NOTE(review): never checked inside this port's methods — confirm
    // enforcement happens elsewhere (e.g. in IPCService::new).
    mem_capability: Capability,
}

impl MessagePort for SharedMemoryIPCPort {
    /// Creates a named channel backed by a freshly allocated, zeroed 4 MB
    /// heap region.
    ///
    /// Fix: the previous version built a `RadixTreeMemoryManager` with base
    /// address 0 and asked it for the whole region, so the first allocation
    /// produced a null pointer and `create_channel` ALWAYS failed with
    /// `OutOfMemory`. The region is now backed by real memory.
    fn create_channel(&mut self, name: &str) -> Result<u64, IPCError> {
        // Lock order: name_to_id before channels (close_channel avoids
        // holding them simultaneously, so no ABBA deadlock is possible).
        let mut name_to_id = self.registry.name_to_id.write().unwrap();
        if name_to_id.contains_key(name) {
            return Err(IPCError::ChannelAlreadyExists);
        }
        
        // Allocate the shared-memory region (4MB, page-aligned, zeroed).
        let size = 4 * 1024 * 1024;
        let layout = std::alloc::Layout::from_size_align(size, 4096)
            .map_err(|_| IPCError::InvalidParameter)?;
        // SAFETY: `layout` has a non-zero size.
        let raw = unsafe { std::alloc::alloc_zeroed(layout) };
        let start = NonNull::new(raw).ok_or(IPCError::OutOfMemory)?;
        // Bookkeeping allocator over the real region base.
        let memory_manager = RadixTreeMemoryManager::new(raw as u64, size as u64);
        
        let channel_id = self.registry.next_channel_id.fetch_add(1, Ordering::SeqCst) as u64;
        
        // Publish the channel.
        let mut channels = self.registry.channels.write().unwrap();
        channels.insert(channel_id, SharedMemoryChannel {
            channel_id,
            name: name.to_string(),
            shmem_region: SharedMemoryRegion {
                start,
                size,
                memory_manager,
            },
            ref_count: AtomicUsize::new(1),
        });
        
        name_to_id.insert(name.to_string(), channel_id);
        
        Ok(channel_id)
    }
    
    /// Connects to an existing channel, handing back ring buffers over the
    /// two halves of the shared region and bumping the reference count.
    ///
    /// NOTE(review): every connector receives the same send/recv halves, so
    /// two peers would both write the first half — verify the intended
    /// topology.
    fn connect_channel(&self, name: &str) -> Result<Box<dyn IPCChannel>, IPCError> {
        let name_to_id = self.registry.name_to_id.read().unwrap();
        let &channel_id = name_to_id.get(name)
            .ok_or(IPCError::ChannelNotFound)?;
        
        let channels = self.registry.channels.read().unwrap();
        let channel = channels.get(&channel_id)
            .ok_or(IPCError::ChannelNotFound)?;
        
        // Account for the new endpoint.
        channel.ref_count.fetch_add(1, Ordering::SeqCst);
        
        let half = channel.shmem_region.size / 2;
        // SAFETY: `half` is within the region, so the offset stays in bounds.
        let recv_start = unsafe {
            NonNull::new(channel.shmem_region.start.as_ptr().add(half))
                .ok_or(IPCError::OutOfMemory)?
        };
        Ok(Box::new(SharedMemoryIPC {
            channel_id,
            name: name.to_string(),
            shmem_region: channel.shmem_region.clone(),
            send_buffer: RingBuffer::new(channel.shmem_region.start, half),
            recv_buffer: RingBuffer::new(recv_start, half),
            shmem_registry: self.registry.clone(),
        }))
    }
    
    /// Drops one reference; the last reference removes the channel.
    ///
    /// Fix: the previous version locked `channels` and then `name_to_id`
    /// while `create_channel` acquired them in the opposite order — a classic
    /// ABBA deadlock. The `channels` lock is now released before `name_to_id`
    /// is taken.
    fn close_channel(&mut self, channel_id: u64) -> Result<(), IPCError> {
        let removed_name = {
            let mut channels = self.registry.channels.write().unwrap();
            let channel = channels.get_mut(&channel_id)
                .ok_or(IPCError::ChannelNotFound)?;
            if channel.ref_count.fetch_sub(1, Ordering::SeqCst) == 1 {
                // Last reference: remove the entry and remember its name.
                let name = channel.name.clone();
                channels.remove(&channel_id);
                Some(name)
            } else {
                None
            }
        };
        if let Some(name) = removed_name {
            self.registry.name_to_id.write().unwrap().remove(&name);
            // TODO(review): the heap region allocated in create_channel is
            // leaked here; freeing it safely requires tracking outstanding
            // SharedMemoryIPC clones that still hold the pointer.
        }
        Ok(())
    }
    
    /// Lists the names of all registered shared-memory channels.
    fn list_channels(&self) -> Vec<String> {
        let name_to_id = self.registry.name_to_id.read().unwrap();
        name_to_id.keys().cloned().collect()
    }
}

// Port for creating/connecting message-queue channels.
struct MessageQueueIPCPort {
    // Shared queue state backing every channel created through this port.
    queues: Arc<SharedQueues>,
}

impl MessagePort for MessageQueueIPCPort {
    /// Creates a named message-queue channel.
    fn create_channel(&mut self, name: &str) -> Result<u64, IPCError> {
        // Lock order: name_to_id before channels (close_channel avoids
        // holding them simultaneously, so no ABBA deadlock is possible).
        let mut name_to_id = self.queues.name_to_id.lock().unwrap();
        if name_to_id.contains_key(name) {
            return Err(IPCError::ChannelAlreadyExists);
        }
        
        let channel_id = self.queues.next_channel_id.fetch_add(1, Ordering::SeqCst) as u64;
        
        // Publish the channel with an empty message FIFO.
        let mut channels = self.queues.channels.lock().unwrap();
        channels.insert(channel_id, MessageQueueChannel {
            channel_id,
            name: name.to_string(),
            messages: VecDeque::new(),
            ref_count: AtomicUsize::new(1),
        });
        
        name_to_id.insert(name.to_string(), channel_id);
        
        Ok(channel_id)
    }
    
    /// Connects to an existing channel and bumps its reference count.
    fn connect_channel(&self, name: &str) -> Result<Box<dyn IPCChannel>, IPCError> {
        let name_to_id = self.queues.name_to_id.lock().unwrap();
        let &channel_id = name_to_id.get(name)
            .ok_or(IPCError::ChannelNotFound)?;
        
        let channels = self.queues.channels.lock().unwrap();
        let channel = channels.get(&channel_id)
            .ok_or(IPCError::ChannelNotFound)?;
        
        // Account for the new endpoint.
        channel.ref_count.fetch_add(1, Ordering::SeqCst);
        
        Ok(Box::new(MessageQueueIPC {
            channel_id,
            name: name.to_string(),
            queues: self.queues.clone(),
            local_queue: VecDeque::new(),
        }))
    }
    
    /// Drops one reference; the last reference removes the channel.
    ///
    /// Fix: the previous version held `channels` while locking `name_to_id`,
    /// the opposite order from `create_channel` — an ABBA deadlock. The
    /// `channels` lock is now released before `name_to_id` is taken.
    fn close_channel(&mut self, channel_id: u64) -> Result<(), IPCError> {
        let removed_name = {
            let mut channels = self.queues.channels.lock().unwrap();
            let channel = channels.get_mut(&channel_id)
                .ok_or(IPCError::ChannelNotFound)?;
            if channel.ref_count.fetch_sub(1, Ordering::SeqCst) == 1 {
                // Last reference: remove the entry and remember its name.
                let name = channel.name.clone();
                channels.remove(&channel_id);
                Some(name)
            } else {
                None
            }
        };
        if let Some(name) = removed_name {
            self.queues.name_to_id.lock().unwrap().remove(&name);
        }
        Ok(())
    }
    
    /// Lists the names of all registered message-queue channels.
    fn list_channels(&self) -> Vec<String> {
        let name_to_id = self.queues.name_to_id.lock().unwrap();
        name_to_id.keys().cloned().collect()
    }
}

// Counting semaphore used for synchronized access. `count` tracks permits
// currently held; `acquire` succeeds while `count < max_count`.
pub struct Semaphore {
    count: AtomicUsize,
    max_count: usize,
}

impl Semaphore {
    /// Creates a semaphore with `initial_count` permits already held out of a
    /// maximum of `max_count`.
    pub fn new(initial_count: usize, max_count: usize) -> Self {
        Semaphore {
            count: AtomicUsize::new(initial_count),
            max_count,
        }
    }
    
    /// Acquires a permit, spinning (with `yield_now`) until one is available.
    pub fn acquire(&self) {
        loop {
            let current = self.count.load(Ordering::SeqCst);
            if current < self.max_count
                && self
                    .count
                    .compare_exchange(current, current + 1, Ordering::SeqCst, Ordering::SeqCst)
                    .is_ok()
            {
                break;
            }
            // Simple spin-wait; give other threads a chance to run.
            std::thread::yield_now();
        }
    }
    
    /// Releases a permit.
    ///
    /// Fix: the previous `fetch_sub(1)` wrapped to `usize::MAX` on an
    /// unmatched release (count already 0), permanently wedging the
    /// semaphore. A CAS loop now makes an unmatched release a no-op.
    pub fn release(&self) {
        loop {
            let current = self.count.load(Ordering::SeqCst);
            if current == 0 {
                return;
            }
            if self
                .count
                .compare_exchange(current, current - 1, Ordering::SeqCst, Ordering::SeqCst)
                .is_ok()
            {
                return;
            }
        }
    }
    
    /// Attempts to acquire a permit without blocking; returns whether it
    /// succeeded.
    pub fn try_acquire(&self) -> bool {
        let current = self.count.load(Ordering::SeqCst);
        current < self.max_count
            && self
                .count
                .compare_exchange(current, current + 1, Ordering::SeqCst, Ordering::SeqCst)
                .is_ok()
    }
}

// Unidirectional byte pipe guarded by a mutex. The semaphore's count mirrors
// the number of bytes currently buffered (max = `max_size`).
pub struct Pipe {
    buffer: Mutex<VecDeque<u8>>,
    max_size: usize,
    semaphore: Semaphore,
}

impl Pipe {
    /// Creates an empty pipe holding at most `max_size` bytes.
    pub fn new(max_size: usize) -> Self {
        Pipe {
            buffer: Mutex::new(VecDeque::new()),
            max_size,
            semaphore: Semaphore::new(0, max_size),
        }
    }
    
    /// Writes as much of `data` as fits; returns the number of bytes written,
    /// or `BufferOverflow` when the pipe is full (or `data` is empty).
    pub fn write(&self, data: &[u8]) -> Result<usize, IPCError> {
        let mut buffer = self.buffer.lock().unwrap();
        let available = self.max_size - buffer.len();
        let write_size = data.len().min(available);
        
        if write_size == 0 {
            return Err(IPCError::BufferOverflow);
        }
        
        buffer.extend(data[..write_size].iter().copied());
        
        // Mirror the new occupancy in the semaphore. acquire() cannot block
        // here: the count equals the pre-push length, and length + write_size
        // is bounded by max_size under the lock we still hold.
        for _ in 0..write_size {
            self.semaphore.acquire();
        }
        
        Ok(write_size)
    }
    
    /// Reads up to `buffer.len()` bytes, blocking until data is available.
    ///
    /// Fix: the previous version "waited" by calling `semaphore.acquire()`,
    /// which *increments* the count (i.e. waits for free space, not for
    /// data), corrupted the byte accounting, and returned Ok(0) on an empty
    /// pipe. It now polls for data and releases one permit per byte consumed
    /// so writers observe the freed space.
    pub fn read(&self, buffer: &mut [u8]) -> Result<usize, IPCError> {
        if buffer.is_empty() {
            return Ok(0);
        }
        loop {
            {
                let mut pipe_buffer = self.buffer.lock().unwrap();
                if !pipe_buffer.is_empty() {
                    let read_size = buffer.len().min(pipe_buffer.len());
                    for slot in buffer[..read_size].iter_mut() {
                        *slot = pipe_buffer.pop_front().unwrap();
                    }
                    // One permit per byte consumed.
                    for _ in 0..read_size {
                        self.semaphore.release();
                    }
                    return Ok(read_size);
                }
            }
            // Pipe empty: drop the lock and spin politely.
            std::thread::yield_now();
        }
    }
    
    /// Non-blocking read; returns `BufferEmpty` when no data is buffered.
    pub fn try_read(&self, buffer: &mut [u8]) -> Result<usize, IPCError> {
        let mut pipe_buffer = self.buffer.lock().unwrap();
        if pipe_buffer.is_empty() {
            return Err(IPCError::BufferEmpty);
        }
        let read_size = buffer.len().min(pipe_buffer.len());
        for slot in buffer[..read_size].iter_mut() {
            *slot = pipe_buffer.pop_front().unwrap();
        }
        // One permit per byte consumed.
        for _ in 0..read_size {
            self.semaphore.release();
        }
        Ok(read_size)
    }
}

// Callback invoked when an asynchronous IPC operation completes; receives the
// transferred byte count on success or the failure reason.
type AsyncCallback = Box<dyn Fn(Result<usize, IPCError>) + Send + Sync + 'static>;

// Bookkeeping for an in-flight asynchronous IPC operation.
struct AsyncOperation {
    // Invoked when the operation finishes.
    callback: AsyncCallback,
    // Intended to guard against double completion.
    // NOTE(review): checked in async_send but never set to true anywhere in
    // the visible code — confirm the completion protocol.
    completed: AtomicBool,
}

// IPC服务 - 集成所有IPC机制
#[derive(Clone)]
pub struct IPCService {
    mem_capability: Capability,
    shared_memory_port: SharedMemoryIPCPort,
    message_queue_port: MessageQueueIPCPort,
    pipes: HashMap<u64, Arc<Pipe>>,
    semaphores: HashMap<u64, Arc<Semaphore>>,
    async_operations: Mutex<HashMap<u64, AsyncOperation>>,
    next_id: AtomicUsize,
    stats: Arc<Mutex<IPCStats>>,
    hybrid_memory_manager: Option<HybridMemoryManager>,
}

impl IPCService {
    /// Builds an `IPCService`; fails when the memory capability has not been
    /// acquired.
    pub fn new(mem_capability: Capability) -> Result<Self, String> {
        if !mem_capability.is_acquired() {
            return Err("Memory capability not acquired".to_string());
        }
        
        // Fresh, empty registries for both channel families.
        let registry = Arc::new(SharedMemoryRegistry {
            channels: RwLock::new(BTreeMap::new()),
            name_to_id: RwLock::new(BTreeMap::new()),
            next_channel_id: AtomicUsize::new(1),
        });
        let queues = Arc::new(SharedQueues {
            channels: Mutex::new(BTreeMap::new()),
            name_to_id: Mutex::new(BTreeMap::new()),
            next_channel_id: AtomicUsize::new(1),
        });
        
        // The hybrid memory manager needs a GraphicsService instance, so it
        // is installed later via `init_hybrid_memory_manager`.
        Ok(IPCService {
            shared_memory_port: SharedMemoryIPCPort {
                registry,
                mem_capability: mem_capability.clone(),
            },
            message_queue_port: MessageQueueIPCPort { queues },
            mem_capability,
            pipes: HashMap::new(),
            semaphores: HashMap::new(),
            async_operations: Mutex::new(HashMap::new()),
            next_id: AtomicUsize::new(1),
            stats: Arc::new(Mutex::new(IPCStats::new())),
            hybrid_memory_manager: None,
        })
    }
    
    /// Installs the hybrid (system + GPU) memory manager over the given
    /// system-memory region.
    pub fn init_hybrid_memory_manager(
        &mut self,
        graphics_service: Arc<Mutex<crate::gfx_service::GraphicsService>>,
        system_memory_base: u64,
        system_memory_size: u64
    ) {
        self.hybrid_memory_manager = Some(HybridMemoryManager::new(
            graphics_service,
            system_memory_base,
            system_memory_size,
        ));
        info!("Hybrid memory manager initialized");
    }
    
    /// Allocates through the hybrid manager; returns `InvalidParameter` until
    /// `init_hybrid_memory_manager` has been called.
    pub fn allocate_memory(
        &mut self,
        size: u64,
        alignment: u64,
        usage: u32
    ) -> Result<(bool, GpuResourceId, Option<NonNull<u8>>), IPCError> {
        let manager = self
            .hybrid_memory_manager
            .as_mut()
            .ok_or(IPCError::InvalidParameter)?;
        manager.allocate(size, alignment, usage)
    }
    
    /// Frees through the hybrid manager; returns `InvalidParameter` until
    /// `init_hybrid_memory_manager` has been called.
    pub fn free_memory(
        &mut self,
        is_gpu_memory: bool,
        resource_id: GpuResourceId,
        ptr: Option<NonNull<u8>>
    ) -> Result<(), IPCError> {
        let manager = self
            .hybrid_memory_manager
            .as_mut()
            .ok_or(IPCError::InvalidParameter)?;
        manager.free(is_gpu_memory, resource_id, ptr)
    }
    
    /// Toggles GPU-first allocation; warns when the hybrid manager is absent.
    pub fn set_prefer_gpu_memory(&mut self, prefer: bool) {
        match self.hybrid_memory_manager.as_mut() {
            Some(manager) => manager.set_prefer_gpu_memory(prefer),
            None => warn!("Cannot set GPU memory preference: hybrid memory manager not initialized"),
        }
    }
    
    // Starts the IPC service. Currently only logs; all state is already
    // initialized eagerly in `new`.
    pub fn start(&self) -> Result<(), String> {
        info!("IPC Service started");
        Ok(())
    }
    
    // 1. Layered radix-tree memory-pool architecture.
    // Creates a standalone memory manager over the given region.
    pub fn create_memory_manager(&self, base_address: u64, size: u64) -> RadixTreeMemoryManager {
        RadixTreeMemoryManager::new(base_address, size)
    }
    
    /// 2. Shared memory + message queues — creates a shared-memory channel
    /// and records the creation in the statistics.
    pub fn create_shared_memory_channel(&mut self, name: &str) -> Result<u64, IPCError> {
        match self.shared_memory_port.create_channel(name) {
            Ok(id) => {
                self.stats.lock().unwrap().increment_channels_created();
                Ok(id)
            }
            Err(e) => Err(e),
        }
    }
    
    // Connects to an existing shared-memory channel by name.
    pub fn connect_shared_memory_channel(&self, name: &str) -> Result<Box<dyn IPCChannel>, IPCError> {
        self.shared_memory_port.connect_channel(name)
    }
    
    /// Creates a message-queue channel and records the creation in the
    /// statistics.
    pub fn create_message_queue_channel(&mut self, name: &str) -> Result<u64, IPCError> {
        match self.message_queue_port.create_channel(name) {
            Ok(id) => {
                self.stats.lock().unwrap().increment_channels_created();
                Ok(id)
            }
            Err(e) => Err(e),
        }
    }
    
    // Connects to an existing message-queue channel by name.
    pub fn connect_message_queue_channel(&self, name: &str) -> Result<Box<dyn IPCChannel>, IPCError> {
        self.message_queue_port.connect_channel(name)
    }
    
    /// 3. Pipe + semaphore — creates a pipe of capacity `max_size` and
    /// returns its id.
    pub fn create_pipe(&mut self, max_size: usize) -> u64 {
        let id = self.next_id.fetch_add(1, Ordering::SeqCst) as u64;
        self.pipes.insert(id, Arc::new(Pipe::new(max_size)));
        id
    }
    
    /// Looks up a pipe by id, returning a cloned handle.
    pub fn get_pipe(&self, id: u64) -> Option<Arc<Pipe>> {
        self.pipes.get(&id).cloned()
    }
    
    /// Creates a semaphore and returns its id.
    pub fn create_semaphore(&mut self, initial_count: usize, max_count: usize) -> u64 {
        let id = self.next_id.fetch_add(1, Ordering::SeqCst) as u64;
        self.semaphores
            .insert(id, Arc::new(Semaphore::new(initial_count, max_count)));
        id
    }
    
    /// Looks up a semaphore by id, returning a cloned handle.
    pub fn get_semaphore(&self, id: u64) -> Option<Arc<Semaphore>> {
        self.semaphores.get(&id).cloned()
    }
    
    /// Closes a shared-memory channel and records the close in the
    /// statistics.
    pub fn close_shared_memory_channel(&mut self, channel_id: u64) -> Result<(), IPCError> {
        match self.shared_memory_port.close_channel(channel_id) {
            Ok(()) => {
                self.stats.lock().unwrap().increment_channels_closed();
                Ok(())
            }
            Err(e) => Err(e),
        }
    }
    
    /// Closes a message-queue channel and records the close in the
    /// statistics.
    pub fn close_message_queue_channel(&mut self, channel_id: u64) -> Result<(), IPCError> {
        match self.message_queue_port.close_channel(channel_id) {
            Ok(()) => {
                self.stats.lock().unwrap().increment_channels_closed();
                Ok(())
            }
            Err(e) => Err(e),
        }
    }
    
    // 4. 同步IPC+异步IPC - 同步发送数据
    pub fn sync_send(&self, channel: &mut Box<dyn IPCChannel>, data: &[u8]) -> Result<(), IPCError> {
        // 测量发送时间
        let start_time = std::time::Instant::now();
        let result = channel.send(data);
        let elapsed_ns = start_time.elapsed().as_nanos() as u64;
        
        // 更新统计信息
        let mut stats = self.stats.lock().unwrap();
        match &result {
            Ok(_) => {
                stats.increment_messages_sent(data.len());
                stats.update_avg_send_time(elapsed_ns);
            },
            Err(err) => {
                stats.increment_error(err.clone());
            }
        }
        
        result
    }
    
    // 同步接收数据
    pub fn sync_receive(&self, channel: &mut Box<dyn IPCChannel>, buffer: &mut [u8]) -> Result<usize, IPCError> {
        // 测量接收时间
        let start_time = std::time::Instant::now();
        let result = channel.receive(buffer);
        let elapsed_ns = start_time.elapsed().as_nanos() as u64;
        
        // 更新统计信息
        let mut stats = self.stats.lock().unwrap();
        match &result {
            Ok(size) => {
                stats.increment_messages_received(*size);
                stats.update_avg_receive_time(elapsed_ns);
            },
            Err(err) => {
                stats.increment_error(err.clone());
            }
        }
        
        result
    }
    
    // Asynchronous send: registers the callback, performs the send on a
    // spawned thread, then invokes the callback with the result.
    //
    // NOTE(review): this method cannot compile as written —
    //   * `self.async_operations` is `Mutex<HashMap<..>>`; the `.clone()`
    //     below (and moving the clone into the thread) requires the field to
    //     be `Arc<Mutex<..>>`;
    //   * `channel` is a `&mut` borrow captured by `thread::spawn`, which
    //     requires `'static` ownership;
    //   * `op.completed` is checked but never set to true anywhere visible,
    //     and the local `channel_id` is unused.
    // A fix requires changing the `IPCService` field type together with
    // `new`, so the code is left untouched and only annotated here.
    pub fn async_send(&self, channel: &mut Box<dyn IPCChannel>, data: &[u8], callback: AsyncCallback) -> u64 {
        let id = self.next_id.fetch_add(1, Ordering::SeqCst) as u64;
        let data_copy = data.to_vec();
        let channel_id = channel.channel_id();
        
        // Register the pending operation.
        {
            let mut operations = self.async_operations.lock().unwrap();
            operations.insert(id, AsyncOperation {
                callback,
                completed: AtomicBool::new(false),
            });
        }
        
        // Run the actual send on a new thread.
        let operations_clone = self.async_operations.clone();
        let stats_clone = self.stats.clone();
        std::thread::spawn(move || {
            let result = channel.send(&data_copy);
            
            // Update the shared statistics.
            let mut stats = stats_clone.lock().unwrap();
            match &result {
                Ok(_) => {
                    stats.increment_messages_sent(data_copy.len());
                },
                Err(err) => {
                    stats.increment_error(err.clone());
                }
            }
            
            // Invoke the callback unless already completed.
            let mut operations = operations_clone.lock().unwrap();
            if let Some(op) = operations.remove(&id) {
                if !op.completed.load(Ordering::SeqCst) {
                    let len = if result.is_ok() { data_copy.len() } else { 0 };
                    (op.callback)(if result.is_ok() { Ok(len) } else { Err(result.unwrap_err()) });
                }
            }
        });
        
        id
    }
    
    // Asynchronously receive up to `buffer_size` bytes from `channel`.
    //
    // The operation is registered under a fresh id and the receive runs on a
    // newly spawned thread; `callback` receives the Result<usize, IPCError>
    // unless the operation was cancelled first. Returns the operation id.
    //
    // NOTE(review): same lifetime problem as async_send — the `'static`
    // spawned closure captures the caller-borrowed `&mut Box<dyn IPCChannel>`,
    // which `std::thread::spawn` should reject; confirm the intended channel
    // ownership model.
    //
    // NOTE(review): the received bytes in `buffer` are discarded when the
    // thread exits — the callback only ever sees the byte count. If callers
    // need the payload, the API must expose the buffer to the callback.
    pub fn async_receive(&self, channel: &mut Box<dyn IPCChannel>, buffer_size: usize, callback: AsyncCallback) -> u64 {
        // Allocate a unique operation id from the monotonic counter.
        let id = self.next_id.fetch_add(1, Ordering::SeqCst) as u64;
        
        // Register the pending operation so it can be looked up / cancelled.
        {
            let mut operations = self.async_operations.lock().unwrap();
            operations.insert(id, AsyncOperation {
                callback,
                completed: AtomicBool::new(false),
            });
        }
        
        // Perform the receive on a background thread.
        let operations_clone = self.async_operations.clone();
        let stats_clone = self.stats.clone();
        std::thread::spawn(move || {
            // Scratch buffer for the incoming message (dropped on thread exit).
            let mut buffer = vec![0; buffer_size];
            let result = channel.receive(&mut buffer);
            
            // Record the outcome in the shared statistics.
            let mut stats = stats_clone.lock().unwrap();
            match &result {
                Ok(size) => {
                    stats.increment_messages_received(*size);
                },
                Err(err) => {
                    stats.increment_error(err.clone());
                }
            }
            
            // Fire the callback unless the operation was cancelled; removing
            // the entry also releases the stored callback.
            let mut operations = operations_clone.lock().unwrap();
            if let Some(op) = operations.remove(&id) {
                if !op.completed.load(Ordering::SeqCst) {
                    (op.callback)(result);
                }
            }
        });
        
        id
    }
    
    // Cancel a pending asynchronous operation by id.
    //
    // Marks the operation as completed so the worker thread will skip its
    // callback when it finishes; the map entry itself is removed by the
    // worker. Returns true if the id was still pending, false otherwise
    // (unknown id, or the worker already ran and removed the entry).
    pub fn cancel_async_operation(&self, id: u64) -> bool {
        // `AtomicBool::store` takes `&self`, so a shared `get` suffices —
        // no need for `get_mut` or a mutable guard binding.
        let operations = self.async_operations.lock().unwrap();
        match operations.get(&id) {
            Some(op) => {
                op.completed.store(true, Ordering::SeqCst);
                true
            }
            None => false,
        }
    }
    
    // 5. Hardware IPC + software IPC — obtain the hardware IPC interface.
    // Note: the hardware IPC interface is abstract; a real implementation
    // must integrate with the concrete hardware platform.
    pub fn get_hardware_ipc_interface(&self) -> Result<Box<dyn IPCChannel>, IPCError> {
        // A software message-queue channel stands in as the hardware IPC
        // abstraction layer; a real implementation would talk to a driver.
        // Workaround: create_channel needs `&mut self`, but this method only
        // has `&self`, so channel creation is done on a clone of the service.
        // NOTE(review): this only works if `message_queue_port` shares state
        // across clones (e.g. Arc-backed). If Clone is a deep copy, the
        // channel created on `service` is invisible to `self` and the retry
        // below fails again — confirm against the port's Clone impl.
        match self.connect_message_queue_channel("hardware_ipc") {
            Ok(channel) => Ok(channel),
            Err(_) => {
                // Create a mutable clone so create_channel can be called.
                let mut service = self.clone();
                service.message_queue_port.create_channel("hardware_ipc")?;
                self.connect_message_queue_channel("hardware_ipc")
            }
        }
    }
    
    // Obtain a software (message-queue backed) IPC channel by name.
    pub fn get_software_ipc_interface(&self, name: &str) -> Result<Box<dyn IPCChannel>, IPCError> {
        let channel = self.connect_message_queue_channel(name)?;
        Ok(channel)
    }
    
    // Enumerate every IPC channel known to either port:
    // shared-memory channels first, followed by message-queue channels.
    pub fn list_all_channels(&self) -> Vec<String> {
        let mut all = self.shared_memory_port.list_channels();
        all.extend(self.message_queue_port.list_channels());
        all
    }
    
    // Return a snapshot (clone) of the current IPC statistics.
    pub fn get_stats(&self) -> IPCStats {
        let guard = self.stats.lock().unwrap();
        guard.clone()
    }
    
    // Reset all accumulated IPC statistics to their initial values.
    pub fn reset_stats(&self) {
        let mut guard = self.stats.lock().unwrap();
        guard.reset();
    }
    
    // Log a full report of the current IPC statistics at info level:
    // channel counts, message counts, byte throughput, average latencies,
    // and (when present) per-error occurrence counts.
    pub fn print_stats(&self) {
        let snapshot = self.get_stats();
        info!("IPC Statistics:");
        info!("  Channels: created={}, closed={}, active={}", 
              snapshot.channels_created, snapshot.channels_closed, snapshot.active_channels);
        info!("  Messages: sent={}, received={}", 
              snapshot.messages_sent, snapshot.messages_received);
        info!("  Throughput: sent={} bytes, received={} bytes", 
              snapshot.bytes_sent, snapshot.bytes_received);
        info!("  Performance: avg send={} ns, avg receive={} ns", 
              snapshot.avg_send_time_ns, snapshot.avg_receive_time_ns);
        // Only emit the error section when there is something to report.
        if snapshot.errors.is_empty() {
            return;
        }
        info!("  Errors:");
        for (error, count) in snapshot.errors.iter() {
            info!("    {:?}: {}", error, count);
        }
    }
    
    // Spawn a detached background thread that logs a one-line summary of the
    // statistics every `interval_seconds` seconds, indefinitely.
    pub fn start_monitoring(&self, interval_seconds: u64) {
        let shared_stats = self.stats.clone();
        std::thread::spawn(move || loop {
            std::thread::sleep(std::time::Duration::from_secs(interval_seconds));
            let snapshot = shared_stats.lock().unwrap();
            info!("IPC Monitoring - Active Channels: {}, Messages Sent/Received: {}/{}",
                  snapshot.active_channels, snapshot.messages_sent, snapshot.messages_received);
        });
    }
}