//! High-performance batch processing system.
//!
//! Implements batched command handling with pipeline-mode support.
//! Core optimizations:
//! 1. Batched command parsing — parse many commands per read
//! 2. Batched response write-back — fewer network round trips
//! 3. Object-pool reuse — fewer heap allocations
//! 4. Zero-copy — use references instead of copies where possible

// use std::collections::VecDeque; // 暂时未使用
use std::sync::Arc;
use std::time::{Duration, Instant};
use crate::core::object_pool::{GlobalObjectPools, PooledRedisCommand};
use crate::protocol::NetworkStream;
// use crate::storage::Command; // 暂时未使用
use crate::storage::{CommandResult, SingleThreadCommandDispatcher};
use tracing::{debug, warn};

/// Tuning parameters for the batch processor.
#[derive(Debug, Clone)]
pub struct BatchProcessorConfig {
    /// Maximum number of commands processed per batch.
    pub batch_size: usize,
    /// Maximum time to wait before flushing a partially filled batch
    /// (checked via `BatchCommands::is_timeout`).
    pub batch_timeout: Duration,
    /// Size in bytes of the read buffer used for each bulk read.
    pub buffer_size: usize,
    /// Whether zero-copy optimizations are enabled.
    /// NOTE(review): not consulted anywhere in this module — presumably
    /// read by the networking layer; confirm before relying on it.
    pub zero_copy: bool,
}

impl Default for BatchProcessorConfig {
    fn default() -> Self {
        Self {
            batch_size: 100,          // 每批100个命令
            batch_timeout: Duration::from_millis(1), // 1ms超时
            buffer_size: 32768,       // 32KB缓冲区
            zero_copy: true,
        }
    }
}

/// A batch of parsed commands originating from a single connection.
#[derive(Debug)]
pub struct BatchCommands {
    /// Parsed commands, in arrival order.
    pub commands: Vec<PooledRedisCommand>,
    /// Identifier of the connection the commands were read from.
    pub connection_id: u64,
    /// Monotonically increasing batch identifier.
    pub batch_id: u64,
    /// Instant the batch was created; used for timeout checks.
    pub created_at: Instant,
}

impl BatchCommands {
    /// Default upper bound on the number of commands held by a batch.
    ///
    /// `add_command` compares against this constant rather than against
    /// `Vec::capacity()`: `Vec::with_capacity(n)` is allowed to allocate
    /// *more* than `n`, so a capacity-based check would make the batch
    /// limit allocator-dependent instead of a fixed semantic limit.
    const DEFAULT_CAPACITY: usize = 100;

    /// Creates an empty batch for `connection_id` with the given `batch_id`,
    /// stamping the creation time with `Instant::now()`.
    pub fn new(connection_id: u64, batch_id: u64) -> Self {
        Self {
            commands: Vec::with_capacity(Self::DEFAULT_CAPACITY),
            connection_id,
            batch_id,
            created_at: Instant::now(),
        }
    }

    /// Appends `command` to the batch.
    ///
    /// Returns `true` on success, or `false` when the batch already holds
    /// `DEFAULT_CAPACITY` commands (batch full).
    pub fn add_command(&mut self, command: PooledRedisCommand) -> bool {
        if self.commands.len() < Self::DEFAULT_CAPACITY {
            self.commands.push(command);
            true
        } else {
            false // batch is full
        }
    }

    /// Returns `true` when the batch holds at least `max_size` commands.
    pub fn is_full(&self, max_size: usize) -> bool {
        self.commands.len() >= max_size
    }

    /// Returns `true` when the batch has existed for at least `timeout`.
    pub fn is_timeout(&self, timeout: Duration) -> bool {
        self.created_at.elapsed() >= timeout
    }
}

/// A batch of command results ready to be written back to a connection.
#[derive(Debug)]
pub struct BatchResponses {
    /// One result per command, in the same order as the input batch.
    pub responses: Vec<CommandResult>,
    /// Identifier of the connection the responses belong to.
    pub connection_id: u64,
    /// Identifier of the batch these responses answer.
    pub batch_id: u64,
    /// Instant the batch finished executing.
    pub completed_at: Instant,
}

impl BatchResponses {
    /// Bundles `responses` for `connection_id` / `batch_id`, stamping the
    /// completion time with `Instant::now()`.
    pub fn new(connection_id: u64, batch_id: u64, responses: Vec<CommandResult>) -> Self {
        let completed_at = Instant::now();
        Self {
            connection_id,
            batch_id,
            responses,
            completed_at,
        }
    }
}

/// High-performance batch processor: parses, executes, and writes back
/// commands in batches to amortize per-command overhead.
pub struct BatchProcessor {
    /// Tuning parameters (batch size, timeout, buffer size).
    config: BatchProcessorConfig,
    /// Pools of reusable command objects and I/O buffers.
    object_pools: Arc<GlobalObjectPools>,
    /// Dispatcher that executes commands against the storage layer.
    storage_dispatcher: Arc<std::sync::Mutex<SingleThreadCommandDispatcher>>,
    /// Monotonic counter used to assign batch IDs.
    batch_counter: std::sync::atomic::AtomicU64,
    /// Lock-free runtime statistics.
    stats: BatchProcessorStats,
}

/// Lock-free runtime statistics for the batch processor.
///
/// All fields are updated with relaxed atomics, so readings are advisory
/// and may be slightly stale under concurrent access.
#[derive(Debug, Default)]
pub struct BatchProcessorStats {
    /// Total number of batches executed.
    pub total_batches_processed: std::sync::atomic::AtomicU64,
    /// Total number of individual commands executed.
    pub total_commands_processed: std::sync::atomic::AtomicU64,
    /// Average number of commands per batch.
    pub avg_batch_size: std::sync::atomic::AtomicU64,
    /// Processing-time metric in microseconds (stores the most recent sample).
    pub avg_processing_time_us: std::sync::atomic::AtomicU64,
    /// Batch utilization rate (how full batches are relative to the configured size).
    pub batch_utilization_rate: std::sync::atomic::AtomicU64,
}

impl BatchProcessor {
    /// 创建批量处理器
    pub fn new(
        config: BatchProcessorConfig,
        storage_dispatcher: Arc<std::sync::Mutex<SingleThreadCommandDispatcher>>,
    ) -> Self {
        Self {
            config,
            object_pools: Arc::new(GlobalObjectPools::new()),
            storage_dispatcher,
            batch_counter: std::sync::atomic::AtomicU64::new(0),
            stats: BatchProcessorStats::default(),
        }
    }
    
    /// 批量解析命令 - 高性能版本
    pub fn parse_commands_batch(
        &self,
        stream: &mut dyn NetworkStream,
        connection_id: u64,
    ) -> Result<BatchCommands, String> {
        let batch_id = self.batch_counter.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        let mut batch = BatchCommands::new(connection_id, batch_id);
        let start_time = Instant::now();
        
        // 获取读缓冲区
        let mut buffer = self.object_pools.read_buffer_pool.get_buffer();
        buffer.resize(self.config.buffer_size, 0);
        
        // 批量读取数据
        let bytes_read = stream.read(&mut buffer).map_err(|e| e.to_string())?;
        if bytes_read == 0 {
            return Err("Connection closed".to_string());
        }
        
        buffer.truncate(bytes_read);
        
        // 批量解析命令
        let mut parser = crate::protocol::RespParser::new();
        parser.feed_data(&buffer);
        
        while !batch.is_full(self.config.batch_size) {
            match parser.parse_value() {
                Ok(Some(resp_value)) => {
                    // 尝试将RESP值转换为命令
                    match parser.resp_to_command(resp_value) {
                        Ok(protocol_command) => {
                            // 从对象池获取命令对象
                            let mut pooled_cmd = self.object_pools.command_pool.get_command();
                            pooled_cmd.reset(protocol_command.name().to_string(), protocol_command.to_args());
                            pooled_cmd.set_connection_info(connection_id, batch.commands.len() as u64);
                            
                            batch.add_command(pooled_cmd);
                        }
                        Err(e) => {
                            warn!("命令转换失败: {:?}", e);
                            break;
                        }
                    }
                }
                Ok(None) => {
                    // 数据不完整，等待更多数据
                    break;
                }
                Err(e) => {
                    warn!("命令解析失败: {:?}", e);
                    break;
                }
            }
        }
        
        // 归还缓冲区
        self.object_pools.read_buffer_pool.return_buffer(buffer);
        
        // 更新统计
        let processing_time = start_time.elapsed().as_micros() as u64;
        self.stats.avg_processing_time_us.store(processing_time, std::sync::atomic::Ordering::Relaxed);
        
        debug!("批量解析完成: {} 命令, 耗时: {}μs", batch.commands.len(), processing_time);
        
        Ok(batch)
    }
    
    /// 批量执行命令 - 高性能版本
    pub fn execute_commands_batch(&self, batch: BatchCommands) -> Result<BatchResponses, String> {
        let start_time = Instant::now();
        let mut responses = Vec::with_capacity(batch.commands.len());
        
        // 批量执行所有命令
        for command in &batch.commands {
            let protocol_command = command.to_command()
                .map_err(|e| format!("Failed to convert to protocol command: {e}"))?;
            
            // 使用存储层直接执行命令，返回CommandResult
            let result = {
                let dispatcher = self.storage_dispatcher.lock()
                    .map_err(|_| "Failed to lock storage dispatcher")?;
                // 模拟命令执行结果，实际项目中需要使用真正的执行逻辑
                match dispatcher.dispatch_command(protocol_command) {
                    Ok(_) => crate::storage::CommandResult::Ok,
                    Err(e) => crate::storage::CommandResult::Error(format!("Command execution failed: {e}")),
                }
            };
            
            responses.push(result);
        }
        
        // 归还命令对象到池中
        self.object_pools.command_pool.return_commands_batch(batch.commands);
        
        // 更新统计
        let batch_responses = BatchResponses::new(batch.connection_id, batch.batch_id, responses);
        
        let processing_time = start_time.elapsed().as_micros() as u64;
        self.stats.total_batches_processed.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        self.stats.total_commands_processed.fetch_add(batch_responses.responses.len() as u64, std::sync::atomic::Ordering::Relaxed);
        self.stats.avg_processing_time_us.store(processing_time, std::sync::atomic::Ordering::Relaxed);
        
        debug!("批量执行完成: {} 命令, 耗时: {}μs", batch_responses.responses.len(), processing_time);
        
        Ok(batch_responses)
    }
    
    /// 批量写回响应 - 高性能版本
    pub fn write_responses_batch(
        &self,
        responses: BatchResponses,
        stream: &mut dyn NetworkStream,
    ) -> Result<(), String> {
        let start_time = Instant::now();
        
        // 获取写缓冲区
        let mut buffer = self.object_pools.write_buffer_pool.get_buffer();
        
        // 批量序列化响应
        for response in &responses.responses {
            let response_bytes = response.to_resp_bytes();
            buffer.extend_from_slice(&response_bytes);
        }
        
        // 一次性写入所有响应
        stream.write(&buffer).map_err(|e| e.to_string())?;
        stream.flush().map_err(|e| e.to_string())?;
        
        // 归还缓冲区
        self.object_pools.write_buffer_pool.return_buffer(buffer);
        
        let processing_time = start_time.elapsed().as_micros() as u64;
        debug!("批量写回完成: {} 响应, 耗时: {}μs", responses.responses.len(), processing_time);
        
        Ok(())
    }
    
    /// 获取统计信息
    pub fn get_stats(&self) -> &BatchProcessorStats {
        &self.stats
    }
    
    /// 获取对象池统计
    pub fn get_pool_stats(&self) -> crate::core::object_pool::ObjectPoolStats {
        self.object_pools.get_stats()
    }
    
    /// 重置统计信息
    pub fn reset_stats(&self) {
        self.stats.total_batches_processed.store(0, std::sync::atomic::Ordering::Relaxed);
        self.stats.total_commands_processed.store(0, std::sync::atomic::Ordering::Relaxed);
        self.stats.avg_batch_size.store(0, std::sync::atomic::Ordering::Relaxed);
        self.stats.avg_processing_time_us.store(0, std::sync::atomic::Ordering::Relaxed);
        self.stats.batch_utilization_rate.store(0, std::sync::atomic::Ordering::Relaxed);
    }
}

/// 批量处理命令结果扩展
impl CommandResult {
    /// 转换为RESP字节数组
    pub fn to_resp_bytes(&self) -> Vec<u8> {
        match self {
            CommandResult::Ok => b"+OK\r\n".to_vec(),
            CommandResult::String(s) => format!("${}\r\n{}\r\n", s.len(), s).into_bytes(),
            CommandResult::Integer(i) => format!(":{i}\r\n").into_bytes(),
            CommandResult::Array(arr) => {
                let mut result = format!("*{}\r\n", arr.len()).into_bytes();
                for item in arr {
                    result.extend_from_slice(&item.to_resp_bytes());
                }
                result
            }
            CommandResult::Error(e) => format!("-ERR {e}\r\n").into_bytes(),
            CommandResult::Null => b"$-1\r\n".to_vec(),
            CommandResult::Boolean(b) => {
                if *b {
                    b":1\r\n".to_vec()
                } else {
                    b":0\r\n".to_vec()
                }
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    
    /// A fresh batch starts empty and accepts a command.
    #[test]
    fn test_batch_commands() {
        let mut batch = BatchCommands::new(1, 1);
        assert_eq!(batch.commands.len(), 0);
        
        let cmd = PooledRedisCommand::new();
        assert!(batch.add_command(cmd));
        assert_eq!(batch.commands.len(), 1);
    }
    
    /// Default configuration matches the documented values.
    #[test]
    fn test_batch_processor_config() {
        let config = BatchProcessorConfig::default();
        assert_eq!(config.batch_size, 100);
        assert_eq!(config.batch_timeout, Duration::from_millis(1));
        assert!(config.zero_copy);
    }
}