//! High-performance event dispatcher implementation.
//!
//! Built on mature high-performance queue libraries (crossbeam + flume) to avoid lock contention.

use crate::event::{DynamicEvent, DynamicEventHandler, EventResult};
// use crossbeam::queue::SegQueue;  // commented out: unused import
use flume::{Sender, Receiver};
use fxhash::FxHashMap;
use std::sync::{Arc, RwLock};
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::thread;
use std::time::{Duration, Instant};

/// 高性能分发器配置
#[derive(Debug, Clone)]
pub struct HighPerformanceDispatcherConfig {
    /// 工作线程数
    pub worker_count: usize,
    /// 通道容量（0 = 无界）
    pub channel_capacity: usize,
    /// 工作线程空闲时的睡眠时间（微秒）
    pub idle_sleep_micros: u64,
}

impl Default for HighPerformanceDispatcherConfig {
    fn default() -> Self {
        Self {
            worker_count: num_cpus::get().max(2),
            channel_capacity: 0,  // 无界通道
            idle_sleep_micros: 100,  // 100微秒
        }
    }
}

/// 统计信息
#[derive(Debug, Default)]
pub struct HighPerformanceDispatcherStats {
    pub total_events: AtomicU64,
    pub processed_events: AtomicU64,
    pub failed_events: AtomicU64,
    pub avg_processing_time_ns: AtomicU64,
    pub queue_size: AtomicU64,
}

impl HighPerformanceDispatcherStats {
    pub fn get_qps(&self, duration: Duration) -> f64 {
        let processed = self.processed_events.load(Ordering::Relaxed) as f64;
        processed / duration.as_secs_f64()
    }
}

/// 基于Flume的高性能事件分发器
/// 
/// 特性：
/// 1. 使用flume::unbounded()或bounded()通道 - 业界最快的MPMC通道
/// 2. 支持多个工作线程并行处理
/// 3. 零锁竞争设计
/// 4. 内置统计和监控
pub struct HighPerformanceEventDispatcher {
    config: HighPerformanceDispatcherConfig,
    sender: Sender<DynamicEvent>,
    receiver: Receiver<DynamicEvent>,
    handlers: Arc<RwLock<FxHashMap<String, Vec<Arc<dyn DynamicEventHandler>>>>>,
    running: Arc<AtomicBool>,
    worker_handles: Vec<thread::JoinHandle<()>>,
    stats: Arc<HighPerformanceDispatcherStats>,
    start_time: Instant,
}

impl HighPerformanceEventDispatcher {
    /// 创建新的高性能分发器
    pub fn new(config: HighPerformanceDispatcherConfig) -> Self {
        let (sender, receiver) = if config.channel_capacity == 0 {
            flume::unbounded()  // 无界通道，最高性能
        } else {
            flume::bounded(config.channel_capacity)  // 有界通道，控制内存
        };

        Self {
            config,
            sender,
            receiver,
            handlers: Arc::new(RwLock::new(FxHashMap::default())),
            running: Arc::new(AtomicBool::new(false)),
            worker_handles: Vec::new(),
            stats: Arc::new(HighPerformanceDispatcherStats::default()),
            start_time: Instant::now(),
        }
    }

    /// 注册事件处理器
    pub fn register_handler(&mut self, event_type: &str, handler: Arc<dyn DynamicEventHandler>) {
        let mut handlers = self.handlers.write().unwrap();
        handlers.entry(event_type.to_string())
            .or_insert_with(Vec::new)
            .push(handler);
    }

    /// 获取发送器（用于发布事件）
    pub fn get_sender(&self) -> HighPerformanceEventSender {
        HighPerformanceEventSender {
            sender: self.sender.clone(),
            stats: self.stats.clone(),
        }
    }

    /// 启动分发器
    pub fn start(&mut self) {
        if self.running.load(Ordering::Relaxed) {
            return;
        }

        self.running.store(true, Ordering::Relaxed);

        // 启动工作线程
        for worker_id in 0..self.config.worker_count {
            let receiver = self.receiver.clone();
            let handlers = self.handlers.clone();
            let running = self.running.clone();
            let stats = self.stats.clone();
            let idle_sleep = Duration::from_micros(self.config.idle_sleep_micros);

            let handle = thread::Builder::new()
                .name(format!("hp-worker-{}", worker_id))
                .spawn(move || {
                    Self::worker_loop(receiver, handlers, running, stats, idle_sleep, worker_id);
                })
                .expect("Failed to spawn worker thread");

            self.worker_handles.push(handle);
        }

        println!("🚀 HighPerformanceEventDispatcher started with {} workers", 
                 self.config.worker_count);
    }

    /// 停止分发器
    pub fn stop(&mut self) {
        if !self.running.load(Ordering::Relaxed) {
            return;
        }

        self.running.store(false, Ordering::Relaxed);

        // 等待所有工作线程完成
        while let Some(handle) = self.worker_handles.pop() {
            let _ = handle.join();
        }

        println!("🛑 HighPerformanceEventDispatcher stopped");
    }

    /// 工作线程主循环
    fn worker_loop(
        receiver: Receiver<DynamicEvent>,
        handlers: Arc<RwLock<FxHashMap<String, Vec<Arc<dyn DynamicEventHandler>>>>>,
        running: Arc<AtomicBool>,
        stats: Arc<HighPerformanceDispatcherStats>,
        idle_sleep: Duration,
        worker_id: usize,
    ) {
        let mut processed_count = 0u64;
        let mut last_report = Instant::now();

        while running.load(Ordering::Relaxed) {
            match receiver.try_recv() {
                Ok(event) => {
                    let start_time = Instant::now();
                    let event_type = event.event_type();

                    // 查找并执行处理器
                    if let Ok(handlers_guard) = handlers.read() {
                        if let Some(handler_list) = handlers_guard.get(event_type) {
                            for handler in handler_list {
                                match handler.handle_dynamic(&event) {
                                    EventResult::Success => {
                                        stats.processed_events.fetch_add(1, Ordering::Relaxed);
                                    }
                                    EventResult::Failed(_) => {
                                        stats.failed_events.fetch_add(1, Ordering::Relaxed);
                                    }
                                    EventResult::Skipped => {
                                        // 跳过的事件，不计入统计
                                    }
                                    EventResult::Retry(_) => {
                                        // 需要重试的事件，记为失败
                                        stats.failed_events.fetch_add(1, Ordering::Relaxed);
                                    }
                                }
                                // 只处理第一个匹配的处理器（Redis单线程模式）
                                break;
                            }
                        }
                    }

                    // 更新处理时间统计
                    let processing_time = start_time.elapsed().as_nanos() as u64;
                    stats.avg_processing_time_ns.store(processing_time, Ordering::Relaxed);

                    processed_count += 1;

                    // 定期报告（每个worker每10秒报告一次）
                    if processed_count % 100000 == 0 && worker_id == 0 {
                        let elapsed = last_report.elapsed();
                        let qps = 100000.0 / elapsed.as_secs_f64();
                        println!("🏃 Worker-{}: processed 100K events, QPS: {:.0}", worker_id, qps);
                        last_report = Instant::now();
                    }
                }
                Err(flume::TryRecvError::Empty) => {
                    // 队列为空，短暂休眠避免CPU空转
                    thread::sleep(idle_sleep);
                }
                Err(flume::TryRecvError::Disconnected) => {
                    // 通道已关闭，退出循环
                    break;
                }
            }
        }

        println!("🏁 Worker-{} processed {} events and exited", worker_id, processed_count);
    }

    /// 获取统计信息
    pub fn get_stats(&self) -> HighPerformanceDispatcherStats {
        HighPerformanceDispatcherStats {
            total_events: AtomicU64::new(self.stats.total_events.load(Ordering::Relaxed)),
            processed_events: AtomicU64::new(self.stats.processed_events.load(Ordering::Relaxed)),
            failed_events: AtomicU64::new(self.stats.failed_events.load(Ordering::Relaxed)),
            avg_processing_time_ns: AtomicU64::new(self.stats.avg_processing_time_ns.load(Ordering::Relaxed)),
            queue_size: AtomicU64::new(self.receiver.len() as u64),
        }
    }

    /// 获取当前QPS
    pub fn get_current_qps(&self) -> f64 {
        let elapsed = self.start_time.elapsed();
        self.stats.get_qps(elapsed)
    }
}

impl Drop for HighPerformanceEventDispatcher {
    fn drop(&mut self) {
        self.stop();
    }
}

/// 高性能事件发送器
#[derive(Clone)]
pub struct HighPerformanceEventSender {
    sender: Sender<DynamicEvent>,
    stats: Arc<HighPerformanceDispatcherStats>,
}

impl HighPerformanceEventSender {
    /// 发布事件（非阻塞）
    pub fn publish(&self, event: DynamicEvent) -> Result<(), DynamicEvent> {
        self.stats.total_events.fetch_add(1, Ordering::Relaxed);
        
        match self.sender.try_send(event) {
            Ok(()) => {
                self.stats.queue_size.store(self.sender.len() as u64, Ordering::Relaxed);
                Ok(())
            }
            Err(flume::TrySendError::Full(event)) => Err(event),
            Err(flume::TrySendError::Disconnected(event)) => Err(event),
        }
    }

    /// 发布事件（阻塞）
    pub fn publish_blocking(&self, event: DynamicEvent) -> Result<(), DynamicEvent> {
        self.stats.total_events.fetch_add(1, Ordering::Relaxed);
        
        match self.sender.send(event) {
            Ok(()) => {
                self.stats.queue_size.store(self.sender.len() as u64, Ordering::Relaxed);
                Ok(())
            }
            Err(flume::SendError(event)) => Err(event),
        }
    }

    /// 获取队列长度
    pub fn queue_len(&self) -> usize {
        self.sender.len()
    }

    /// 检查是否已断开连接
    pub fn is_disconnected(&self) -> bool {
        self.sender.is_disconnected()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::event::{Event, EventPayload};
    use std::sync::Arc;
    use std::time::Duration;

    #[derive(Debug)]
    struct TestEvent {
        data: String,
    }

    impl EventPayload for TestEvent {
        fn payload_type(&self) -> &'static str {
            "test_event"
        }

        fn serialize(&self) -> Result<Vec<u8>, Box<dyn std::error::Error + Send + Sync>> {
            Ok(self.data.as_bytes().to_vec())
        }

        fn size(&self) -> usize {
            self.data.len()
        }
    }

    struct TestHandler;

    impl DynamicEventHandler for TestHandler {
        fn handle_dynamic(&self, _event: &Event<Box<dyn EventPayload>>) -> EventResult {
            EventResult::Success
        }

        fn name(&self) -> &str {
            "test_handler"
        }

        fn can_handle(&self, event_type: &str) -> bool {
            event_type == "test_event"
        }
    }

    #[test]
    fn test_high_performance_dispatcher() {
        let config = HighPerformanceDispatcherConfig {
            worker_count: 2,
            channel_capacity: 1000,
            idle_sleep_micros: 10,
        };

        let mut dispatcher = HighPerformanceEventDispatcher::new(config);
        let handler = Arc::new(TestHandler);
        
        dispatcher.register_handler("test_event", handler);
        dispatcher.start();

        let sender = dispatcher.get_sender();

        // 发送测试事件
        for i in 0..1000 {
            let event = Event::new(
                "test_event",
                Box::new(TestEvent { 
                    data: format!("test_data_{}", i) 
                }) as Box<dyn EventPayload>
            );
            sender.publish(event).unwrap();
        }

        // 等待处理完成
        thread::sleep(Duration::from_millis(100));

        let stats = dispatcher.get_stats();
        assert!(stats.processed_events.load(Ordering::Relaxed) > 0);

        dispatcher.stop();
    }
}