//! Event processor and concurrent processing pool.

use crate::dedup::DedupManager;
use crate::event::HttpEvent;
use crate::matcher::RuleMatcher;
use crate::storage::StorageBackend;
use crate::ScanError;
use chrono::Utc;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use tokio::sync::{broadcast, mpsc, Semaphore};
use flume;
use tokio::time::Duration;
use tracing::{debug, error, info, warn};

// Conditionally import the fingerprint-identification module.
#[cfg(feature = "fingerprint")]
use good_mitm_fingerprint::{FingerprintCoreEngine};
#[cfg(feature = "fingerprint")]
use once_cell::sync::Lazy;

// Global fingerprint-identification engine instance.
//
// Lazily initialized on first use. Rule loading is best-effort: a missing
// rules directory never prevents the engine from being constructed.
#[cfg(feature = "fingerprint")]
static FINGERPRINT_ENGINE: Lazy<FingerprintCoreEngine> = Lazy::new(|| {
    let mut engine = FingerprintCoreEngine::new();
    
    // Try to load rules from the default directories (errors are ignored).
    let _ = load_rules_from_directory(&mut engine, "./fingerprint_rules");
    let _ = load_rules_from_directory(&mut engine, "./rules/finger");
    
    // Specifically probe for finger.json.
    // NOTE(review): the file content is read but discarded here — only a log
    // line is emitted. Confirm whether finger.json rules are actually loaded
    // somewhere else, otherwise this announcement is misleading.
    let finger_json_path = "./rules/finger/finger.json";
    if std::path::Path::new(finger_json_path).exists() {
        match std::fs::read_to_string(finger_json_path) {
            Ok(_content) => {
                info!("Found finger.json, will load rules from it");
            },
            Err(e) => {
                warn!("Failed to read finger.json: {}", e);
            }
        }
    }
    
    engine
});

/// Loads every `*.yaml` / `*.yml` rule file found directly inside `dir_path`
/// into `engine`.
///
/// Best-effort: a missing or unreadable directory and individual rule-load
/// failures are tolerated — problems are logged and the function still
/// returns `Ok(())`.
#[cfg(feature = "fingerprint")]
fn load_rules_from_directory(engine: &mut FingerprintCoreEngine, dir_path: &str) -> Result<(), std::io::Error> {
    // A directory that cannot be read is not an error for best-effort loading.
    let entries = match std::fs::read_dir(dir_path) {
        Ok(entries) => entries,
        Err(_) => return Ok(()),
    };

    for path in entries.flatten().map(|entry| entry.path()) {
        let is_rule_file = path.is_file()
            && path
                .extension()
                .map_or(false, |ext| ext == "yaml" || ext == "yml");
        if !is_rule_file {
            continue;
        }
        if let Err(e) = engine.load_rule(&path.to_string_lossy()) {
            warn!("Failed to load rule from {}: {}", path.display(), e);
        }
    }

    Ok(())
}

/// Configuration for the event processor.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProcessorConfig {
    /// Number of worker tasks.
    pub worker_count: usize,
    /// Event queue capacity.
    /// NOTE(review): `EventProcessor::new` creates an *unbounded* channel,
    /// so this value is not enforced anywhere in this file — confirm intent.
    pub queue_size: usize,
    /// Maximum concurrent events processed per host.
    pub host_concurrency_limit: usize,
    /// Timeout for processing a single event (not applied in this file).
    pub processing_timeout: Duration,
    /// Whether fingerprint identification is enabled.
    pub enable_fingerprinting: bool,
    /// Number of events handled per batch.
    pub batch_size: usize,
    /// Maximum time to wait before flushing a partial batch.
    pub batch_interval: Duration,
}

/// Sensible defaults: one worker per logical CPU, a generous queue, and
/// sub-second batch flushing.
impl Default for ProcessorConfig {
    fn default() -> Self {
        let worker_count = num_cpus::get();
        Self {
            worker_count,
            queue_size: 10_000,
            batch_size: 10,
            batch_interval: Duration::from_millis(100),
            host_concurrency_limit: 5,
            processing_timeout: Duration::from_secs(30),
            enable_fingerprinting: true,
        }
    }
}

/// Snapshot of processing statistics.
///
/// All counters start at zero; `Default` is derived instead of the previous
/// hand-written all-zero implementation (identical behavior, less code).
#[derive(Debug, Clone, Default)]
pub struct ProcessingStats {
    /// Total number of events processed.
    pub total_processed: u64,
    /// Number of events processed successfully.
    pub success_count: u64,
    /// Number of events that failed processing.
    pub error_count: u64,
    /// Number of events skipped by deduplication.
    pub dedup_skipped: u64,
    /// Number of vulnerabilities found.
    pub vulnerabilities_found: u64,
    /// Average processing time in microseconds.
    pub avg_processing_time_us: u64,
    /// Current event queue length.
    pub queue_length: usize,
    /// Number of currently active workers.
    pub active_workers: usize,
}

/// Concurrent HTTP-event processor.
///
/// Events are submitted onto a flume MPMC channel and consumed by a pool of
/// worker tasks that deduplicate, fingerprint, rule-match, and persist them.
pub struct EventProcessor {
    /// Processor configuration.
    config: ProcessorConfig,
    /// Deduplication manager.
    dedup_manager: Arc<DedupManager>,
    /// Rule matcher.
    matcher: Arc<dyn RuleMatcher>,
    /// Storage backend for scan results.
    storage: Arc<dyn StorageBackend>,
    /// Event sender (flume supports MPMC).
    event_sender: flume::Sender<HttpEvent>,
    /// Event receiver (cloneable; each worker task gets a clone).
    event_receiver: flume::Receiver<HttpEvent>,
    /// Shared processing statistics.
    stats: Arc<tokio::sync::RwLock<ProcessingStats>>,
    /// Per-host concurrency limiters, keyed by host name.
    host_semaphores: Arc<tokio::sync::RwLock<std::collections::HashMap<String, Arc<Semaphore>>>>,
    /// Running flag shared with the worker tasks.
    is_running: Arc<AtomicBool>,
    /// Handles of spawned worker tasks.
    worker_handles: Arc<tokio::sync::Mutex<Vec<tokio::task::JoinHandle<()>>>>,
}

impl EventProcessor {
    /// Creates a new event processor.
    ///
    /// No workers are spawned here; call [`start`](Self::start) to begin
    /// processing.
    pub fn new(
        config: ProcessorConfig,
        dedup_manager: Arc<DedupManager>,
        matcher: Arc<dyn RuleMatcher>,
        storage: Arc<dyn StorageBackend>,
    ) -> Self {
        // Use an unbounded flume channel so the receiver can be cloned for
        // multiple consumers (MPMC).
        // NOTE(review): `config.queue_size` is ignored by an unbounded
        // channel — confirm whether `flume::bounded(config.queue_size)` was
        // intended (that would add backpressure on `submit_event`).
        let (event_sender, event_receiver) = flume::unbounded();

        info!("Creating EventProcessor with {} workers", config.worker_count);

        Self {
            config,
            dedup_manager,
            matcher,
            storage,
            event_sender,
            event_receiver,
            stats: Arc::new(tokio::sync::RwLock::new(ProcessingStats::default())),
            host_semaphores: Arc::new(tokio::sync::RwLock::new(std::collections::HashMap::new())),
            is_running: Arc::new(AtomicBool::new(false)),
            worker_handles: Arc::new(tokio::sync::Mutex::new(Vec::new())),
        }
    }

    /// Starts the event processor: spawns the configured number of worker
    /// tasks plus a periodic statistics reporter.
    ///
    /// # Errors
    ///
    /// Returns [`ScanError::EventError`] if the processor is already running.
    pub async fn start(&self) -> Result<(), ScanError> {
        // Atomically flip the running flag. A compare-exchange closes the
        // check-then-set race in the previous load/store pair, where two
        // concurrent `start` calls could both pass the check and spawn
        // duplicate worker pools.
        if self
            .is_running
            .compare_exchange(
                false,
                true,
                std::sync::atomic::Ordering::SeqCst,
                std::sync::atomic::Ordering::SeqCst,
            )
            .is_err()
        {
            return Err(ScanError::EventError("Processor already running".to_string()));
        }

        info!("Starting event processor with {} workers", self.config.worker_count);

        // Spawn the worker tasks and retain their handles for shutdown.
        let mut handles = self.worker_handles.lock().await;
        for i in 0..self.config.worker_count {
            let handle = self.start_worker(i).await?;
            handles.push(handle);
        }

        // Spawn the periodic statistics reporter alongside the workers.
        let stats_handle = self.start_stats_reporter().await?;
        handles.push(stats_handle);

        info!("Event processor started successfully");
        Ok(())
    }

    /// 停止事件处理器
    pub async fn stop(&self) -> Result<(), ScanError> {
        if !self.is_running.load(std::sync::atomic::Ordering::Relaxed) {
            return Ok(());
        }

        info!("Stopping event processor...");
        self.is_running.store(false, std::sync::atomic::Ordering::Relaxed);

        // 关闭事件通道
        drop(self.event_sender.clone());

        // 等待所有工作线程完成
        let mut handles = self.worker_handles.lock().await;
        while let Some(handle) = handles.pop() {
            if let Err(e) = handle.await {
                warn!("Error joining worker thread: {}", e);
            }
        }

        info!("Event processor stopped");
        Ok(())
    }

    /// Submits an event for asynchronous processing.
    ///
    /// # Errors
    ///
    /// Returns [`ScanError::EventError`] when the processor is not running
    /// or the event channel has been closed.
    pub async fn submit_event(&self, event: HttpEvent) -> Result<(), ScanError> {
        if !self.is_running.load(std::sync::atomic::Ordering::Relaxed) {
            return Err(ScanError::EventError("Processor not running".to_string()));
        }

        // The channel is unbounded, so this send never blocks; it only
        // fails if every receiver has been dropped.
        match self.event_sender.send(event) {
            Ok(()) => Ok(()),
            Err(e) => Err(ScanError::EventError(format!("Failed to send event: {}", e))),
        }
    }

    /// Spawns one worker task that drains the event channel and processes
    /// events in batches.
    ///
    /// A batch is flushed when it reaches `config.batch_size` or when
    /// `config.batch_interval` has elapsed since the last flush. The worker
    /// exits once the running flag is cleared (or the channel closes) and
    /// its pending batch has been drained.
    async fn start_worker(&self, worker_id: usize) -> Result<tokio::task::JoinHandle<()>, ScanError> {
        // Clone the receiver — flume channels are MPMC, so every worker can
        // consume from the same queue.
        let event_receiver = self.event_receiver.clone();

        let config = self.config.clone();
        let dedup_manager = self.dedup_manager.clone();
        let matcher = self.matcher.clone();
        let storage = self.storage.clone();
        let stats = self.stats.clone();
        let host_semaphores = self.host_semaphores.clone();
        let is_running = self.is_running.clone();

        let handle = tokio::spawn(async move {
            info!("Worker {} started", worker_id);

            let mut batch = Vec::with_capacity(config.batch_size);
            let mut last_flush = tokio::time::Instant::now();

            loop {
                // Exit once shutdown was requested and nothing is pending.
                if !is_running.load(std::sync::atomic::Ordering::Relaxed) && batch.is_empty() {
                    break;
                }

                // Wait for the next event, but wake at least every
                // `batch_interval` so partial batches get flushed and the
                // shutdown flag is re-checked even when the channel is idle.
                // (The previous bare `recv_async().await` blocked
                // indefinitely, which made `stop()` hang and starved the
                // batch-interval flush.)
                match tokio::time::timeout(config.batch_interval, event_receiver.recv_async()).await {
                    Ok(Ok(event)) => batch.push(event),
                    Ok(Err(_)) => {
                        // Channel closed: flush whatever is left, then exit
                        // (the old code busy-looped on the closed channel
                        // until the running flag was cleared).
                        if !batch.is_empty() {
                            Self::process_batch(
                                std::mem::take(&mut batch),
                                &dedup_manager,
                                &matcher,
                                &storage,
                                &stats,
                                &host_semaphores,
                                &config,
                                &is_running,
                            ).await;
                        }
                        break;
                    }
                    Err(_) => {
                        // Timed out with no event; fall through to the flush check.
                    }
                }

                // Flush when the batch is full or the interval has elapsed.
                let interval_elapsed = last_flush.elapsed() >= config.batch_interval;
                if !batch.is_empty() && (batch.len() >= config.batch_size || interval_elapsed) {
                    let current_batch = std::mem::take(&mut batch);
                    last_flush = tokio::time::Instant::now();

                    Self::process_batch(
                        current_batch,
                        &dedup_manager,
                        &matcher,
                        &storage,
                        &stats,
                        &host_semaphores,
                        &config,
                        &is_running,
                    ).await;
                }
            }

            info!("Worker {} stopped", worker_id);
        });

        Ok(handle)
    }

    /// Processes a batch of events sequentially.
    ///
    /// For every event this updates the shared statistics, enforces the
    /// per-host concurrency limit, and delegates to
    /// [`process_single_event`](Self::process_single_event). Finally the
    /// batch's elapsed wall time is folded into the running average.
    async fn process_batch(
        batch: Vec<HttpEvent>,
        dedup_manager: &Arc<DedupManager>,
        matcher: &Arc<dyn RuleMatcher>,
        storage: &Arc<dyn StorageBackend>,
        stats: &Arc<tokio::sync::RwLock<ProcessingStats>>,
        host_semaphores: &Arc<tokio::sync::RwLock<std::collections::HashMap<String, Arc<Semaphore>>>>,
        config: &ProcessorConfig,
        is_running: &Arc<AtomicBool>,
    ) {
        let start_time = std::time::Instant::now();

        for event in batch {
            // Stop early when shutdown was requested.
            if !is_running.load(std::sync::atomic::Ordering::Relaxed) {
                break;
            }

            // Count the event as processed.
            {
                let mut stats_guard = stats.write().await;
                stats_guard.total_processed += 1;
            }

            // Fetch (or lazily create) the per-host semaphore. The map is
            // shared across all workers, so the limit applies globally per host.
            let host = event.metadata.host.clone();
            let semaphore = {
                let mut semaphores = host_semaphores.write().await;
                semaphores.entry(host.clone()).or_insert_with(|| {
                    Arc::new(Semaphore::new(config.host_concurrency_limit))
                }).clone()
            };

            // Acquire a processing permit without waiting.
            // NOTE(review): events rejected here are dropped entirely —
            // logged only at debug level and not counted in any statistic
            // or retried. Confirm this best-effort drop is intended.
            let permit = match semaphore.try_acquire() {
                Ok(permit) => permit,
                Err(_) => {
                    // Could not get a permit: skip this event.
                    debug!("Skipping event for host {} due to concurrency limit", host);
                    continue;
                }
            };

            // Run the per-event pipeline while holding the host permit.
            match Self::process_single_event(&event, dedup_manager, matcher, storage, config).await {
                Ok((vuln_count, _fingerprint)) => {
                    // Record the success.
                    let mut stats_guard = stats.write().await;
                    stats_guard.success_count += 1;
                    stats_guard.vulnerabilities_found += vuln_count;

                    drop(permit); // Release the per-host permit.
                }
                Err(e) => {
                    error!("Error processing event: {}", e);
                    let mut stats_guard = stats.write().await;
                    stats_guard.error_count += 1;
                }
            }
        }

        // Fold this batch's elapsed time into the smoothed average
        // (simple halving average of old value and new sample).
        let processing_time = start_time.elapsed().as_micros() as u64;
        {
            let mut stats_guard = stats.write().await;
            stats_guard.avg_processing_time_us =
                (stats_guard.avg_processing_time_us + processing_time) / 2;
        }
    }

    /// Runs the full pipeline for one event: fingerprinting, dedup check,
    /// rule matching, result storage, and dedup bookkeeping.
    ///
    /// Returns the number of vulnerabilities found together with the
    /// fingerprint (if one was identified).
    async fn process_single_event(
        event: &HttpEvent,
        dedup_manager: &Arc<DedupManager>,
        matcher: &Arc<dyn RuleMatcher>,
        storage: &Arc<dyn StorageBackend>,
        config: &ProcessorConfig,
    ) -> Result<(u64, Option<String>), ScanError> {
        // Step 1: fingerprint identification (skipped when disabled).
        let fingerprint = if !config.enable_fingerprinting {
            None
        } else {
            Self::identify_fingerprint(event).await?
        };

        // Step 2: skip events we have already seen.
        let is_dup = dedup_manager.is_duplicate(event, fingerprint.clone()).await;
        if is_dup {
            debug!("[Fingerprint] Duplicate event detected for {} with fingerprint {:?}, skipping", 
                   event.metadata.host, fingerprint);
            return Ok((0, fingerprint));
        }

        // Step 3: evaluate the matching rules.
        let results = matcher.match_rules(event).await?;
        let vuln_count = results.len() as u64;

        // Step 4: persist any findings.
        if vuln_count > 0 {
            info!("[Vulnerability Scan] Found {} vulnerabilities for {}:{}", 
                  vuln_count, event.metadata.host, event.path.as_deref().unwrap_or(""));
            for result in results {
                info!("[Vulnerability Scan] Vulnerability found: {} (Severity: {}, Rule: {})", 
                      result.title, result.severity, result.rule_name);
                storage.store_scan_result(&result).await?;
            }
        }

        // Step 5: remember this event so future duplicates are skipped.
        dedup_manager.mark_processed(event, fingerprint.clone()).await?;

        Ok((vuln_count, fingerprint))
    }

    /// Identifies the technology fingerprint of the service behind `event`.
    ///
    /// With the `fingerprint` feature enabled the rule-based engine is
    /// consulted first, falling back to simple `Server` / `X-Powered-By`
    /// header inspection; without the feature only the header inspection is
    /// performed.
    ///
    /// Returns `Ok(None)` when nothing could be identified.
    async fn identify_fingerprint(event: &HttpEvent) -> Result<Option<String>, ScanError> {
        // Shared fallback: derive a basic fingerprint from well-known
        // response headers (previously duplicated in both cfg branches).
        fn basic_header_fingerprint(event: &HttpEvent) -> Option<String> {
            if let Some(server) = event.headers.get("server") {
                let fingerprint = format!("server:{}", server);
                info!("[Fingerprint] Basic header for {}: {}", event.metadata.host, fingerprint);
                Some(fingerprint)
            } else if let Some(powered_by) = event.headers.get("x-powered-by") {
                let fingerprint = format!("powered_by:{}", powered_by);
                info!("[Fingerprint] Basic header for {}: {}", event.metadata.host, fingerprint);
                Some(fingerprint)
            } else {
                debug!("[Fingerprint] No fingerprint identified for {}", event.metadata.host);
                None
            }
        }

        #[cfg(feature = "fingerprint")]
        {
            // Build the response view expected by the fingerprint engine.
            // `status_code` is stored as a string; fall back to 200 when it
            // is missing or unparsable.
            let status = event
                .status_code
                .as_ref()
                .and_then(|code_str| code_str.parse::<u16>().ok())
                .unwrap_or(200);
            // HttpEvent carries host + path rather than a full URL.
            let url = format!("http://{}{}", event.metadata.host, event.path.as_deref().unwrap_or(""));
            // HttpEvent exposes no response body; pass an empty one.
            let body = "";

            // Convert the headers into an http::HeaderMap, skipping any
            // name/value that is not valid HTTP. The previous
            // `unwrap_or(HeaderName::from_bytes(...).unwrap())` evaluated
            // the fallback eagerly and could panic on an invalid header
            // name or a non-ASCII header value.
            let mut http_headers = http::HeaderMap::new();
            for (key, value) in &event.headers {
                if let (Ok(name), Ok(val)) = (
                    http::header::HeaderName::from_bytes(key.as_bytes()),
                    http::header::HeaderValue::from_str(value),
                ) {
                    http_headers.insert(name, val);
                } else {
                    debug!("[Fingerprint] Skipping invalid header {:?} for {}", key, event.metadata.host);
                }
            }

            // Ask the global engine for matches.
            let response_info = FINGERPRINT_ENGINE.parse_response(
                status, 
                &http_headers, 
                body, 
                &url
            );
            let fingerprints = FINGERPRINT_ENGINE.identify_fingerprints(&response_info);

            if !fingerprints.is_empty() {
                // Format matches as "category:name[:vVersion]", comma-joined.
                let fingerprint_strings: Vec<_> = fingerprints
                    .iter()
                    .map(|fp| {
                        if fp.version.is_empty() {
                            format!("{}:{}", fp.category, fp.name)
                        } else {
                            format!("{}:{}:v{}", fp.category, fp.name, fp.version)
                        }
                    })
                    .collect();

                let fingerprint = fingerprint_strings.join(",");
                info!("[Fingerprint] Identified for {}: {}", event.metadata.host, fingerprint);
                Ok(Some(fingerprint))
            } else {
                // Engine found nothing: fall back to basic header inspection.
                Ok(basic_header_fingerprint(event))
            }
        }

        #[cfg(not(feature = "fingerprint"))]
        {
            // Without the `fingerprint` feature only header inspection is available.
            Ok(basic_header_fingerprint(event))
        }
    }

    /// Spawns a background task that logs a statistics summary every 30
    /// seconds while the processor is running.
    async fn start_stats_reporter(&self) -> Result<tokio::task::JoinHandle<()>, ScanError> {
        let stats = self.stats.clone();
        let is_running = self.is_running.clone();

        let handle = tokio::spawn(async move {
            let mut interval = tokio::time::interval(Duration::from_secs(30));

            loop {
                interval.tick().await;

                // Shut the reporter down together with the processor.
                if !is_running.load(std::sync::atomic::Ordering::Relaxed) {
                    break;
                }

                // Take a read lock just long enough to emit one summary line.
                let snapshot = stats.read().await;
                info!(
                    "Processing stats - Total: {}, Success: {}, Errors: {}, Vulns: {}, Avg time: {}μs",
                    snapshot.total_processed,
                    snapshot.success_count,
                    snapshot.error_count,
                    snapshot.vulnerabilities_found,
                    snapshot.avg_processing_time_us
                );
            }
        });

        Ok(handle)
    }

    /// Returns a snapshot (clone) of the current processing statistics.
    pub async fn get_stats(&self) -> ProcessingStats {
        self.stats.read().await.clone()
    }

    /// Returns `true` while the processor is running.
    pub fn is_running(&self) -> bool {
        self.is_running.load(std::sync::atomic::Ordering::Relaxed)
    }
}

/// Simple worker-task pool with broadcast-based shutdown.
pub struct WorkerPool {
    /// Handles of the spawned worker tasks.
    workers: Vec<tokio::task::JoinHandle<()>>,
    /// Dropping this sender closes every subscribed receiver, which is the
    /// shutdown signal the workers select on.
    shutdown_tx: broadcast::Sender<()>,
}

impl WorkerPool {
    /// Creates a pool of `worker_count` tasks, each running
    /// `worker_fn(index)` until it completes or a shutdown signal arrives.
    pub fn new<F, Fut>(worker_count: usize, worker_fn: F) -> Self
    where
        F: Fn(usize) -> Fut + Send + Sync + 'static + Clone,
        Fut: std::future::Future<Output = ()> + Send + 'static,
    {
        let (shutdown_tx, _) = broadcast::channel::<()>(worker_count + 1);

        let workers = (0..worker_count)
            .map(|i| {
                let worker_fn = worker_fn.clone();
                let mut shutdown_rx = shutdown_tx.subscribe();

                tokio::spawn(async move {
                    // Race the worker body against the shutdown signal.
                    tokio::select! {
                        _ = worker_fn(i) => {}
                        _ = shutdown_rx.recv() => {
                            debug!("Worker {} received shutdown signal", i);
                        }
                    }
                })
            })
            .collect();

        Self {
            workers,
            shutdown_tx,
        }
    }

    /// Shuts the pool down and waits for every worker to finish.
    pub async fn shutdown(self) {
        // Dropping the last sender closes the broadcast channel; each
        // worker's `recv()` then resolves (with a Closed error), which
        // completes its select arm and lets the task exit.
        drop(self.shutdown_tx);

        // Join every worker task.
        for worker in self.workers {
            if let Err(e) = worker.await {
                warn!("Error shutting down worker: {}", e);
            }
        }
    }
}