//! Active scan engine: drives a crawler and feeds its responses into the
//! passive-scan processing pipeline (dedup, rule matching, result storage).

use crate::config::ActiveScanConfig;
use crate::converter::ResponseConverter;
use crate::crawler::{CrawlerEngine, CrawlerResponse};
use anyhow::Result;
use good_mitm_passive_scan::{
    DedupConfig, DedupManager, EventProcessor, FileStorage, ProcessorConfig,
    RuleMatcher, ScanResult, StorageBackend,
};
use log::{error, info, warn};
use std::sync::Arc;
use chrono::Duration as ChronoDuration;
use tokio::sync::mpsc;

/// Active scan engine.
///
/// Owns the long-lived scan infrastructure (dedup manager, rule matcher,
/// storage backend) and lazily creates a crawler engine plus event processor
/// for each scan run.
pub struct ActiveScanEngine {
    /// Scan configuration.
    config: ActiveScanConfig,
    /// Crawler engine; `Some` only while a scan is in progress.
    crawler_engine: Option<Arc<CrawlerEngine>>,
    /// Event processor; `Some` once a scan has been started.
    event_processor: Option<Arc<EventProcessor>>,
    /// Deduplication manager shared with the event processor.
    dedup_manager: Arc<DedupManager>,
    /// Rule matcher used to detect findings in crawled responses.
    matcher: Arc<dyn RuleMatcher>,
    /// Storage backend where scan results are persisted.
    storage: Arc<dyn StorageBackend>,
}

impl ActiveScanEngine {
    /// Creates a new active scan engine.
    ///
    /// The dedup manager and file-backed storage are constructed eagerly;
    /// the crawler engine and event processor are created per scan inside
    /// [`Self::start_scan`], so both start out as `None`.
    ///
    /// # Errors
    ///
    /// Currently never fails; the `Result` return type leaves room for
    /// fallible initialization (e.g. storage setup) later.
    pub async fn new(
        config: ActiveScanConfig,
        matcher: Arc<dyn RuleMatcher>,
    ) -> Result<Self> {
        info!("Initializing active scan engine...");

        // Dedup manager: entries expire after the configured TTL, capped at
        // 100k entries, swept once a minute, with fingerprint-based
        // deduplication enabled.
        let dedup_config = DedupConfig {
            ttl: ChronoDuration::seconds(config.scan.dedup_ttl as i64),
            max_entries: 100_000,
            enable_fingerprint_dedup: true,
            cleanup_interval: ChronoDuration::minutes(1),
        };
        let dedup_manager = Arc::new(DedupManager::new(dedup_config));

        // File-backed result storage rooted at the configured output dir.
        let storage: Arc<dyn StorageBackend> = Arc::new(FileStorage::new(
            config.scan.output_dir.clone(),
            50 * 1024 * 1024, // 50MB max file size
        ));

        info!("Active scan engine initialized successfully");

        Ok(Self {
            config,
            crawler_engine: None,
            event_processor: None,
            dedup_manager,
            matcher,
            storage,
        })
    }

    /// Runs a full active scan against `target_url`, blocking until both the
    /// crawler and the event-processing pipeline have fully drained.
    ///
    /// Pipeline: crawler -> mpsc channel -> [`ResponseConverter`] ->
    /// event processor (dedup, rule matching, storage).
    ///
    /// # Errors
    ///
    /// Returns an error if the crawler engine cannot be constructed, the
    /// event processor fails to start or stop, or one of the spawned tasks
    /// panics (surfaced via `JoinHandle::await`).
    pub async fn start_scan(&mut self, target_url: &str) -> Result<ScanStats> {
        info!("Starting active scan for target: {}", target_url);

        // Channel carrying raw crawler responses to the forwarding task below.
        let (response_tx, mut response_rx) = mpsc::unbounded_channel::<CrawlerResponse>();

        // The crawler takes ownership of the sender half; the channel closes
        // only once every Arc to the crawler engine has been dropped.
        let crawler_engine = Arc::new(CrawlerEngine::new(
            self.config.crawler.clone(),
            response_tx,
        )?);
        self.crawler_engine = Some(crawler_engine.clone());

        // Event processor configuration.
        // EventProcessor supports true multi-worker concurrent processing
        // (backed by a flume MPMC channel).
        let processor_config = ProcessorConfig {
            worker_count: self.config.scan.worker_count, // configured worker count
            queue_size: self.config.scan.queue_size,
            host_concurrency_limit: 5,
            processing_timeout: std::time::Duration::from_secs(30),
            enable_fingerprinting: self.config.scan.enable_fingerprinting,
            batch_size: 10,
            batch_interval: std::time::Duration::from_millis(100),
        };

        // Create the event processor, wired to the shared dedup manager,
        // rule matcher, and storage backend.
        let event_processor = Arc::new(EventProcessor::new(
            processor_config,
            self.dedup_manager.clone(),
            self.matcher.clone(),
            self.storage.clone(),
        ));
        self.event_processor = Some(event_processor.clone());

        // Start the processor workers before any events are submitted.
        event_processor.start().await?;

        // Aggregated statistics returned to the caller.
        let mut stats = ScanStats::default();

        // Spawn the crawl; crawler errors are logged rather than propagated
        // so the pipeline below can still drain whatever was produced.
        let target_url_clone = target_url.to_string();
        let crawler_handle = tokio::spawn(async move {
            if let Err(e) = crawler_engine.crawl(&target_url_clone).await {
                error!("Crawler error: {}", e);
            }
        });

        // Forward crawler responses into the event processor. This task exits
        // when the channel's sender side has been fully dropped.
        let processor_handle = tokio::spawn(async move {
            while let Some(response) = response_rx.recv().await {
                // Convert the raw crawler response into an HttpEvent.
                let http_event = ResponseConverter::convert_to_http_event(response);

                // Hand the event to the processor; failures are non-fatal.
                if let Err(e) = event_processor.submit_event(http_event).await {
                    warn!("Failed to submit event to processor: {}", e);
                }
            }
        });

        // Wait for the crawl to finish; the spawned task's Arc clone of the
        // crawler engine is dropped when the task completes.
        crawler_handle.await?;

        // Drop our remaining Arc to the crawler engine. This should release
        // the last reference, dropping the channel sender and letting the
        // forwarding task above terminate.
        // NOTE(review): relies on CrawlerEngine owning the sender and on no
        // other Arc clones existing at this point — confirm against
        // CrawlerEngine's implementation.
        drop(self.crawler_engine.take());

        // Wait for the forwarding task to drain the channel and exit.
        processor_handle.await?;

        // Stop the event processor and collect its counters.
        if let Some(processor) = &self.event_processor {
            processor.stop().await?;

            // Map processor counters onto the public scan stats.
            // NOTE(review): `total_processed` counts processed events, which
            // may not map 1:1 to crawled pages — verify this naming.
            let processor_stats = processor.get_stats().await;
            stats.pages_crawled = processor_stats.total_processed;
            stats.vulnerabilities_found = processor_stats.vulnerabilities_found;
            stats.errors = processor_stats.error_count;
        }

        info!("Active scan completed for target: {}", target_url);
        info!("Stats: {:?}", stats);

        Ok(stats)
    }

    /// Stops any in-flight scan: shuts down the event processor (if one was
    /// started) and releases the crawler/processor handles.
    ///
    /// # Errors
    ///
    /// Propagates failures from `EventProcessor::stop`.
    pub async fn stop(&mut self) -> Result<()> {
        info!("Stopping active scan engine...");

        if let Some(processor) = &self.event_processor {
            processor.stop().await?;
        }

        self.crawler_engine = None;
        self.event_processor = None;

        info!("Active scan engine stopped");
        Ok(())
    }

    /// Returns collected scan results.
    ///
    /// Not yet implemented: this should read back from the storage backend
    /// once `StorageBackend` exposes a query interface; currently it always
    /// returns an empty list.
    pub async fn get_results(&self) -> Result<Vec<ScanResult>> {
        Ok(vec![])
    }
}

/// Aggregate statistics for one active scan run.
#[derive(Debug, Default, Clone)]
pub struct ScanStats {
    /// Number of pages crawled (populated from the event processor's
    /// `total_processed` counter).
    pub pages_crawled: u64,
    /// Number of vulnerabilities found.
    pub vulnerabilities_found: u64,
    /// Number of processing errors encountered.
    pub errors: u64,
}

#[cfg(test)]
mod tests {
    use super::*;

    /// A freshly-defaulted `ScanStats` must start with every counter at zero.
    #[test]
    fn test_scan_stats_default() {
        let ScanStats {
            pages_crawled,
            vulnerabilities_found,
            errors,
        } = ScanStats::default();
        assert_eq!((pages_crawled, vulnerabilities_found, errors), (0, 0, 0));
    }
}
