//! Storage backend implementations.

use crate::matcher::ScanResult;
use crate::ScanError;
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::PathBuf;
use tokio::fs;
// use tokio::io::AsyncWriteExt;
use tracing::{debug, info};

/// Storage backend interface.
///
/// Implementations persist [`ScanResult`]s and expose query, deletion,
/// statistics, and retention-cleanup operations.
#[async_trait]
pub trait StorageBackend: Send + Sync {
    /// Store a single scan result.
    async fn store_scan_result(&self, result: &ScanResult) -> Result<(), ScanError>;
    /// Fetch scan results with optional pagination (`limit` / `offset`).
    async fn get_scan_results(&self, limit: Option<usize>, offset: Option<usize>) -> Result<Vec<ScanResult>, ScanError>;
    /// Look up a single scan result by id; `None` if absent.
    async fn get_scan_result_by_id(&self, id: &str) -> Result<Option<ScanResult>, ScanError>;
    /// Delete a scan result by id; returns `true` if something was removed.
    async fn delete_scan_result(&self, id: &str) -> Result<bool, ScanError>;
    /// Aggregate statistics over the stored results.
    async fn get_stats(&self) -> Result<StorageStats, ScanError>;
    /// Remove results considered expired relative to the `before` cutoff;
    /// returns the number of results removed.
    async fn cleanup_expired(&self, before: DateTime<Utc>) -> Result<usize, ScanError>;
}

/// Aggregated statistics over stored scan results.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StorageStats {
    /// Total number of stored results.
    pub total_results: u64,
    /// Result counts keyed by severity name.
    pub by_severity: HashMap<String, u64>,
    /// Result counts keyed by rule type name.
    pub by_rule_type: HashMap<String, u64>,
    /// Result counts keyed by URL host.
    pub by_host: HashMap<String, u64>,
    /// Total storage size in bytes (0 for backends that do not track it).
    pub storage_size_bytes: u64,
    /// Timestamp of the oldest stored result, if any.
    pub earliest_result: Option<DateTime<Utc>>,
    /// Timestamp of the newest stored result, if any.
    pub latest_result: Option<DateTime<Utc>>,
}

/// In-memory storage backend.
///
/// Keeps results in a `HashMap` guarded by a tokio `RwLock`; nothing is
/// persisted across restarts.
pub struct MemoryStorage {
    // id -> result map; RwLock allows concurrent readers.
    results: tokio::sync::RwLock<std::collections::HashMap<String, ScanResult>>,
    // Cap on stored entries; the oldest entries are evicted past this.
    max_entries: usize,
}

impl MemoryStorage {
    /// Create a new in-memory store retaining at most `max_entries`
    /// results; the oldest results are evicted once the cap is exceeded.
    pub fn new(max_entries: usize) -> Self {
        Self {
            results: tokio::sync::RwLock::new(std::collections::HashMap::new()),
            max_entries,
        }
    }

    /// Evict the oldest entries until the map holds at most
    /// `max_entries` results. Returns the number of entries removed.
    async fn cleanup_if_needed(&self) -> Result<usize, ScanError> {
        let mut results = self.results.write().await;
        let current_size = results.len();

        // saturating_sub covers both "under the cap" (nothing to do)
        // and guards the subtraction in one expression.
        let to_remove = current_size.saturating_sub(self.max_entries);
        if to_remove == 0 {
            return Ok(0);
        }

        // Snapshot (id, timestamp) pairs and order oldest-first so the
        // oldest results are the ones evicted.
        let mut entries: Vec<(String, DateTime<Utc>)> = results
            .iter()
            .map(|(id, result)| (id.clone(), result.timestamp))
            .collect();
        // Unstable sort: faster, non-allocating, and stability is
        // irrelevant for an eviction order.
        entries.sort_unstable_by_key(|(_, timestamp)| *timestamp);

        let mut removed_count = 0;

        // Remove the `to_remove` oldest entries.
        for (id, _) in entries.into_iter().take(to_remove) {
            if results.remove(&id).is_some() {
                removed_count += 1;
            }
        }

        Ok(removed_count)
    }
}

#[async_trait]
impl StorageBackend for MemoryStorage {
    /// Insert (or replace, keyed on id) the result, then enforce the
    /// entry cap.
    async fn store_scan_result(&self, result: &ScanResult) -> Result<(), ScanError> {
        {
            let mut results = self.results.write().await;
            results.insert(result.id.clone(), result.clone());
        } // release the write lock before cleanup re-acquires it

        // Evict AFTER inserting: the previous order (cleanup, then
        // insert) left the map holding max_entries + 1 results in
        // steady state.
        self.cleanup_if_needed().await?;

        debug!("Stored scan result {} in memory", result.id);
        Ok(())
    }

    /// Return results newest-first, with optional offset/limit paging.
    async fn get_scan_results(&self, limit: Option<usize>, offset: Option<usize>) -> Result<Vec<ScanResult>, ScanError> {
        let results = self.results.read().await;
        let mut results_vec: Vec<_> = results.values().cloned().collect();

        // Newest first; unstable sort is fine, results with equal
        // timestamps have no defined relative order anyway.
        results_vec.sort_unstable_by(|a, b| b.timestamp.cmp(&a.timestamp));

        let offset = offset.unwrap_or(0);
        // No limit means "everything after the offset".
        let limit = limit.unwrap_or(usize::MAX);

        Ok(results_vec.into_iter().skip(offset).take(limit).collect())
    }

    /// Look up a result by id.
    async fn get_scan_result_by_id(&self, id: &str) -> Result<Option<ScanResult>, ScanError> {
        let results = self.results.read().await;
        Ok(results.get(id).cloned())
    }

    /// Remove a result by id; `true` if it was present.
    async fn delete_scan_result(&self, id: &str) -> Result<bool, ScanError> {
        let mut results = self.results.write().await;
        Ok(results.remove(id).is_some())
    }

    /// Aggregate counts and the covered time range over all results.
    async fn get_stats(&self) -> Result<StorageStats, ScanError> {
        let results = self.results.read().await;
        let mut by_severity = HashMap::new();
        let mut by_rule_type = HashMap::new();
        let mut by_host = HashMap::new();
        let mut earliest: Option<DateTime<Utc>> = None;
        let mut latest: Option<DateTime<Utc>> = None;

        for result in results.values() {
            // Severity / rule-type buckets keyed by Debug variant names.
            *by_severity.entry(format!("{:?}", result.severity)).or_insert(0) += 1;
            *by_rule_type.entry(format!("{:?}", result.rule_type)).or_insert(0) += 1;

            // Host bucket; unparsable or host-less URLs fall back to
            // "unknown". (Replaces the `if let Some(x) = y.ok()`
            // anti-pattern with a plain match on the Result.)
            let host = match url::Url::parse(&result.url) {
                Ok(url) => url.host_str().unwrap_or("unknown").to_string(),
                Err(_) => "unknown".to_string(),
            };
            *by_host.entry(host).or_insert(0) += 1;

            // Track the covered time range without unwrap().
            earliest = Some(earliest.map_or(result.timestamp, |e| e.min(result.timestamp)));
            latest = Some(latest.map_or(result.timestamp, |l| l.max(result.timestamp)));
        }

        Ok(StorageStats {
            total_results: results.len() as u64,
            by_severity,
            by_rule_type,
            by_host,
            storage_size_bytes: 0, // in-memory backend does not track size
            earliest_result: earliest,
            latest_result: latest,
        })
    }

    /// Drop every result strictly older than `before`.
    async fn cleanup_expired(&self, before: DateTime<Utc>) -> Result<usize, ScanError> {
        let mut results = self.results.write().await;
        let initial_size = results.len();

        // Keep timestamp >= before: "expired" means strictly older than
        // the cutoff, matching FileStorage::cleanup_expired. (The old
        // `> before` also dropped results exactly at the cutoff.)
        results.retain(|_, result| result.timestamp >= before);

        let removed_count = initial_size - results.len();
        if removed_count > 0 {
            info!("Cleaned up {} expired scan results", removed_count);
        }

        Ok(removed_count)
    }
}

/// File-based storage backend.
///
/// Each result is written to `<base_dir>/<id>.json`, with a single
/// `index.json` holding summary entries for listing and statistics.
pub struct FileStorage {
    base_dir: PathBuf,
    // Unused for now; kept commented out until size-based handling lands.
    // max_file_size: usize,
}

impl FileStorage {
    /// Create a new file storage rooted at `base_dir`.
    ///
    /// `_max_file_size` is accepted for API compatibility but currently
    /// unused (see the commented-out field on the struct).
    pub fn new<P: Into<PathBuf>>(base_dir: P, _max_file_size: usize) -> Self {
        Self {
            base_dir: base_dir.into(),
            // max_file_size,
        }
    }

    /// Path of the JSON file holding the result with the given id.
    fn get_result_file_path(&self, id: &str) -> PathBuf {
        self.base_dir.join(format!("{}.json", id))
    }

    /// Path of the index file.
    fn get_index_file_path(&self) -> PathBuf {
        self.base_dir.join("index.json")
    }

    /// Create the base directory (and parents) if missing.
    async fn ensure_dir_exists(&self) -> Result<(), ScanError> {
        fs::create_dir_all(&self.base_dir).await?;
        Ok(())
    }

    /// Read the index, returning an empty list when it does not exist yet.
    async fn read_index(&self) -> Result<Vec<IndexEntry>, ScanError> {
        let index_path = self.get_index_file_path();

        // Read directly and treat NotFound as "empty index": avoids the
        // exists()-then-read race and the blocking Path::exists() call
        // that the previous version made in async context.
        let content = match fs::read_to_string(&index_path).await {
            Ok(content) => content,
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(Vec::new()),
            Err(e) => return Err(e.into()),
        };
        let index: Vec<IndexEntry> = serde_json::from_str(&content)?;
        Ok(index)
    }

    /// Overwrite the index file with `entries` (pretty-printed JSON).
    async fn write_index(&self, entries: &[IndexEntry]) -> Result<(), ScanError> {
        let index_path = self.get_index_file_path();
        let content = serde_json::to_string_pretty(entries)?;

        fs::write(&index_path, content).await?;
        Ok(())
    }

    /// Insert (or replace) an index entry and rewrite the index,
    /// newest first.
    async fn add_index_entry(&self, entry: IndexEntry) -> Result<(), ScanError> {
        let mut entries = self.read_index().await?;

        // Drop any stale entry with the same id first: re-storing a
        // result previously left duplicate index rows behind.
        entries.retain(|existing| existing.id != entry.id);
        entries.push(entry);

        // Newest first, the order get_scan_results pages through.
        entries.sort_unstable_by(|a, b| b.timestamp.cmp(&a.timestamp));

        self.write_index(&entries).await?;
        Ok(())
    }
}

#[async_trait]
impl StorageBackend for FileStorage {
    async fn store_scan_result(&self, result: &ScanResult) -> Result<(), ScanError> {
        self.ensure_dir_exists().await?;

        // 存储结果文件
        let result_path = self.get_result_file_path(&result.id);
        let content = serde_json::to_string_pretty(result)?;
        fs::write(&result_path, content).await?;

        // 添加到索引
        let index_entry = IndexEntry {
            id: result.id.clone(),
            timestamp: result.timestamp,
            severity: format!("{:?}", result.severity),
            url: result.url.clone(),
            rule_name: result.rule_name.clone(),
        };
        self.add_index_entry(index_entry).await?;

        debug!("Stored scan result {} to file", result.id);
        Ok(())
    }

    async fn get_scan_results(&self, limit: Option<usize>, offset: Option<usize>) -> Result<Vec<ScanResult>, ScanError> {
        let entries = self.read_index().await?;
        let offset = offset.unwrap_or(0);
        let limit = limit.unwrap_or(entries.len());

        let mut results = Vec::new();

        for entry in entries.iter().skip(offset).take(limit) {
            let result_path = self.get_result_file_path(&entry.id);
            if result_path.exists() {
                let content = fs::read_to_string(&result_path).await?;
                let result: ScanResult = serde_json::from_str(&content)?;
                results.push(result);
            }
        }

        Ok(results)
    }

    async fn get_scan_result_by_id(&self, id: &str) -> Result<Option<ScanResult>, ScanError> {
        let result_path = self.get_result_file_path(id);

        if !result_path.exists() {
            return Ok(None);
        }

        let content = fs::read_to_string(&result_path).await?;
        let result: ScanResult = serde_json::from_str(&content)?;
        Ok(Some(result))
    }

    async fn delete_scan_result(&self, id: &str) -> Result<bool, ScanError> {
        let result_path = self.get_result_file_path(id);

        if !result_path.exists() {
            return Ok(false);
        }

        // 删除文件
        fs::remove_file(&result_path).await?;

        // 从索引中删除
        let mut entries = self.read_index().await?;
        entries.retain(|entry| entry.id != id);
        self.write_index(&entries).await?;

        Ok(true)
    }

    async fn get_stats(&self) -> Result<StorageStats, ScanError> {
        let entries = self.read_index().await?;
        let mut by_severity = HashMap::new();
        let by_rule_type = HashMap::new();
        let mut by_host = HashMap::new();
        let mut earliest = None;
        let mut latest = None;
        let mut total_size = 0u64;

        for entry in &entries {
            // 统计严重程度
            *by_severity.entry(entry.severity.clone()).or_insert(0) += 1;

            // 统计主机
            if let Ok(url) = url::Url::parse(&entry.url) {
                let host = url.host_str().unwrap_or("unknown").to_string();
                *by_host.entry(host).or_insert(0) += 1;
            }

            // 时间统计
            if earliest.is_none() || entry.timestamp < earliest.unwrap() {
                earliest = Some(entry.timestamp);
            }
            if latest.is_none() || entry.timestamp > latest.unwrap() {
                latest = Some(entry.timestamp);
            }

            // 文件大小
            let result_path = self.get_result_file_path(&entry.id);
            if let Ok(metadata) = fs::metadata(&result_path).await {
                total_size += metadata.len();
            }
        }

        Ok(StorageStats {
            total_results: entries.len() as u64,
            by_severity,
            by_rule_type,
            by_host,
            storage_size_bytes: total_size,
            earliest_result: earliest,
            latest_result: latest,
        })
    }

    async fn cleanup_expired(&self, before: DateTime<Utc>) -> Result<usize, ScanError> {
        let entries = self.read_index().await?;
        let mut to_remove = Vec::new();

        for entry in &entries {
            if entry.timestamp < before {
                to_remove.push(entry.id.clone());
            }
        }

        let mut removed_count = 0;
        for id in to_remove {
            if self.delete_scan_result(&id).await? {
                removed_count += 1;
            }
        }

        if removed_count > 0 {
            info!("Cleaned up {} expired scan results", removed_count);
        }

        Ok(removed_count)
    }
}

/// Summary entry in the file-storage index (`index.json`).
///
/// Carries just enough of a [`ScanResult`] to list and aggregate
/// without opening every per-result file.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IndexEntry {
    // Result id; also the stem of the per-result JSON file name.
    pub id: String,
    // Scan timestamp, used for ordering and expiry.
    pub timestamp: DateTime<Utc>,
    // Debug-formatted severity name.
    pub severity: String,
    // Scanned URL; stats derive hosts from it.
    pub url: String,
    // Name of the rule that produced the result.
    pub rule_name: String,
}

/// SQLite storage backend (requires the `database` feature).
#[cfg(feature = "database")]
pub struct SqliteStorage {
    // SQLite connection pool used by all queries below.
    pool: sqlx::SqlitePool,
}

#[cfg(feature = "database")]
impl SqliteStorage {
    /// Open (or create) the SQLite database at `database_url` and run
    /// the bundled migrations.
    pub async fn new(database_url: &str) -> Result<Self, ScanError> {
        let pool = sqlx::SqlitePool::connect(database_url).await?;

        // Schema is managed by the migration files in ./migrations.
        sqlx::migrate!("./migrations").run(&pool).await?;

        Ok(Self { pool })
    }

    /// Create the scan_results table and its indexes directly.
    ///
    /// NOTE(review): new() already runs migrations and nothing visible
    /// in this file calls this — confirm whether it is still needed.
    async fn init_tables(&self) -> Result<(), ScanError> {
        // "references" is a reserved word in SQLite, so the column name
        // must be quoted; unquoted it is a syntax error at prepare time.
        sqlx::query(
            r#"
            CREATE TABLE IF NOT EXISTS scan_results (
                id TEXT PRIMARY KEY,
                rule_name TEXT NOT NULL,
                rule_type TEXT NOT NULL,
                severity TEXT NOT NULL,
                title TEXT NOT NULL,
                description TEXT,
                url TEXT NOT NULL,
                method TEXT,
                status_code INTEGER,
                confidence INTEGER,
                timestamp DATETIME NOT NULL,
                fingerprint TEXT,
                tags TEXT,
                evidence TEXT,
                "references" TEXT,
                remediation TEXT,
                created_at DATETIME DEFAULT CURRENT_TIMESTAMP
            )
            "#,
        )
        .execute(&self.pool)
        .await?;

        // Execute each index statement separately: a prepared statement
        // covers a single SQL statement.
        for ddl in [
            "CREATE INDEX IF NOT EXISTS idx_scan_results_timestamp ON scan_results(timestamp)",
            "CREATE INDEX IF NOT EXISTS idx_scan_results_severity ON scan_results(severity)",
            "CREATE INDEX IF NOT EXISTS idx_scan_results_url ON scan_results(url)",
        ] {
            sqlx::query(ddl).execute(&self.pool).await?;
        }

        Ok(())
    }
}

#[cfg(feature = "database")]
#[async_trait]
impl StorageBackend for SqliteStorage {
    /// Insert or replace (keyed on id) a scan result row.
    async fn store_scan_result(&self, result: &ScanResult) -> Result<(), ScanError> {
        // List-valued fields are serialized to JSON text columns.
        let tags_json = serde_json::to_string(&result.tags)?;
        let evidence_json = serde_json::to_string(&result.evidence)?;
        let references_json = serde_json::to_string(&result.references)?;

        // "references" is a reserved word in SQLite; it must be quoted
        // or the statement fails to prepare.
        sqlx::query(
            r#"
            INSERT OR REPLACE INTO scan_results (
                id, rule_name, rule_type, severity, title, description, url,
                method, status_code, confidence, timestamp, fingerprint, tags,
                evidence, "references", remediation
            ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            "#,
        )
        .bind(&result.id)
        .bind(&result.rule_name)
        // Enum columns hold the Debug-formatted variant names.
        .bind(format!("{:?}", result.rule_type))
        .bind(format!("{:?}", result.severity))
        .bind(&result.title)
        .bind(&result.description)
        .bind(&result.url)
        .bind(&result.method)
        .bind(&result.status_code)
        .bind(result.confidence as i32)
        .bind(result.timestamp)
        .bind(&result.fingerprint)
        .bind(tags_json)
        .bind(evidence_json)
        .bind(references_json)
        .bind(&result.remediation)
        .execute(&self.pool)
        .await?;

        Ok(())
    }

    /// Fetch results newest-first with SQL-side pagination.
    async fn get_scan_results(&self, limit: Option<usize>, offset: Option<usize>) -> Result<Vec<ScanResult>, ScanError> {
        use sqlx::Row;

        let query = "SELECT * FROM scan_results ORDER BY timestamp DESC LIMIT ? OFFSET ?";
        // Default page size of 100 when no limit is given.
        let limit = limit.unwrap_or(100) as i64;
        let offset = offset.unwrap_or(0) as i64;

        let rows = sqlx::query(query)
            .bind(limit)
            .bind(offset)
            .fetch_all(&self.pool)
            .await?;

        let mut results = Vec::new();
        for row in rows {
            // JSON-encoded list columns; malformed data degrades to empty.
            let tags: Vec<String> = serde_json::from_str(row.get("tags")).unwrap_or_default();
            let evidence: Vec<String> = serde_json::from_str(row.get("evidence")).unwrap_or_default();
            let references: Vec<String> = serde_json::from_str(row.get("references")).unwrap_or_default();

            // The enum columns store Debug-formatted variant names (see
            // store_scan_result), so wrap them in quotes to parse as
            // JSON strings — the previous bare from_str could never
            // succeed and always hit the fallback. Assumes serde variant
            // names match the Debug names — TODO confirm against the
            // RuleType/Severity derives.
            let rule_type = serde_json::from_str(&format!("\"{}\"", row.get::<String, _>("rule_type")))
                .unwrap_or(crate::matcher::RuleType::Custom);
            let severity = serde_json::from_str(&format!("\"{}\"", row.get::<String, _>("severity")))
                .unwrap_or(crate::matcher::Severity::Info);

            let result = ScanResult {
                id: row.get("id"),
                rule_name: row.get("rule_name"),
                rule_type,
                severity,
                title: row.get("title"),
                description: row.get("description"),
                evidence,
                url: row.get("url"),
                method: row.get("method"),
                status_code: row.get("status_code"),
                // Match positions / extracted params are not persisted.
                match_positions: Vec::new(),
                extracted_params: std::collections::HashMap::new(),
                // Clamp instead of a bare `as u8` so out-of-range values
                // cannot wrap around.
                confidence: row.get::<i32, _>("confidence").clamp(0, u8::MAX as i32) as u8,
                timestamp: row.get("timestamp"),
                fingerprint: row.get("fingerprint"),
                tags,
                references,
                remediation: row.get("remediation"),
            };
            results.push(result);
        }

        Ok(results)
    }

    /// Fetch a single result by id.
    ///
    /// The previous version always returned `Ok(None)` even when the
    /// row existed; it now materializes the row like get_scan_results.
    async fn get_scan_result_by_id(&self, id: &str) -> Result<Option<ScanResult>, ScanError> {
        use sqlx::Row;

        let row = sqlx::query("SELECT * FROM scan_results WHERE id = ?")
            .bind(id)
            .fetch_optional(&self.pool)
            .await?;

        let row = match row {
            Some(row) => row,
            None => return Ok(None),
        };

        let tags: Vec<String> = serde_json::from_str(row.get("tags")).unwrap_or_default();
        let evidence: Vec<String> = serde_json::from_str(row.get("evidence")).unwrap_or_default();
        let references: Vec<String> = serde_json::from_str(row.get("references")).unwrap_or_default();

        // Same Debug-name round-trip as get_scan_results.
        let rule_type = serde_json::from_str(&format!("\"{}\"", row.get::<String, _>("rule_type")))
            .unwrap_or(crate::matcher::RuleType::Custom);
        let severity = serde_json::from_str(&format!("\"{}\"", row.get::<String, _>("severity")))
            .unwrap_or(crate::matcher::Severity::Info);

        Ok(Some(ScanResult {
            id: row.get("id"),
            rule_name: row.get("rule_name"),
            rule_type,
            severity,
            title: row.get("title"),
            description: row.get("description"),
            evidence,
            url: row.get("url"),
            method: row.get("method"),
            status_code: row.get("status_code"),
            match_positions: Vec::new(),
            extracted_params: std::collections::HashMap::new(),
            confidence: row.get::<i32, _>("confidence").clamp(0, u8::MAX as i32) as u8,
            timestamp: row.get("timestamp"),
            fingerprint: row.get("fingerprint"),
            tags,
            references,
            remediation: row.get("remediation"),
        }))
    }

    /// Delete a row by id; `true` if a row was removed.
    async fn delete_scan_result(&self, id: &str) -> Result<bool, ScanError> {
        let result = sqlx::query("DELETE FROM scan_results WHERE id = ?")
            .bind(id)
            .execute(&self.pool)
            .await?;

        Ok(result.rows_affected() > 0)
    }

    /// Total-count statistics only; the per-bucket maps stay empty.
    ///
    /// NOTE(review): severity/rule-type/host breakdowns and the time
    /// range would need GROUP BY / MIN / MAX queries — still TODO.
    async fn get_stats(&self) -> Result<StorageStats, ScanError> {
        use sqlx::Row;

        let row = sqlx::query("SELECT COUNT(*) as count FROM scan_results")
            .fetch_one(&self.pool)
            .await?;

        let total_results = row.get::<i64, _>("count") as u64;

        Ok(StorageStats {
            total_results,
            by_severity: std::collections::HashMap::new(),
            by_rule_type: std::collections::HashMap::new(),
            by_host: std::collections::HashMap::new(),
            storage_size_bytes: 0,
            earliest_result: None,
            latest_result: None,
        })
    }

    /// Delete every row strictly older than `before`.
    async fn cleanup_expired(&self, before: DateTime<Utc>) -> Result<usize, ScanError> {
        let result = sqlx::query("DELETE FROM scan_results WHERE timestamp < ?")
            .bind(before)
            .execute(&self.pool)
            .await?;

        Ok(result.rows_affected() as usize)
    }
}