use async_trait::async_trait;
use rustcloud_core::{ServiceError, ServiceResult};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::io::AsyncWriteExt;
use tokio::sync::RwLock;

/// Log severity levels, ordered from least (`Trace`) to most severe (`Fatal`).
///
/// The explicit discriminants make the derived `PartialOrd`/`Ord` follow
/// severity order, which `LogFilter::matches` relies on for `min_level`.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum LogLevel {
    /// Most verbose level (tracing).
    Trace = 0,
    /// Debugging information.
    Debug = 1,
    /// Normal operational messages.
    Info = 2,
    /// Unexpected but recoverable conditions.
    Warn = 3,
    /// Failed operations.
    Error = 4,
    /// Unrecoverable failures (most severe).
    Fatal = 5,
}

impl LogLevel {
    /// Parses a level from a string, case-insensitively.
    ///
    /// Accepts the aliases "warning" (for `Warn`) and "panic" (for `Fatal`).
    /// Any unrecognized input falls back to `Info` rather than failing,
    /// which is why this is an inherent method instead of `std::str::FromStr`
    /// (that trait would require a `Result`).
    pub fn from_str(s: &str) -> Self {
        match s.to_lowercase().as_str() {
            "trace" => LogLevel::Trace,
            "debug" => LogLevel::Debug,
            "info" => LogLevel::Info,
            "warn" | "warning" => LogLevel::Warn,
            "error" => LogLevel::Error,
            "fatal" | "panic" => LogLevel::Fatal,
            _ => LogLevel::Info,
        }
    }

    /// Returns the canonical upper-case name without allocating.
    pub fn as_str(&self) -> &'static str {
        match self {
            LogLevel::Trace => "TRACE",
            LogLevel::Debug => "DEBUG",
            LogLevel::Info => "INFO",
            LogLevel::Warn => "WARN",
            LogLevel::Error => "ERROR",
            LogLevel::Fatal => "FATAL",
        }
    }
}

/// Formatting with `{}` — and therefore `.to_string()` via the `ToString`
/// blanket impl — yields the same upper-case name the former inherent
/// `to_string` method produced, so existing call sites are unaffected.
impl std::fmt::Display for LogLevel {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.as_str())
    }
}

/// A single structured log record.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LogEntry {
    /// Event time as microseconds since the Unix epoch.
    pub timestamp: u64,
    /// Severity of the entry.
    pub level: LogLevel,
    /// Human-readable message text.
    pub message: String,
    /// Name of the emitting service.
    pub service_name: String,
    /// Identifier of the emitting service instance.
    pub instance_id: String,
    /// Distributed-tracing trace id, when the entry belongs to a trace.
    pub trace_id: Option<String>,
    /// Distributed-tracing span id, when known.
    pub span_id: Option<String>,
    /// Arbitrary structured payload fields (JSON values).
    pub fields: HashMap<String, Value>,
    /// Free-form string tags, used by `LogFilter` for matching.
    pub tags: HashMap<String, String>,
    /// Source-code location the entry originated from.
    pub source: LogSource,
}

impl LogEntry {
    /// Builds an entry stamped with the current wall-clock time (epoch
    /// microseconds) and otherwise-empty trace/field/tag/source data.
    pub fn new(
        level: LogLevel,
        message: String,
        service_name: String,
        instance_id: String,
    ) -> Self {
        let now_micros = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_micros() as u64;

        Self {
            timestamp: now_micros,
            level,
            message,
            service_name,
            instance_id,
            trace_id: None,
            span_id: None,
            fields: HashMap::new(),
            tags: HashMap::new(),
            source: LogSource::default(),
        }
    }

    /// Attaches distributed-tracing identifiers (builder style).
    pub fn with_trace(mut self, trace_id: String, span_id: Option<String>) -> Self {
        self.span_id = span_id;
        self.trace_id = Some(trace_id);
        self
    }

    /// Adds a single structured field (builder style).
    pub fn with_field(mut self, key: &str, value: Value) -> Self {
        self.fields.insert(key.to_owned(), value);
        self
    }

    /// Adds a single tag (builder style).
    pub fn with_tag(mut self, key: &str, value: &str) -> Self {
        self.tags.insert(key.to_owned(), value.to_owned());
        self
    }

    /// Replaces the source-location information (builder style).
    pub fn with_source(mut self, source: LogSource) -> Self {
        self.source = source;
        self
    }

    /// Serializes the entry to a JSON string.
    pub fn to_json(&self) -> ServiceResult<String> {
        serde_json::to_string(self)
            .map_err(|e| ServiceError::MetricsError(format!("序列化日志条目失败: {}", e)))
    }

    /// Renders a human-readable one-line form:
    /// `<time> [<LEVEL>] <service>/<instance>: <message> [trace:..] { k=v .. }`.
    ///
    /// Field order inside `{ .. }` follows `HashMap` iteration order and is
    /// therefore not deterministic.
    pub fn to_formatted_string(&self) -> String {
        let when = chrono::DateTime::from_timestamp_micros(self.timestamp as i64)
            .unwrap_or_default()
            .format("%Y-%m-%d %H:%M:%S%.3f");

        let mut line = format!(
            "{} [{}] {}/{}: {}",
            when,
            self.level.to_string(),
            self.service_name,
            self.instance_id,
            self.message
        );

        if let Some(trace) = self.trace_id.as_ref() {
            line.push_str(&format!(" [trace:{}]", trace));
        }

        if !self.fields.is_empty() {
            line.push_str(" {");
            for (name, value) in self.fields.iter() {
                line.push_str(&format!(" {}={}", name, value));
            }
            line.push_str(" }");
        }

        line
    }
}

/// Source-code location a log entry originated from; every part is optional.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LogSource {
    /// Source file name.
    pub file: Option<String>,
    /// Line number within the file.
    pub line: Option<u32>,
    /// Enclosing function name.
    pub function: Option<String>,
    /// Module path.
    pub module: Option<String>,
}

impl Default for LogSource {
    fn default() -> Self {
        Self {
            file: None,
            line: None,
            function: None,
            module: None,
        }
    }
}

/// Criteria for selecting log entries; evaluated by [`LogFilter::matches`].
#[derive(Debug, Clone)]
pub struct LogFilter {
    /// Minimum severity an entry must have (inclusive).
    pub min_level: LogLevel,
    /// If non-empty, only entries from these exact service names pass.
    pub service_filters: Vec<String>,
    /// Tag key/value pairs that must all be present and equal on the entry.
    pub tag_filters: HashMap<String, String>,
    /// Field key/value pairs that must all be present and equal on the entry.
    pub field_filters: HashMap<String, Value>,
}

impl Default for LogFilter {
    fn default() -> Self {
        Self {
            min_level: LogLevel::Info,
            service_filters: Vec::new(),
            tag_filters: HashMap::new(),
            field_filters: HashMap::new(),
        }
    }
}

impl LogFilter {
    /// Returns `true` when `entry` satisfies every criterion:
    /// level at or above `min_level`, service name in `service_filters`
    /// (if any are set), and every tag/field filter present with an
    /// equal value on the entry.
    pub fn matches(&self, entry: &LogEntry) -> bool {
        // Severity gate first — cheapest check.
        if entry.level < self.min_level {
            return false;
        }

        // An empty service list means "any service".
        let service_ok = self.service_filters.is_empty()
            || self.service_filters.contains(&entry.service_name);

        // Every required tag must exist with the exact value.
        let tags_ok = self
            .tag_filters
            .iter()
            .all(|(key, value)| entry.tags.get(key) == Some(value));

        // Every required field must exist with the exact value.
        let fields_ok = self
            .field_filters
            .iter()
            .all(|(key, value)| entry.fields.get(key) == Some(value));

        service_ok && tags_ok && fields_ok
    }
}

/// Sink that collects log entries and supports querying them back.
#[async_trait]
pub trait LogAggregator: Send + Sync {
    /// Stores a single log entry.
    async fn append(&self, entry: LogEntry) -> ServiceResult<()>;

    /// Stores a batch of log entries.
    async fn append_batch(&self, entries: Vec<LogEntry>) -> ServiceResult<()>;

    /// Returns stored entries matching `filter`, capped at `limit` if given.
    async fn query(&self, filter: LogFilter, limit: Option<usize>) -> ServiceResult<Vec<LogEntry>>;

    /// Returns aggregate counts over the stored entries.
    async fn get_stats(&self) -> ServiceResult<LogStats>;

    /// Removes all stored entries.
    async fn clear(&self) -> ServiceResult<()>;
}

/// In-memory `LogAggregator` backed by a bounded `Vec`; oldest entries are
/// discarded once `max_entries` is exceeded.
pub struct InMemoryLogAggregator {
    logs: Arc<RwLock<Vec<LogEntry>>>,
    max_entries: usize,
}

impl InMemoryLogAggregator {
    /// Creates an aggregator that retains at most `max_entries` entries.
    pub fn new(max_entries: usize) -> Self {
        Self {
            max_entries,
            logs: Arc::new(RwLock::new(Vec::new())),
        }
    }

    /// Drops the oldest entries so that at most `max_entries` remain.
    async fn cleanup_old_entries(&self) {
        let mut logs = self.logs.write().await;
        let overflow = logs.len().saturating_sub(self.max_entries);
        if overflow > 0 {
            logs.drain(..overflow);
        }
    }
}

#[async_trait]
impl LogAggregator for InMemoryLogAggregator {
    async fn append(&self, entry: LogEntry) -> ServiceResult<()> {
        {
            let mut logs = self.logs.write().await;
            logs.push(entry);
        }
        self.cleanup_old_entries().await;
        Ok(())
    }

    async fn append_batch(&self, entries: Vec<LogEntry>) -> ServiceResult<()> {
        {
            let mut logs = self.logs.write().await;
            logs.extend(entries);
        }
        self.cleanup_old_entries().await;
        Ok(())
    }

    async fn query(&self, filter: LogFilter, limit: Option<usize>) -> ServiceResult<Vec<LogEntry>> {
        let logs = self.logs.read().await;
        let mut filtered: Vec<LogEntry> = logs
            .iter()
            .filter(|entry| filter.matches(entry))
            .cloned()
            .collect();

        // 按时间戳降序排序
        filtered.sort_by(|a, b| b.timestamp.cmp(&a.timestamp));

        if let Some(limit) = limit {
            filtered.truncate(limit);
        }

        Ok(filtered)
    }

    async fn get_stats(&self) -> ServiceResult<LogStats> {
        let logs = self.logs.read().await;
        let mut stats = LogStats::default();
        
        stats.total_entries = logs.len();
        
        for entry in logs.iter() {
            match entry.level {
                LogLevel::Trace => stats.trace_count += 1,
                LogLevel::Debug => stats.debug_count += 1,
                LogLevel::Info => stats.info_count += 1,
                LogLevel::Warn => stats.warn_count += 1,
                LogLevel::Error => stats.error_count += 1,
                LogLevel::Fatal => stats.fatal_count += 1,
            }
            
            *stats.service_counts.entry(entry.service_name.clone()).or_insert(0) += 1;
        }

        Ok(stats)
    }

    async fn clear(&self) -> ServiceResult<()> {
        let mut logs = self.logs.write().await;
        logs.clear();
        Ok(())
    }
}

/// Aggregate counts over stored log entries, per level and per service.
#[derive(Debug, Default)]
pub struct LogStats {
    /// Total number of stored entries.
    pub total_entries: usize,
    /// Number of `Trace` entries.
    pub trace_count: usize,
    /// Number of `Debug` entries.
    pub debug_count: usize,
    /// Number of `Info` entries.
    pub info_count: usize,
    /// Number of `Warn` entries.
    pub warn_count: usize,
    /// Number of `Error` entries.
    pub error_count: usize,
    /// Number of `Fatal` entries.
    pub fatal_count: usize,
    /// Entry count keyed by service name.
    pub service_counts: HashMap<String, usize>,
}

/// Destination that ships batches of log entries out of the process.
#[async_trait]
pub trait LogExporter: Send + Sync {
    /// Exports a batch of log entries to the destination.
    async fn export(&self, entries: Vec<LogEntry>) -> ServiceResult<()>;

    /// Flushes any buffered state and shuts the exporter down.
    async fn shutdown(&self) -> ServiceResult<()>;
}

/// Exporter that buffers entries in memory and writes them to a local file.
pub struct FileLogExporter {
    /// Destination file path.
    file_path: String,
    /// On-disk serialization format.
    format: LogFormat,
    /// Entries accumulated since the last flush.
    buffer: Arc<RwLock<Vec<LogEntry>>>,
    /// Buffer length at which a flush is triggered.
    max_buffer_size: usize,
}

/// Serialization format used by exporters.
#[derive(Debug, Clone)]
pub enum LogFormat {
    /// One JSON object per entry (JSON Lines when written to a file).
    Json,
    /// Human-readable formatted text.
    Text,
    /// Comma-separated values.
    Csv,
}

impl FileLogExporter {
    /// Creates an exporter that buffers up to `max_buffer_size` entries
    /// before flushing them to `file_path` in the given `format`.
    pub fn new(file_path: String, format: LogFormat, max_buffer_size: usize) -> Self {
        Self {
            file_path,
            format,
            buffer: Arc::new(RwLock::new(Vec::new())),
            max_buffer_size,
        }
    }

    /// Drains the buffer and appends the formatted entries to the log file.
    ///
    /// BUG FIX: the previous implementation used `tokio::fs::write`, which
    /// truncates the target file, so every flush erased all previously
    /// exported logs. The file is now opened in append mode (and created if
    /// missing), and the CSV header is emitted only when the file is new or
    /// empty so repeated flushes do not interleave header rows with data.
    async fn flush_to_file(&self) -> ServiceResult<()> {
        let entries = {
            let mut buffer = self.buffer.write().await;
            buffer.drain(..).collect::<Vec<_>>()
        };

        if entries.is_empty() {
            return Ok(());
        }

        // Treat a missing file the same as an empty one (it will be created).
        let file_is_empty = tokio::fs::metadata(&self.file_path)
            .await
            .map(|meta| meta.len() == 0)
            .unwrap_or(true);

        let content = match self.format {
            LogFormat::Json => self.format_as_json(&entries)?,
            LogFormat::Text => self.format_as_text(&entries),
            LogFormat::Csv => self.format_as_csv(&entries, file_is_empty)?,
        };

        let mut file = tokio::fs::OpenOptions::new()
            .create(true)
            .append(true)
            .open(&self.file_path)
            .await
            .map_err(|e| ServiceError::MetricsError(format!("写入日志文件失败: {}", e)))?;
        file.write_all(content.as_bytes())
            .await
            .map_err(|e| ServiceError::MetricsError(format!("写入日志文件失败: {}", e)))?;
        file.flush()
            .await
            .map_err(|e| ServiceError::MetricsError(format!("写入日志文件失败: {}", e)))?;

        Ok(())
    }

    /// One JSON object per line (JSON Lines), with a trailing newline.
    fn format_as_json(&self, entries: &[LogEntry]) -> ServiceResult<String> {
        let mut lines = Vec::with_capacity(entries.len());
        for entry in entries {
            lines.push(entry.to_json()?);
        }
        Ok(lines.join("\n") + "\n")
    }

    /// Human-readable text, one formatted entry per line.
    fn format_as_text(&self, entries: &[LogEntry]) -> String {
        entries
            .iter()
            .map(|entry| entry.to_formatted_string())
            .collect::<Vec<_>>()
            .join("\n") + "\n"
    }

    /// CSV rows; `include_header` controls whether the header row is emitted.
    ///
    /// NOTE(review): only `message` is quoted/escaped — a service or instance
    /// name containing a comma would corrupt the row; confirm upstream that
    /// those values are comma-free.
    fn format_as_csv(&self, entries: &[LogEntry], include_header: bool) -> ServiceResult<String> {
        let mut csv = if include_header {
            String::from("timestamp,level,service_name,instance_id,message,trace_id,span_id\n")
        } else {
            String::new()
        };

        for entry in entries {
            csv.push_str(&format!(
                "{},{},{},{},\"{}\",{},{}\n",
                entry.timestamp,
                entry.level.to_string(),
                entry.service_name,
                entry.instance_id,
                entry.message.replace('"', "\"\""),
                entry.trace_id.as_deref().unwrap_or(""),
                entry.span_id.as_deref().unwrap_or("")
            ));
        }

        Ok(csv)
    }
}

#[async_trait]
impl LogExporter for FileLogExporter {
    /// Buffers the entries, flushing to disk once the buffer reaches
    /// `max_buffer_size`.
    async fn export(&self, entries: Vec<LogEntry>) -> ServiceResult<()> {
        // Compute the flush decision inside the lock scope, then release
        // the guard before the (potentially slow) file I/O.
        let needs_flush = {
            let mut buffer = self.buffer.write().await;
            buffer.extend(entries);
            buffer.len() >= self.max_buffer_size
        };

        if needs_flush {
            self.flush_to_file().await?;
        }

        Ok(())
    }

    /// Writes out whatever is still buffered.
    async fn shutdown(&self) -> ServiceResult<()> {
        self.flush_to_file().await
    }
}

/// Exporter that prints entries to stdout, optionally with ANSI colors.
pub struct ConsoleLogExporter {
    /// Output format (CSV falls back to a compact comma-separated line).
    format: LogFormat,
    /// Whether to wrap the level name in ANSI color escapes.
    colored: bool,
}

impl ConsoleLogExporter {
    pub fn new(format: LogFormat, colored: bool) -> Self {
        Self { format, colored }
    }

    fn colorize_level(&self, level: &LogLevel) -> String {
        if !self.colored {
            return level.to_string();
        }

        match level {
            LogLevel::Trace => format!("\x1b[36m{}\x1b[0m", level.to_string()), // 青色
            LogLevel::Debug => format!("\x1b[34m{}\x1b[0m", level.to_string()), // 蓝色
            LogLevel::Info => format!("\x1b[32m{}\x1b[0m", level.to_string()),  // 绿色
            LogLevel::Warn => format!("\x1b[33m{}\x1b[0m", level.to_string()),  // 黄色
            LogLevel::Error => format!("\x1b[31m{}\x1b[0m", level.to_string()), // 红色
            LogLevel::Fatal => format!("\x1b[35m{}\x1b[0m", level.to_string()), // 紫色
        }
    }
}

#[async_trait]
impl LogExporter for ConsoleLogExporter {
    /// Prints each entry to stdout in the configured format.
    async fn export(&self, entries: Vec<LogEntry>) -> ServiceResult<()> {
        for entry in entries {
            match self.format {
                LogFormat::Json => println!("{}", entry.to_json()?),
                LogFormat::Text => {
                    let when = chrono::DateTime::from_timestamp_micros(entry.timestamp as i64)
                        .unwrap_or_default()
                        .format("%Y-%m-%d %H:%M:%S%.3f");

                    println!(
                        "{} [{}] {}/{}: {}",
                        when,
                        self.colorize_level(&entry.level),
                        entry.service_name,
                        entry.instance_id,
                        entry.message
                    );
                }
                // CSV is awkward on a terminal; emit a compact line instead.
                LogFormat::Csv => println!(
                    "{},{},{},{}",
                    entry.timestamp,
                    entry.level.to_string(),
                    entry.service_name,
                    entry.message
                ),
            }
        }

        Ok(())
    }

    /// Nothing is buffered, so shutdown is a no-op.
    async fn shutdown(&self) -> ServiceResult<()> {
        Ok(())
    }
}

/// Exporter that POSTs log batches to an HTTP endpoint as JSON.
pub struct HttpLogExporter {
    /// Target URL for POST requests.
    endpoint: String,
    /// Reused reqwest client (connection pooling).
    client: reqwest::Client,
    /// Extra headers added to every request.
    headers: HashMap<String, String>,
    /// Requested format; entries are currently always serialized as JSON
    /// regardless of this value (see `export`).
    format: LogFormat,
}

impl HttpLogExporter {
    /// Creates an exporter targeting `endpoint` with a fresh client and
    /// no extra headers.
    pub fn new(endpoint: String, format: LogFormat) -> Self {
        Self {
            client: reqwest::Client::new(),
            headers: HashMap::new(),
            endpoint,
            format,
        }
    }

    /// Adds a header sent with every request (builder style).
    pub fn with_header(mut self, key: &str, value: &str) -> Self {
        self.headers.insert(key.to_owned(), value.to_owned());
        self
    }

    /// Convenience: sets an `Authorization: Bearer <token>` header.
    pub fn with_auth_token(self, token: &str) -> Self {
        let bearer = format!("Bearer {}", token);
        self.with_header("Authorization", &bearer)
    }
}

#[async_trait]
impl LogExporter for HttpLogExporter {
    /// POSTs the batch as a JSON array to the configured endpoint.
    ///
    /// # Errors
    /// Returns `ServiceError::MetricsError` on serialization failure,
    /// transport failure, or a non-2xx response status.
    async fn export(&self, entries: Vec<LogEntry>) -> ServiceResult<()> {
        // The previous implementation matched on `self.format` with two
        // byte-identical arms — a duplicated dead distinction. All variants
        // intentionally use JSON as the HTTP transport encoding (Text/CSV
        // have no wire format defined), collapsed here into one arm.
        let payload = match self.format {
            LogFormat::Json | LogFormat::Text | LogFormat::Csv => {
                serde_json::to_string(&entries)
                    .map_err(|e| ServiceError::MetricsError(format!("序列化日志失败: {}", e)))?
            }
        };

        let mut request = self
            .client
            .post(&self.endpoint)
            .header("Content-Type", "application/json")
            .body(payload);

        for (key, value) in &self.headers {
            request = request.header(key, value);
        }

        let response = request
            .send()
            .await
            .map_err(|e| ServiceError::MetricsError(format!("发送HTTP日志失败: {}", e)))?;

        if !response.status().is_success() {
            let status = response.status();
            let body = response.text().await.unwrap_or_default();
            return Err(ServiceError::MetricsError(format!(
                "HTTP日志导出失败 {}: {}", status, body
            )));
        }

        Ok(())
    }

    /// Nothing is buffered locally, so shutdown is a no-op.
    async fn shutdown(&self) -> ServiceResult<()> {
        Ok(())
    }
}

/// Coordinates one aggregator and a set of exporters behind a single
/// logging API with batched, buffered export.
pub struct LogManager {
    /// Aggregator every entry is appended to for querying/stats.
    aggregator: Arc<dyn LogAggregator>,
    /// Exporters that receive flushed batches, in registration order.
    exporters: Vec<Arc<dyn LogExporter>>,
    /// Service name stamped on every entry this manager creates.
    service_name: String,
    /// Instance id stamped on every entry this manager creates.
    instance_id: String,
    /// Entries waiting to be exported.
    buffer: Arc<RwLock<Vec<LogEntry>>>,
    /// Buffer length at which a synchronous export is triggered.
    buffer_size: usize,
    /// Period of the background task started by `start_auto_flush`.
    auto_flush_interval: tokio::time::Duration,
}

impl LogManager {
    /// Creates a manager with no exporters attached yet.
    ///
    /// `buffer_size` is the number of buffered entries that triggers an
    /// export; the auto-flush interval defaults to 5 seconds.
    pub fn new(
        aggregator: Arc<dyn LogAggregator>,
        service_name: String,
        instance_id: String,
        buffer_size: usize,
    ) -> Self {
        Self {
            aggregator,
            exporters: Vec::new(),
            service_name,
            instance_id,
            buffer: Arc::new(RwLock::new(Vec::new())),
            buffer_size,
            auto_flush_interval: tokio::time::Duration::from_secs(5),
        }
    }

    /// Registers an exporter to receive flushed batches.
    ///
    /// NOTE(review): exporters added after `start_auto_flush` has been
    /// called are not seen by the already-running background task (it clones
    /// the list at spawn time) — confirm call ordering at call sites.
    pub fn add_exporter(&mut self, exporter: Arc<dyn LogExporter>) {
        self.exporters.push(exporter);
    }

    /// Logs `message` at `level`, stamped with this manager's
    /// service name and instance id.
    pub async fn log(&self, level: LogLevel, message: String) -> ServiceResult<()> {
        let entry = LogEntry::new(level, message, self.service_name.clone(), self.instance_id.clone());
        self.log_entry(entry).await
    }

    /// Logs a message carrying distributed-tracing identifiers.
    pub async fn log_with_trace(
        &self,
        level: LogLevel,
        message: String,
        trace_id: String,
        span_id: Option<String>,
    ) -> ServiceResult<()> {
        let entry = LogEntry::new(level, message, self.service_name.clone(), self.instance_id.clone())
            .with_trace(trace_id, span_id);
        self.log_entry(entry).await
    }

    /// Logs a message with structured payload fields.
    /// `fields` replaces the entry's field map wholesale.
    pub async fn log_structured(
        &self,
        level: LogLevel,
        message: String,
        fields: HashMap<String, Value>,
    ) -> ServiceResult<()> {
        let mut entry = LogEntry::new(level, message, self.service_name.clone(), self.instance_id.clone());
        entry.fields = fields;
        self.log_entry(entry).await
    }

    /// Records a fully-built entry: appends it to the aggregator, stages it
    /// in the export buffer, and exports the batch once `buffer_size` is
    /// reached.
    pub async fn log_entry(&self, entry: LogEntry) -> ServiceResult<()> {
        // Persist in the aggregator first so the entry is queryable even if
        // export fails below.
        self.aggregator.append(entry.clone()).await?;

        // Stage for export.
        let mut buffer = self.buffer.write().await;
        buffer.push(entry);

        // Flush when full. The guard is dropped before awaiting exporters so
        // other loggers are not blocked during export I/O.
        // NOTE(review): if an exporter fails, the drained batch is lost
        // (it is not re-buffered).
        if buffer.len() >= self.buffer_size {
            let entries_to_export = buffer.drain(..).collect::<Vec<_>>();
            drop(buffer);
            self.export_entries(entries_to_export).await?;
        }

        Ok(())
    }

    /// Sends a batch to every registered exporter in order.
    /// The first exporter error aborts delivery to the remaining exporters.
    async fn export_entries(&self, entries: Vec<LogEntry>) -> ServiceResult<()> {
        for exporter in &self.exporters {
            exporter.export(entries.clone()).await?;
        }
        Ok(())
    }

    /// Drains the buffer and exports any pending entries immediately.
    pub async fn flush(&self) -> ServiceResult<()> {
        // Drain inside a short lock scope, export outside it.
        let entries = {
            let mut buffer = self.buffer.write().await;
            buffer.drain(..).collect::<Vec<_>>()
        };

        if !entries.is_empty() {
            self.export_entries(entries).await?;
        }

        Ok(())
    }

    /// Queries the aggregator for stored entries matching `filter`.
    pub async fn query(&self, filter: LogFilter, limit: Option<usize>) -> ServiceResult<Vec<LogEntry>> {
        self.aggregator.query(filter, limit).await
    }

    /// Returns aggregate log statistics from the aggregator.
    pub async fn get_stats(&self) -> ServiceResult<LogStats> {
        self.aggregator.get_stats().await
    }

    /// Flushes pending entries, then shuts down every exporter in order.
    pub async fn shutdown(&self) -> ServiceResult<()> {
        // Export whatever is still buffered.
        self.flush().await?;

        // Let each exporter release its resources.
        for exporter in &self.exporters {
            exporter.shutdown().await?;
        }

        Ok(())
    }

    /// Spawns a background task that drains and exports the buffer every
    /// `auto_flush_interval`. Export errors inside the task are ignored.
    ///
    /// NOTE(review): the task captures a snapshot of the exporter list at
    /// spawn time, and it never terminates — there is no shutdown signal
    /// for it. Confirm this is acceptable for process shutdown.
    pub async fn start_auto_flush(&self) {
        let buffer = self.buffer.clone();
        let exporters = self.exporters.clone();
        let interval = self.auto_flush_interval;

        tokio::spawn(async move {
            let mut ticker = tokio::time::interval(interval);

            loop {
                ticker.tick().await;

                // Take whatever accumulated since the last tick; skip the
                // tick entirely when nothing is buffered.
                let entries = {
                    let mut buffer = buffer.write().await;
                    if buffer.is_empty() {
                        continue;
                    }
                    buffer.drain(..).collect::<Vec<_>>()
                };

                for exporter in &exporters {
                    let _ = exporter.export(entries.clone()).await;
                }
            }
        });
    }
}

/// Thin convenience wrapper exposing one method per log level.
pub struct SimpleLogger {
    /// Underlying manager that performs the actual logging.
    manager: Arc<LogManager>,
}

impl SimpleLogger {
    pub fn new(manager: Arc<LogManager>) -> Self {
        Self { manager }
    }

    /// 记录Trace级别日志
    pub async fn trace(&self, message: &str) -> ServiceResult<()> {
        self.manager.log(LogLevel::Trace, message.to_string()).await
    }

    /// 记录Debug级别日志
    pub async fn debug(&self, message: &str) -> ServiceResult<()> {
        self.manager.log(LogLevel::Debug, message.to_string()).await
    }

    /// 记录Info级别日志
    pub async fn info(&self, message: &str) -> ServiceResult<()> {
        self.manager.log(LogLevel::Info, message.to_string()).await
    }

    /// 记录Warn级别日志
    pub async fn warn(&self, message: &str) -> ServiceResult<()> {
        self.manager.log(LogLevel::Warn, message.to_string()).await
    }

    /// 记录Error级别日志
    pub async fn error(&self, message: &str) -> ServiceResult<()> {
        self.manager.log(LogLevel::Error, message.to_string()).await
    }

    /// 记录Fatal级别日志
    pub async fn fatal(&self, message: &str) -> ServiceResult<()> {
        self.manager.log(LogLevel::Fatal, message.to_string()).await
    }
}