#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
日志记录系统
实现结构化日志记录，支持本地文件和腾讯云CLS
"""

import json
import logging
import logging.handlers
import os
import threading
import time

from dataclasses import dataclass, asdict, field
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Dict, Any, Optional, List

logger = logging.getLogger(__name__)


@dataclass
class TaskProcessingLog:
    """Structured log record for a single task-processing event."""
    # ISO-8601 UTC timestamp of record creation.
    timestamp: str
    # Record discriminator used by downstream log consumers.
    type: str = "task_processing"
    # Event payload; default_factory avoids the shared-mutable-default pitfall
    # of the previous `= None` + __post_init__ pattern.
    data: Dict[str, Any] = field(default_factory=dict)

    def __post_init__(self):
        # Preserve the old contract: an explicit data=None still becomes {}.
        if self.data is None:
            self.data = {}


@dataclass
class ServerStatusLog:
    """Structured log record for a server-status event."""
    # ISO-8601 UTC timestamp of record creation.
    timestamp: str
    # Record discriminator used by downstream log consumers.
    type: str = "server_status"
    # Event payload; default_factory avoids the shared-mutable-default pitfall
    # of the previous `= None` + __post_init__ pattern.
    data: Dict[str, Any] = field(default_factory=dict)

    def __post_init__(self):
        # Preserve the old contract: an explicit data=None still becomes {}.
        if self.data is None:
            self.data = {}


@dataclass
class CostTrafficLog:
    """Structured log record for a cost-and-traffic summary."""
    # ISO-8601 UTC timestamp of record creation.
    timestamp: str
    # Record discriminator used by downstream log consumers.
    type: str = "cost_traffic"
    # Event payload; default_factory avoids the shared-mutable-default pitfall
    # of the previous `= None` + __post_init__ pattern.
    data: Dict[str, Any] = field(default_factory=dict)

    def __post_init__(self):
        # Preserve the old contract: an explicit data=None still becomes {}.
        if self.data is None:
            self.data = {}


@dataclass
class AlertLog:
    """Structured log record for an alert event."""
    # ISO-8601 UTC timestamp of record creation.
    timestamp: str
    # Record discriminator used by downstream log consumers.
    type: str = "alert"
    # Event payload; default_factory avoids the shared-mutable-default pitfall
    # of the previous `= None` + __post_init__ pattern.
    data: Dict[str, Any] = field(default_factory=dict)

    def __post_init__(self):
        # Preserve the old contract: an explicit data=None still becomes {}.
        if self.data is None:
            self.data = {}


class StructuredLogger:
    """Structured JSON-lines logger.

    Writes four categories of records (task processing, server status,
    cost/traffic, alerts) to separate size-rotated files under ``log_dir``.
    Each emitted line is one JSON document produced from a log dataclass.
    """

    def __init__(self, log_dir: str = "logs", max_file_size: int = 100 * 1024 * 1024,
                 backup_count: int = 10):
        """
        Initialize the structured logger.

        Args:
            log_dir: Directory for log files (created if missing).
            max_file_size: Maximum size of a single log file, in bytes.
            backup_count: Number of rotated backup files to keep.
        """
        self.log_dir = Path(log_dir)
        self.max_file_size = max_file_size
        self.backup_count = backup_count
        # The logging module is thread-safe per handler; this lock additionally
        # keeps whole-record emission serialized across categories.
        self.log_lock = threading.Lock()

        # parents=True fixes a crash when log_dir is a nested path whose
        # parent directories do not exist yet (original used exist_ok only).
        self.log_dir.mkdir(parents=True, exist_ok=True)

        # One dedicated logger per record category.
        self.task_logger = self._create_logger("task_processing", "task_processing")
        self.server_logger = self._create_logger("server_status", "server_status")
        self.cost_logger = self._create_logger("cost_traffic", "cost_traffic")
        self.alert_logger = self._create_logger("alerts", "alerts")

        logger.info(f"结构化日志记录器已初始化: {self.log_dir}")

    @staticmethod
    def _utc_now_iso() -> str:
        """Current UTC time as an ISO-8601 string with a trailing 'Z'.

        Timezone-aware replacement for the deprecated ``datetime.utcnow()``;
        the produced string format is unchanged.
        """
        return datetime.now(timezone.utc).replace(tzinfo=None).isoformat() + "Z"

    @staticmethod
    def _ts_to_iso(ts: Optional[float]) -> Optional[str]:
        """Convert a Unix timestamp to an ISO-8601 UTC string with 'Z'.

        Returns None for a falsy timestamp. Converts in UTC explicitly: the
        previous code formatted *local* time yet still appended 'Z',
        mislabeling the timezone of every timestamp written to the logs.
        """
        if not ts:
            return None
        return datetime.fromtimestamp(ts, tz=timezone.utc).replace(tzinfo=None).isoformat() + "Z"

    def _emit(self, target: logging.Logger, entry) -> None:
        """Serialize a log dataclass to one JSON line and write it under the lock."""
        with self.log_lock:
            target.info(json.dumps(asdict(entry), ensure_ascii=False))

    def _create_logger(self, name: str, filename_prefix: str) -> logging.Logger:
        """Create (or reconfigure) the rotating-file logger for one category.

        NOTE(review): the date embedded in the filename is fixed when this
        instance is created; rotation is size-based, not daily — confirm this
        is the intended naming scheme.
        """
        logger_instance = logging.getLogger(f"hai_optimization.{name}")
        logger_instance.setLevel(logging.INFO)

        # Remove handlers left over from an earlier instantiation so records
        # are not written twice.
        for handler in logger_instance.handlers[:]:
            logger_instance.removeHandler(handler)

        log_file = self.log_dir / f"{filename_prefix}_{datetime.now().strftime('%Y-%m-%d')}.log"
        file_handler = logging.handlers.RotatingFileHandler(
            log_file, maxBytes=self.max_file_size, backupCount=self.backup_count
        )

        # Records arrive pre-serialized as JSON; emit the message verbatim.
        file_handler.setFormatter(logging.Formatter('%(message)s'))

        logger_instance.addHandler(file_handler)
        # Keep structured records from leaking into the root logger's output.
        logger_instance.propagate = False

        return logger_instance

    def log_task_processing(self, task_id: str, user_id: str, client_ip: str,
                          time_window: int, server_id: str, region: str, gpu_type: str,
                          submit_time: float, start_time: float, complete_time: float,
                          status: str, error_message: str = None,
                          processing_duration: float = None, wait_duration: float = None) -> None:
        """
        Record a task-processing event.

        Args:
            task_id: Task ID.
            user_id: User ID.
            client_ip: Client IP address.
            time_window: Time-window number used for user identification.
            server_id: Server ID.
            region: Region.
            gpu_type: GPU type.
            submit_time: Submission time (Unix timestamp).
            start_time: Processing start time (Unix timestamp).
            complete_time: Completion time (Unix timestamp).
            status: Task status.
            error_message: Error message; included only when non-empty.
            processing_duration: Processing seconds; derived from start and
                complete times when omitted.
            wait_duration: Queue-wait seconds; derived from submit and start
                times when omitted.
        """
        # Derive durations the caller did not supply (falsy timestamps skip
        # derivation, matching the original behavior).
        if processing_duration is None and start_time and complete_time:
            processing_duration = complete_time - start_time
        if wait_duration is None and submit_time and start_time:
            wait_duration = start_time - submit_time

        log_data = {
            "task_id": task_id,
            "user_id": user_id,
            "user_identification_method": "ip_time_window",
            "client_ip": client_ip,
            "time_window": time_window,
            "server_id": server_id,
            "region": region,
            "gpu_type": gpu_type,
            "submit_time": self._ts_to_iso(submit_time),
            "start_time": self._ts_to_iso(start_time),
            "complete_time": self._ts_to_iso(complete_time),
            "processing_duration": processing_duration,
            "wait_duration": wait_duration,
            "status": status
        }

        if error_message:
            log_data["error_message"] = error_message

        self._emit(self.task_logger,
                   TaskProcessingLog(timestamp=self._utc_now_iso(), data=log_data))

    def log_server_status(self, server_id: str, region: str, gpu_type: str,
                         status: str, cpu_usage: float, gpu_usage: float,
                         memory_usage: float, current_tasks: int,
                         max_concurrent_tasks: int, health_check_status: str,
                         created_time: float = None, destroyed_time: float = None,
                         event_type: str = "status_update") -> None:
        """
        Record a server-status event.

        Args:
            server_id: Server ID.
            region: Region.
            gpu_type: GPU type.
            status: Server status.
            cpu_usage: CPU utilization.
            gpu_usage: GPU utilization.
            memory_usage: Memory utilization.
            current_tasks: Number of tasks currently running.
            max_concurrent_tasks: Maximum concurrent tasks.
            health_check_status: Health-check result.
            created_time: Creation time (Unix timestamp), optional.
            destroyed_time: Destruction time (Unix timestamp), optional.
            event_type: Event type; defaults to "status_update".
        """
        log_data = {
            "server_id": server_id,
            "region": region,
            "gpu_type": gpu_type,
            "status": status,
            "cpu_usage": cpu_usage,
            "gpu_usage": gpu_usage,
            "memory_usage": memory_usage,
            "current_tasks": current_tasks,
            "max_concurrent_tasks": max_concurrent_tasks,
            "health_check_status": health_check_status,
            "event_type": event_type
        }

        # Lifecycle timestamps are optional; logged only when present.
        if created_time:
            log_data["created_time"] = self._ts_to_iso(created_time)
        if destroyed_time:
            log_data["destroyed_time"] = self._ts_to_iso(destroyed_time)

        self._emit(self.server_logger,
                   ServerStatusLog(timestamp=self._utc_now_iso(), data=log_data))

    def log_cost_traffic(self, date: str, gpu_cost: float, traffic_cost: float,
                        total_cost: float, traffic_usage: float,
                        traffic_limit: float = 500.0, region_breakdown: Dict = None) -> None:
        """
        Record a daily cost-and-traffic summary.

        Args:
            date: Date string.
            gpu_cost: GPU cost.
            traffic_cost: Traffic cost.
            total_cost: Total cost.
            traffic_usage: Traffic used (GB).
            traffic_limit: Traffic cap (GB).
            region_breakdown: Optional per-region cost breakdown.
        """
        log_data = {
            "date": date,
            "gpu_cost": gpu_cost,
            "traffic_cost": traffic_cost,
            "total_cost": total_cost,
            "traffic_usage_gb": traffic_usage,
            "traffic_limit_gb": traffic_limit,
            # Guard against division by zero when no limit is configured.
            "traffic_utilization": traffic_usage / traffic_limit if traffic_limit > 0 else 0,
            "region_breakdown": region_breakdown or {}
        }

        self._emit(self.cost_logger,
                   CostTrafficLog(timestamp=self._utc_now_iso(), data=log_data))

    def log_alert(self, alert_type: str, severity: str, message: str,
                 details: Dict[str, Any] = None, threshold_value: float = None,
                 current_value: float = None) -> None:
        """
        Record an alert event.

        Args:
            alert_type: Alert type.
            severity: Severity level.
            message: Human-readable alert message.
            details: Optional extra detail fields.
            threshold_value: Threshold that triggered the alert, if any.
            current_value: Observed value, if any.
        """
        log_data = {
            "alert_type": alert_type,
            "severity": severity,
            "message": message,
            "details": details or {},
            "threshold_value": threshold_value,
            "current_value": current_value
        }

        self._emit(self.alert_logger,
                   AlertLog(timestamp=self._utc_now_iso(), data=log_data))

    def get_log_files(self, log_type: str = None) -> List[Path]:
        """
        List log files in the log directory.

        Args:
            log_type: Category prefix to filter by; None lists every ``.log``.

        Returns:
            Paths of the matching log files.
        """
        pattern = f"{log_type}_*.log" if log_type else "*.log"
        return list(self.log_dir.glob(pattern))

    def cleanup_old_logs(self, days_to_keep: int = 30) -> int:
        """
        Delete log files older than ``days_to_keep`` days (by mtime).

        Args:
            days_to_keep: Retention window in days.

        Returns:
            Number of files deleted.
        """
        cutoff = (datetime.now() - timedelta(days=days_to_keep)).timestamp()
        cleaned_count = 0

        for log_file in self.get_log_files():
            if log_file.stat().st_mtime < cutoff:
                try:
                    log_file.unlink()
                    cleaned_count += 1
                    logger.info(f"删除旧日志文件: {log_file}")
                except Exception as e:
                    # Best-effort: keep going even if one file cannot be removed.
                    logger.error(f"删除日志文件失败: {log_file}, 错误: {e}")

        return cleaned_count


class TencentCloudCLSLogger:
    """Tencent Cloud CLS log shipper.

    The real CLS SDK integration is not wired up yet; while the feature is
    enabled, records are appended to local JSON-lines files so no data is
    lost in the meantime.
    """

    def __init__(self, secret_id: str, secret_key: str, region: str,
                 logset_id: str, topic_id: str,
                 local_log_dir: str = "/home/ubuntu/PhotoEnhanceAI-web/logs"):
        """
        Initialize the CLS logger.

        Args:
            secret_id: Tencent Cloud SecretId.
            secret_key: Tencent Cloud SecretKey.
            region: Region, e.g. "ap-beijing".
            logset_id: CLS logset ID.
            topic_id: CLS topic ID.
            local_log_dir: Directory for the local fallback files (new
                parameter; defaults to the previously hard-coded path).
        """
        self.secret_id = secret_id
        self.secret_key = secret_key
        self.region = region
        self.logset_id = logset_id
        self.topic_id = topic_id
        self.local_log_dir = local_log_dir
        # All four identifiers are required for CLS to be considered usable.
        self.enabled = bool(secret_id and secret_key and logset_id and topic_id)

        if self.enabled:
            try:
                # TODO: import and configure the Tencent Cloud CLS SDK here.
                # from tencentcloud.cls.v20201016 import cls_client, models
                # self.client = cls_client.ClsClient(credential, region)
                logger.info(f"腾讯云CLS日志记录器已初始化: {region}")
            except ImportError:
                logger.warning("腾讯云CLS SDK未安装，CLS日志功能将被禁用")
                self.enabled = False
        else:
            logger.warning("腾讯云CLS配置不完整，CLS日志功能将被禁用")

    def send_log(self, log_data: Dict[str, Any], log_type: str = "task_processing") -> bool:
        """
        Ship one log record to CLS (currently: the local fallback file).

        Args:
            log_data: The record payload.
            log_type: Record category, used in the fallback filename.

        Returns:
            True on success, False when disabled or on failure.
        """
        if not self.enabled:
            return False

        try:
            return self._send_to_cls(log_data, log_type)
        except Exception as e:
            logger.error(f"发送日志到CLS失败: {e}")
            return False

    def _send_to_cls(self, log_data: Dict[str, Any], log_type: str) -> bool:
        """Append the record to the local fallback file.

        Bug fix: the original checked ``self.cls_config``, an attribute that
        was never assigned, so every call raised AttributeError internally and
        ``send_log`` always returned False. The check now uses ``self.enabled``.
        """
        try:
            if not self.enabled:
                logger.debug("CLS功能未启用，跳过发送")
                return True

            # Real CLS API integration is pending; persist locally meanwhile.
            os.makedirs(self.local_log_dir, exist_ok=True)
            log_file = os.path.join(
                self.local_log_dir,
                f"cls_{log_type}_{datetime.now().strftime('%Y-%m-%d')}.log"
            )

            with open(log_file, 'a', encoding='utf-8') as f:
                f.write(json.dumps(log_data, ensure_ascii=False) + '\n')

            logger.debug(f"日志已记录到本地文件: {log_file}")
            return True

        except Exception as e:
            logger.error(f"发送日志到CLS失败: {e}")
            return False


class LogAnalyzer:
    """Offline analyzer for the JSON-lines files written by StructuredLogger."""

    def __init__(self, structured_logger: "StructuredLogger"):
        # Forward-reference annotation: avoids a hard dependency on class
        # definition order at import time.
        self.structured_logger = structured_logger

    def analyze_task_processing_logs(self, date: str) -> Dict[str, Any]:
        """
        Aggregate one day's task-processing log file.

        Args:
            date: Date string (YYYY-MM-DD) identifying the file to analyze.

        Returns:
            Aggregated statistics; {"error": ...} when the file is missing,
            or an "error" key added when reading fails midway.
        """
        log_file = self.structured_logger.log_dir / f"task_processing_{date}.log"

        if not log_file.exists():
            return {"error": "日志文件不存在"}

        stats = {
            "total_tasks": 0,
            "successful_tasks": 0,
            "failed_tasks": 0,
            "average_processing_time": 0,
            "average_wait_time": 0,
            "user_statistics": {},
            "server_statistics": {},
            "region_statistics": {},
            "gpu_type_statistics": {}
        }

        processing_times = []
        wait_times = []

        # Payload field -> stats bucket; collapses the four identical counting
        # blocks of the original implementation into one loop.
        count_fields = {
            "user_id": "user_statistics",
            "server_id": "server_statistics",
            "region": "region_statistics",
            "gpu_type": "gpu_type_statistics",
        }

        try:
            with open(log_file, 'r', encoding='utf-8') as f:
                for line in f:
                    try:
                        log_entry = json.loads(line.strip())
                    except json.JSONDecodeError:
                        # Skip partially-written or corrupt lines.
                        continue
                    data = log_entry.get('data', {})

                    stats["total_tasks"] += 1

                    # Any status other than 'success' counts as failed.
                    if data.get('status') == 'success':
                        stats["successful_tasks"] += 1
                    else:
                        stats["failed_tasks"] += 1

                    # Durations: falsy values (0/None) are skipped, matching
                    # the original truthiness-based behavior.
                    if data.get('processing_duration'):
                        processing_times.append(data['processing_duration'])
                    if data.get('wait_duration'):
                        wait_times.append(data['wait_duration'])

                    # Per-dimension occurrence counts.
                    for field_name, bucket in count_fields.items():
                        value = data.get(field_name)
                        if value:
                            counts = stats[bucket]
                            counts[value] = counts.get(value, 0) + 1

            # Averages over the records that carried a duration.
            if processing_times:
                stats["average_processing_time"] = sum(processing_times) / len(processing_times)
            if wait_times:
                stats["average_wait_time"] = sum(wait_times) / len(wait_times)

            # Success rate, only defined when at least one task was seen.
            if stats["total_tasks"] > 0:
                stats["success_rate"] = stats["successful_tasks"] / stats["total_tasks"]

        except Exception as e:
            logger.error(f"分析任务处理日志失败: {e}")
            stats["error"] = str(e)

        return stats

    def generate_daily_report(self, date: str) -> Dict[str, Any]:
        """Build a daily report wrapping the per-day task statistics."""
        return {
            "date": date,
            "task_processing": self.analyze_task_processing_logs(date),
            # Timezone-aware replacement for the deprecated datetime.utcnow();
            # the produced string format is unchanged.
            "generated_at": datetime.now(timezone.utc).replace(tzinfo=None).isoformat() + "Z"
        }


# Module-level singletons, created at import time.
# NOTE(review): instantiating StructuredLogger here creates the "logs"
# directory as an import side effect — confirm this is intended.
structured_logger = StructuredLogger()
# CLS credentials come from the environment; any missing value disables CLS.
cls_logger = TencentCloudCLSLogger(
    secret_id=os.getenv('TENCENT_SECRET_ID', ''),
    secret_key=os.getenv('TENCENT_SECRET_KEY', ''),
    region=os.getenv('TENCENT_REGION', 'ap-beijing'),
    logset_id=os.getenv('TENCENT_LOG_SET_ID', ''),
    topic_id=os.getenv('TENCENT_LOG_TOPIC_ID', '')
)
log_analyzer = LogAnalyzer(structured_logger)


if __name__ == "__main__":
    # Smoke test: exercise each log category once.
    logging.basicConfig(level=logging.INFO)
    
    # Task-processing log
    structured_logger.log_task_processing(
        task_id="task_12345",
        user_id="user_192.168.1.100_1",
        client_ip="192.168.1.100",
        time_window=1,
        server_id="gpu-001",
        region="ap-beijing",
        gpu_type="basic",
        submit_time=time.time() - 30,
        start_time=time.time() - 25,
        complete_time=time.time(),
        status="success",
        processing_duration=25,
        wait_duration=5
    )
    
    # Server-status log
    structured_logger.log_server_status(
        server_id="gpu-001",
        region="ap-beijing",
        gpu_type="basic",
        status="healthy",
        cpu_usage=45.5,
        gpu_usage=78.2,
        memory_usage=62.1,
        current_tasks=3,
        max_concurrent_tasks=10,
        health_check_status="healthy"
    )
    
    # Cost-and-traffic log
    structured_logger.log_cost_traffic(
        date="2025-01-03",
        gpu_cost=15.6,
        traffic_cost=8.4,
        total_cost=24.0,
        traffic_usage=420.5,
        traffic_limit=500.0
    )
    
    # Alert log
    structured_logger.log_alert(
        alert_type="traffic_warning",
        severity="warning",
        message="流量使用量接近限制",
        threshold_value=450.0,
        current_value=420.5
    )
    
    print("日志记录测试完成")
