import pymysql
from pymysql.cursors import DictCursor
from datetime import datetime, timedelta, timezone
from typing import Dict, Any, List, Optional, Tuple
import json
import logging
from collections import Counter

# Module-level logger configuration for this analyzer.
# The handler is attached only when `logger.handlers` is empty, so importing
# this module repeatedly does not stack duplicate console handlers (which
# would print every record more than once).
logger = logging.getLogger("AnomalyLogAnalyzer")
logger.setLevel(logging.INFO)
if not logger.handlers:
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger.addHandler(handler)

def parse_datetime(time_str: str) -> Optional[datetime]:
    """Parse an ISO-8601 timestamp string and convert it to Beijing time (UTC+8).

    A trailing 'Z' is accepted as a UTC marker. Timestamps without any offset
    are treated as UTC — previously a naive datetime was passed straight to
    astimezone(), which interprets it as *local* time, contradicting both this
    function's documented contract and the naive-means-UTC convention used in
    prepare_logs_for_llm().

    Args:
        time_str: ISO-format timestamp, e.g. '2024-01-01T00:00:00Z'.

    Returns:
        Timezone-aware datetime in UTC+8, or None if parsing fails.
    """
    try:
        dt = datetime.fromisoformat(time_str.replace('Z', '+00:00'))
        if dt.tzinfo is None:
            # Naive input: assume UTC instead of the host's local timezone.
            dt = dt.replace(tzinfo=timezone.utc)
        # Convert to Beijing time (UTC+8).
        return dt.astimezone(timezone(timedelta(hours=8)))
    except Exception as e:
        logger.error(f"时间解析失败: {str(e)}")
        return None

def collect_logs_for_anomaly(anomaly: Dict[str, Any], db_config: Dict[str, Any]) -> Dict[str, Any]:
    """
    Collect logs falling inside the anomaly's time window
    [start_time, start_time + duration] and classify them by log level.

    Args:
        anomaly: dict holding at least 'start_time' (ISO string) and
            'duration' (seconds, int-convertible); 'id' is used for logging.
        db_config: connection settings forwarded to fetch_logs_by_timespan().

    Returns a dict shaped like: {
        'logs_by_level': {'ERROR': [...], 'WARNING': [...], 'INFO': [...], 'DEBUG': [...]},
        'total_count': 123,
        'error_count': 10,
        'warning_count': 20,
        'info_count': ..., 'debug_count': ...,
        'stats': {...},
        'system_logs': [...],
        'security_logs': [...],
        'application_logs': [...]
    }
    On any validation/parsing failure an all-empty result is returned instead
    of raising, so batch callers can keep going.
    """
    start_time = anomaly.get('start_time')
    duration = anomaly.get('duration')
    anomaly_id = anomaly.get('id', 'unknown')

    # Validate required fields before touching the database.
    if not start_time or duration is None:
        logger.warning(f"异常 {anomaly_id} 缺少start_time或duration字段")
        return create_empty_result()

    try:
        duration = int(duration)
    except Exception as e:
        logger.error(f"异常 {anomaly_id} duration转换失败: {str(e)}")
        return create_empty_result()

    start = parse_datetime(start_time)
    if not start:
        logger.error(f"异常 {anomaly_id} start_time解析失败: {start_time}")
        return create_empty_result()

    end = start + timedelta(seconds=duration)

    logger.info(f"查询时间范围: {start.strftime('%Y-%m-%d %H:%M:%S')} 至 {end.strftime('%Y-%m-%d %H:%M:%S')}")

    # Fetch logs from all tables for the window.
    all_logs = fetch_logs_by_timespan(db_config, start, end)

    # Bucket every log by its (upper-cased) level; unrecognized levels are
    # dropped from logs_by_level but still counted in total_count below.
    logs_by_level: Dict[str, List[Dict[str, Any]]] = {
        'ERROR': [],
        'WARNING': [],
        'INFO': [],
        'DEBUG': []
    }
    for table_name, logs in all_logs.items():
        for log in logs:
            level = log.get('log_level', '').upper()
            if level in logs_by_level:
                # Tag each log with the table it came from.
                log['source_table'] = table_name
                logs_by_level[level].append(log)

    # Aggregate statistics for downstream reporting.
    stats = analyze_logs(all_logs, anomaly)

    result = {
        'logs_by_level': logs_by_level,
        'total_count': sum(len(logs) for logs in all_logs.values()),
        'error_count': len(logs_by_level['ERROR']),
        'warning_count': len(logs_by_level['WARNING']),
        'info_count': len(logs_by_level['INFO']),
        'debug_count': len(logs_by_level['DEBUG']),
        'stats': stats
    }

    # Also expose the per-table lists (system_logs/security_logs/application_logs).
    result.update(all_logs)

    return result

def fetch_logs_by_timespan(db_config: Dict[str, Any], start: datetime, end: datetime) -> Dict[str, List[Dict[str, Any]]]:
    """Query every log table for rows inside [start, end], keyed by table name.

    On any database error an empty list per table is returned (best effort);
    the connection is always closed.
    """
    tables = ('system_logs', 'security_logs', 'application_logs')
    connection = None
    try:
        connection = pymysql.connect(
            host=db_config.get('host', 'localhost'),
            port=db_config.get('port', 3306),
            user=db_config.get('user', 'root'),
            password=db_config.get('password', '1234'),
            database=db_config.get('database', 'kylin_2025'),
            charset='utf8mb4',
            cursorclass=DictCursor
        )

        # Render datetimes as 'YYYY-MM-DD HH:MM:SS' to match the DB timestamp
        # column format.
        time_fmt = '%Y-%m-%d %H:%M:%S'
        window = (start.strftime(time_fmt), end.strftime(time_fmt))

        # One query per table, preserving the canonical table order.
        return {name: query_logs(connection, name, window[0], window[1]) for name in tables}
    except Exception as e:
        logger.error(f"数据库查询失败: {str(e)}")
        return {name: [] for name in tables}
    finally:
        if connection:
            connection.close()

def query_logs(conn, table: str, start: str, end: str) -> List[Dict[str, Any]]:
    """Fetch all logs from `table` within [start, end].

    Rows are ordered by severity (ERROR > WARNING > INFO > DEBUG, via MySQL
    FIELD()) and then by timestamp. Returns [] on query failure.

    NOTE(security): `table` is interpolated into the SQL text because
    identifiers cannot be bound as parameters — callers must only pass
    trusted, hard-coded table names. The time bounds ARE parameterized.
    """
    sql = f"""
    SELECT *
    FROM {table}
    WHERE timestamp BETWEEN %s AND %s
    ORDER BY 
        FIELD(log_level, 'ERROR', 'WARNING', 'INFO', 'DEBUG'), 
        timestamp
    """
    try:
        with conn.cursor() as cur:
            cur.execute(sql, (start, end))
            rows = list(cur.fetchall())
    except Exception as e:
        logger.error(f"表 {table} 查询失败: {str(e)}")
        return []

    # Only log at INFO when something was found, to keep the output quiet.
    if rows:
        logger.info(f"从表 {table} 查询到 {len(rows)} 条日志，时间范围: {start} 至 {end}")
    else:
        logger.debug(f"从表 {table} 查询到 0 条日志")
    return rows

def analyze_logs(logs_by_table: Dict[str, List[Dict[str, Any]]], anomaly: Dict[str, Any]) -> Dict[str, Any]:
    """Summarize log statistics for one anomaly.

    Returns per-level counts ('UNKNOWN' for rows missing log_level), per-table
    counts, the lower-cased anomaly type, and the anomaly's raw time window.
    """
    # Count log levels across every table in one pass.
    level_counts = Counter(
        entry.get('log_level', 'UNKNOWN').upper()
        for rows in logs_by_table.values()
        for entry in rows
    )

    # Row count per source table.
    table_counts = {name: len(rows) for name, rows in logs_by_table.items()}

    return {
        'level_stats': dict(level_counts),
        'table_stats': table_counts,
        'anomaly_type': anomaly.get('type', '').lower(),
        'time_period': {
            'start': anomaly.get('start_time'),
            'duration': anomaly.get('duration')
        }
    }

def create_empty_result() -> Dict[str, Any]:
    """Build the all-empty result skeleton returned when log collection fails."""
    skeleton: Dict[str, Any] = {
        'logs_by_level': {lvl: [] for lvl in ('ERROR', 'WARNING', 'INFO', 'DEBUG')}
    }
    # Zeroed counters, in the same key order a populated result uses.
    for counter in ('total_count', 'error_count', 'warning_count', 'info_count', 'debug_count'):
        skeleton[counter] = 0
    skeleton['stats'] = {}
    # Empty per-table lists mirroring fetch_logs_by_timespan()'s keys.
    for table in ('system_logs', 'security_logs', 'application_logs'):
        skeleton[table] = []
    return skeleton

def _extract_log_message(log: Dict[str, Any]) -> str:
    """Return the best available message text for a log row.

    Preference order: 'original' (full raw line), then 'message', then
    'log_message'; empty string when none is present/non-empty.
    """
    message = log.get('original', '')
    if not message:
        message = log.get('message', '')
        if not message and 'log_message' in log:
            message = log.get('log_message', '')
    return message


def _append_log_entries(summary: List[str], logs: List[Dict[str, Any]], limit: int = 10) -> None:
    """Append up to `limit` formatted '[table] time - message' lines to `summary`."""
    for i, log in enumerate(logs[:limit]):
        table = log.get('source_table', 'unknown')
        timestamp = log.get('timestamp', '')
        # Use the DB timestamp verbatim — no timezone conversion for console output.
        timestamp_str = str(timestamp) if timestamp else '未知时间'
        summary.append(f"{i+1}. [{table}] {timestamp_str} - {_extract_log_message(log)}")


def format_logs_summary(anomaly: Dict[str, Any], logs_result: Dict[str, Any]) -> str:
    """Render a human-readable console summary for one anomaly's logs.

    Shows totals, per-table distribution, the first 10 ERROR logs, and — only
    when fewer than 2 ERROR logs exist — the first 10 WARNING logs.
    (The previously duplicated ERROR/WARNING formatting loops are now shared
    via _append_log_entries/_extract_log_message.)
    """
    anomaly_id = anomaly.get('id', 'unknown')
    anomaly_type = anomaly.get('type', 'unknown')

    summary: List[str] = []
    summary.append(f"=== 异常 {anomaly_id} ({anomaly_type}) 相关日志分析 ===")
    summary.append(f"总日志数: {logs_result['total_count']}")
    summary.append(f"ERROR级别: {logs_result['error_count']}")
    summary.append(f"WARNING级别: {logs_result['warning_count']}")
    summary.append(f"INFO级别: {logs_result['info_count']}")

    stats = logs_result.get('stats', {})
    summary.append(f"\n表分布:")
    for table, count in stats.get('table_stats', {}).items():
        summary.append(f"  - {table}: {count}条")

    # ERROR logs (up to 10).
    error_logs = logs_result['logs_by_level'].get('ERROR', [])
    if error_logs:
        summary.append(f"\n=== ERROR日志 (显示前10条) ===")
        _append_log_entries(summary, error_logs)

    # WARNING logs shown only when ERROR coverage is thin (< 2 entries).
    warning_logs = logs_result['logs_by_level'].get('WARNING', [])
    if warning_logs and len(error_logs) < 2:
        summary.append(f"\n=== WARNING日志 (显示前10条) ===")
        _append_log_entries(summary, warning_logs)

    return "\n".join(summary)

def collect_and_analyze_logs_for_anomalies(anomalies: List[Dict[str, Any]], db_config: Dict[str, Any]) -> Dict[str, Dict[str, Any]]:
    """Collect and analyze logs for a batch of anomalies.

    Anomalies sharing an 'id' (default 'unknown') are processed only once.
    Each anomaly's summary is printed to stdout; results are keyed by id.
    """
    results: Dict[str, Dict[str, Any]] = {}
    seen_ids = set()  # guards against processing duplicate anomaly ids

    for item in anomalies:
        aid = item.get('id', 'unknown')
        if aid in seen_ids:
            continue
        seen_ids.add(aid)

        logger.info(f"收集异常 {aid} 的相关日志")

        # Gather and analyze the logs for this anomaly's window.
        outcome = collect_logs_for_anomaly(item, db_config)

        # Emit the console summary.
        print(format_logs_summary(item, outcome))

        results[aid] = outcome

        # Single结果-line log per anomaly: totals plus ERROR/WARNING breakdown.
        logger.info(
            f"异常 {aid} 收集到 {outcome['total_count']} 条日志 "
            f"(ERROR: {outcome['error_count']}, WARNING: {outcome['warning_count']})"
        )

    return results

# Target timezone for timestamps shown to the LLM (UTC+8).
_BEIJING_TZ = timezone(timedelta(hours=8))


def _llm_log_entry(log: Dict[str, Any]) -> Dict[str, Any]:
    """Convert one raw DB log row into the compact dict passed to the LLM.

    datetime timestamps are converted to Beijing time (naive values are
    assumed to be UTC); anything else is stringified as-is. Message preference:
    'original' > 'message' > 'log_message' > '未知消息'.
    """
    timestamp = log.get('timestamp', '')
    if isinstance(timestamp, datetime):
        if timestamp.tzinfo is None:
            # Naive DB timestamp: assume UTC before converting.
            timestamp = timestamp.replace(tzinfo=timezone.utc)
        timestamp_str = timestamp.astimezone(_BEIJING_TZ).strftime('%Y-%m-%d %H:%M:%S')
    else:
        timestamp_str = str(timestamp)

    message = log.get('original', '')
    if not message:
        message = log.get('message', log.get('log_message', '未知消息'))

    return {
        'source': log.get('source_table', '未知'),
        'time': timestamp_str,
        'message': message,
        'pid': log.get('pid', '')
    }


def prepare_logs_for_llm(logs_result: Dict[str, Any], max_logs: int = 10) -> Dict[str, List[Dict[str, Any]]]:
    """Select and format logs for LLM consumption.

    Takes up to `max_logs` ERROR logs and up to `max_logs` WARNING logs; INFO
    logs are added only to top up the combined ERROR+WARNING count to
    `max_logs`. (The per-entry conversion was previously triplicated inline;
    it now lives in _llm_log_entry.)

    Args:
        logs_result: output of collect_logs_for_anomaly (needs 'logs_by_level').
        max_logs: per-level cap for ERROR/WARNING and the combined budget
            used when topping up with INFO logs.

    Returns:
        {'error_logs': [...], 'warning_logs': [...], 'info_logs': [...]}
        where each entry has 'source', 'time', 'message', 'pid'.
    """
    by_level = logs_result['logs_by_level']

    selected: Dict[str, List[Dict[str, Any]]] = {
        'error_logs': [_llm_log_entry(log) for log in by_level.get('ERROR', [])[:max_logs]],
        'warning_logs': [_llm_log_entry(log) for log in by_level.get('WARNING', [])[:max_logs]],
        'info_logs': []
    }

    # Top up with INFO logs only when ERROR+WARNING did not fill the budget.
    taken = len(selected['error_logs']) + len(selected['warning_logs'])
    if taken < max_logs:
        remaining = max_logs - taken
        selected['info_logs'] = [
            _llm_log_entry(log) for log in by_level.get('INFO', [])[:remaining]
        ]

    return selected