import pymysql
from pymysql.cursors import DictCursor
from typing import List, Dict, Any, Optional, Union, Tuple
from datetime import datetime, timedelta
import logging


class LogFetcher:
    """Fetch logs related to an anomaly from a MySQL database.

    Given the start time and duration of an anomaly, queries the
    ``system_logs``, ``security_logs`` and ``application_logs`` tables for
    rows whose ``log_time`` falls inside the anomaly window, with optional
    filtering by keyword or log level.
    """

    # Columns selected from each table. The category column differs per table
    # (``type`` / ``log_type`` / ``source``) and so does the message column.
    _SELECT_COLUMNS = {
        'system_logs': 'id, timestamp, log_time, type, pid, log_level, message, ingest_timestamp',
        'security_logs': 'id, timestamp, log_time, log_type, pid, log_level, log_message, ingest_timestamp',
        'application_logs': 'id, timestamp, log_time, source, pid, log_level, log_message, ingest_timestamp',
    }

    # Free-text message column per table (insertion order also fixes the
    # merge order: system, security, application).
    _MESSAGE_FIELDS = {
        'system_logs': 'message',
        'security_logs': 'log_message',
        'application_logs': 'log_message',
    }

    # Keywords (English + Chinese) used to select logs relevant to each
    # anomaly type in collect_relevant_logs().
    _ANOMALY_KEYWORDS = {
        'cpu': ['cpu', 'processor', 'load', 'high usage', '处理器', '负载', '性能', '超载'],
        'memory': ['memory', 'oom', 'out of memory', '内存', '溢出', '泄漏', '不足'],
        'disk': ['disk', 'storage', 'space', 'full', '磁盘', '存储', '空间', '满', 'io'],
        'network': ['network', 'connection', 'packet', 'bandwidth', '网络', '连接', '带宽', '丢包', '延迟'],
    }

    def __init__(self, host='localhost', port=3306, user='root', password='1234', database='kylin_2025'):
        """Store connection settings; no connection is opened until connect().

        Args:
            host (str): database host
            port (int): database port
            user (str): database user name
            password (str): database password
            database (str): database (schema) name
        """
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.database = database
        self.connection = None  # lazily opened pymysql connection
        self.logger = logging.getLogger(__name__)

    @staticmethod
    def _empty_logs() -> Dict[str, List[Dict[str, Any]]]:
        """Return a fresh empty result dict (one list per log table)."""
        return {'system_logs': [], 'security_logs': [], 'application_logs': []}

    def connect(self) -> bool:
        """Open a connection to the database.

        Returns:
            bool: True on success, False on failure (the error is logged).
        """
        try:
            # Close any previous connection first so it is not leaked when
            # connect() is called more than once.
            if self.connection is not None:
                self.close()
            self.connection = pymysql.connect(
                host=self.host,
                port=self.port,
                user=self.user,
                password=self.password,
                database=self.database,
                charset='utf8mb4',
                cursorclass=DictCursor
            )
            return True
        except Exception as e:
            self.logger.error(f"数据库连接失败: {str(e)}")
            return False

    def close(self) -> None:
        """Close the database connection if one is open."""
        if self.connection:
            self.connection.close()
            self.connection = None

    def _ensure_connected(self) -> bool:
        """Return True if a connection exists or one could be opened."""
        return self.connection is not None or self.connect()

    def _parse_datetime(self, time_str: Union[str, datetime]) -> Optional[datetime]:
        """Parse an ISO-8601 time string into a datetime.

        A trailing 'Z' (UTC) suffix is accepted; a datetime instance is
        passed through unchanged.

        Returns:
            datetime | None: the parsed value, or None on failure (logged).
        """
        if isinstance(time_str, datetime):
            return time_str
        try:
            return datetime.fromisoformat(time_str.replace('Z', '+00:00'))
        except Exception as e:
            self.logger.error(f"时间解析失败: {str(e)}")
            return None

    def fetch_logs_by_anomaly(self, start_time: str, duration: int) -> Dict[str, List[Dict[str, Any]]]:
        """Fetch all three log types inside the anomaly window.

        Args:
            start_time (str): anomaly start time, ISO format
            duration (int): duration in seconds

        Returns:
            Dict[str, List[Dict[str, Any]]]:
                {'system_logs': [...], 'security_logs': [...], 'application_logs': [...]};
                all lists are empty when the start time cannot be parsed or
                no database connection can be established.
        """
        start = self._parse_datetime(start_time)
        if not start:
            return self._empty_logs()

        end = start + timedelta(seconds=duration)

        if not self._ensure_connected():
            return self._empty_logs()

        return {
            'system_logs': self.fetch_system_logs(start, end),
            'security_logs': self.fetch_security_logs(start, end),
            'application_logs': self.fetch_application_logs(start, end)
        }

    def _fetch_range(self, table: str, start_time: datetime, end_time: datetime,
                     log_level: Optional[str] = None) -> List[Dict[str, Any]]:
        """Query one log table for rows with log_time in [start, end].

        ``table`` must be a key of ``_SELECT_COLUMNS`` (never user input), so
        interpolating it into the SQL text is safe; all values are passed as
        query parameters.
        """
        query = (
            f"SELECT {self._SELECT_COLUMNS[table]} FROM {table} "
            "WHERE log_time BETWEEN %s AND %s"
        )
        params: List[Any] = [start_time, end_time]
        if log_level is not None:
            query += " AND log_level = %s"
            params.append(log_level)
        query += " ORDER BY log_time"
        return self._execute_query(query, tuple(params))

    def fetch_system_logs(self, start_time: datetime, end_time: datetime) -> List[Dict[str, Any]]:
        """Return system_logs rows with log_time in [start_time, end_time]."""
        return self._fetch_range('system_logs', start_time, end_time)

    def fetch_security_logs(self, start_time: datetime, end_time: datetime) -> List[Dict[str, Any]]:
        """Return security_logs rows with log_time in [start_time, end_time]."""
        return self._fetch_range('security_logs', start_time, end_time)

    def fetch_application_logs(self, start_time: datetime, end_time: datetime) -> List[Dict[str, Any]]:
        """Return application_logs rows with log_time in [start_time, end_time]."""
        return self._fetch_range('application_logs', start_time, end_time)

    def _execute_query(self, query: str, params: Tuple) -> List[Dict[str, Any]]:
        """Execute a parameterized query and return all rows as dicts.

        Returns an empty list (and logs the error) when there is no open
        connection or the query fails.
        """
        if self.connection is None:
            self.logger.error("数据库连接为None，无法执行查询")
            return []
        try:
            with self.connection.cursor() as cursor:
                cursor.execute(query, params)
                # DictCursor yields one dict per row.
                return list(cursor.fetchall())
        except Exception as e:
            self.logger.error(f"查询执行失败: {str(e)}")
            return []

    def fetch_logs_with_keywords(self, start_time: str, duration: int, keywords: List[str],
                                case_sensitive: bool = False) -> Dict[str, List[Dict[str, Any]]]:
        """Fetch logs in the anomaly window, keeping only keyword matches.

        Args:
            start_time (str): anomaly start time, ISO format
            duration (int): duration in seconds
            keywords (List[str]): keywords; any single match keeps the log
            case_sensitive (bool): case-sensitive matching, default False

        Returns:
            Dict[str, List[Dict[str, Any]]]: same shape as
            fetch_logs_by_anomaly, filtered by keyword. An empty keyword
            list returns all logs unfiltered.
        """
        all_logs = self.fetch_logs_by_anomaly(start_time, duration)
        if not keywords:
            return all_logs

        # Each table stores its free text under a different column name.
        return {
            table: [
                log for log in all_logs[table]
                if self._contains_keywords(log.get(field), keywords, case_sensitive)
            ]
            for table, field in self._MESSAGE_FIELDS.items()
        }

    def _contains_keywords(self, text: Optional[str], keywords: List[str], case_sensitive: bool) -> bool:
        """Return True if *text* contains any of *keywords*.

        ``text`` may be None (NULL message column in the database); None
        never matches.
        """
        if text is None:
            return False
        if not case_sensitive:
            text = text.lower()
            keywords = [k.lower() for k in keywords]
        return any(keyword in text for keyword in keywords)

    def fetch_logs_by_level(self, start_time: str, duration: int, log_level: str) -> Dict[str, List[Dict[str, Any]]]:
        """Fetch logs of one level in the anomaly window.

        Args:
            start_time (str): anomaly start time, ISO format
            duration (int): duration in seconds
            log_level (str): log level to keep, e.g. 'ERROR', 'WARNING'

        Returns:
            Dict[str, List[Dict[str, Any]]]: same shape as
            fetch_logs_by_anomaly, restricted to ``log_level``.
        """
        start = self._parse_datetime(start_time)
        if not start:
            return self._empty_logs()

        end = start + timedelta(seconds=duration)

        if not self._ensure_connected():
            return self._empty_logs()

        return {
            table: self._fetch_range(table, start, end, log_level)
            for table in self._SELECT_COLUMNS
        }

    def collect_relevant_logs(self, anomaly: Dict[str, Any], max_logs: int = 10) -> Dict[str, List[Dict[str, Any]]]:
        """Collect the logs most relevant to a single anomaly.

        Args:
            anomaly (Dict[str, Any]): must contain 'start_time' and
                'duration'; 'type' selects the keyword list
                (cpu/memory/disk/network).
            max_logs (int): maximum logs kept per source table per category.

        Returns:
            Dict[str, List[Dict[str, Any]]]:
                {
                    'error_logs': [...],    # ERROR-level logs
                    'keyword_logs': [...],  # keyword-matching logs
                    'warning_logs': [...],  # WARNING-level logs
                    'all_logs': [...],      # all logs in the window (capped)
                }
        """
        start_time = anomaly.get('start_time')
        duration = anomaly.get('duration')

        # Explicit None checks (not truthiness) so a zero-second anomaly is
        # still processed instead of being dropped.
        if start_time is None or duration is None:
            self.logger.warning("缺少开始时间或持续时间，无法收集日志")
            return {
                'error_logs': [],
                'keyword_logs': [],
                'warning_logs': [],
                'all_logs': []
            }

        # Unknown anomaly types get no keywords, hence no keyword_logs.
        keywords = self._ANOMALY_KEYWORDS.get(anomaly.get('type', ''), [])

        all_logs = self.fetch_logs_by_anomaly(start_time, duration)
        error_logs = self.fetch_logs_by_level(start_time, duration, 'ERROR')
        warning_logs = self.fetch_logs_by_level(start_time, duration, 'WARNING')
        keyword_logs = (self.fetch_logs_with_keywords(start_time, duration, keywords)
                        if keywords else self._empty_logs())

        return {
            'error_logs': self._merge_logs(error_logs, max_logs),
            'keyword_logs': self._merge_logs(keyword_logs, max_logs),
            'warning_logs': self._merge_logs(warning_logs, max_logs),
            'all_logs': self._merge_logs(all_logs, max_logs)
        }

    def _merge_logs(self, logs: Dict[str, List[Dict[str, Any]]], max_count: int = 10) -> List[Dict[str, Any]]:
        """Merge the per-table log lists into one time-sorted list.

        Each entry is copied before 'source_table' and 'display_message' are
        added, so the caller's dicts are never mutated.

        Args:
            logs: dict with 'system_logs' / 'security_logs' /
                'application_logs' lists.
            max_count (int): maximum entries taken from each table.

        Returns:
            List[Dict[str, Any]]: merged entries sorted by log_time
            (falling back to timestamp).
        """
        merged: List[Dict[str, Any]] = []
        for table, field in self._MESSAGE_FIELDS.items():
            for log in logs.get(table, [])[:max_count]:
                entry = dict(log)  # shallow copy: do not mutate caller data
                entry['source_table'] = table
                entry['display_message'] = entry.get(field) or ''
                merged.append(entry)

        # NOTE(review): assumes all entries' log_time/timestamp values are
        # mutually comparable (all datetimes or all strings) — confirm the
        # table schemas.
        merged.sort(key=lambda x: x.get('log_time', x.get('timestamp', '')))
        return merged

    def format_logs_for_display(self, logs: List[Dict[str, Any]]) -> str:
        """Format merged log entries into a human-readable string.

        Args:
            logs (List[Dict[str, Any]]): entries as produced by _merge_logs.

        Returns:
            str: one numbered, '-'-ruled section per log entry.
        """
        sections = []
        for i, log in enumerate(logs, start=1):
            source_table = log.get('source_table', '未知来源')
            log_time = log.get('log_time', log.get('timestamp', '未知时间'))
            log_level = log.get('log_level', '未知级别')
            pid = log.get('pid', 'N/A')
            message = log.get('display_message', '无消息内容')

            sections.append(
                f"[{i}] {source_table} | 时间: {log_time} | 级别: {log_level} | PID: {pid}\n"
                f"消息: {message}\n{'-' * 80}"
            )
        return "\n".join(sections)

    def fetch_logs_by_anomaly_list(self, anomalies: List[Dict[str, Any]]) -> Dict[str, Dict[str, List[Dict[str, Any]]]]:
        """Fetch the full log window for every anomaly in a list.

        Args:
            anomalies (List[Dict[str, Any]]): each item should carry
                'start_time' and 'duration'; 'id' is used as the result key
                (falls back to 'unknown-<idx>').

        Returns:
            Dict[str, Dict[str, List[Dict[str, Any]]]]:
                {anomaly_id: {'system_logs': [...], 'security_logs': [...],
                              'application_logs': [...]}}
        """
        results: Dict[str, Dict[str, List[Dict[str, Any]]]] = {}
        for idx, anomaly in enumerate(anomalies):
            anomaly_id = anomaly.get('id', f'unknown-{idx}')
            start_time = anomaly.get('start_time')
            duration = anomaly.get('duration')
            # Explicit None checks so duration == 0 is not skipped.
            if start_time is None or duration is None:
                self.logger.warning(f"异常 {anomaly_id} 缺少开始时间或持续时间，跳过")
                results[anomaly_id] = self._empty_logs()
                continue
            results[anomaly_id] = self.fetch_logs_by_anomaly(start_time, duration)
        return results


# Usage example: pull anomalies from Redis, then fetch and print the logs
# for each anomaly's time window.
if __name__ == "__main__":
    import json  # NOTE(review): imported but unused below — confirm before removing
    from Agent.utils.redis_client import RedisClient
    
    # Configure logging for the demo run
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logger = logging.getLogger("LogFetcherTest")
    
    # Database connection settings
    DB_CONFIG = {
        'host': 'localhost',
        'port': 3306,
        'user': 'root',
        'password': 'wzw1234',
        'database': 'kylin_2025'
    }
    
    # Create the Redis client and the log fetcher
    redis_client = RedisClient(host='localhost', port=6379)
    log_fetcher = LogFetcher(**DB_CONFIG)
    
    # Connect to the database
    if log_fetcher.connect():
        logger.info("成功连接到数据库")
        
        # Fetch anomaly data from Redis
        redis_key = "anomaly_data"
        anomalies = redis_client.get_anomalies(redis_key)
        
        if anomalies:
            logger.info(f"获取到 {len(anomalies)} 个异常")
            
            # Count anomalies per type for the summary
            anomaly_types = {}
            for anomaly in anomalies:
                anomaly_type = anomaly.get('type', '未知')
                if anomaly_type not in anomaly_types:
                    anomaly_types[anomaly_type] = 0
                anomaly_types[anomaly_type] += 1
            
            logger.info("异常类型分布:")
            for atype, count in anomaly_types.items():
                logger.info(f"  - {atype}: {count}个")
            
            # Process each anomaly in turn
            for idx, anomaly in enumerate(anomalies):
                # Extract the anomaly's fields
                anomaly_id = anomaly.get('id', f'unknown-{idx}')
                start_time = anomaly.get('start_time')
                duration = anomaly.get('duration')
                anomaly_type = anomaly.get('type', '未知')
                severity = anomaly.get('severity', '未知')
                device = anomaly.get('device', '未知')
                
                logger.info(f"\n{'='*50}")
                logger.info(f"处理异常 #{idx+1}: {anomaly_id}")
                logger.info(f"类型: {anomaly_type}, 严重程度: {severity}, 设备: {device}")
                logger.info(f"开始时间: {start_time}, 持续时间: {duration}秒")
                
                # Skip anomalies that lack the required time fields
                if not start_time or not duration:
                    logger.warning(f"异常 {anomaly_id} 缺少开始时间或持续时间，跳过处理")
                    continue
                
                try:
                    # Collect all logs related to this anomaly
                    logger.info(f"收集异常相关的日志...")
                    collected_logs = log_fetcher.collect_relevant_logs(anomaly)
                    
                    # Count the logs collected per category
                    error_count = len(collected_logs['error_logs'])
                    keyword_count = len(collected_logs['keyword_logs'])
                    warning_count = len(collected_logs['warning_logs'])
                    all_count = len(collected_logs['all_logs'])
                    
                    logger.info(f"收集到的日志统计:")
                    logger.info(f"  - ERROR级别日志: {error_count} 条")
                    logger.info(f"  - 包含关键词的日志: {keyword_count} 条")
                    logger.info(f"  - WARNING级别日志: {warning_count} 条")
                    logger.info(f"  - 所有相关日志: {all_count} 条")
                    
                    # Print the anomaly header
                    print(f"\n{'='*50}")
                    print(f"异常ID: {anomaly_id}")
                    print(f"类型: {anomaly_type}, 严重程度: {severity}, 设备: {device}")
                    print(f"开始时间: {start_time}, 持续时间: {duration}秒")
                    print(f"{'='*50}\n")
                    
                    # Print the related logs.
                    # ERROR-level logs take priority.
                    if error_count > 0:
                        print(f"\n==== ERROR级别日志 ({error_count}条) ====")
                        print(log_fetcher.format_logs_for_display(collected_logs['error_logs']))
                    
                    # Keyword-matching logs (usually the most relevant to the anomaly)
                    if keyword_count > 0:
                        print(f"\n==== 包含关键词的日志 ({keyword_count}条) ====")
                        print(log_fetcher.format_logs_for_display(collected_logs['keyword_logs']))
                    
                    # Fall back to WARNING logs when there are no ERROR or keyword logs
                    if error_count == 0 and keyword_count == 0 and warning_count > 0:
                        print(f"\n==== WARNING级别日志 ({warning_count}条) ====")
                        print(log_fetcher.format_logs_for_display(collected_logs['warning_logs']))
                    
                    # If none of the above categories matched, print all logs
                    if error_count == 0 and keyword_count == 0 and warning_count == 0 and all_count > 0:
                        print(f"\n==== 所有相关日志 ({all_count}条) ====")
                        print(log_fetcher.format_logs_for_display(collected_logs['all_logs']))
                    
                    # Nothing was collected at all
                    if all_count == 0:
                        print("没有找到与此异常相关的任何日志记录")
                    
                except Exception as e:
                    logger.error(f"处理异常 {anomaly_id} 时出错: {str(e)}")
                    continue
                
                logger.info(f"{'='*50}\n")
                
            # Emit a summary report
            logger.info("\n==== 总结报告 ====")
            logger.info(f"共处理 {len(anomalies)} 个异常")
            logger.info(f"异常类型分布:")
            for atype, count in anomaly_types.items():
                logger.info(f"  - {atype}: {count}个")
            
            # Additionally: batch-query the logs for every anomaly window
            all_logs_by_anomaly = log_fetcher.fetch_logs_by_anomaly_list(anomalies)
            print("\n==== 每个异常时间段内的所有日志信息 ====")
            for anomaly_id, logs in all_logs_by_anomaly.items():
                print(f"\n异常ID: {anomaly_id}")
                for log_type, log_list in logs.items():
                    print(f"  {log_type} 共 {len(log_list)} 条")
                    for log in log_list[:3]:  # show only the first 3 entries
                        print(f"    时间: {log.get('log_time', log.get('timestamp', ''))} 消息: {log.get('message', log.get('log_message', ''))}")
        else:
            logger.warning("Redis中未找到异常数据")
        
        # Close the database connection
        log_fetcher.close()
        logger.info("数据库连接已关闭")
    else:
        logger.error("数据库连接失败")