"""
Boss节点 - 主控智能体节点
负责分析异常指标数据和相关日志，进行根因分析并生成解决方案
"""
# 在文件顶部定义全局变量



# Standard library
import asyncio
import json
import logging
import os
import re
from collections import deque
from datetime import datetime
from typing import Dict, List, Any

# Third-party
import requests
from langchain_ollama import OllamaLLM
from langchain_openai import ChatOpenAI

# Local
import config
from Agent.utils.anomaly_data_manager import AnomalyDataManager
from Agent.utils.log_fetcher import LogFetcher
from Agent.utils.llm_parser import parse_boss_llm_response
from Agent.utils.anomaly_data_async_fetcher import fetch_and_delete_anomaly_data
from Agent.utils.anomaly_log_query import query_logs_for_anomaly
from Agent.utils.anomaly_log_analyzer import collect_and_analyze_logs_for_anomalies, prepare_logs_for_llm
# Logging configuration for this module.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger("BossNode")

# LLM client used for root-cause analysis: a locally hosted Ollama DeepSeek model.
llm = OllamaLLM(base_url="http://10.11.5.14:11434", model="deepseek-r1:14b")
# Alternative: hosted DeepSeek via an OpenAI-compatible endpoint.
# NOTE(review): a live-looking API key was previously committed in this
# commented-out block; it has been redacted here. Load secrets from the
# environment (e.g. os.environ["DEEPSEEK_API_KEY"]) instead of source code.
# llm = ChatOpenAI(
#     api_key="<REDACTED - use an environment variable>",
#     base_url="https://api.deepseek.com",
#     model="deepseek-reasoner",
#     temperature=0.7
# )


# Alternative: smaller local model, useful for quick testing.
# llm = OllamaLLM(base_url="http://127.0.0.1:11434", model="deepseek-r1:1.5b")

def boss_node(state: Dict[str, Any]) -> Dict[str, Any]:
    """Boss LLM node: analyze anomalous metric data and related logs,
    perform root-cause analysis and produce a remediation plan.

    Workflow:
        1. Drain all anomaly entries from Redis (async; entries are deleted
           as they are consumed).
        2. Collect database logs related to each anomaly.
        3. Read and summarize the kylin_trace trace files.
        4. Build an analysis prompt and invoke the LLM.
        5. Parse the LLM's JSON answer and POST it to the backend service.

    Args:
        state: Workflow state dict. Recognized keys (all optional):
            - "redis_config": Redis connection kwargs (host/port/db/password).
            - "db_config": MySQL connection kwargs used by LogFetcher.
            - "redis_key": key prefix holding anomaly data
              (default "anomaly_data").

    Returns:
        The parsed analysis result dict on success, or a
        {"success": False, "error": ...} dict on failure.
    """
    logger.info("进入Boss节点函数")

    # Connection settings, with localhost defaults for standalone runs.
    redis_config = state.get("redis_config", {
        'redis_host': 'localhost',
        'redis_port': 6379,
        'redis_db': 0,
        'redis_password': None
    })

    db_config = state.get("db_config", {
        'host': 'localhost',
        'port': 3306,
        'user': 'root',
        'password': 'wzw123456',
        'database': 'kylin_2025'
    })

    # Redis key (prefix) under which anomaly data is stored.
    redis_key = state.get("redis_key", "anomaly_data")

    # AnomalyDataManager reads anomaly statistics from Redis;
    # LogFetcher reads related logs from the database.
    anomaly_manager = AnomalyDataManager(**redis_config)
    log_fetcher = LogFetcher(**db_config)

    # Bail out early if the log database is unreachable.
    if not log_fetcher.connect():
        logger.error("数据库连接失败")
        return {"success": False, "error": "数据库连接失败"}

    try:
        # 1. Fetch (and delete) every "anomaly_data:*" entry from Redis.
        logger.info("异步从Redis获取所有anomaly_data异常数据")
        redis_url = _build_redis_url(redis_config)
        all_anomalies = asyncio.run(_collect_anomalies(redis_url))
        if not all_anomalies:
            logger.warning("Redis中未找到anomaly_data异常数据")
            config.RESULT = None
            return {"success": False, "error": "未找到异常数据"}
        logger.info("获取到 %d 个异常", len(all_anomalies))
        print("=== 从Redis获取到的异常数据 ===")

        # Aggregate statistics (by type/device/severity) still come from the
        # original synchronous manager interface.
        anomaly_summary = anomaly_manager.get_anomaly_summary(redis_key)
        anomaly_data = {
            "anomalies": all_anomalies,
            "summary": anomaly_summary
        }

        # 2. Collect logs related to each anomaly; the helper prints its own
        #    console summary as a side effect.
        logger.info("开始收集 %d 个异常的相关日志", len(all_anomalies))
        logs_by_anomaly_raw = collect_and_analyze_logs_for_anomalies(all_anomalies, db_config)
        # Condense each anomaly's logs into an LLM-friendly shape.
        logs_by_anomaly = {
            anomaly_id: prepare_logs_for_llm(logs_result)
            for anomaly_id, logs_result in logs_by_anomaly_raw.items()
        }

        # 3. Read and summarize kylin_trace/trace.txt and kylin_trace.txt.
        logger.info("开始读取和分析kylin_trace文件")
        trace_analysis = analyze_trace_files()

        # 4. Build the LLM prompt.
        logger.info("构建提示词")
        prompt = _build_analysis_prompt(anomaly_data, logs_by_anomaly, trace_analysis)

        # 5. Invoke the LLM.
        logger.info("调用LLM生成分析结果")
        llm_response = llm.invoke(prompt)
        print(llm_response)
        logger.info("获取到LLM响应")

        # 6. Parse the (expected-JSON) response.
        result = _parse_llm_result(llm_response)

        # 7. Forward the result to the backend; failures there are logged but
        #    never discard the analysis result itself.
        _post_result_to_backend(result)

        logger.info("Boss节点分析完成")
        return result

    except Exception as e:
        logger.error(f"处理分析请求时出错: {str(e)}")
        return {
            "success": False,
            "error": f"处理分析请求时出错: {str(e)}"
        }
    finally:
        # Always release the database connection.
        log_fetcher.close()


def _build_redis_url(redis_config: Dict[str, Any]) -> str:
    """Build a redis:// connection URL from a redis config dict."""
    host = redis_config.get('redis_host', 'localhost')
    port = redis_config.get('redis_port', 6379)
    db = redis_config.get('redis_db', 0)
    password = redis_config.get('redis_password', None)
    if password:
        return f"redis://:{password}@{host}:{port}/{db}"
    return f"redis://{host}:{port}/{db}"


async def _collect_anomalies(redis_url: str) -> List[Any]:
    """Drain all anomaly entries from Redis and flatten them into one list.

    Each payload yielded by fetch_and_delete_anomaly_data may be a full
    anomaly_data structure (a dict with an 'anomalies' list), a bare list of
    anomalies, or a single anomaly; all three shapes are normalized here.
    """
    anomalies = []
    async for payload in fetch_and_delete_anomaly_data(redis_url):
        if isinstance(payload, dict) and 'anomalies' in payload:
            anomalies.extend(payload['anomalies'])
        elif isinstance(payload, list):
            anomalies.extend(payload)
        else:
            anomalies.append(payload)
    return anomalies


def _format_log_section(label: str, logs: List[Dict[str, Any]], limit=None) -> str:
    """Format one severity section of logs for the prompt.

    The header always reports the full count; ``limit`` caps how many
    entries are actually included (None = include all).
    """
    entries = logs if limit is None else logs[:limit]
    lines = [f"{label}级别日志 ({len(logs)}条):\n"]
    for log in entries:
        source = log.get('source', '未知')
        log_time = log.get('time', '未知时间')
        message = log.get('message', '未知消息')
        lines.append(f"- [{source}] {log_time}: {message}\n")
    return "".join(lines)


def _build_analysis_prompt(anomaly_data: Dict[str, Any],
                           logs_by_anomaly: Dict[str, Any],
                           trace_analysis: str) -> str:
    """Assemble the full root-cause-analysis prompt for the LLM.

    The prompt contains the anomaly summary, the trace-file analysis, one
    detail section per anomaly (with its related logs), and the required
    JSON response schema.
    """
    anomalies = anomaly_data.get("anomalies", [])
    summary = anomaly_data.get("summary", {})

    prompt = f"""
你是麒麟操作系统多智能体智能运维管家，你的任务是分析系统中的异常指标和相关日志，进行根因分析，并提出解决方案。

大模型会获取目录下的文件，对于trace.txt会读取该文件后5000行的内容和kylin_trace.txt文件的完整内容。通过读取这两个文件的内容加入提示词给大模型进行分析。

### 异常指标数据汇总
总异常数量: {summary.get('total', 0)}
按类型统计: {json.dumps(summary.get('by_type', {}), ensure_ascii=False, indent=2)}
按严重程度统计: {json.dumps(summary.get('by_severity', {}), ensure_ascii=False, indent=2)}
按设备统计: {json.dumps(summary.get('by_device', {}), ensure_ascii=False, indent=2)}

### 系统追踪文件分析 (kylin_trace/trace.txt 和 kylin_trace/kylin_trace.txt)
{trace_analysis}

### 详细异常指标数据:
"""

    # One detail section per anomaly, followed by its related logs.
    for anomaly in anomalies:
        anomaly_id = anomaly.get('id', 'unknown')
        anomaly_type = anomaly.get('type', '未知')
        measurement = anomaly.get('measurement', '未知')
        severity = anomaly.get('severity', '未知')
        device = anomaly.get('device', '未知')
        start_time = anomaly.get('start_time', '未知')
        duration = anomaly.get('duration', 0)
        complexity = anomaly.get('complexity_score', 0)
        impact = anomaly.get('business_impact', '未知')
        affected_services = ', '.join(anomaly.get('affected_services', []))
        metrics = anomaly.get('metrics', {})

        prompt += f"""
异常ID: {anomaly_id}
类型: {anomaly_type}
测量项: {measurement}
严重程度: {severity}
设备: {device}
开始时间: {start_time}
持续时间: {duration}秒
复杂度: {complexity}
业务影响: {impact}
受影响服务: {affected_services}

指标数据:
{json.dumps(metrics, ensure_ascii=False, indent=2)}

相关日志:
"""

        logs = logs_by_anomaly.get(anomaly_id, {})

        # ERROR logs: include all of them.
        error_logs = logs.get('error_logs', [])
        if error_logs:
            prompt += _format_log_section("ERROR", error_logs)

        # WARNING logs: include only the first 5.
        warning_logs = logs.get('warning_logs', [])
        if warning_logs:
            prompt += _format_log_section("WARNING", warning_logs, limit=5)

        # INFO logs: only when higher-severity logs are scarce; first 5.
        info_logs = logs.get('info_logs', [])
        if info_logs and not error_logs and len(warning_logs) < 2:
            prompt += _format_log_section("INFO", info_logs, limit=5)

    # Analysis instructions and the mandatory JSON response schema.
    prompt += """
                    根据上述异常指标数据和相关日志，请对系统进行全面分析：
                    1. 识别主要问题和根本原因
                    2. 对每个异常进行详细分析，确定问题类型、根本原因、相关因素和证据
                    3. 提出解决问题的建议，包括立即行动和长期措施
                    4. 分析受影响的服务及其影响程度
                    5. 描述分析过程和推理链
                    
                    
                    响应格式（严格按照以下JSON格式返回）：
                    {
                      "success": true,
                      "analysis_result": {
                        "request_id": "req_XXXXXXXXXX",
                        "analysis_timestamp": "ISO格式的当前时间",
                        "overall_assessment": {
                          "primary_issue": "主要问题的简短描述",
                          "root_cause": "根本原因的简短描述",
                          "severity_level": 0.0-10.0的严重程度分数,
                          "business_impact": "low|moderate|high|critical",
                          "estimated_resolution_time": "预计解决时间"
                        },
                        "anomaly_analysis": [
                          {
                            "anomaly_id": "异常的ID",
                            "analysis": {
                              "issue_type": "问题类型",
                              "root_cause": "根本原因",
                              "contributing_factors": ["相关因素1", "相关因素2", ...],
                              "evidence": ["证据1", "证据2", ...],
                              "severity": 0.0-10.0的严重程度分数,
                            }
                          }
                        ],
                        "recommendations": {
                          "immediate_actions": [
                            {
                              "action": "操作标识",
                              "description": "操作描述",
                              "priority": "high|medium|low",
                              "estimated_duration": "预计持续时间",
                              "risk_level": "high|medium|low",
                              "affected_services": ["受影响服务1", ...]
                            }
                          ],
                          "long_term_actions": [
                            {
                              "action": "操作标识",
                              "description": "操作描述",
                              "priority": "high|medium|low",
                              "estimated_duration": "预计持续时间",
                              "risk_level": "high|medium|low",
                              "affected_services": ["受影响服务1", ...]
                            }
                          ]
                        },
                        "affected_services_summary": {
                          "primary_affected": ["主要受影响服务1", ...],
                          "secondary_affected": ["次要受影响服务1", ...],
                          "impact_assessment": {
                            "服务名1": "high|medium|low",
                            ...
                          }
                        },
                        "langraph_workflow": {
                          "nodes_executed": ["执行的节点1", ...],
                          "reasoning_chain": ["推理步骤1", ...],
                          "confidence_factors": {
                            "因素1": 0.0-1.0的分数,
                            ...
                          }
                        }
                      }
                    }
                    仅返回JSON格式的分析结果，不要包含其他文本。
        """
    return prompt


def _parse_llm_result(llm_response) -> Dict[str, Any]:
    """Parse the LLM response into a dict.

    Tries the project parser first, then a plain json.loads; on failure
    returns an error dict carrying the raw response.
    """
    parsed_json = parse_boss_llm_response(llm_response)
    if parsed_json is not None:
        return parsed_json
    try:
        return json.loads(llm_response)
    except Exception as e:
        logger.error(f"LLM响应不是有效的JSON格式: {str(e)}")
        return {
            "success": False,
            "error": "LLM响应不是有效的JSON格式",
            "response": llm_response
        }


def _post_result_to_backend(result: Dict[str, Any]) -> None:
    """POST the analysis result to the backend processing endpoint.

    All network/JSON failures are logged and swallowed so that a backend
    outage never loses the LLM analysis result itself.
    """
    url = "http://localhost:8001/api/intelligent/process_llm_analysis"
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/json"
    }
    try:
        logger.info(f"正在发送分析结果到后端接口: {url}")
        response = requests.post(url, json=result, headers=headers, timeout=300)

        logger.info(f"POST到后端状态码: {response.status_code}")
        logger.info(f"响应时间: {response.elapsed.total_seconds():.2f} 秒")

        if response.status_code == 200:
            _log_backend_success(response.json())
        else:
            logger.error("❌ 后端处理失败!")
            logger.error(f"错误信息: {response.text}")

    except requests.exceptions.ConnectionError:
        logger.error("❌ 连接错误: 无法连接到后端服务器")
        logger.error("请确保后端服务器正在运行在 http://localhost:8001")

    except requests.exceptions.Timeout:
        logger.error("❌ 请求超时: 后端服务器响应时间过长")

    except requests.exceptions.RequestException as e:
        logger.error(f"❌ 请求异常: {e}")

    except json.JSONDecodeError as e:
        # Raised by response.json() above; response is bound at this point.
        logger.error(f"❌ JSON解析错误: {e}")
        logger.error(f"响应内容: {response.text}")


def _log_backend_success(response_data: Dict[str, Any]) -> None:
    """Log a human-readable summary of a successful backend response."""
    logger.info("✅ 后端处理成功!")
    logger.info(f"时间戳: {response_data.get('timestamp', 'N/A')}")
    logger.info(f"Ansible目录: {response_data.get('ansible_directory', 'N/A')}")

    # Confidence analysis per anomaly.
    confidence_analysis = response_data.get('confidence_analysis', [])
    logger.info(f"置信度分析结果: {len(confidence_analysis)} 个异常")
    for i, conf in enumerate(confidence_analysis, 1):
        logger.info(f"  异常 {i}: ID={conf.get('anomaly_id', 'N/A')}, "
                    f"置信度={conf.get('confidence_score', 0):.3f}, "
                    f"处理方式={'规则引擎' if conf.get('confidence_score', 0) >= 0.7 else 'LLM分析'}")

    # Overall processing summary.
    summary = response_data.get('summary', {})
    logger.info(f"处理摘要: 总异常={summary.get('total_anomalies', 0)}, "
                f"规则引擎处理={summary.get('rule_engine_processed', 0)}, "
                f"LLM处理={summary.get('llm_processed', 0)}")

    # Detailed per-path results.
    rule_engine_results = response_data.get('rule_engine_results', [])
    llm_results = response_data.get('llm_results', [])

    logger.info(f"规则引擎结果: {len(rule_engine_results)} 个")
    for i, rule_result in enumerate(rule_engine_results, 1):
        logger.info(f"  结果 {i}: {rule_result.get('success', False)} - {rule_result.get('selected_script', 'N/A')}")

    logger.info(f"LLM分析结果: {len(llm_results)} 个")
    for i, llm_result in enumerate(llm_results, 1):
        logger.info(f"  结果 {i}: {llm_result.get('success', False)} - {llm_result.get('generated_script', 'N/A')}")

# Pre-compiled pattern matching "YYYY-MM-DD" dates or "HH:MM:SS" times,
# hoisted out of the per-line scan loop.
_TIMESTAMP_PATTERN = re.compile(r'\d{4}-\d{2}-\d{2}|\d{2}:\d{2}:\d{2}')

# Keywords that flag a trace line as potentially performance-related.
_PERF_KEYWORDS = ('cpu', 'memory', 'disk', 'network', 'timeout', 'slow')


def analyze_trace_files() -> str:
    """Read and analyze kylin_trace/trace.txt and kylin_trace/kylin_trace.txt.

    trace.txt: only the last 5000 lines are scanned for error/warning
    counts, performance-related lines and timestamps. The file is streamed
    through a bounded deque so arbitrarily large trace files never have to
    fit in memory (previously the whole file was loaded via readlines()).

    kylin_trace.txt: read in full; when it contains JSON, its
    abnormal_kpi_paths / topo_chains structures are also summarized.

    Returns:
        A newline-joined, human-readable analysis report, or an error
        message string if anything fails.
    """
    try:
        # Project root is assumed to be two levels above this file.
        current_file_dir = os.path.dirname(os.path.abspath(__file__))
        project_root = os.path.dirname(os.path.dirname(current_file_dir))

        trace_file_path = os.path.join(project_root, "kylin_trace", "trace.txt")
        kylin_trace_file_path = os.path.join(project_root, "kylin_trace", "kylin_trace.txt")

        trace_analysis = []

        # 1. trace.txt: scan only the tail of the file.
        trace_analysis.append("=== trace.txt 文件分析 ===")

        if not os.path.exists(trace_file_path):
            logger.warning(f"trace.txt文件不存在: {trace_file_path}")
            trace_analysis.append("trace.txt文件不存在或无法访问")
        else:
            file_size = os.path.getsize(trace_file_path)
            logger.info(f"trace.txt文件大小: {file_size / (1024*1024):.2f} MB")
            trace_analysis.append(f"文件大小: {file_size / (1024*1024):.2f} MB")

            # Stream the file, keeping only the last 5000 lines in memory.
            total_lines = 0
            tail = deque(maxlen=5000)
            with open(trace_file_path, 'r', encoding='utf-8', errors='ignore') as f:
                for raw_line in f:
                    total_lines += 1
                    tail.append(raw_line)
            end_lines = list(tail)

            trace_analysis.append(f"总行数: {total_lines}")
            trace_analysis.append(f"分析后{len(end_lines)}行内容:")

            error_count = 0
            warning_count = 0
            critical_events = []
            performance_issues = []

            for i, line in enumerate(end_lines):
                line_lower = line.lower()

                # Count errors/warnings. NOTE(review): the 'err' substring
                # also matches words like "transferred"; kept as-is to
                # preserve the existing counting behavior.
                if 'error' in line_lower or 'err' in line_lower:
                    error_count += 1
                    if error_count <= 10:  # record only the first 10 errors
                        critical_events.append(f"行{total_lines - len(end_lines) + i + 1}: {line.strip()}")
                elif 'warning' in line_lower or 'warn' in line_lower:
                    warning_count += 1

                # Detect performance-related lines.
                if any(keyword in line_lower for keyword in _PERF_KEYWORDS):
                    if len(performance_issues) < 10:  # record only the first 10
                        performance_issues.append(f"行{total_lines - len(end_lines) + i + 1}: {line.strip()}")

            trace_analysis.append(f"后5000行统计 - 错误: {error_count}, 警告: {warning_count}")

            if critical_events:
                trace_analysis.append("关键错误事件:")
                trace_analysis.extend(critical_events[:5])  # show at most 5

            if performance_issues:
                trace_analysis.append("性能相关问题:")
                trace_analysis.extend(performance_issues[:5])  # show at most 5

            # Report the covered time range if timestamps are present.
            timestamps = []
            for line in end_lines:
                matches = _TIMESTAMP_PATTERN.findall(line)
                if matches:
                    timestamps.extend(matches)

            if timestamps:
                trace_analysis.append(f"检测到时间戳格式，时间范围: {timestamps[0]} 到 {timestamps[-1]}")

        # 2. kylin_trace.txt: read the complete file.
        trace_analysis.append("\n=== kylin_trace.txt 文件分析 ===")

        if not os.path.exists(kylin_trace_file_path):
            logger.warning(f"kylin_trace.txt文件不存在: {kylin_trace_file_path}")
            trace_analysis.append("kylin_trace.txt文件不存在或无法访问")
        else:
            kylin_file_size = os.path.getsize(kylin_trace_file_path)
            logger.info(f"kylin_trace.txt文件大小: {kylin_file_size / 1024:.2f} KB")
            trace_analysis.append(f"文件大小: {kylin_file_size / 1024:.2f} KB")

            with open(kylin_trace_file_path, 'r', encoding='utf-8', errors='ignore') as f:
                kylin_content = f.read().strip()

            if kylin_content:
                trace_analysis.append("文件内容:")
                trace_analysis.append(kylin_content)

                # If the content is JSON, also emit a structured summary.
                try:
                    kylin_data = json.loads(kylin_content)
                    trace_analysis.append("\nJSON解析成功，结构化数据:")

                    if 'abnormal_kpi_paths' in kylin_data:
                        paths = kylin_data['abnormal_kpi_paths']
                        trace_analysis.append(f"异常KPI路径数量: {len(paths)}")
                        for i, path in enumerate(paths[:5]):  # first 5 paths only
                            trace_analysis.append(f"  路径{i+1}: {' -> '.join(path)}")

                    if 'topo_chains' in kylin_data:
                        chains = kylin_data['topo_chains']
                        trace_analysis.append(f"拓扑链数量: {len(chains)}")
                        for metric, chain in list(chains.items())[:5]:  # first 5 only
                            trace_analysis.append(f"  {metric}: {' -> '.join(chain)}")

                except json.JSONDecodeError:
                    trace_analysis.append("文件内容不是标准JSON格式，作为普通文本处理")
            else:
                trace_analysis.append("文件为空")

        return "\n".join(trace_analysis)

    except Exception as e:
        logger.error(f"分析追踪文件失败: {e}")
        return f"分析追踪文件时出错: {str(e)}"

# # 测试代码
# if __name__ == "__main__":
#     # 创建模拟state
#     state = {
#         "redis_config": {
#             'redis_host': 'localhost',
#             'redis_port': 6379,
#             'redis_db': 0,
#             'redis_password': None
#         },
#         "db_config": {
#             'host': 'localhost',
#             'port': 3306,
#             'user': 'root',
#             'password': '1234',
#             'database': 'kylin_2025'
#         },
#         "redis_key": "anomaly_data"
#     }
#
#     # 调用boss_node
#     result = boss_node(state)
#
#     # 打印结果
#     print(json.dumps(result, ensure_ascii=False, indent=2))

def boss_1():
    """Run boss_node once with a hard-coded local demo state.

    Stores the outcome in the global config.RESULT and echoes it to
    stdout as pretty-printed JSON.
    """
    redis_settings = {
        'redis_host': 'localhost',
        'redis_port': 6379,
        'redis_db': 0,
        'redis_password': None,
    }
    database_settings = {
        'host': 'localhost',
        'port': 3306,
        'user': 'root',
        'password': '1234',
        'database': 'kylin_2025',
    }
    demo_state = {
        "redis_config": redis_settings,
        "db_config": database_settings,
        "redis_key": "anomaly_data",
    }

    # Persist the analysis result globally so other modules can read it.
    config.RESULT = boss_node(demo_state)
    print(json.dumps(config.RESULT, ensure_ascii=False, indent=2))







