#!/usr/bin/env python3
"""
智能异常处理系统演示脚本
"""
import asyncio
import json
from datetime import datetime

# Canned demo payload: a pre-baked analysis result consumed by the demo
# routines below, so the script runs without any live monitoring backend.
_ANOMALY_ANALYSES = [
    {
        "anomaly_id": "cpu_high_usage_demo_001",
        "analysis": {
            "issue_type": "performance_degradation",
            "root_cause": "CPU usage spike due to inefficient database queries",
            "contributing_factors": [
                "Inefficient SQL queries",
                "Missing database indexes",
                "Connection pool size too small",
                "High concurrent user load",
            ],
            "evidence": [
                "CPU usage spike correlates with database query execution",
                "Logs show slow query execution times > 5 seconds",
                "Connection pool metrics indicate 95% utilization",
                "System load average > 10.0",
            ],
            "severity": 8.5,
        },
    },
    {
        "anomaly_id": "memory_leak_demo_002",
        "analysis": {
            "issue_type": "memory_leak",
            "root_cause": "Memory leak in web application",
            "contributing_factors": [
                "Unclosed database connections",
                "Memory-intensive operations",
                "Garbage collection issues",
            ],
            "evidence": [
                "Memory usage steadily increasing over 24 hours",
                "Logs show connection leaks in application",
                "Process memory allocation patterns indicate leak",
                "OOM killer triggered 3 times in last hour",
            ],
            "severity": 7.8,
        },
    },
    {
        "anomaly_id": "network_latency_demo_003",
        "analysis": {
            "issue_type": "network_issue",
            "root_cause": "Network latency issues",
            "contributing_factors": [
                "Network buffer size too small",
                "TCP parameters not optimized",
                "Network interface configuration issues",
            ],
            "evidence": [
                "Network latency > 100ms to database",
                "TCP retransmission rate > 5%",
                "Network interface showing errors",
                "Connection timeouts increasing",
            ],
            "severity": 6.2,
        },
    },
]

# Suggested remediations, split into actions to take now vs. later.
_RECOMMENDATIONS = {
    "immediate_actions": [
        {
            "action": "restart_database_connection_pool",
            "description": "重启数据库连接池以释放资源",
            "priority": "high",
            "estimated_duration": "5 minutes",
            "risk_level": "low",
            "affected_services": ["database"],
        },
    ],
    "long_term_actions": [
        {
            "action": "optimize_database_queries",
            "description": "优化慢查询，添加必要的索引",
            "priority": "high",
            "estimated_duration": "30 minutes",
            "risk_level": "medium",
            "affected_services": ["database", "api"],
        },
    ],
}

demo_data = {
    "success": True,
    "analysis_result": {
        "request_id": "demo_1703123456",
        "analysis_timestamp": "2023-12-21T10:32:30.000000+00:00",
        "overall_assessment": {
            "primary_issue": "system_performance_degradation",
            "root_cause": "Multiple performance issues detected",
            "severity_level": 8.2,
            "business_impact": "high",
            "estimated_resolution_time": "45 minutes",
        },
        "anomaly_analysis": _ANOMALY_ANALYSES,
        "recommendations": _RECOMMENDATIONS,
        "affected_services_summary": {
            "primary_affected": ["database", "api", "web"],
            "secondary_affected": ["monitoring"],
            "impact_assessment": {
                "database": "high",
                "api": "high",
                "web": "medium",
                "monitoring": "low",
            },
        },
    },
}
async def demo_confidence_analysis():
    """Score every canned anomaly with the confidence analyzer and print the results."""
    print("🔍 演示置信度分析...")
    
    from agent_mcp.confidence_module.confidence_analyzer import ConfidenceAnalyzer
    analyzer = ConfidenceAnalyzer()
    
    for i, entry in enumerate(demo_data["analysis_result"]["anomaly_analysis"], 1):
        details = entry["analysis"]
        # Flatten the nested record into the flat shape the analyzer consumes.
        anomaly_data = {
            "anomaly_id": entry["anomaly_id"],
            "issue_type": details["issue_type"],
            "root_cause": details["root_cause"],
            "severity": details["severity"],
            "contributing_factors": details["contributing_factors"],
            "evidence": details["evidence"],
        }
        
        confidence = analyzer.analyze_confidence(anomaly_data)
        level = analyzer.get_confidence_level(confidence)
        
        # Scores at or above 0.8 are routed to the rule engine; below goes to the LLM.
        print(f"  异常 {i}: {anomaly_data['anomaly_id']}")
        print(f"    类型: {anomaly_data['issue_type']}")
        print(f"    严重程度: {anomaly_data['severity']}")
        print(f"    置信度: {confidence:.3f} ({level})")
        print(f"    处理方式: {'规则引擎' if confidence >= 0.8 else 'LLM分析'}")
        print()

async def demo_rule_engine():
    """Run one high-confidence anomaly through the rule engine and report the outcome."""
    print("⚙️ 演示规则引擎...")
    
    import os
    from agent_mcp.role.rule_engine import RuleEngine
    
    # Fresh timestamped working directory so repeated demo runs do not collide.
    demo_dir = f"agent-mcp/Ansible/demo_{datetime.now().strftime('%Y%m%d%H%M%S')}"
    os.makedirs(demo_dir, exist_ok=True)
    
    engine = RuleEngine()
    
    # A high-confidence anomaly is the case the rule-engine branch handles.
    high_confidence_anomaly = {
        "anomaly_id": "cpu_high_usage_demo_001",
        "issue_type": "performance_degradation",
        "root_cause": "CPU usage spike due to inefficient database queries",
        "severity": 8.5,
        "contributing_factors": [
            "Inefficient SQL queries",
            "Missing database indexes",
            "Connection pool size too small",
        ],
        "evidence": [
            "CPU usage spike correlates with database query execution",
            "Logs show slow query execution times",
            "Connection pool metrics indicate exhaustion",
        ],
    }
    
    result = await engine.process_anomaly(high_confidence_anomaly, demo_dir)
    
    if not result["success"]:
        print(f"  ❌ 规则引擎处理失败: {result['error']}")
    else:
        print(f"  ✅ 规则引擎处理成功")
        print(f"  异常类型: {result['anomaly_type']}")
        print(f"  选择脚本数: {len(result['selected_scripts'])}")
        print(f"  执行计划: {result['execution_plan']['estimated_duration']}")
        print(f"  风险等级: {result['execution_plan']['risk_level']}")
    
    print()

async def demo_llm_analyzer():
    """Run one low-confidence anomaly through the LLM analyzer and report the outcome."""
    print("🤖 演示LLM分析器...")
    
    import os
    from agent_mcp.llm.llm_analyzer import LLMAnalyzer
    
    # Fresh timestamped working directory so repeated demo runs do not collide.
    demo_dir = f"agent-mcp/Ansible/demo_{datetime.now().strftime('%Y%m%d%H%M%S')}"
    os.makedirs(demo_dir, exist_ok=True)
    
    analyzer = LLMAnalyzer()
    
    # A low-confidence anomaly is the case routed to the LLM branch.
    low_confidence_anomaly = {
        "anomaly_id": "memory_leak_demo_002",
        "issue_type": "memory_leak",
        "root_cause": "Memory leak in web application",
        "severity": 7.8,
        "contributing_factors": [
            "Unclosed database connections",
            "Memory-intensive operations",
        ],
        "evidence": [
            "Memory usage steadily increasing",
            "Logs show connection leaks",
        ],
    }
    
    result = await analyzer.process_anomaly(low_confidence_anomaly, demo_dir)
    
    if not result["success"]:
        print(f"  ❌ LLM分析器处理失败: {result['error']}")
    else:
        print(f"  ✅ LLM分析器处理成功")
        print(f"  异常类型: {result['anomaly_type']}")
        print(f"  生成脚本: {result['generated_script']}")
        print(f"  提示词长度: {len(result['prompt'])} 字符")
    
    print()

async def demo_complete_workflow():
    """Run the full pipeline: extract anomalies, score confidence, branch each
    anomaly to the rule engine (score >= 0.8) or the LLM analyzer, and print a
    summary.

    Returns:
        dict with the per-stage results, the Ansible output directory, and the
        total count of generated scripts.
    """
    print("🚀 演示完整工作流程...")
    
    import os
    from agent_mcp.app.routes import analyze_anomalies
    from agent_mcp.confidence_module.confidence_analyzer import ConfidenceAnalyzer
    from agent_mcp.llm.llm_analyzer import LLMAnalyzer
    from agent_mcp.role.rule_engine import RuleEngine
    
    # Step 1: pull the anomaly list out of the canned analysis result.
    anomaly_summary = await analyze_anomalies(demo_data["analysis_result"])
    print(f"  📊 分析到 {len(anomaly_summary['anomalies'])} 个异常")
    
    # Step 2: score every anomaly's confidence.
    confidence_analyzer = ConfidenceAnalyzer()
    confidence_results = []
    
    print("  🔍 置信度分析结果:")
    for anomaly in anomaly_summary["anomalies"]:
        confidence_score = confidence_analyzer.analyze_confidence(anomaly)
        level = confidence_analyzer.get_confidence_level(confidence_score)
        method = "规则引擎" if confidence_score >= 0.8 else "LLM分析"
        confidence_results.append({
            "anomaly_id": anomaly["anomaly_id"],
            "confidence_score": confidence_score,
            "anomaly_data": anomaly,
        })
        print(f"    {anomaly['anomaly_id']}: {confidence_score:.3f} ({level}) → {method}")
    
    # Step 3: branch each anomaly on the 0.8 confidence threshold.
    ansible_dir = f"agent-mcp/Ansible/complete_demo_{datetime.now().strftime('%Y%m%d%H%M%S')}"
    os.makedirs(ansible_dir, exist_ok=True)
    
    rule_engine_results = []
    llm_results = []
    
    print("  ⚙️ 分支处理:")
    for result in confidence_results:
        if result["confidence_score"] >= 0.8:
            print(f"    {result['anomaly_id']} → 规则引擎")
            rule_engine_results.append(
                await RuleEngine().process_anomaly(result["anomaly_data"], ansible_dir)
            )
        else:
            print(f"    {result['anomaly_id']} → LLM分析")
            llm_results.append(
                await LLMAnalyzer().process_anomaly(result["anomaly_data"], ansible_dir)
            )
    
    # Step 4: summarise how the anomalies were dispatched.
    print("  📋 处理总结:")
    print(f"    总异常数: {len(confidence_results)}")
    print(f"    规则引擎处理: {len(rule_engine_results)}")
    print(f"    LLM处理: {len(llm_results)}")
    print(f"    Ansible目录: {ansible_dir}")
    
    # Step 5: count scripts from both branches — the rule engine may copy
    # several per anomaly, the LLM branch generates exactly one per success.
    total_scripts = sum(
        len(r["copied_scripts"]) for r in rule_engine_results if r["success"]
    ) + sum(1 for r in llm_results if r["success"])
    
    print(f"    生成脚本总数: {total_scripts}")
    
    return {
        "confidence_results": confidence_results,
        "rule_engine_results": rule_engine_results,
        "llm_results": llm_results,
        "ansible_dir": ansible_dir,
        "total_scripts": total_scripts,
    }

async def main():
    """Demo entry point: run each per-module demo, then the end-to-end workflow."""
    print("🎯 智能异常处理系统演示")
    print("=" * 50)
    
    try:
        # Exercise each module in isolation first.
        for demo in (demo_confidence_analysis, demo_rule_engine, demo_llm_analyzer):
            await demo()
        
        # Then run the whole pipeline end to end.
        result = await demo_complete_workflow()
        
        print("\n" + "=" * 50)
        print("✅ 演示完成！")
        print(f"📁 生成的脚本保存在: {result['ansible_dir']}")
        print(f"📊 总共生成了 {result['total_scripts']} 个调优脚本")
        
    except Exception as e:
        # Demo script: report any failure with a full traceback instead of crashing.
        print(f"❌ 演示过程中出现错误: {e}")
        import traceback
        traceback.print_exc()

if __name__ == "__main__":
    # Run the async demo entry point when executed as a script.
    asyncio.run(main())