#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
发送测试JSON数据到LLM分析API端点
"""

import requests
import json
import time

def send_test_data():
    """POST a sample LLM-analysis payload to the local API and print the result.

    Sends the static test payload to the intelligent-analysis endpoint
    (simulating the backend pushing JSON to MCP for analysis) and prints a
    human-readable summary of the confidence analysis, processing summary,
    and rule-engine / LLM results. All network and parsing failures are
    reported to stdout instead of raising.

    Returns:
        None
    """
    test_data = _build_test_payload()

    # API endpoint URL
    url = "http://localhost:8000/api/intelligent/process_llm_analysis"

    # Request headers: send and accept JSON
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/json"
    }

    print("=== 发送测试数据到LLM分析API ===")
    print(f"目标URL: {url}")
    print(f"请求时间: {time.strftime('%Y-%m-%d %H:%M:%S')}")
    # NOTE: ensure_ascii defaults to True, so character count equals byte count here.
    print(f"数据大小: {len(json.dumps(test_data))} 字节")

    try:
        print("\n正在发送请求...")
        response = requests.post(url, json=test_data, headers=headers, timeout=60)

        print(f"响应状态码: {response.status_code}")
        print(f"响应时间: {response.elapsed.total_seconds():.2f} 秒")

        if response.status_code == 200:
            try:
                result = response.json()
            except ValueError as e:
                # BUGFIX: response.json() raises requests.exceptions.JSONDecodeError,
                # which on requests >= 2.27 subclasses BOTH json.JSONDecodeError
                # AND RequestException. A trailing `except json.JSONDecodeError`
                # after `except RequestException` is unreachable there, so the
                # response text was never shown. Catch ValueError (the common
                # base) right at the call site instead.
                print(f"\n❌ JSON解析错误: {e}")
                print(f"响应内容: {response.text}")
                return
            _print_analysis_result(result)
        else:
            print("\n❌ 请求失败!")
            print(f"错误信息: {response.text}")

    except requests.exceptions.ConnectionError:
        print("\n❌ 连接错误: 无法连接到服务器")
        print("请确保服务器正在运行在 http://localhost:8000")

    except requests.exceptions.Timeout:
        print("\n❌ 请求超时: 服务器响应时间过长")

    except requests.exceptions.RequestException as e:
        print(f"\n❌ 请求异常: {e}")


def _build_test_payload():
    """Return the static sample analysis payload sent to the API endpoint."""
    return {
        "success": True,
        "analysis_result": {
            "request_id": "req_1703123456",
            "analysis_timestamp": "2023-12-21T10:32:30.000000+00:00",
            "overall_assessment": {
                "primary_issue": "database_connection_pool_exhaustion",
                "root_cause": "High CPU usage caused by inefficient database queries",
                "severity_level": 7.5,
                "business_impact": "moderate",
                "estimated_resolution_time": "30 minutes"
            },
            "anomaly_analysis": [
                {
                    "anomaly_id": "cpu_usage_system_server-01_1703123456",
                    "analysis": {
                        "issue_type": "performance_degradation",
                        "root_cause": "Database connection pool exhaustion due to slow queries",
                        "contributing_factors": [
                            "Inefficient SQL queries",
                            "Missing database indexes",
                            "Connection pool size too small"
                        ],
                        "evidence": [
                            "CPU usage spike correlates with database query execution",
                            "Logs show slow query execution times",
                            "Connection pool metrics indicate exhaustion"
                        ],
                        "severity": 7.5
                    }
                },
                {
                    "anomaly_id": "memory_usage_server-01_1703123500",
                    "analysis": {
                        "issue_type": "memory_leak",
                        "root_cause": "Memory leak in web application",
                        "contributing_factors": [
                            "Unclosed database connections",
                            "Memory-intensive operations",
                            "Garbage collection issues"
                        ],
                        "evidence": [
                            "Memory usage steadily increasing",
                            "Logs show connection leaks",
                            "Process memory allocation patterns"
                        ],
                        "severity": 6.8
                    }
                },
                {
                    "anomaly_id": "cpu_usage_system_server-01_1703123457",
                    "analysis": {
                        "issue_type": "performance_degradation",
                        "root_cause": "Database connection pool exhaustion due to slow queries",
                        "contributing_factors": [
                            "Inefficient SQL queries",
                            "Missing database indexes",
                            "Connection pool size too small"
                        ],
                        "evidence": [
                            "CPU usage spike correlates with database query execution",
                            "Logs show slow query execution times",
                            "Connection pool metrics indicate exhaustion"
                        ],
                        "severity": 7.5
                    }
                },
            ],
            "recommendations": {
                "immediate_actions": [
                    {
                        "action": "restart_database_connection_pool",
                        "description": "重启数据库连接池以释放资源",
                        "priority": "high",
                        "estimated_duration": "5 minutes",
                        "risk_level": "low",
                        "affected_services": ["database"]
                    },
                    {
                        "action": "optimize_database_queries",
                        "description": "优化慢查询，添加必要的索引",
                        "priority": "high",
                        "estimated_duration": "20 minutes",
                        "risk_level": "medium",
                        "affected_services": ["database", "api"]
                    }
                ],
                "long_term_actions": [
                    {
                        "action": "increase_connection_pool_size",
                        "description": "增加数据库连接池大小",
                        "priority": "medium",
                        "estimated_duration": "10 minutes",
                        "risk_level": "low",
                        "affected_services": ["database"]
                    },
                    {
                        "action": "implement_connection_monitoring",
                        "description": "实现连接池监控和自动清理",
                        "priority": "medium",
                        "estimated_duration": "60 minutes",
                        "risk_level": "low",
                        "affected_services": ["database", "monitoring"]
                    }
                ]
            },
            "affected_services_summary": {
                "primary_affected": ["database", "api"],
                "secondary_affected": ["web"],
                "impact_assessment": {
                    "database": "high",
                    "api": "medium",
                    "web": "low"
                }
            },
            "langraph_workflow": {
                "nodes_executed": [
                    "anomaly_detection",
                    "log_analysis",
                    "root_cause_analysis",
                    "recommendation_generation"
                ],
                "reasoning_chain": [
                    "Detected CPU usage spike at 10:30:45",
                    "Analyzed logs showing slow database queries",
                    "Identified connection pool exhaustion as root cause",
                    "Generated immediate and long-term recommendations"
                ],
                "confidence_factors": {
                    "log_correlation": 0.95,
                    "pattern_recognition": 0.88,
                    "historical_comparison": 0.82
                }
            }
        }
    }


def _print_analysis_result(result):
    """Pretty-print a successful API response dict to stdout.

    Args:
        result: The decoded JSON response body. Expected (but not required)
            to contain 'timestamp', 'ansible_directory', 'confidence_analysis',
            'summary', 'rule_engine_results' and 'llm_results' keys; missing
            keys fall back to 'N/A' / 0 / empty lists via dict.get.
    """
    print("\n✅ 请求成功!")
    print(f"时间戳: {result.get('timestamp', 'N/A')}")
    print(f"Ansible目录: {result.get('ansible_directory', 'N/A')}")

    # Per-anomaly confidence scores; >= 0.7 is routed to the rule engine,
    # anything below goes to LLM analysis.
    confidence_analysis = result.get('confidence_analysis', [])
    print(f"\n置信度分析结果:")
    for i, conf in enumerate(confidence_analysis, 1):
        print(f"  异常 {i}:")
        print(f"    ID: {conf.get('anomaly_id', 'N/A')}")
        print(f"    置信度分数: {conf.get('confidence_score', 0):.3f}")
        print(f"    处理方式: {'规则引擎' if conf.get('confidence_score', 0) >= 0.7 else 'LLM分析'}")

    # Aggregate counts of how the anomalies were processed.
    summary = result.get('summary', {})
    print(f"\n处理摘要:")
    print(f"  总异常数: {summary.get('total_anomalies', 0)}")
    print(f"  规则引擎处理: {summary.get('rule_engine_processed', 0)}")
    print(f"  LLM处理: {summary.get('llm_processed', 0)}")

    # Per-anomaly outcomes from each processing path.
    print(f"\n规则引擎结果:")
    for i, rule_result in enumerate(result.get('rule_engine_results', []), 1):
        print(f"  结果 {i}: {rule_result.get('success', False)} - {rule_result.get('selected_script', 'N/A')}")

    print(f"\nLLM分析结果:")
    for i, llm_result in enumerate(result.get('llm_results', []), 1):
        print(f"  结果 {i}: {llm_result.get('success', False)} - {llm_result.get('generated_script', 'N/A')}")



if __name__ == "__main__":
    # Script entry point: send the full test payload to the local API.
    send_test_data()
    
