#!/usr/bin/env python3
"""
AI问题调试脚本
"""

import requests
import json
import sys
import os

def check_backend_service():
    """Probe the backend root endpoint; return True if it answers HTTP 200."""
    print("🔍 检查后端服务...")
    
    try:
        resp = requests.get("http://localhost:8080/", timeout=5)
    except Exception as e:
        # Connection refused / timeout — the backend is most likely not running.
        print(f"❌ 后端服务未启动: {e}")
        return False
    if resp.status_code == 200:
        print("✅ 后端服务正常运行")
        return True
    print(f"❌ 后端服务响应异常: {resp.status_code}")
    return False

def check_ai_config():
    """Fetch the AI configuration from the backend.

    Returns the parsed config dict on success, or None on any failure
    (non-200 response or request/JSON error).
    """
    print("🔍 检查AI配置...")
    
    try:
        resp = requests.get("http://localhost:8080/ai/config", timeout=10)
        if resp.status_code != 200:
            print(f"❌ AI配置获取失败: {resp.status_code}")
            print(f"   错误信息: {resp.text}")
            return None
        config = resp.json()
        print("✅ AI配置获取成功")
        # Echo the interesting fields; missing keys fall back to 'unknown'.
        for label, key in (
            ("提供商", "provider"),
            ("模型", "model"),
            ("基础URL", "base_url"),
            ("最大Token", "max_tokens"),
            ("温度", "temperature"),
        ):
            print(f"   {label}: {config.get(key, 'unknown')}")
        return config
    except Exception as e:
        print(f"❌ AI配置获取失败: {e}")
        return None

def check_local_lm_service():
    """Check the local LM server's /v1/models endpoint; return True if reachable."""
    print("🔍 检查本地LM服务...")
    
    try:
        resp = requests.get("http://localhost:1234/v1/models", timeout=10)
        if resp.status_code != 200:
            print(f"❌ 本地LM服务响应异常: {resp.status_code}")
            print(f"   错误信息: {resp.text}")
            return False
        models = resp.json()
        print("✅ 本地LM服务连接正常")
        available = [model['id'] for model in models.get('data', [])]
        print(f"   可用模型: {available}")
        return True
    except Exception as e:
        print(f"❌ 本地LM服务连接失败: {e}")
        return False

def test_ai_analysis_without_category():
    """Run the overview AI analysis (empty request body); return True on success."""
    print("🔍 测试概览分析...")
    
    try:
        resp = requests.post("http://localhost:8080/ai/analyze", json={}, timeout=60)
        
        if resp.status_code != 200:
            print(f"❌ 概览分析失败: {resp.status_code}")
            print(f"   错误信息: {resp.text}")
            return False
        payload = resp.json()
        # A well-formed answer must carry the 'initial_analysis' field.
        if 'initial_analysis' not in payload:
            print("❌ 概览分析结果异常")
            print(f"   返回结果: {payload}")
            return False
        print("✅ 概览分析成功")
        print(f"   分析结果长度: {len(payload['initial_analysis'])} 字符")
        print(f"   建议问题数量: {len(payload.get('suggested_questions', []))}")
        return True
    except Exception as e:
        print(f"❌ 概览分析失败: {e}")
        return False

def test_ai_analysis_with_category(category_name):
    """Run the AI analysis scoped to one error category; return True on success.

    category_name: name of the error category to analyze, sent as
    'error_category' in the request body.
    """
    print(f"🔍 测试 {category_name} 错误分析...")
    
    try:
        resp = requests.post(
            "http://localhost:8080/ai/analyze",
            json={"error_category": category_name},
            timeout=60,
        )
        
        if resp.status_code != 200:
            print(f"❌ 错误类型分析失败: {resp.status_code}")
            print(f"   错误信息: {resp.text}")
            return False
        payload = resp.json()
        if 'initial_analysis' not in payload:
            print("❌ 错误类型分析结果异常")
            print(f"   返回结果: {payload}")
            return False
        print("✅ 错误类型分析成功")
        print(f"   错误类型: {payload.get('error_category', category_name)}")
        print(f"   错误数量: {payload.get('error_count', 0)}")
        print(f"   分析结果长度: {len(payload['initial_analysis'])} 字符")
        return True
    except Exception as e:
        print(f"❌ 错误类型分析失败: {e}")
        return False

def test_local_lm_chat():
    """Send one sample chat completion to the local LM server; return True on success."""
    print("🔍 测试本地LM聊天功能...")
    
    system_msg = {
        "role": "system",
        "content": "你是一个日志分析专家，请用中文回答。"
    }
    user_msg = {
        "role": "user",
        "content": "请简单介绍一下日志分析的重要性。"
    }
    payload = {
        "model": "google/gemma-3-4b",
        "messages": [system_msg, user_msg],
        "temperature": 0.7,
        # NOTE(review): -1 presumably means "no token limit" for this server — confirm.
        "max_tokens": -1,
        "stream": False
    }
    
    try:
        resp = requests.post(
            "http://localhost:1234/v1/chat/completions",
            headers={"Content-Type": "application/json"},
            json=payload,
            timeout=30
        )
        
        if resp.status_code != 200:
            print(f"❌ 本地LM聊天失败: {resp.status_code}")
            print(f"   错误信息: {resp.text}")
            return False
        # OpenAI-compatible response shape: choices[0].message.content.
        answer = resp.json()['choices'][0]['message']['content']
        print("✅ 本地LM聊天功能正常")
        print(f"   回答长度: {len(answer)} 字符")
        return True
    except Exception as e:
        print(f"❌ 本地LM聊天失败: {e}")
        return False

def check_log_data():
    """Query the backend /analysis endpoint; return True if log data is present."""
    print("🔍 检查日志数据...")
    
    try:
        resp = requests.get("http://localhost:8080/analysis", timeout=10)
        if resp.status_code != 200:
            print(f"❌ 日志数据获取失败: {resp.status_code}")
            return False
        summary = resp.json()
        print("✅ 日志数据正常")
        print(f"   总日志数: {summary.get('total_logs', 0)}")
        print(f"   组件数量: {summary.get('unique_components', 0)}")
        return True
    except Exception as e:
        print(f"❌ 日志数据获取失败: {e}")
        return False

def main():
    """Run the end-to-end AI debugging sequence.

    Checks, in order: backend service, uploaded log data, AI configuration,
    the local LM service (only when the configured provider is 'local'),
    and the AI analysis endpoints. Exits with status 1 on the first failed
    required check; the per-category analysis at the end is best-effort and
    never aborts the run.
    """
    print("🔧 AI问题调试工具")
    print("=" * 50)
    
    # Backend must be up before anything else can be tested.
    if not check_backend_service():
        print("\n❌ 后端服务问题，请先启动后端服务")
        sys.exit(1)
    
    # Analysis endpoints are meaningless without uploaded log data.
    if not check_log_data():
        print("\n❌ 日志数据问题，请先上传日志文件")
        sys.exit(1)
    
    # AI config tells us which provider-specific checks to run next.
    ai_config = check_ai_config()
    if not ai_config:
        print("\n❌ AI配置问题")
        sys.exit(1)
    
    provider = ai_config.get('provider', 'unknown')
    print(f"\n🔍 检查 {provider} 服务...")
    
    if provider == 'local':
        # 'local' provider needs a reachable LM Studio-style server.
        if not check_local_lm_service():
            print("\n❌ 本地LM服务问题")
            print("   请启动LM Studio或其他本地模型服务")
            sys.exit(1)
        
        if not test_local_lm_chat():
            print("\n❌ 本地LM聊天功能问题")
            sys.exit(1)
    
    # No placeholders here, so a plain string (was an f-string by mistake).
    print("\n🔍 测试AI分析功能...")
    
    # Overview analysis is required; category analysis below is best-effort.
    if not test_ai_analysis_without_category():
        print("\n❌ 概览分析失败")
        sys.exit(1)
    
    try:
        response = requests.get("http://localhost:8080/ai/error-categories", timeout=10)
        if response.status_code == 200:
            categories = response.json()
            if categories:
                # Spot-check only the first category; its result is informational.
                first_category = categories[0]['name']
                test_ai_analysis_with_category(first_category)
            else:
                print("⚠️  没有找到错误分类")
        else:
            print(f"❌ 获取错误分类失败: {response.status_code}")
    except Exception as e:
        print(f"❌ 获取错误分类失败: {e}")
    
    print("\n" + "=" * 50)
    print("🎉 调试完成!")
    print("\n📊 检查结果:")
    print("   ✅ 后端服务: 正常")
    print("   ✅ 日志数据: 正常")
    print("   ✅ AI配置: 正常")
    if provider == 'local':
        print("   ✅ 本地LM服务: 正常")
    print("   ✅ AI分析功能: 正常")
    print("\n💡 如果前端仍有问题，请检查:")
    print("   1. 浏览器控制台错误信息")
    print("   2. 网络连接状态")
    print("   3. CORS配置")

if __name__ == "__main__":
    main() 