import os
import json
import logging
import requests
import glob
from typing import Dict, List, Any, Optional
from datetime import datetime
import openai
from .config_manager import ConfigManager

logger = logging.getLogger(__name__)

class AIAnalyzer:
    """AI日志分析器 - 基于大模型API的智能分析"""
    
    def __init__(self, config_path: str = 'config/ai_config.json'):
        """Create an analyzer and load its model configuration.

        Args:
            config_path: Path to the default (non-sensitive) AI config JSON.
        """
        self.config_path = config_path
        # Runtime copy under temp/ may hold API keys; the default file never does.
        self.temp_config_path = 'temp/ai_config_runtime.json'
        self.config_manager = ConfigManager()
        self.load_ai_config()  # populates self.ai_config
        self.conversation_history: List[Dict] = []  # recorded Q/A exchanges
        self.analysis_context: Dict = {}
        
    def load_ai_config(self):
        """Load the AI configuration, preferring the runtime copy.

        Resolution order:
          1. Runtime file in temp/ (may contain API keys) — wins outright.
          2. The default config file (secrets migrated out if present).
          3. A freshly built default config, written to disk.
        On any failure ``self.ai_config`` is reset to an empty dict.
        """
        try:
            # Runtime copy (with secrets) takes precedence over everything.
            if os.path.exists(self.temp_config_path):
                with open(self.temp_config_path, 'r', encoding='utf-8') as fh:
                    self.ai_config = json.load(fh)
                logger.info("从临时文件加载AI配置（包含敏感信息）")
                return

            if os.path.exists(self.config_path):
                with open(self.config_path, 'r', encoding='utf-8') as fh:
                    self.ai_config = json.load(fh)
                logger.info("从默认配置文件加载AI配置")
                # Move any API keys out of the default file into temp/.
                self.migrate_sensitive_config()
            else:
                # First run: synthesize a config pointing at a local LM Studio.
                local_model = {
                    'name': '本地LM Studio',
                    'provider': 'local',
                    'api_key': '',
                    'model': 'google/gemma-3-4b',
                    'max_tokens': -1,
                    'temperature': 0.7,
                    'base_url': 'http://localhost:1234'
                }
                self.ai_config = {
                    'current_model': 'local',
                    'models': {'local': local_model},
                    'system_prompt': self._get_default_system_prompt(),
                    'analysis_prompts': self._get_analysis_prompts()
                }
                self.save_default_config()

            # Always leave a runtime copy behind for later updates.
            self.save_runtime_config()

        except Exception as e:
            logger.error(f"加载AI配置失败: {e}")
            self.ai_config = {}
    
    def migrate_sensitive_config(self):
        """Move API keys out of the default config file into the runtime file.

        A migration is needed as soon as any model entry carries a non-blank
        ``api_key``. The full config (keys included) is saved to temp/ first,
        then the default file is rewritten with the keys blanked.
        """
        try:
            models = self.ai_config.get('models', {})
            needs_migration = any(
                cfg.get('api_key', '').strip() for cfg in models.values()
            )

            if not needs_migration:
                logger.info("未检测到敏感信息，无需迁移")
                return

            logger.info("检测到敏感信息，正在迁移到临时文件...")
            # Persist the secrets to temp/ before scrubbing the default file.
            self.save_runtime_config()
            self.save_default_config()
            logger.info("敏感信息迁移完成")
        except Exception as e:
            logger.error(f"迁移敏感配置失败: {e}")
    
    def save_ai_config(self):
        """Persist the current config: full copy to temp/, scrubbed copy to config/."""
        try:
            # Runtime file keeps the secrets; the default file never does.
            self.save_runtime_config()
            self.save_default_config()
        except Exception as e:
            logger.error(f"保存AI配置失败: {e}")
    
    def save_default_config(self):
        """Write the default config file with all API keys blanked out.

        Bug fix: the previous implementation used a shallow ``dict.copy()``,
        so blanking ``api_key`` mutated the per-model dicts shared with
        ``self.ai_config`` — wiping the keys from the live config (and from
        the runtime file the next time ``save_runtime_config`` ran). The
        scrub now works on copies of the model dicts, leaving
        ``self.ai_config`` untouched.
        """
        try:
            os.makedirs(os.path.dirname(self.config_path), exist_ok=True)

            # Copy the top level, then replace 'models' with scrubbed copies so
            # the live config's nested dicts are never modified.
            default_config = dict(self.ai_config)
            if 'models' in default_config:
                default_config['models'] = {
                    key: {**cfg, 'api_key': ''} if 'api_key' in cfg else dict(cfg)
                    for key, cfg in default_config['models'].items()
                }

            with open(self.config_path, 'w', encoding='utf-8') as f:
                json.dump(default_config, f, ensure_ascii=False, indent=2)
            logger.info("保存默认AI配置（不包含敏感信息）")
        except Exception as e:
            logger.error(f"保存默认AI配置失败: {e}")
    
    def save_runtime_config(self):
        """Write the full config (API keys included) to the temp runtime file."""
        try:
            os.makedirs(os.path.dirname(self.temp_config_path), exist_ok=True)
            with open(self.temp_config_path, 'w', encoding='utf-8') as fh:
                json.dump(self.ai_config, fh, ensure_ascii=False, indent=2)
            logger.info("保存运行时AI配置（包含敏感信息）")
        except Exception as e:
            logger.error(f"保存运行时AI配置失败: {e}")
    
    def _get_default_system_prompt(self) -> str:
        """Return the built-in system prompt for the log-analysis assistant."""
        # Kept as one literal so the prompt text stays easy to audit/diff.
        prompt = """你是一个专业的日志分析专家，专门帮助用户分析系统日志中的问题。

你的职责包括：
1. 分析日志中的错误、警告和异常信息
2. 识别问题的根本原因
3. 提供具体的解决方案和建议
4. 回答用户关于日志分析的问题
5. 帮助用户理解复杂的日志信息

分析原则：
- 优先关注ERROR和WARN级别的日志
- 寻找错误之间的关联性
- 考虑时间序列和组件依赖关系
- 提供可操作的解决方案
- 用通俗易懂的语言解释技术问题

请用中文回答用户的问题。"""
        return prompt
    
    def _get_analysis_prompts(self) -> Dict:
        """Return prompt templates keyed by analysis type.

        Each value is a ``str.format`` template; callers supply the named
        placeholders (``stats``/``duplicates``, or ``component`` etc.).
        """
        initial = """请分析以下日志数据，识别主要问题：

日志统计信息：
{stats}

重复日志：
{duplicates}

请提供：
1. 主要问题概述
2. 问题优先级排序
3. 建议的解决方案
4. 需要进一步调查的方面"""

        component = """分析以下组件日志：

组件：{component}
日志数量：{log_count}
日志级别分布：{level_distribution}

请分析：
1. 组件运行状态
2. 潜在问题
3. 性能指标
4. 优化建议"""

        return {
            'initial_analysis': initial,
            'component_analysis': component
        }
    
    def setup_openai_client(self):
        """Point the module-level openai client at the current model's endpoint.

        Uses the legacy (pre-1.0) openai SDK convention of configuring
        module-level globals. Returns True on success, False on any error.
        """
        try:
            cfg = self.get_current_model_config()
            openai.api_key = cfg.get('api_key', '')
            openai.base_url = cfg.get('base_url', 'https://api.openai.com/v1')
        except Exception as e:
            logger.error(f"设置OpenAI客户端失败: {e}")
            return False
        return True
    
    def analyze_logs(self, log_processor) -> Dict:
        """Run the first-pass analysis over a log processor's data.

        Returns a dict with an AI-written summary, suggested follow-up
        questions, and the prepared statistics — or ``{'error': ...}`` when
        the processor holds no logs.
        """
        if not log_processor.logs:
            return {'error': '没有日志数据'}

        data = self._prepare_analysis_data(log_processor)
        return {
            'initial_analysis': self._generate_initial_analysis(data),
            'suggested_questions': self._generate_suggested_questions(data),
            'analysis_data': data
        }
    
    def _prepare_analysis_data(self, log_processor) -> Dict:
        """Collect summary statistics plus a few duplicate examples for prompts."""
        stats = log_processor.generate_summary()

        # Only the first five duplicate groups — enough context, fewer tokens.
        duplicate_examples = dict(list(stats.get('duplicates', {}).items())[:5])

        return {
            'stats': stats,
            'duplicate_examples': duplicate_examples,
            'total_logs': len(log_processor.logs),
            'components': stats.get('components', {}),
            'time_patterns': stats.get('time_patterns', {})
        }
    
    def _generate_initial_analysis(self, analysis_data: Dict) -> str:
        """Ask the model for a first-pass summary of the log statistics.

        Only a trimmed view of the stats is embedded in the prompt to keep
        token usage low. Returns the model's answer, or a fixed error string
        on failure.
        """
        try:
            stats = analysis_data['stats']
            # Pull out just the fields the prompt mentions.
            total = stats.get('total_logs', 0)
            component_count = stats.get('unique_components', 0)
            dup_ratio = stats.get('duplicate_ratio', 0)
            top_components = stats.get('most_active_components', [])[:3]

            prompt = f"""请分析以下日志统计信息：

总日志数：{total}
组件数量：{component_count}
重复率：{dup_ratio}%

最活跃组件：
{json.dumps(top_components, ensure_ascii=False, indent=2)}

请提供：
1. 主要问题概述
2. 问题优先级排序
3. 建议的解决方案
4. 需要进一步调查的方面

请用中文回答，提供具体可操作的建议。"""

            return self._call_ai_api(prompt)
        except Exception as e:
            logger.error(f"生成初始分析失败: {e}")
            return "分析生成失败，请检查AI配置"
    
    def _generate_suggested_questions(self, analysis_data: Dict) -> List[str]:
        """Return canned follow-up questions for the user to pick from.

        The questions are currently static; ``analysis_data`` is accepted for
        interface compatibility and future data-driven suggestions. The old
        try/except wrapper — whose except branch returned the exact same
        hard-coded list — was dead code and has been removed.
        """
        return [
            "这些错误的主要原因是什么？",
            "如何解决这些重复日志问题？",
            "系统性能是否受到影响？",
            "有什么预防措施可以避免这些问题？",
            "需要进一步调查哪些方面？"
        ]
    
    def ask_question(self, question: str, log_processor=None) -> Dict:
        """Answer a user question (synchronous, single-turn).

        NOTE(review): this definition is shadowed by the ``async def
        ask_question`` declared later in this class, so this synchronous
        version is unreachable at runtime — kept byte-identical here;
        consider removing or renaming it.

        Args:
            question: The user's question text.
            log_processor: Optional log processor whose stats are added to
                the prompt context.

        Returns:
            Dict with 'answer'/'timestamp'/'context_used', or 'error'.
        """
        try:
            # Build prompt context (system prompt + stats + recent history).
            context = self._build_context(question, log_processor)
            
            # Send question plus context to the configured AI backend.
            response = self._call_ai_api(question, context)
            
            # Append the exchange to the in-memory conversation history.
            self.conversation_history.append({
                'timestamp': datetime.now().isoformat(),
                'question': question,
                'answer': response,
                'context': context
            })
            
            return {
                'answer': response,
                'timestamp': datetime.now().isoformat(),
                'context_used': bool(context)
            }
        except Exception as e:
            logger.error(f"回答问题失败: {e}")
            return {
                'error': f'回答问题失败: {str(e)}',
                'timestamp': datetime.now().isoformat()
            }
    
    def ask_question_stream(self, question: str, log_processor=None, conversation_id: str = None, include_logs: bool = False):
        """Stream an answer to a user question (multi-turn aware).

        Generator that yields response text chunks. Questions starting with
        "/dsl " are routed to the DSL pipeline — run to completion via
        ``asyncio.run`` and then yielded piecewise; everything else goes
        through the streaming chat API. Each completed exchange is appended
        to ``self.conversation_history``.
        """
        async def process_dsl_query_stream():
            # Runs the DSL pipeline and returns the output as a list of parts.
            try:
                # DSL queries need log data to operate on.
                if not log_processor:
                    return ["错误：DSL查询需要提供日志数据"]
                
                # Expect the unpacked app.log under the temp directory.
                # NOTE(review): absolute workspace path is hard-coded —
                # confirm it matches the deployment layout.
                log_file = "/root/workspace/AI_logging_engine/temp/app.log"
                if not os.path.exists(log_file):
                    return ["错误：找不到app.log文件，请确保日志已正确解压到temp目录"]
                
                logger.info(f"使用日志文件: {log_file}")
                
                # Delegate to the async DSL handler.
                result = await self.process_dsl_query(question, log_file)
                
                # Chunks to be yielded back to the caller.
                output_parts = []
                
                # Record the exchange before formatting the output.
                self.conversation_history.append({
                    'timestamp': datetime.now().isoformat(),
                    'question': question,
                    'answer': str(result),
                    'conversation_id': conversation_id
                })
                
                if result.get('status') == 'no_match':
                    output_parts.extend([
                        "未找到匹配的Flow\n\n",
                        "原因：\n",
                        f"{result['reason']}\n\n",
                        "建议：\n",
                        f"{result['suggestion']}\n"
                    ])
                elif 'error' in result:
                    output_parts.append(result['error'])
                else:
                    # Matched-flow details followed by the analysis text.
                    output_parts.extend([
                        "匹配到的Flow：\n",
                        f"ID: {result['matched_flow']['id']}\n",
                        f"置信度: {result['matched_flow']['confidence']}\n",
                        f"匹配原因: {result['matched_flow']['reason']}\n\n",
                        "分析结果：\n",
                        result['analysis']
                    ])
                
                return output_parts
            except Exception as e:
                logger.error(f"DSL查询处理失败: {e}")
                return [f"处理失败: {str(e)}"]
        
        try:
            # DSL queries take a separate, run-to-completion path.
            if question.startswith("/dsl "):
                # Run the async pipeline from this synchronous generator.
                import asyncio
                output_parts = asyncio.run(process_dsl_query_stream())
                for part in output_parts:
                    yield part
                return
            
            # Plain chat path below.
            # Start a fresh conversation when no id was supplied.
            if conversation_id is None:
                conversation_id = datetime.now().isoformat()
            
            # Build the multi-turn message list (system + history + question).
            messages = self._build_conversation_messages(question, log_processor, conversation_id, include_logs)
            
            # Call the AI API in streaming mode.
            response_stream = self._call_ai_api_with_messages(messages, stream=True)
            
            # Accumulate the full answer for the history record.
            full_response = ""
            
            # Relay chunks to the caller as they arrive.
            for chunk in response_stream:
                full_response += chunk
                yield chunk
            
            # Record the completed exchange.
            self.conversation_history.append({
                'timestamp': datetime.now().isoformat(),
                'question': question,
                'answer': full_response,
                'conversation_id': conversation_id
            })
            
        except Exception as e:
            logger.error(f"流式回答问题失败: {e}")
            yield f"回答问题失败: {str(e)}"
    
    async def process_dsl_query(self, query: str, log_file: str) -> Dict:
        """Strip the "/dsl " prefix and delegate the query to DSLHandler.

        Returns the handler's result dict, or ``{'error': ...}`` on failure.
        """
        from backend.dsl_handler import DSLHandler
        
        try:
            prefix = "/dsl "
            if query.startswith(prefix):
                query = query[len(prefix):].strip()

            handler = DSLHandler()
            return await handler.process_dsl_query(query, log_file, self)

        except Exception as e:
            logger.error(f"DSL查询处理失败: {e}")
            return {'error': f'DSL查询处理失败: {str(e)}'}
    
    async def ask_question(self, question: str, log_processor=None, conversation_id: str = None, include_logs: bool = False) -> Dict:
        """Answer a user question asynchronously (multi-turn aware).

        "/dsl "-prefixed questions are routed to the DSL pipeline; all others
        are sent to the chat API with this conversation's history replayed.
        Note: this definition replaces the earlier synchronous
        ``ask_question`` in the class namespace.
        """
        try:
            if question.startswith("/dsl "):
                # DSL path: requires a log file from the processor.
                log_file = log_processor.log_file if log_processor else None
                if not log_file:
                    return {
                        'error': 'DSL查询需要提供日志文件',
                        'timestamp': datetime.now().isoformat()
                    }
                dsl_result = await self.process_dsl_query(question, log_file)
                return {
                    **dsl_result,
                    'timestamp': datetime.now().isoformat(),
                    'conversation_id': conversation_id
                }

            # Plain chat path: start a new conversation when no id was given.
            if conversation_id is None:
                conversation_id = datetime.now().isoformat()

            messages = self._build_conversation_messages(question, log_processor, conversation_id, include_logs)
            answer = self._call_ai_api_with_messages(messages, stream=False)

            # Record the exchange so later turns can replay it.
            self.conversation_history.append({
                'timestamp': datetime.now().isoformat(),
                'question': question,
                'answer': answer,
                'conversation_id': conversation_id
            })

            return {
                'answer': answer,
                'timestamp': datetime.now().isoformat(),
                'conversation_id': conversation_id
            }
        except Exception as e:
            logger.error(f"回答问题失败: {e}")
            return {
                'error': f'回答问题失败: {str(e)}',
                'timestamp': datetime.now().isoformat(),
                'conversation_id': conversation_id
            }
    
    def _build_context(self, question: str, log_processor=None) -> str:
        """Assemble the prompt context: system prompt, log stats, recent history.

        ``question`` is accepted for interface compatibility but not used in
        the assembled text.
        """
        parts = [self.ai_config.get('system_prompt', '')]

        # Attach log statistics when a populated processor is available.
        if log_processor and log_processor.logs:
            data = self._prepare_analysis_data(log_processor)
            parts.append(
                f"日志分析数据：\n{json.dumps(data['stats'], ensure_ascii=False, indent=2)}"
            )

        # Include up to the last three Q/A exchanges for continuity.
        if self.conversation_history:
            history_text = "\n".join(
                f"Q: {h['question']}\nA: {h['answer']}"
                for h in self.conversation_history[-3:]
            )
            parts.append(f"最近的对话历史：\n{history_text}")

        return "\n\n".join(parts)
    
    def _build_conversation_messages(self, question: str, log_processor=None, conversation_id: str = None, include_logs: bool = False) -> List[Dict]:
        """Build the chat message list: system, per-conversation history, question.

        Log statistics are appended to the system message only when
        ``include_logs`` is set and the processor actually holds logs.
        """
        system_content = self.ai_config.get('system_prompt', '')

        if include_logs and log_processor and log_processor.logs:
            data = self._prepare_analysis_data(log_processor)
            # Keep only the headline numbers to stay within the token budget.
            trimmed = {
                'total_logs': data['stats'].get('total_logs', 0),
                'most_active_components': data['stats'].get('most_active_components', [])[:3]
            }
            system_content += f"\n\n日志分析数据：\n{json.dumps(trimmed, ensure_ascii=False, indent=2)}"

        messages = [{"role": "system", "content": system_content}]

        # Replay this conversation's prior turns, in order.
        if conversation_id:
            for entry in self.conversation_history:
                if entry.get('conversation_id') != conversation_id:
                    continue
                messages.append({"role": "user", "content": entry['question']})
                messages.append({"role": "assistant", "content": entry['answer']})

        messages.append({"role": "user", "content": question})
        return messages
    
    def _call_ai_api(self, prompt: str, context: str = "", stream: bool = False):
        """Dispatch a single prompt to the configured provider's backend."""
        try:
            # Provider name -> single-prompt handler.
            handlers = {
                'openai': self._call_openai_api,
                'claude': self._call_claude_api,
                'local': self._call_local_api,
                'qwen': self._call_qwen_api,
            }
            provider = self.get_current_model_config().get('provider')
            handler = handlers.get(provider)
            if handler is None:
                return "不支持的AI提供商"
            return handler(prompt, context, stream)
        except Exception as e:
            logger.error(f"调用AI API失败: {e}")
            return f"AI服务调用失败: {str(e)}"
    
    def _call_ai_api_with_messages(self, messages: List[Dict], stream: bool = False):
        """Dispatch a chat message list to the configured provider's backend."""
        try:
            # Provider name -> message-list handler.
            handlers = {
                'openai': self._call_openai_api_with_messages,
                'claude': self._call_claude_api_with_messages,
                'local': self._call_local_api_with_messages,
                'qwen': self._call_qwen_api_with_messages,
            }
            provider = self.get_current_model_config().get('provider')
            handler = handlers.get(provider)
            if handler is None:
                return "不支持的AI提供商"
            return handler(messages, stream)
        except Exception as e:
            logger.error(f"调用AI API失败: {e}")
            return f"AI服务调用失败: {str(e)}"
    
    # 为Claude和本地LM添加基于消息的调用方法（简化版本）
    def _call_claude_api_with_messages(self, messages: List[Dict], stream: bool = False):
        """Flatten a chat transcript into one prompt and call the Claude backend.

        The single-prompt Claude helper is reused, so the roles are collapsed:
        all system text first, then the user/assistant turns in order.
        """
        if not messages:
            return "无效的消息格式"

        system_parts = []
        turn_parts = []
        for msg in messages:
            role = msg['role']
            if role == 'system':
                system_parts.append(msg['content'] + "\n\n")
            elif role == 'user':
                turn_parts.append(msg['content'] + "\n")
            elif role == 'assistant':
                turn_parts.append("AI回答: " + msg['content'] + "\n")

        combined = "".join(system_parts) + "".join(turn_parts)
        return self._call_claude_api(combined, "", stream)
    
    def _call_local_api_with_messages(self, messages: List[Dict], stream: bool = False):
        """Flatten a chat transcript into one prompt and call the local backend.

        Same collapsing scheme as the Claude variant: system text first, then
        the user/assistant turns in order.
        """
        if not messages:
            return "无效的消息格式"

        system_parts = []
        turn_parts = []
        for msg in messages:
            role = msg['role']
            if role == 'system':
                system_parts.append(msg['content'] + "\n\n")
            elif role == 'user':
                turn_parts.append(msg['content'] + "\n")
            elif role == 'assistant':
                turn_parts.append("AI回答: " + msg['content'] + "\n")

        combined = "".join(system_parts) + "".join(turn_parts)
        return self._call_local_api(combined, "", stream)
    
    def _call_openai_api(self, prompt: str, context: str = "", stream: bool = False) -> str:
        """Send one prompt (plus optional system context) to the OpenAI API.

        Uses the legacy (pre-1.0) ``openai.ChatCompletion`` interface. Returns
        the reply text, a chunk generator when ``stream`` is True, or an
        error string on failure.
        """
        cfg = self.get_current_model_config()
        if not self.setup_openai_client():
            return "OpenAI客户端设置失败"
        
        try:
            messages = [{"role": "system", "content": context}] if context else []
            messages.append({"role": "user", "content": prompt})
            
            # Debug trace of the outgoing request.
            logger.info("发送到OpenAI API的内容:")
            logger.info(f"Model: {cfg.get('model', 'gpt-3.5-turbo')}")
            logger.info(f"Messages: {json.dumps(messages, ensure_ascii=False, indent=2)}")
            logger.info(f"Max tokens: {cfg.get('max_tokens', 2000)}")
            logger.info(f"Temperature: {cfg.get('temperature', 0.7)}")
            
            if stream:
                return self._stream_openai_api(messages, cfg)

            response = openai.ChatCompletion.create(
                model=cfg.get('model', 'gpt-3.5-turbo'),
                messages=messages,
                max_tokens=cfg.get('max_tokens', 2000),
                temperature=cfg.get('temperature', 0.7),
                stream=False
            )
            content = response.choices[0].message.content
            
            # Debug trace of the reply.
            logger.info("OpenAI API回复内容:")
            logger.info(f"Content: {content}")
            
            return content
        except Exception as e:
            logger.error(f"OpenAI API调用失败: {e}")
            return f"OpenAI API调用失败: {str(e)}"
    
    def _stream_openai_api(self, messages: list, current_model_config: dict):
        """Yield reply chunks from a streaming OpenAI completion.

        On failure the error message itself is yielded as the only chunk.
        """
        try:
            stream = openai.ChatCompletion.create(
                model=current_model_config.get('model', 'gpt-3.5-turbo'),
                messages=messages,
                max_tokens=current_model_config.get('max_tokens', 2000),
                temperature=current_model_config.get('temperature', 0.7),
                stream=True
            )
            
            pieces = []
            for chunk in stream:
                delta = chunk.choices[0].delta.content
                if delta:
                    pieces.append(delta)
                    yield delta
            
            full_content = "".join(pieces)
            logger.info(f"OpenAI流式API完整回复: {full_content}")
            return full_content
        except Exception as e:
            error_msg = f"OpenAI流式API调用失败: {str(e)}"
            logger.error(error_msg)
            yield error_msg
    
    def _call_openai_api_with_messages(self, messages: List[Dict], stream: bool = False):
        """Send a prepared chat message list to the OpenAI API.

        Uses the legacy (pre-1.0) ``openai.ChatCompletion`` interface. Returns
        the reply text, a chunk generator when ``stream`` is True, or an
        error string on failure.
        """
        cfg = self.get_current_model_config()
        if not self.setup_openai_client():
            return "OpenAI客户端设置失败"
        
        try:
            # Debug trace of the outgoing request.
            logger.info("发送到OpenAI API的消息:")
            logger.info(f"Model: {cfg.get('model', 'gpt-3.5-turbo')}")
            logger.info(f"Messages: {json.dumps(messages, ensure_ascii=False, indent=2)}")
            logger.info(f"Max tokens: {cfg.get('max_tokens', 2000)}")
            logger.info(f"Temperature: {cfg.get('temperature', 0.7)}")
            
            if stream:
                return self._stream_openai_api_with_messages(messages, cfg)

            response = openai.ChatCompletion.create(
                model=cfg.get('model', 'gpt-3.5-turbo'),
                messages=messages,
                max_tokens=cfg.get('max_tokens', 2000),
                temperature=cfg.get('temperature', 0.7),
                stream=False
            )
            content = response.choices[0].message.content
            
            # Debug trace of the reply.
            logger.info("OpenAI API回复内容:")
            logger.info(f"Content: {content}")
            
            return content
        except Exception as e:
            logger.error(f"OpenAI API调用失败: {e}")
            return f"OpenAI API调用失败: {str(e)}"
    
    def _stream_openai_api_with_messages(self, messages: list, current_model_config: dict):
        """Yield reply chunks from a streaming OpenAI completion (message-list form).

        On failure the error message itself is yielded as the only chunk.
        """
        try:
            stream = openai.ChatCompletion.create(
                model=current_model_config.get('model', 'gpt-3.5-turbo'),
                messages=messages,
                max_tokens=current_model_config.get('max_tokens', 2000),
                temperature=current_model_config.get('temperature', 0.7),
                stream=True
            )
            
            pieces = []
            for chunk in stream:
                delta = chunk.choices[0].delta.content
                if delta:
                    pieces.append(delta)
                    yield delta
            
            full_content = "".join(pieces)
            logger.info(f"OpenAI流式API完整回复: {full_content}")
            return full_content
        except Exception as e:
            error_msg = f"OpenAI流式API调用失败: {str(e)}"
            logger.error(error_msg)
            yield error_msg
    
    def _call_claude_api(self, prompt: str, context: str = "", stream: bool = False) -> str:
        """Send one prompt to the Anthropic Messages API and return the text.

        Fixes over the previous version:
          * the debug log no longer prints the real ``x-api-key`` header —
            credentials were being written verbatim to the log file;
          * the HTTP request now has a timeout (120s, matching the local
            backend) so a hung endpoint cannot block the caller forever.

        NOTE(review): ``stream`` is forwarded in the payload but the response
        is still parsed as a single JSON body — confirm streaming is never
        requested for this provider.
        """
        current_model_config = self.get_current_model_config()
        try:
            api_key = current_model_config.get('api_key', '')
            headers = {
                'Content-Type': 'application/json',
                'x-api-key': api_key,
                'anthropic-version': '2023-06-01'
            }
            
            data = {
                'model': current_model_config.get('model', 'claude-3-sonnet-20240229'),
                'max_tokens': current_model_config.get('max_tokens', 2000),
                'messages': [
                    {
                        'role': 'user',
                        'content': f"{context}\n\n{prompt}" if context else prompt
                    }
                ],
                'stream': stream
            }
            
            # Debug trace — redact the API key so secrets never reach the logs.
            safe_headers = {**headers, 'x-api-key': '***'}
            logger.info("发送到Claude API的内容:")
            logger.info("URL: https://api.anthropic.com/v1/messages")
            logger.info(f"Headers: {safe_headers}")
            logger.info(f"Data: {json.dumps(data, ensure_ascii=False, indent=2)}")
            
            response = requests.post(
                'https://api.anthropic.com/v1/messages',
                headers=headers,
                json=data,
                timeout=120  # match the local-backend timeout; avoid hanging forever
            )
            
            if response.status_code == 200:
                result = response.json()
                content = result['content'][0]['text']
                
                # Debug trace of the reply.
                logger.info("Claude API回复内容:")
                logger.info(f"Status: {response.status_code}")
                logger.info(f"Response: {json.dumps(result, ensure_ascii=False, indent=2)}")
                logger.info(f"Content: {content}")
                
                return content
            else:
                logger.error(f"Claude API调用失败: {response.status_code}")
                logger.error(f"错误响应: {response.text}")
                return f"Claude API调用失败: {response.text}"
        except Exception as e:
            logger.error(f"Claude API调用失败: {e}")
            return f"Claude API调用失败: {str(e)}"
    
    def _call_local_api(self, prompt: str, context: str = "", stream: bool = False) -> str:
        """Call an OpenAI-compatible local LM server (e.g. LM Studio).

        Returns the reply text, a chunk generator when ``stream`` is True, or
        an error string on failure.
        """
        cfg = self.get_current_model_config()
        try:
            base_url = cfg.get('base_url', 'http://localhost:1234')
            headers = {'Content-Type': 'application/json'}
            
            messages = []
            if context:
                messages.append({'role': 'system', 'content': context})
            messages.append({'role': 'user', 'content': prompt})
            
            payload = {
                'model': cfg.get('model', 'google/gemma-3-4b'),
                'messages': messages,
                'temperature': cfg.get('temperature', 0.7),
                'max_tokens': cfg.get('max_tokens', -1),
                'stream': stream
            }
            
            # Debug trace of the outgoing request.
            logger.info("发送到本地LM API的内容:")
            logger.info(f"URL: {base_url}/v1/chat/completions")
            logger.info(f"Headers: {headers}")
            logger.info(f"Data: {json.dumps(payload, ensure_ascii=False, indent=2)}")
            
            if stream:
                return self._stream_local_api(base_url, headers, payload)

            response = requests.post(
                f'{base_url}/v1/chat/completions',
                headers=headers,
                json=payload,
                timeout=120  # local models can be slow; allow two minutes
            )
            
            if response.status_code != 200:
                logger.error(f"本地LM API调用失败: {response.status_code}")
                logger.error(f"错误响应: {response.text}")
                return f"本地LM API调用失败: {response.text}"

            result = response.json()
            content = result['choices'][0]['message']['content']
            
            # Debug trace of the reply.
            logger.info("本地LM API回复内容:")
            logger.info(f"Status: {response.status_code}")
            logger.info(f"Response: {json.dumps(result, ensure_ascii=False, indent=2)}")
            logger.info(f"Content: {content}")
            
            return content
        except Exception as e:
            logger.error(f"本地LM API调用失败: {e}")
            return f"本地LM API调用失败: {str(e)}"
    
    def _stream_local_api(self, base_url: str, headers: dict, data: dict):
        """Stream a chat completion from the local LM Studio API.

        Parses the OpenAI-compatible SSE stream (lines prefixed ``data: ``)
        and yields content deltas as they arrive.

        Args:
            base_url: Server root, e.g. ``http://localhost:1234``.
            headers: HTTP headers for the request.
            data: JSON payload (should carry ``'stream': True``).

        Yields:
            str: content fragments; on failure, a single error message.
        """
        try:
            # FIX: context-manage the response so the streaming connection is
            # released even when we break out of iteration early or a
            # consumer abandons the generator (the original leaked it).
            with requests.post(
                f'{base_url}/v1/chat/completions',
                headers=headers,
                json=data,
                timeout=120,
                stream=True
            ) as response:
                if response.status_code == 200:
                    full_content = ""
                    for line in response.iter_lines():
                        if line:
                            line = line.decode('utf-8')
                            if line.startswith('data: '):
                                data_str = line[6:]  # strip the SSE 'data: ' prefix
                                if data_str == '[DONE]':
                                    break  # server's end-of-stream sentinel
                                try:
                                    chunk = json.loads(data_str)
                                    if chunk.get('choices') and chunk['choices'][0].get('delta', {}).get('content'):
                                        content = chunk['choices'][0]['delta']['content']
                                        full_content += content
                                        yield content
                                except json.JSONDecodeError:
                                    continue  # skip keep-alives / malformed chunks

                    logger.info(f"本地LM流式API完整回复: {full_content}")
                    # Generator return value (accessible via StopIteration / yield from).
                    return full_content
                else:
                    error_msg = f"本地LM流式API调用失败: {response.status_code}"
                    logger.error(error_msg)
                    yield error_msg
        except Exception as e:
            error_msg = f"本地LM流式API调用失败: {str(e)}"
            logger.error(error_msg)
            yield error_msg
    
    def _call_qwen_api(self, prompt: str, context: str = "", stream: bool = False):
        """Call the Tongyi Qianwen (Qwen) OpenAI-compatible chat API.

        Args:
            prompt: User message content.
            context: Optional system message; prepended when non-empty.
            stream: If True, return the SSE chunk generator from
                ``_stream_qwen_api``; otherwise return the full reply string.

        Returns:
            Reply content (str), a chunk generator when ``stream`` is True,
            or an error-description string on failure.
        """
        try:
            current_model_config = self.get_current_model_config()
            api_key = current_model_config.get('api_key', '')
            base_url = current_model_config.get('base_url', 'https://dashscope.aliyuncs.com/compatible-mode/v1')
            model = current_model_config.get('model', 'qwen-turbo')
            
            headers = {
                'Content-Type': 'application/json',
                'Authorization': f'Bearer {api_key}'
            }
            
            messages = []
            if context:
                messages.append({
                    'role': 'system',
                    'content': context
                })
            messages.append({
                'role': 'user',
                'content': prompt
            })
            
            data = {
                'model': model,
                'messages': messages,
                'temperature': current_model_config.get('temperature', 0.7),
                'max_tokens': current_model_config.get('max_tokens', 2000),
                'stream': stream
            }
            
            # Debug-log the outgoing request.
            # SECURITY FIX: the original logged the raw Authorization header,
            # leaking the API key into log files -- log a redacted copy.
            redacted_headers = {**headers, 'Authorization': 'Bearer ***REDACTED***'}
            logger.info(f"发送到通义千问API的内容:")
            logger.info(f"URL: {base_url}/chat/completions")
            logger.info(f"Headers: {redacted_headers}")
            logger.info(f"Data: {json.dumps(data, ensure_ascii=False, indent=2)}")
            
            if stream:
                # Streaming: hand off to the SSE generator.
                return self._stream_qwen_api(base_url, headers, data)
            else:
                # Blocking call with a generous 2-minute timeout.
                response = requests.post(
                    f'{base_url}/chat/completions',
                    headers=headers,
                    json=data,
                    timeout=120
                )
                
                if response.status_code == 200:
                    result = response.json()
                    content = result['choices'][0]['message']['content']
                    
                    # Debug-log the reply.
                    logger.info(f"通义千问API回复内容:")
                    logger.info(f"Status: {response.status_code}")
                    logger.info(f"Response: {json.dumps(result, ensure_ascii=False, indent=2)}")
                    logger.info(f"Content: {content}")
                    
                    return content
                else:
                    logger.error(f"通义千问API调用失败: {response.status_code}")
                    logger.error(f"错误响应: {response.text}")
                    return f"通义千问API调用失败: {response.text}"
        except Exception as e:
            logger.error(f"通义千问API调用失败: {e}")
            return f"通义千问API调用失败: {str(e)}"
    
    def _call_qwen_api_with_messages(self, messages: List[Dict], stream: bool = False):
        """Call the Qwen chat API with a pre-built message list.

        Unlike ``_call_qwen_api``, the caller supplies the full
        ``messages`` array (roles + content), e.g. for multi-turn chats.

        Args:
            messages: OpenAI-style message dicts (``role``/``content``).
            stream: If True, return the SSE chunk generator; otherwise
                return the full reply string.

        Returns:
            Reply content (str), a chunk generator when ``stream`` is True,
            or an error-description string on failure.
        """
        try:
            current_model_config = self.get_current_model_config()
            api_key = current_model_config.get('api_key', '')
            base_url = current_model_config.get('base_url', 'https://dashscope.aliyuncs.com/compatible-mode/v1')
            model = current_model_config.get('model', 'qwen-turbo')
            
            headers = {
                'Content-Type': 'application/json',
                'Authorization': f'Bearer {api_key}'
            }
            
            data = {
                'model': model,
                'messages': messages,
                'temperature': current_model_config.get('temperature', 0.7),
                'max_tokens': current_model_config.get('max_tokens', 2000),
                'stream': stream
            }
            
            # Debug-log the outgoing request.
            # SECURITY FIX: the original logged the raw Authorization header,
            # leaking the API key into log files -- log a redacted copy.
            redacted_headers = {**headers, 'Authorization': 'Bearer ***REDACTED***'}
            logger.info(f"发送到通义千问API的消息:")
            logger.info(f"URL: {base_url}/chat/completions")
            logger.info(f"Headers: {redacted_headers}")
            logger.info(f"Data: {json.dumps(data, ensure_ascii=False, indent=2)}")
            
            if stream:
                # Streaming: hand off to the SSE generator.
                return self._stream_qwen_api_with_messages(base_url, headers, data)
            else:
                # Blocking call with a generous 2-minute timeout.
                response = requests.post(
                    f'{base_url}/chat/completions',
                    headers=headers,
                    json=data,
                    timeout=120
                )
                
                if response.status_code == 200:
                    result = response.json()
                    content = result['choices'][0]['message']['content']
                    
                    # Debug-log the reply.
                    logger.info(f"通义千问API回复内容:")
                    logger.info(f"Status: {response.status_code}")
                    logger.info(f"Response: {json.dumps(result, ensure_ascii=False, indent=2)}")
                    logger.info(f"Content: {content}")
                    
                    return content
                else:
                    logger.error(f"通义千问API调用失败: {response.status_code}")
                    logger.error(f"错误响应: {response.text}")
                    return f"通义千问API调用失败: {response.text}"
        except Exception as e:
            logger.error(f"通义千问API调用失败: {e}")
            return f"通义千问API调用失败: {str(e)}"
    
    def _stream_qwen_api_with_messages(self, base_url: str, headers: dict, data: dict):
        """Stream a Qwen chat completion for a pre-built message list.

        NOTE: body is currently identical to ``_stream_qwen_api``; kept as a
        separate entry point for the ``_call_qwen_api_with_messages`` path.

        Args:
            base_url: API root (OpenAI-compatible mode).
            headers: HTTP headers, including Authorization.
            data: JSON payload (should carry ``'stream': True``).

        Yields:
            str: content fragments; on failure, a single error message.
        """
        try:
            # FIX: context-manage the response so the streaming connection is
            # released even when iteration stops early (the original leaked it).
            with requests.post(
                f'{base_url}/chat/completions',
                headers=headers,
                json=data,
                timeout=120,
                stream=True
            ) as response:
                if response.status_code == 200:
                    full_content = ""
                    for line in response.iter_lines():
                        if line:
                            line = line.decode('utf-8')
                            if line.startswith('data: '):
                                data_str = line[6:]  # strip the SSE 'data: ' prefix
                                if data_str == '[DONE]':
                                    break  # server's end-of-stream sentinel
                                try:
                                    chunk = json.loads(data_str)
                                    if chunk.get('choices') and chunk['choices'][0].get('delta', {}).get('content'):
                                        content = chunk['choices'][0]['delta']['content']
                                        full_content += content
                                        yield content
                                except json.JSONDecodeError:
                                    continue  # skip keep-alives / malformed chunks

                    logger.info(f"通义千问流式API完整回复: {full_content}")
                    # Generator return value (accessible via StopIteration / yield from).
                    return full_content
                else:
                    error_msg = f"通义千问流式API调用失败: {response.status_code}"
                    logger.error(error_msg)
                    yield error_msg
        except Exception as e:
            error_msg = f"通义千问流式API调用失败: {str(e)}"
            logger.error(error_msg)
            yield error_msg
    
    def _stream_qwen_api(self, base_url: str, headers: dict, data: dict):
        """Stream a chat completion from the Qwen OpenAI-compatible API.

        Parses the SSE stream (lines prefixed ``data: ``) and yields
        content deltas as they arrive.

        Args:
            base_url: API root (OpenAI-compatible mode).
            headers: HTTP headers, including Authorization.
            data: JSON payload (should carry ``'stream': True``).

        Yields:
            str: content fragments; on failure, a single error message.
        """
        try:
            # FIX: context-manage the response so the streaming connection is
            # released even when iteration stops early (the original leaked it).
            with requests.post(
                f'{base_url}/chat/completions',
                headers=headers,
                json=data,
                timeout=120,
                stream=True
            ) as response:
                if response.status_code == 200:
                    full_content = ""
                    for line in response.iter_lines():
                        if line:
                            line = line.decode('utf-8')
                            if line.startswith('data: '):
                                data_str = line[6:]  # strip the SSE 'data: ' prefix
                                if data_str == '[DONE]':
                                    break  # server's end-of-stream sentinel
                                try:
                                    chunk = json.loads(data_str)
                                    if chunk.get('choices') and chunk['choices'][0].get('delta', {}).get('content'):
                                        content = chunk['choices'][0]['delta']['content']
                                        full_content += content
                                        yield content
                                except json.JSONDecodeError:
                                    continue  # skip keep-alives / malformed chunks

                    logger.info(f"通义千问流式API完整回复: {full_content}")
                    # Generator return value (accessible via StopIteration / yield from).
                    return full_content
                else:
                    error_msg = f"通义千问流式API调用失败: {response.status_code}"
                    logger.error(error_msg)
                    yield error_msg
        except Exception as e:
            error_msg = f"通义千问流式API调用失败: {str(e)}"
            logger.error(error_msg)
            yield error_msg
    
    def _analyze_specific_error(self, log_processor, error_category: str, analysis_data: Dict) -> Dict:
        """分析特定错误类型"""
        try:
            # 获取该错误类型的日志
            error_logs = analysis_data['stats'].get('error_categories', {}).get(error_category, [])
            
            if not error_logs:
                return {
                    'error': f'未找到 {error_category} 类型的错误日志',
                    'analysis_data': analysis_data
                }
            
            # 限制日志数量，避免token过多
            limited_logs = error_logs[:10]  # 只取前10条日志
            
            # 构建分析提示词
            prompt = f"""请分析以下 {error_category} 类型的错误日志：

错误类型：{error_category}
错误数量：{len(error_logs)}
错误示例：
{json.dumps(limited_logs, ensure_ascii=False, indent=2)}

请提供详细分析：
1. 错误的具体原因和触发条件
2. 影响范围和严重程度
3. 具体的解决方案和修复步骤
4. 预防措施和监控建议
5. 是否需要进一步调查其他方面

请用中文回答，提供具体可操作的建议。"""
            
            # 调用AI分析
            analysis_result = self._call_ai_api(prompt)
            
            return {
                'initial_analysis': analysis_result,
                'error_category': error_category,
                'error_count': len(error_logs),
                'analysis_data': analysis_data
            }
            
        except Exception as e:
            logger.error(f"分析特定错误失败: {e}")
            return {
                'error': f'分析 {error_category} 错误失败: {str(e)}',
                'analysis_data': analysis_data
            }
    
    def update_ai_config(self, config: Dict) -> bool:
        """更新AI配置"""
        try:
            self.ai_config.update(config)
            self.save_ai_config()
            return True
        except Exception as e:
            logger.error(f"更新AI配置失败: {e}")
            return False
    
    def get_conversation_history(self) -> List[Dict]:
        """获取对话历史"""
        return self.conversation_history
    
    def get_conversation_history_by_id(self, conversation_id: str) -> List[Dict]:
        """根据对话ID获取对话历史"""
        return [h for h in self.conversation_history if h.get('conversation_id') == conversation_id]
    
    def clear_conversation_history(self):
        """清空对话历史"""
        self.conversation_history = []
    
    def get_ai_config(self) -> Dict:
        """获取AI配置"""
        return self.ai_config.copy() 

    def get_current_model_config(self) -> Dict:
        """获取当前模型配置"""
        current_model = self.ai_config.get('current_model', 'local')
        return self.ai_config.get('models', {}).get(current_model, {})
    
    def set_current_model(self, model_key: str) -> bool:
        """设置当前使用的模型"""
        try:
            if model_key in self.ai_config.get('models', {}):
                self.ai_config['current_model'] = model_key
                self.save_ai_config()
                return True
            return False
        except Exception as e:
            logger.error(f"设置当前模型失败: {e}")
            return False
    
    def add_model(self, model_key: str, model_config: Dict) -> bool:
        """添加新的AI模型配置"""
        try:
            if 'models' not in self.ai_config:
                self.ai_config['models'] = {}
            self.ai_config['models'][model_key] = model_config
            self.save_ai_config()
            return True
        except Exception as e:
            logger.error(f"添加模型配置失败: {e}")
            return False
    
    def update_model(self, model_key: str, model_config: Dict) -> bool:
        """更新AI模型配置"""
        try:
            if 'models' not in self.ai_config:
                self.ai_config['models'] = {}
            self.ai_config['models'][model_key] = model_config
            self.save_ai_config()
            return True
        except Exception as e:
            logger.error(f"更新模型配置失败: {e}")
            return False
    
    def remove_model(self, model_key: str) -> bool:
        """删除AI模型配置"""
        try:
            if model_key in self.ai_config.get('models', {}):
                del self.ai_config['models'][model_key]
                # 如果删除的是当前模型，切换到第一个可用模型
                if self.ai_config.get('current_model') == model_key:
                    available_models = list(self.ai_config['models'].keys())
                    if available_models:
                        self.ai_config['current_model'] = available_models[0]
                    else:
                        # 如果没有可用模型，创建默认模型
                        self.ai_config['models']['local'] = {
                            'name': '本地LM Studio',
                            'provider': 'local',
                            'api_key': '',
                            'model': 'google/gemma-3-4b',
                            'max_tokens': -1,
                            'temperature': 0.7,
                            'base_url': 'http://localhost:1234'
                        }
                        self.ai_config['current_model'] = 'local'
                self.save_ai_config()
                return True
            return False
        except Exception as e:
            logger.error(f"删除模型配置失败: {e}")
            return False
    
    def get_all_models(self) -> Dict:
        """获取所有模型配置"""
        return self.ai_config.get('models', {})
    
    def get_available_models(self) -> List[Dict]:
        """获取可用模型列表（用于前端显示）"""
        models = self.ai_config.get('models', {})
        current_model = self.ai_config.get('current_model', 'local')
        
        result = []
        for key, config in models.items():
            result.append({
                'key': key,
                'name': config.get('name', key),
                'provider': config.get('provider', 'unknown'),
                'is_current': key == current_model
            })
        return result
    
    def reset_runtime_config(self):
        """重置运行时配置"""
        try:
            if os.path.exists(self.temp_config_path):
                os.remove(self.temp_config_path)
                logger.info("运行时配置已重置")
            return True
        except Exception as e:
            logger.error(f"重置运行时配置失败: {e}")
            return False 