"""LLM服务接口抽象层"""
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional
from app.core.config import settings


class LLMService(ABC):
    """Abstract base class defining the LLM service contract."""

    @abstractmethod
    async def generate_text(
        self,
        prompt: str,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        **kwargs
    ) -> str:
        """
        Generate free-form text.

        Args:
            prompt: Prompt text sent to the model.
            max_tokens: Optional cap on the number of generated tokens.
            temperature: Optional sampling temperature.
            **kwargs: Provider-specific extra parameters.

        Returns:
            The generated text.
        """
        ...

    @abstractmethod
    async def analyze_legal_case(
        self,
        case_content: str,
        question: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Analyze a legal case.

        Args:
            case_content: Full text of the case.
            question: Optional specific question about the case.

        Returns:
            A dict with the analysis result.
        """
        ...

    @abstractmethod
    async def evaluate_answer(
        self,
        question: str,
        user_answer: str,
        correct_answer: str,
        explanation: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Evaluate a user's answer against the correct one.

        Args:
            question: The question text.
            user_answer: The answer supplied by the user.
            correct_answer: The reference answer.
            explanation: Optional reference explanation.

        Returns:
            A dict with the evaluation (error type, reason, score, ...).
        """
        ...

    @abstractmethod
    async def generate_explanation(
        self,
        question: str,
        answer: str,
        context: Optional[Dict[str, Any]] = None
    ) -> str:
        """
        Generate an explanation for a question.

        Args:
            question: The question text.
            answer: The (correct) answer.
            context: Optional extra context.

        Returns:
            Explanation text.
        """
        ...


class OpenAIService(LLMService):
    """OpenAI-backed implementation of :class:`LLMService`."""

    def __init__(self, api_key: str, base_url: Optional[str] = None):
        """
        Initialize the OpenAI service.

        Args:
            api_key: OpenAI API key.
            base_url: Optional API base URL (e.g. for a proxy endpoint).
        """
        self.api_key = api_key
        self.base_url = base_url
        self._client = None  # lazily-created AsyncOpenAI client

    def _get_client(self):
        """Return the AsyncOpenAI client, creating it on first use."""
        if self._client is None:
            try:
                from openai import AsyncOpenAI
                client_kwargs = {"api_key": self.api_key}
                if self.base_url:
                    client_kwargs["base_url"] = self.base_url
                self._client = AsyncOpenAI(**client_kwargs)
            except ImportError:
                raise ImportError("请安装openai库: pip install openai>=1.0.0")
        return self._client

    async def generate_text(
        self,
        prompt: str,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        **kwargs
    ) -> str:
        """Generate text with the OpenAI API.

        Tries the newer ``responses`` API for gpt-5* models (or when
        ``use_responses_api`` is passed) and falls back to the classic
        chat-completions API otherwise.

        Fix: defaults are resolved with explicit ``is not None`` checks;
        the previous ``temperature or 0.7`` silently replaced a valid
        ``temperature=0.0`` with 0.7.
        """
        try:
            client = self._get_client()

            # Model name; defaults to gpt-3.5-turbo.
            model = kwargs.get("model", "gpt-3.5-turbo")
            use_responses_api = kwargs.get("use_responses_api", False)

            # Newer "responses" API (supports gpt-5-nano and similar models).
            if model.startswith("gpt-5") or use_responses_api:
                try:
                    response = await client.responses.create(
                        model=model,
                        input=prompt,
                        store=kwargs.get("store", False),
                    )
                    return response.output_text
                except AttributeError:
                    # responses API unavailable in this SDK; use chat completions.
                    print("responses API不可用，使用传统chat completions API")
                except Exception as e:
                    # New API failed at runtime; try the classic API instead.
                    print(f"新API格式失败，尝试传统API: {e}")

            # Classic chat-completions API. If the requested model is a
            # gpt-5 variant but the responses API was unusable, fall back
            # to gpt-3.5-turbo.
            fallback_model = "gpt-3.5-turbo" if model.startswith("gpt-5") else model
            extra = {
                k: v for k, v in kwargs.items()
                if k not in ("model", "store", "use_responses_api")
            }
            response = await client.chat.completions.create(
                model=fallback_model,
                messages=[
                    {"role": "user", "content": prompt}
                ],
                max_tokens=max_tokens if max_tokens is not None else 2000,
                temperature=temperature if temperature is not None else 0.7,
                **extra
            )

            return response.choices[0].message.content
        except Exception as e:
            print(f"OpenAI API调用失败: {e}")
            raise

    async def analyze_legal_case(
        self,
        case_content: str,
        question: Optional[str] = None
    ) -> Dict[str, Any]:
        """Analyze a legal case, combining LegalBERT features with an LLM pass.

        Returns a dict with issues, key facts, relevant laws, entities,
        classification, the LLM analysis text and a recommendation; on any
        failure a degraded dict with the error message is returned instead.
        """
        try:
            from app.ai.legalbert_service import get_legalbert_service
            legalbert = get_legalbert_service()
            bert_analysis = await legalbert.analyze_legal_text(case_content)
            entities = await legalbert.extract_entities(case_content)
            classification = await legalbert.classify_legal_document(case_content)

            # Enrich the BERT features with an LLM analysis.
            analysis_prompt = f"""
请分析以下法律案例：

【案例内容】
{case_content}

{f'【问题】{question}' if question else ''}

【LegalBERT分析结果】
- 法律实体：{len(entities)}个
- 分类：{classification.get('category', '未知')}

请提供详细的法律分析，包括：
1. 法律问题识别
2. 关键事实提取
3. 相关法律条文
4. 分析结论
5. 建议
"""

            llm_analysis = await self.generate_text(analysis_prompt, max_tokens=1500)

            return {
                "legal_issues": [classification.get("category", "法律纠纷")],
                "key_facts": [e["text"] for e in entities if e["type"] == "行为"],
                "relevant_laws": [e["text"] for e in entities if e["type"] == "法律条文"],
                "entities": entities,
                "classification": classification,
                "analysis": llm_analysis,
                "recommendation": "建议进一步分析相关法律条文和案例。",
                "bert_analysis": bert_analysis
            }
        except Exception as e:
            print(f"法律案例分析失败: {e}")
            # Degraded fallback result when BERT or the LLM call fails.
            return {
                "legal_issues": ["法律纠纷"],
                "key_facts": [],
                "relevant_laws": [],
                "analysis": f"分析过程中出现错误: {str(e)}",
                "recommendation": "请检查API配置和网络连接。"
            }

    async def evaluate_answer(
        self,
        question: str,
        user_answer: str,
        correct_answer: str,
        explanation: Optional[str] = None
    ) -> Dict[str, Any]:
        """Evaluate a user's answer with the LLM, expecting a JSON verdict.

        Fix: a malformed JSON payload from the model no longer escapes to
        the outer handler (losing the model's feedback); it now uses the
        same string-comparison fallback as a missing JSON object.
        """
        evaluation_prompt = f"""
请评估以下答案：

【题目】
{question}

【用户答案】
{user_answer}

【正确答案】
{correct_answer}

{f'【标准解析】{explanation}' if explanation else ''}

请分析：
1. 答案是否正确
2. 如果错误，错误类型是什么（知识性错误/思维性错误/技巧性错误/注意力错误）
3. 错误原因
4. 改进建议

请以JSON格式返回，包含：is_correct, error_type, error_category, error_details, score, feedback
"""

        try:
            response_text = await self.generate_text(evaluation_prompt, max_tokens=1000, temperature=0.3)

            import json
            import re
            # Extract the JSON object embedded in the model's reply.
            result = None
            json_match = re.search(r'\{.*\}', response_text, re.DOTALL)
            if json_match:
                try:
                    result = json.loads(json_match.group())
                except json.JSONDecodeError:
                    result = None  # malformed JSON -> fall through to basic check
            if result is None:
                # No parseable JSON: fall back to a literal string comparison,
                # keeping the model's raw reply as feedback.
                is_correct = user_answer.strip().lower() == correct_answer.strip().lower()
                result = {
                    "is_correct": is_correct,
                    "error_type": "knowledge" if not is_correct else None,
                    "error_category": "法律条文理解错误" if not is_correct else None,
                    "error_details": {},
                    "score": 10 if is_correct else 0,
                    "feedback": response_text
                }

            return result
        except Exception as e:
            print(f"答案评估失败: {e}")
            # API failure: degrade to the literal comparison with a canned message.
            is_correct = user_answer.strip().lower() == correct_answer.strip().lower()
            return {
                "is_correct": is_correct,
                "error_type": "knowledge" if not is_correct else None,
                "error_category": "法律条文理解错误" if not is_correct else None,
                "error_details": {},
                "score": 10 if is_correct else 0,
                "feedback": "答案评估过程中出现错误，请检查API配置。"
            }

    async def generate_explanation(
        self,
        question: str,
        answer: str,
        context: Optional[Dict[str, Any]] = None
    ) -> str:
        """Generate a detailed explanation for a question via the LLM."""
        explanation_prompt = f"""
请为以下题目生成详细的解释：

【题目】
{question}

【答案】
{answer}

{f'【上下文】{context}' if context else ''}

请提供：
1. 为什么这个答案是正确的
2. 涉及的法律条文和原理
3. 其他选项为什么是错误的（如果适用）
4. 相关的法律知识点

请用清晰、易懂的语言解释。
"""

        try:
            explanation = await self.generate_text(explanation_prompt, max_tokens=1000, temperature=0.5)
            return explanation
        except Exception as e:
            print(f"生成解释失败: {e}")
            # Canned fallback so callers always receive usable text.
            return f"根据相关法律规定，{answer}是正确的选择。详细解释生成失败，请检查API配置。"


class TongyiService(LLMService):
    """Alibaba Cloud Tongyi Qianwen (DashScope) implementation of :class:`LLMService`."""

    def __init__(self, api_key: str):
        """
        Initialize the Tongyi Qianwen service.

        Args:
            api_key: Tongyi Qianwen (DashScope) API key.
        """
        self.api_key = api_key

    def _get_client(self):
        """Return the ``dashscope`` module configured with this API key."""
        try:
            import dashscope
            dashscope.api_key = self.api_key
            return dashscope
        except ImportError:
            raise ImportError("请安装dashscope库: pip install dashscope>=1.17.0")

    async def generate_text(
        self,
        prompt: str,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        **kwargs
    ) -> str:
        """Generate text with Tongyi Qianwen.

        The blocking DashScope call runs in a worker thread via
        ``asyncio.to_thread`` so the event loop is not blocked.

        Fix: defaults are resolved with explicit ``is not None`` checks;
        the previous ``temperature or 0.7`` silently replaced a valid
        ``temperature=0.0`` with 0.7.
        """
        try:
            import dashscope
            import asyncio
            dashscope.api_key = self.api_key

            model = kwargs.get("model", "qwen-turbo")
            resolved_max_tokens = max_tokens if max_tokens is not None else 2000
            resolved_temperature = temperature if temperature is not None else 0.7
            extra = {k: v for k, v in kwargs.items() if k != "model"}

            # Wrap the synchronous SDK call so it can be offloaded.
            def _call_api():
                return dashscope.Generation.call(
                    model=model,
                    prompt=prompt,
                    max_tokens=resolved_max_tokens,
                    temperature=resolved_temperature,
                    **extra
                )

            response = await asyncio.to_thread(_call_api)

            if response.status_code == 200:
                return response.output.text
            else:
                raise Exception(f"通义千问API调用失败: {response.message}")
        except Exception as e:
            print(f"通义千问API调用失败: {e}")
            raise

    async def analyze_legal_case(
        self,
        case_content: str,
        question: Optional[str] = None
    ) -> Dict[str, Any]:
        """Analyze a legal case, combining LegalBERT features with an LLM pass.

        Returns a degraded dict carrying the error message if any step fails.
        """
        try:
            from app.ai.legalbert_service import get_legalbert_service
            legalbert = get_legalbert_service()
            bert_analysis = await legalbert.analyze_legal_text(case_content)
            entities = await legalbert.extract_entities(case_content)
            classification = await legalbert.classify_legal_document(case_content)

            analysis_prompt = f"""
请分析以下法律案例：

【案例内容】
{case_content}

{f'【问题】{question}' if question else ''}

请提供详细的法律分析，包括：
1. 法律问题识别
2. 关键事实提取
3. 相关法律条文
4. 分析结论
5. 建议
"""

            llm_analysis = await self.generate_text(analysis_prompt, max_tokens=1500)

            return {
                "legal_issues": [classification.get("category", "法律纠纷")],
                "key_facts": [e["text"] for e in entities if e["type"] == "行为"],
                "relevant_laws": [e["text"] for e in entities if e["type"] == "法律条文"],
                "entities": entities,
                "classification": classification,
                "analysis": llm_analysis,
                "recommendation": "建议进一步分析相关法律条文和案例。",
                "bert_analysis": bert_analysis
            }
        except Exception as e:
            print(f"法律案例分析失败: {e}")
            return {
                "legal_issues": ["法律纠纷"],
                "key_facts": [],
                "relevant_laws": [],
                "analysis": f"分析过程中出现错误: {str(e)}",
                "recommendation": "请检查API配置和网络连接。"
            }

    async def evaluate_answer(
        self,
        question: str,
        user_answer: str,
        correct_answer: str,
        explanation: Optional[str] = None
    ) -> Dict[str, Any]:
        """Evaluate an answer: correctness via literal comparison, feedback via LLM."""
        evaluation_prompt = f"""
请评估以下答案：

【题目】
{question}

【用户答案】
{user_answer}

【正确答案】
{correct_answer}

{f'【标准解析】{explanation}' if explanation else ''}

请分析答案是否正确，如果错误，请指出错误类型和原因。
"""

        try:
            response_text = await self.generate_text(evaluation_prompt, max_tokens=1000, temperature=0.3)
            is_correct = user_answer.strip().lower() == correct_answer.strip().lower()

            return {
                "is_correct": is_correct,
                "error_type": "knowledge" if not is_correct else None,
                "error_category": "法律条文理解错误" if not is_correct else None,
                "error_details": {},
                "score": 10 if is_correct else 0,
                "feedback": response_text
            }
        except Exception as e:
            print(f"答案评估失败: {e}")
            # API failure: keep the literal verdict with a canned message.
            is_correct = user_answer.strip().lower() == correct_answer.strip().lower()
            return {
                "is_correct": is_correct,
                "error_type": "knowledge" if not is_correct else None,
                "error_category": "法律条文理解错误" if not is_correct else None,
                "error_details": {},
                "score": 10 if is_correct else 0,
                "feedback": "答案评估过程中出现错误，请检查API配置。"
            }

    async def generate_explanation(
        self,
        question: str,
        answer: str,
        context: Optional[Dict[str, Any]] = None
    ) -> str:
        """Generate a detailed explanation for a question via the LLM."""
        explanation_prompt = f"""
请为以下题目生成详细的解释：

【题目】
{question}

【答案】
{answer}

请提供：
1. 为什么这个答案是正确的
2. 涉及的法律条文和原理
3. 其他选项为什么是错误的（如果适用）
4. 相关的法律知识点

请用清晰、易懂的语言解释。
"""

        try:
            explanation = await self.generate_text(explanation_prompt, max_tokens=1000, temperature=0.5)
            return explanation
        except Exception as e:
            print(f"生成解释失败: {e}")
            return f"根据相关法律规定，{answer}是正确的选择。详细解释生成失败，请检查API配置。"


class ZhipuService(LLMService):
    """ZhipuAI (GLM) implementation of :class:`LLMService`."""

    def __init__(self, api_key: str, base_url: Optional[str] = None):
        """
        Initialize the ZhipuAI service.

        Args:
            api_key: ZhipuAI API key.
            base_url: Optional API base URL for a custom endpoint.
        """
        self.api_key = api_key
        self.base_url = base_url
        self._client = None  # lazily-created ZhipuAI client

    def _get_client(self):
        """Return the ZhipuAI client, creating it on first use."""
        if self._client is None:
            try:
                from zhipuai import ZhipuAI
                client_kwargs = {"api_key": self.api_key}
                if self.base_url:
                    client_kwargs["base_url"] = self.base_url
                self._client = ZhipuAI(**client_kwargs)
            except ImportError:
                raise ImportError("请安装zhipuai库: pip install zhipuai>=2.0.0")
        return self._client

    async def generate_text(
        self,
        prompt: str,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        **kwargs
    ) -> str:
        """Generate text with ZhipuAI (default model: glm-4).

        The blocking SDK call runs in a worker thread via ``asyncio.to_thread``.

        Fix: defaults are resolved with explicit ``is not None`` checks;
        the previous ``temperature or 0.7`` silently replaced a valid
        ``temperature=0.0`` with 0.7.
        """
        try:
            import asyncio
            client = self._get_client()

            model = kwargs.get("model", "glm-4")
            resolved_max_tokens = max_tokens if max_tokens is not None else 2000
            resolved_temperature = temperature if temperature is not None else 0.7
            extra = {k: v for k, v in kwargs.items() if k != "model"}

            # Wrap the synchronous SDK call so it can be offloaded.
            def _call_api():
                return client.chat.completions.create(
                    model=model,
                    messages=[
                        {"role": "user", "content": prompt}
                    ],
                    max_tokens=resolved_max_tokens,
                    temperature=resolved_temperature,
                    **extra
                )

            response = await asyncio.to_thread(_call_api)

            # ZhipuAI response shapes vary between SDK versions.
            if hasattr(response, 'choices') and len(response.choices) > 0:
                return response.choices[0].message.content
            elif hasattr(response, 'data') and hasattr(response.data, 'choices'):
                return response.data.choices[0].message.content
            else:
                raise Exception(f"智谱AI返回格式异常: {response}")
        except Exception as e:
            print(f"智谱AI API调用失败: {e}")
            raise

    async def analyze_legal_case(
        self,
        case_content: str,
        question: Optional[str] = None
    ) -> Dict[str, Any]:
        """Analyze a legal case, combining LegalBERT features with an LLM pass.

        Returns a degraded dict carrying the error message if any step fails.
        """
        try:
            from app.ai.legalbert_service import get_legalbert_service
            legalbert = get_legalbert_service()
            bert_analysis = await legalbert.analyze_legal_text(case_content)
            entities = await legalbert.extract_entities(case_content)
            classification = await legalbert.classify_legal_document(case_content)

            analysis_prompt = f"""
请分析以下法律案例：

【案例内容】
{case_content}

{f'【问题】{question}' if question else ''}

请提供详细的法律分析，包括：
1. 法律问题识别
2. 关键事实提取
3. 相关法律条文
4. 分析结论
5. 建议
"""

            llm_analysis = await self.generate_text(analysis_prompt, max_tokens=1500)

            return {
                "legal_issues": [classification.get("category", "法律纠纷")],
                "key_facts": [e["text"] for e in entities if e["type"] == "行为"],
                "relevant_laws": [e["text"] for e in entities if e["type"] == "法律条文"],
                "entities": entities,
                "classification": classification,
                "analysis": llm_analysis,
                "recommendation": "建议进一步分析相关法律条文和案例。",
                "bert_analysis": bert_analysis
            }
        except Exception as e:
            print(f"法律案例分析失败: {e}")
            return {
                "legal_issues": ["法律纠纷"],
                "key_facts": [],
                "relevant_laws": [],
                "analysis": f"分析过程中出现错误: {str(e)}",
                "recommendation": "请检查API配置和网络连接。"
            }

    async def evaluate_answer(
        self,
        question: str,
        user_answer: str,
        correct_answer: str,
        explanation: Optional[str] = None
    ) -> Dict[str, Any]:
        """Evaluate an answer: correctness via literal comparison, feedback via LLM."""
        evaluation_prompt = f"""
请评估以下答案：

【题目】
{question}

【用户答案】
{user_answer}

【正确答案】
{correct_answer}

{f'【标准解析】{explanation}' if explanation else ''}

请分析答案是否正确，如果错误，请指出错误类型和原因。
"""

        try:
            response_text = await self.generate_text(evaluation_prompt, max_tokens=1000, temperature=0.3)
            is_correct = user_answer.strip().lower() == correct_answer.strip().lower()

            return {
                "is_correct": is_correct,
                "error_type": "knowledge" if not is_correct else None,
                "error_category": "法律条文理解错误" if not is_correct else None,
                "error_details": {},
                "score": 10 if is_correct else 0,
                "feedback": response_text
            }
        except Exception as e:
            print(f"答案评估失败: {e}")
            # API failure: keep the literal verdict with a canned message.
            is_correct = user_answer.strip().lower() == correct_answer.strip().lower()
            return {
                "is_correct": is_correct,
                "error_type": "knowledge" if not is_correct else None,
                "error_category": "法律条文理解错误" if not is_correct else None,
                "error_details": {},
                "score": 10 if is_correct else 0,
                "feedback": "答案评估过程中出现错误，请检查API配置。"
            }

    async def generate_explanation(
        self,
        question: str,
        answer: str,
        context: Optional[Dict[str, Any]] = None
    ) -> str:
        """Generate a detailed explanation for a question via the LLM."""
        explanation_prompt = f"""
请为以下题目生成详细的解释：

【题目】
{question}

【答案】
{answer}

请提供：
1. 为什么这个答案是正确的
2. 涉及的法律条文和原理
3. 其他选项为什么是错误的（如果适用）
4. 相关的法律知识点

请用清晰、易懂的语言解释。
"""

        try:
            explanation = await self.generate_text(explanation_prompt, max_tokens=1000, temperature=0.5)
            return explanation
        except Exception as e:
            print(f"生成解释失败: {e}")
            return f"根据相关法律规定，{answer}是正确的选择。详细解释生成失败，请检查API配置。"


class MultiLLMService(LLMService):
    """Aggregate service that delegates to several LLM backends.

    Supports two switch modes: "fallback" (try services in priority order,
    moving on when one fails) and "round_robin" (rotate through services).
    """

    def __init__(self, services: List[LLMService], switch_mode: str = "fallback"):
        """
        Args:
            services: Backend services, ordered by priority.
            switch_mode: Either "fallback" (failover) or "round_robin" (rotate).
        """
        self.services = services
        self.switch_mode = switch_mode
        # Cursor used by round-robin rotation.
        self.current_index = 0
        self.service_names = [type(svc).__name__ for svc in services]

    def _get_next_service(self) -> LLMService:
        """Return the service at the cursor and advance it, wrapping around."""
        chosen = self.services[self.current_index]
        self.current_index = (self.current_index + 1) % len(self.services)
        return chosen

    async def _try_with_fallback(
        self,
        method_name: str,
        *args,
        **kwargs
    ) -> Any:
        """Invoke ``method_name`` on each service in order until one succeeds."""
        last_error = None
        for idx, svc in enumerate(self.services):
            try:
                bound = getattr(svc, method_name)
                outcome = await bound(*args, **kwargs)
            except Exception as exc:
                last_error = exc
                print(f"⚠️ 服务 {self.service_names[idx]} 调用失败: {exc}")
                if idx < len(self.services) - 1:
                    print(f"🔄 尝试切换到下一个服务...")
                continue
            if idx > 0:
                print(f"✅ 服务 {self.service_names[idx-1]} 失败，已切换到 {self.service_names[idx]}")
            return outcome

        # Every backend raised; surface the last error.
        raise Exception(f"所有LLM服务都失败，最后一个错误: {last_error}")

    async def _try_with_round_robin(
        self,
        method_name: str,
        *args,
        **kwargs
    ) -> Any:
        """Invoke ``method_name`` on the next service in rotation."""
        svc = self._get_next_service()
        svc_label = type(svc).__name__
        try:
            return await getattr(svc, method_name)(*args, **kwargs)
        except Exception as exc:
            print(f"⚠️ 服务 {svc_label} 调用失败: {exc}")
            # The rotated-to service failed: retry across all services.
            return await self._try_with_fallback(method_name, *args, **kwargs)

    async def generate_text(
        self,
        prompt: str,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        **kwargs
    ) -> str:
        """Generate text via the configured switching strategy."""
        dispatch = (self._try_with_round_robin
                    if self.switch_mode == "round_robin"
                    else self._try_with_fallback)
        return await dispatch("generate_text", prompt, max_tokens, temperature, **kwargs)

    async def analyze_legal_case(
        self,
        case_content: str,
        question: Optional[str] = None
    ) -> Dict[str, Any]:
        """Analyze a legal case via the configured switching strategy."""
        dispatch = (self._try_with_round_robin
                    if self.switch_mode == "round_robin"
                    else self._try_with_fallback)
        return await dispatch("analyze_legal_case", case_content, question)

    async def evaluate_answer(
        self,
        question: str,
        user_answer: str,
        correct_answer: str,
        explanation: Optional[str] = None
    ) -> Dict[str, Any]:
        """Evaluate an answer via the configured switching strategy."""
        dispatch = (self._try_with_round_robin
                    if self.switch_mode == "round_robin"
                    else self._try_with_fallback)
        return await dispatch("evaluate_answer", question, user_answer, correct_answer, explanation)

    async def generate_explanation(
        self,
        question: str,
        answer: str,
        context: Optional[Dict[str, Any]] = None
    ) -> str:
        """Generate an explanation via the configured switching strategy."""
        dispatch = (self._try_with_round_robin
                    if self.switch_mode == "round_robin"
                    else self._try_with_fallback)
        return await dispatch("generate_explanation", question, answer, context)


class MockLLMService(LLMService):
    """Mock implementation used in development and tests; no external calls."""

    async def generate_text(
        self,
        prompt: str,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        **kwargs
    ) -> str:
        """Return a canned string echoing a prefix of the prompt."""
        snippet = prompt[:50]
        return f"[模拟生成] 基于提示词: {snippet}..."

    async def analyze_legal_case(
        self,
        case_content: str,
        question: Optional[str] = None
    ) -> Dict[str, Any]:
        """Analyze a case using LegalBERT if available, else canned data."""
        try:
            from app.ai.legalbert_service import get_legalbert_service
            legalbert = get_legalbert_service()
            bert_analysis = await legalbert.analyze_legal_text(case_content)
            found_entities = await legalbert.extract_entities(case_content)
            doc_class = await legalbert.classify_legal_document(case_content)

            return {
                "legal_issues": [doc_class.get("category", "合同纠纷")],
                "key_facts": [ent["text"] for ent in found_entities if ent["type"] == "行为"],
                "relevant_laws": [ent["text"] for ent in found_entities if ent["type"] == "法律条文"],
                "entities": found_entities,
                "classification": doc_class,
                "analysis": "这是一个典型的合同纠纷案例，涉及违约责任的认定。",
                "recommendation": "建议从合同成立、生效、履行等角度进行分析。",
                "bert_analysis": bert_analysis
            }
        except Exception as e:
            # LegalBERT unavailable: return fully canned mock data.
            return {
                "legal_issues": ["合同纠纷", "违约责任"],
                "key_facts": ["双方签订合同", "一方未履行"],
                "relevant_laws": ["《合同法》", "《民法典》"],
                "analysis": "这是一个典型的合同纠纷案例，涉及违约责任的认定。",
                "recommendation": "建议从合同成立、生效、履行等角度进行分析。",
                "note": f"LegalBERT未加载，使用模拟分析: {str(e)}"
            }

    async def evaluate_answer(
        self,
        question: str,
        user_answer: str,
        correct_answer: str,
        explanation: Optional[str] = None
    ) -> Dict[str, Any]:
        """Evaluate by case-insensitive literal comparison; canned feedback."""
        matched = user_answer.strip().lower() == correct_answer.strip().lower()

        return {
            "is_correct": matched,
            "error_type": None if matched else "knowledge",
            "error_category": None if matched else "法律条文理解错误",
            "error_details": {
                "mistaken_concept": None if matched else "管辖规则",
                "correct_concept": None if matched else "合同纠纷的管辖选择"
            },
            "score": 10 if matched else 0,
            "feedback": "答案正确" if matched else "答案错误，建议复习相关法律条文"
        }

    async def generate_explanation(
        self,
        question: str,
        answer: str,
        context: Optional[Dict[str, Any]] = None
    ) -> str:
        """Return a canned explanation echoing the question prefix and answer."""
        return f"[模拟解释] 对于题目: {question[:30]}...\n正确答案是: {answer}\n解释: 根据相关法律规定，该选项是正确的选择。"


# Factory: return the LLM service instance matching the configuration.
def get_llm_service() -> LLMService:
    """
    Get an LLM service instance.

    Behavior:
    1. If LLM_PROVIDER is set and configured, use that service (single-service mode).
    2. If several services are configured, build a MultiLLMService
       (multi-service mode with automatic switching).
    3. Otherwise return a MockLLMService.

    Multi-service mode:
    - fallback: try services in priority order, switching on failure.
    - round_robin: rotate through the configured services.
    - LLM_PROVIDER_PRIORITY can customize the priority order.

    Returns:
        An LLM service instance.
    """
    # Map of configured providers -> ready-to-use service instances.
    service_map = {}
    if settings.OPENAI_API_KEY:
        service_map["openai"] = OpenAIService(api_key=settings.OPENAI_API_KEY, base_url=settings.OPENAI_BASE_URL)
    if settings.TONGYI_API_KEY:
        service_map["tongyi"] = TongyiService(api_key=settings.TONGYI_API_KEY)
    if settings.ZHIPU_API_KEY:
        service_map["zhipu"] = ZhipuService(api_key=settings.ZHIPU_API_KEY, base_url=settings.ZHIPU_BASE_URL)
    # Wenxin and Anthropic are not implemented yet.
    # if settings.WENXIN_API_KEY:
    #     service_map["wenxin"] = WenxinService(...)
    # if settings.ANTHROPIC_API_KEY:
    #     service_map["anthropic"] = AnthropicService(...)

    # An explicitly requested provider wins (single-service mode).
    if settings.LLM_PROVIDER:
        provider = settings.LLM_PROVIDER.lower()
        if provider in service_map:
            print(f"✅ 使用指定的LLM服务: {provider}")
            return service_map[provider]
        else:
            print(f"⚠️ 指定的LLM服务 {provider} 未配置或不可用，将使用多服务模式")

    # Multi-service mode: order configured services by priority.
    if len(service_map) > 1:
        if settings.LLM_PROVIDER_PRIORITY:
            # Custom priority, keeping only providers that are configured.
            priority_list = [p.strip().lower() for p in settings.LLM_PROVIDER_PRIORITY.split(",")]
            priority_list = [p for p in priority_list if p in service_map]
        else:
            # Default priority: OpenAI > Tongyi > Zhipu.
            default_priority = ["openai", "tongyi", "zhipu", "wenxin", "anthropic"]
            priority_list = [p for p in default_priority if p in service_map]
        # Append any configured provider missing from the priority list
        # (this step was duplicated in both branches before).
        for provider in service_map:
            if provider not in priority_list:
                priority_list.append(provider)

        services = [service_map[p] for p in priority_list]

        if len(services) > 1:
            switch_mode = settings.LLM_SWITCH_MODE or "fallback"
            service_names = [type(s).__name__ for s in services]
            print(f"✅ 启用多模型服务模式 ({switch_mode}): {', '.join(service_names)}")
            return MultiLLMService(services=services, switch_mode=switch_mode)
        elif len(services) == 1:
            # Only one service survived filtering; return it directly.
            return services[0]

    # Single-service mode: reuse the already-built instance from service_map
    # in default priority order instead of re-instantiating from settings.
    for provider in ("openai", "tongyi", "zhipu"):
        if provider in service_map:
            return service_map[provider]

    # Nothing configured: fall back to the mock service.
    print("⚠️ 未配置任何LLM服务，使用模拟服务")
    return MockLLMService()

