"""
AI智能体评估引擎 - 双模型架构
架构：Qwen（结构化检查） + HuaTuoGPT（医学评估）
"""
from typing import Dict, Any, List, Optional
import json
import asyncio
import re
from app.schemas.assessment import IssueType, IssueSeverity, DimensionWeights
from app.core.logger import app_logger
from app.core.config import settings
from app.core.prompts.enhanced_medical_prompts import (
    SYSTEM_ROLE, 
    build_enhanced_prompt
)
from app.core.json_to_text import (
    convert_record_to_natural_language,
    build_huatuogpt_prompt,
    extract_scores_from_huatuogpt_response,
    extract_issues_from_huatuogpt_response,
    extract_suggestions_from_huatuogpt_response,
    extract_summary_from_huatuogpt_response
)


class AIEvaluator:
    """AI agent evaluator using a dual-model architecture.

    Qwen (served by Ollama) performs structured JSON checks (completeness /
    standardization) while HuaTuoGPT performs the medical assessment
    (rationality / accuracy). When neither AI backend is available or an
    evaluation fails, the evaluator falls back to the local rule engine.
    """

    def __init__(self, weights: Optional[DimensionWeights] = None):
        """
        Initialize the AI evaluator.

        Args:
            weights: custom dimension weights; defaults are used when omitted.
        """
        self.weights = weights or DimensionWeights()
        self.use_huatuogpt = settings.USE_HUATUOGPT
        self.use_local_llm = settings.USE_LOCAL_LLM
        self.huatuogpt_wrapper = None

        # Backend priority: HuaTuoGPT + Qwen > Qwen alone > rule engine.
        if self.use_huatuogpt:
            try:
                from app.core.huatuogpt_wrapper import get_huatuogpt
                self.huatuogpt_wrapper = get_huatuogpt()
                app_logger.info("🧬 使用 HuaTuoGPT 中文医疗专业模型")
            except Exception as e:
                # HuaTuoGPT is optional; degrade to Qwen-only mode on failure.
                app_logger.error(f"❌ HuaTuoGPT 初始化失败: {e}")
                app_logger.warning("⚠️  回退到 Qwen 模型")
                self.use_huatuogpt = False

        # Qwen handles the structured (format/completeness) checks.
        if self.use_local_llm:
            app_logger.info(f"🤖 使用 Qwen 模型进行结构化检查: {settings.QWEN_MODEL}")

    async def evaluate(self, record_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Run the AI evaluation (dual-model architecture).

        Flow:
        1. Qwen performs the structured JSON check (standardization).
        2. HuaTuoGPT performs the medical assessment (rationality, accuracy).
        3. The two results are merged.

        Args:
            record_data: medical record data

        Returns:
            Evaluation result dict (scores, issues, summary, suggestions).
        """
        patient_name = record_data.get('patient_name', '未知患者')
        app_logger.info(f"🔍 开始双模型AI评估病历: {patient_name}")

        try:
            if self.use_huatuogpt and self.huatuogpt_wrapper:
                # Dual-model evaluation (Qwen + HuaTuoGPT).
                result = await self._dual_model_evaluate(record_data)
            elif self.use_local_llm:
                # Single-model evaluation (Qwen only).
                app_logger.info(f"🤖 使用 Qwen 单模型评估: {settings.QWEN_MODEL}")
                prompt = build_enhanced_prompt(record_data)
                response = await self._call_qwen(prompt)
                result = self._parse_llm_response_enhanced(response)
            else:
                # No AI backend enabled: rule-engine evaluation.
                app_logger.warning("⚠️  未启用 AI 模型，使用规则引擎")
                return await self._fallback_to_rule_engine(record_data, "AI模型未启用")

            app_logger.info(f"🎯 AI评估完成！综合得分: {result['overall_score']}分")
            return result

        except Exception as e:
            app_logger.error(f"❌ AI评估失败: {e}", exc_info=True)

            # Smart fallback: switch to the local rule engine on any failure.
            app_logger.warning(f"⚠️  AI模型评估失败，自动回退到本地规则引擎")
            return await self._fallback_to_rule_engine(record_data, str(e))

    async def _dual_model_evaluate(self, record_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Dual-model evaluation (Qwen + HuaTuoGPT).

        Args:
            record_data: medical record data

        Returns:
            Merged evaluation result.
        """
        app_logger.info("🎭 开始双模型评估：Qwen（结构化） + HuaTuoGPT（医学评估）")

        # Step 1: Qwen structured check (JSON format; standardization).
        app_logger.info("📋 步骤 1/3: Qwen 结构化检查（规范性）")
        qwen_prompt = self._build_qwen_structure_prompt(record_data)
        qwen_response = await self._call_qwen(qwen_prompt)
        qwen_result = self._parse_llm_response_enhanced(qwen_response)

        app_logger.info(f"✅ Qwen 结构化检查完成，规范性得分: {qwen_result.get('standardization_score', 0)}分")

        # Step 2: HuaTuoGPT medical assessment (natural language; rationality, accuracy).
        app_logger.info("🧬 步骤 2/3: HuaTuoGPT 医学评估（合理性、精准性）")

        # Convert the JSON record into natural language for HuaTuoGPT.
        natural_language_record = convert_record_to_natural_language(record_data)
        app_logger.info(f"✅ JSON → 自然语言转换完成，长度: {len(natural_language_record)} 字符")

        # Build the HuaTuoGPT prompt.
        huatuogpt_prompt = build_huatuogpt_prompt(natural_language_record)

        # Call HuaTuoGPT.
        try:
            huatuogpt_response = await self._call_huatuogpt(huatuogpt_prompt)
            app_logger.info(f"✅ HuaTuoGPT 评估完成，长度: {len(huatuogpt_response)} 字符")

            # Parse the HuaTuoGPT response.
            huatuogpt_result = self._parse_huatuogpt_response(huatuogpt_response)
            app_logger.info(f"✅ HuaTuoGPT 评估得分: {huatuogpt_result.get('overall_score', 0)}分")

        except Exception as e:
            app_logger.error(f"❌ HuaTuoGPT 评估失败: {e}")
            # HuaTuoGPT failed: return the Qwen result alone.
            app_logger.warning("⚠️  HuaTuoGPT 失败，仅使用 Qwen 结果")
            return qwen_result

        # Step 3: merge the two evaluation results.
        app_logger.info("🔀 步骤 3/3: 合并 Qwen 和 HuaTuoGPT 评估结果")
        merged_result = self._merge_evaluation_results(qwen_result, huatuogpt_result)

        return merged_result

    def _build_qwen_structure_prompt(self, record_data: Dict[str, Any]) -> str:
        """
        Build the Qwen structured-check prompt.

        Focuses on:
        - completeness (required fields)
        - standardization (format, terminology)

        Args:
            record_data: medical record data

        Returns:
            Qwen prompt string.
        """
        # Reuse the existing enhanced prompt, then emphasize the structural check.
        prompt = build_enhanced_prompt(record_data)

        # Qwen-specific instruction appended to the base prompt.
        qwen_instruction = """

特别说明：
本次评估重点关注 **完整性** 和 **规范性**，请严格检查：
1. 完整性（30分）：必填字段是否齐全
2. 规范性（25分）：格式、术语是否标准

请按照 JSON 格式输出评估结果。"""

        return prompt + qwen_instruction

    async def _call_huatuogpt(self, prompt: str) -> str:
        """
        Call the HuaTuoGPT model.

        Args:
            prompt: prompt text (Chinese natural language)

        Returns:
            HuaTuoGPT response text.

        Raises:
            Exception: when generation fails and model fallback is disabled.
        """
        try:
            app_logger.info("🧬 开始调用 HuaTuoGPT 生成医学评估...")

            # Generate with HuaTuoGPT using sampling parameters from settings.
            response = await self.huatuogpt_wrapper.generate(
                prompt=prompt,
                max_length=settings.HUATUOGPT_MAX_LENGTH,
                temperature=settings.HUATUOGPT_TEMPERATURE,
                top_p=settings.HUATUOGPT_TOP_P,
                top_k=settings.HUATUOGPT_TOP_K
            )

            app_logger.info(f"✅ HuaTuoGPT 生成完成，长度: {len(response)} 字符")
            return response

        except Exception as e:
            app_logger.error(f"❌ HuaTuoGPT 调用失败: {e}")

            # Automatic fallback to Qwen when enabled.
            if settings.ENABLE_MODEL_FALLBACK and self.use_local_llm:
                app_logger.warning("⚠️  自动回退到 Qwen 模型")
                # Rebuild a Qwen prompt (including the medical-assessment part).
                # NOTE(review): this builds the prompt from an empty record
                # ({'raw_data': {}}), not from the record being evaluated —
                # looks unintentional; verify against build_enhanced_prompt.
                full_prompt = build_enhanced_prompt({'raw_data': {}})
                return await self._call_qwen(full_prompt)
            else:
                raise

    async def _call_qwen(self, prompt: str, model_name: Optional[str] = None) -> str:
        """
        Call the Qwen model (via the Ollama HTTP API).

        Args:
            prompt: prompt text
            model_name: explicit model name (used for fallback calls)

        Returns:
            Qwen response text.

        Raises:
            Exception: on timeout, connection failure, or non-200 response
                (after the secondary model fallback has been attempted).
        """
        import aiohttp

        current_model = model_name or settings.QWEN_MODEL
        is_fallback = model_name is not None

        try:
            # Configure the request timeout (fallback model may use its own).
            if is_fallback and hasattr(settings, 'FALLBACK_LLM_TIMEOUT'):
                timeout = aiohttp.ClientTimeout(total=settings.FALLBACK_LLM_TIMEOUT)
            else:
                timeout = aiohttp.ClientTimeout(total=settings.QWEN_TIMEOUT)

            async with aiohttp.ClientSession(timeout=timeout) as session:
                url = f"{settings.LOCAL_LLM_BASE_URL}/api/generate"

                # Context-window size (fallback model may use its own).
                if is_fallback and hasattr(settings, 'FALLBACK_LLM_NUM_CTX'):
                    num_ctx = settings.FALLBACK_LLM_NUM_CTX
                else:
                    num_ctx = settings.QWEN_NUM_CTX

                data = {
                    "model": current_model,
                    "prompt": prompt,
                    "stream": False,
                    "system": SYSTEM_ROLE,
                    "options": {
                        "temperature": settings.QWEN_TEMPERATURE,
                        "num_ctx": num_ctx,
                        "top_p": 0.9,
                        "top_k": 40,
                        "repeat_penalty": 1.1,
                        "num_predict": 2048,
                    }
                }

                app_logger.info(
                    f"🚀 调用 Qwen: {settings.LOCAL_LLM_BASE_URL}\n"
                    f"   模型: {current_model}\n"
                    f"   温度: {settings.QWEN_TEMPERATURE}\n"
                    f"   上下文: {num_ctx}"
                )

                async with session.post(url, json=data) as response:
                    if response.status != 200:
                        error_text = await response.text()
                        raise Exception(f"Qwen 返回错误: {response.status} - {error_text}")

                    result = await response.json()
                    response_text = result.get('response', '')

                    # Log timing stats (Ollama reports total_duration in ns).
                    if 'total_duration' in result:
                        duration_sec = result['total_duration'] / 1e9
                        app_logger.info(f"⏱️  推理耗时: {duration_sec:.2f}秒")

                    app_logger.info(f"✅ Qwen 响应成功，长度: {len(response_text)} 字符")
                    return response_text

        except asyncio.TimeoutError:
            raise Exception(f"Qwen 响应超时（{settings.QWEN_TIMEOUT}秒）")
        except aiohttp.ClientConnectorError:
            raise Exception(f"无法连接到 Ollama 服务 ({settings.LOCAL_LLM_BASE_URL})")
        except Exception as e:
            app_logger.error(f"❌ 调用 Qwen({current_model}) 失败: {e}")

            # Secondary fallback: primary Qwen model → FALLBACK_LLM_MODEL
            # (only one level deep: is_fallback guards against recursion).
            if (settings.ENABLE_MODEL_FALLBACK 
                and not is_fallback
                and current_model == settings.QWEN_MODEL
                and hasattr(settings, 'FALLBACK_LLM_MODEL')
                and settings.FALLBACK_LLM_MODEL != settings.QWEN_MODEL):

                app_logger.warning(f"⚠️  二级回退：{current_model} → {settings.FALLBACK_LLM_MODEL}")
                return await self._call_qwen(prompt, model_name=settings.FALLBACK_LLM_MODEL)
            else:
                raise

    def _parse_huatuogpt_response(self, response: str) -> Dict[str, Any]:
        """
        Parse the HuaTuoGPT response.

        Args:
            response: HuaTuoGPT's natural-language response

        Returns:
            Structured evaluation result dict.
        """
        # Extract the scores.
        scores = extract_scores_from_huatuogpt_response(response)

        # Extract the problem descriptions.
        issues_text = extract_issues_from_huatuogpt_response(response)

        # Convert them into the standard issues format.
        issues = []
        for i, issue_desc in enumerate(issues_text, 1):
            issues.append({
                'issue_type': 'consistency',  # HuaTuoGPT mainly covers consistency and accuracy
                'severity': 'major',
                'field_name': f'医学评估问题 {i}',
                'issue_description': issue_desc,
                'suggestion': '',
                'score_deduction': 0
            })

        # Extract the suggestions.
        suggestions = extract_suggestions_from_huatuogpt_response(response)

        # Extract the summary.
        summary = extract_summary_from_huatuogpt_response(response)

        # Map HuaTuoGPT scores onto the standard dimensions.
        # HuaTuoGPT: medical rationality, clinical accuracy, logical consistency, safety
        # Standard dimensions: completeness, standardization, consistency, accuracy
        result = {
            'overall_score': scores['overall_score'],
            'completeness_score': 0,  # not assessed by HuaTuoGPT (Qwen's job)
            'standardization_score': 0,  # not assessed by HuaTuoGPT (Qwen's job)
            'consistency_score': scores['logic_consistency_score'],  # logical consistency
            'accuracy_score': scores['clinical_accuracy_score'],  # clinical accuracy
            'medical_rationality_score': scores['medical_rationality_score'],  # medical rationality
            'safety_score': scores['safety_score'],  # medical safety
            'issues': issues,
            'summary': summary,
            'suggestions': suggestions
        }

        return result

    def _merge_evaluation_results(
        self, 
        qwen_result: Dict[str, Any], 
        huatuogpt_result: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Merge the Qwen and HuaTuoGPT evaluation results.

        Strategy:
        - completeness: 100% Qwen
        - standardization: 100% Qwen
        - consistency: 100% HuaTuoGPT
        - accuracy: 100% HuaTuoGPT

        Args:
            qwen_result: Qwen evaluation result
            huatuogpt_result: HuaTuoGPT evaluation result

        Returns:
            Merged evaluation result.
        """
        # Per-dimension scores. NOTE(review): the scores are summed as-is,
        # so each is assumed to already be on its sub-scale (30/25/25/...);
        # no rescaling to a 100-point scale happens here — verify the
        # upstream models honor those caps.
        completeness_score = qwen_result.get('completeness_score', 0)
        standardization_score = qwen_result.get('standardization_score', 0)
        consistency_score = huatuogpt_result.get('consistency_score', 0)
        accuracy_score = huatuogpt_result.get('accuracy_score', 0)

        # Overall score is the plain sum of the four dimension scores.
        overall_score = int(
            completeness_score +
            standardization_score +
            consistency_score +
            accuracy_score
        )

        # Concatenate the issue lists from both models.
        all_issues = qwen_result.get('issues', []) + huatuogpt_result.get('issues', [])

        # Merge suggestions, deduplicated, keep at most 5.
        all_suggestions = list(set(
            qwen_result.get('suggestions', []) + 
            huatuogpt_result.get('suggestions', [])
        ))[:5]

        # Compose the summary, prefixed with the dual-model marker.
        summary = f"【双模型评估】Qwen结构化检查 + HuaTuoGPT医学评估。{huatuogpt_result.get('summary', '')}"

        merged_result = {
            'overall_score': overall_score,
            'completeness_score': completeness_score,
            'standardization_score': standardization_score,
            'consistency_score': consistency_score,
            'accuracy_score': accuracy_score,
            'issues': all_issues,
            'summary': summary,
            'suggestions': all_suggestions
        }

        app_logger.info(
            f"🔀 评估结果合并完成：\n"
            f"   完整性: {completeness_score}分 (Qwen)\n"
            f"   规范性: {standardization_score}分 (Qwen)\n"
            f"   一致性: {consistency_score}分 (HuaTuoGPT)\n"
            f"   准确性: {accuracy_score}分 (HuaTuoGPT)\n"
            f"   综合得分: {overall_score}分"
        )

        return merged_result

    def _parse_llm_response_enhanced(self, response: str) -> Dict[str, Any]:
        """
        Enhanced LLM-response parser supporting multiple formats and repair.

        Args:
            response: raw text returned by the LLM

        Returns:
            Parsed evaluation result; a degraded placeholder result when
            parsing fails entirely.
        """
        try:
            # 1. Try parsing the whole response as JSON.
            result = self._try_parse_json(response)

            if result is None:
                # 2. Try extracting JSON (it may sit inside a markdown code block).
                result = self._extract_json_from_text(response)

            if result is None:
                raise ValueError("无法从响应中提取有效的JSON")

            # 3. Validate and repair the required fields.
            result = self._validate_and_fix_result(result)

            return result

        except Exception as e:
            app_logger.error(f"❌ 解析LLM响应失败: {e}")
            app_logger.error(f"原始响应前500字符: {response[:500]}")

            # Return the degraded placeholder result.
            return self._get_fallback_result(str(e))

    def _try_parse_json(self, text: str) -> Optional[Dict[str, Any]]:
        """Try to parse the whole text as JSON; return None on failure."""
        try:
            return json.loads(text)
        except json.JSONDecodeError:
            return None

    def _extract_json_from_text(self, text: str) -> Optional[Dict[str, Any]]:
        """Extract a JSON object from text (handles markdown code fences)."""
        try:
            # Match ```json ... ``` or ``` ... ``` fenced blocks first.
            json_pattern = r'```(?:json)?\s*(\{.*?\})\s*```'
            match = re.search(json_pattern, text, re.DOTALL)
            if match:
                return json.loads(match.group(1))

            # Otherwise match the first complete JSON object {...}
            # (the pattern allows one level of nested braces).
            json_obj_pattern = r'\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}'
            match = re.search(json_obj_pattern, text, re.DOTALL)
            if match:
                return json.loads(match.group(0))

            return None
        except Exception as e:
            app_logger.warning(f"从文本提取JSON失败: {e}")
            return None

    def _validate_and_fix_result(self, result: Dict[str, Any]) -> Dict[str, Any]:
        """Validate and repair an evaluation result in place, then return it."""
        # Required fields with their defaults.
        required_fields = {
            'completeness_score': 0,
            'standardization_score': 0,
            'consistency_score': 0,
            'accuracy_score': 0,
            'overall_score': 0,
            'issues': [],
            'summary': '',
            'suggestions': []
        }

        # Fill in any missing fields.
        for field, default_value in required_fields.items():
            if field not in result:
                app_logger.warning(f"⚠️  缺少字段 {field}，使用默认值: {default_value}")
                result[field] = default_value

        # Clamp scores to each dimension's own scale.
        # NOTE(review): accuracy_score is capped at 20 here, while the merge
        # docstring/comments elsewhere treat it as 25 — confirm which cap is
        # intended (30+25+25+20 = 100 as written).
        score_ranges = {
            'completeness_score': (0, 30),
            'standardization_score': (0, 25),
            'consistency_score': (0, 25),
            'accuracy_score': (0, 20),
            'overall_score': (0, 100)
        }

        for score_field, (min_val, max_val) in score_ranges.items():
            score = result[score_field]
            if not isinstance(score, (int, float)):
                app_logger.warning(f"⚠️  {score_field} 类型错误: {type(score)}, 重置为0")
                result[score_field] = 0
            elif score < min_val or score > max_val:
                app_logger.warning(f"⚠️  {score_field} 超出范围: {score}, 修正为{min_val}-{max_val}")
                result[score_field] = max(min_val, min(max_val, int(score)))

        # Check that overall_score agrees with the dimension sum
        # (tolerance of 2 points; otherwise use the computed sum).
        calculated_overall = (
            result['completeness_score'] +
            result['standardization_score'] +
            result['consistency_score'] +
            result['accuracy_score']
        )
        if abs(calculated_overall - result['overall_score']) > 2:
            app_logger.warning(
                f"⚠️  综合得分不一致: AI输出{result['overall_score']}，"
                f"计算值{calculated_overall}，使用计算值"
            )
            result['overall_score'] = calculated_overall

        # Ensure issues is a list.
        if not isinstance(result['issues'], list):
            app_logger.warning(f"⚠️  issues字段类型错误，重置为空列表")
            result['issues'] = []

        # Ensure suggestions is a list (wrap a bare string).
        if not isinstance(result['suggestions'], list):
            if isinstance(result['suggestions'], str):
                result['suggestions'] = [result['suggestions']]
            else:
                result['suggestions'] = []

        return result

    def _get_fallback_result(self, error_msg: str) -> Dict[str, Any]:
        """Build the degraded result used only when response parsing fails."""
        return {
            'overall_score': 0,
            'completeness_score': 0,
            'standardization_score': 0,
            'consistency_score': 0,
            'accuracy_score': 0,
            'issues': [{
                'issue_type': 'accuracy',
                'severity': 'critical',
                'field_name': 'AI评估系统',
                'issue_description': f'AI评估响应解析失败: {error_msg}',
                'suggestion': '请检查AI模型配置或联系管理员',
                'score_deduction': 0
            }],
            'summary': f'AI评估响应解析失败: {error_msg[:100]}',
            'suggestions': [
                '请检查AI模型是否正常工作',
                '请尝试重新评估',
                '如问题持续，请联系管理员'
            ]
        }

    async def _fallback_to_rule_engine(self, record_data: Dict[str, Any], error_msg: str) -> Dict[str, Any]:
        """
        Fall back to the local rule-engine evaluation.

        Args:
            record_data: medical record data
            error_msg: description of the AI failure that triggered the fallback

        Returns:
            Rule-engine evaluation result (or the degraded placeholder result
            if the rule engine also fails).
        """
        try:
            from app.core.evaluator import QualityEvaluator

            app_logger.info("🔄 启动本地规则引擎评估...")
            rule_evaluator = QualityEvaluator(self.weights)
            result = rule_evaluator.evaluate(record_data)

            # Mark in the summary that the fallback mechanism was used.
            original_summary = result.get('summary', '')
            result['summary'] = (
                f"[规则引擎评估] {original_summary} "
                f"(AI模型暂时不可用，已自动使用本地规则引擎确保评估准确性)"
            )

            # Prepend a hint about the AI failure to the suggestions.
            result['suggestions'].insert(0, f"提示：AI模型评估失败({error_msg[:50]}...)，已使用规则引擎评估")

            app_logger.info(f"✅ 规则引擎评估完成，得分: {result['overall_score']}分")
            return result

        except Exception as fallback_error:
            app_logger.error(f"❌ 规则引擎回退也失败: {fallback_error}", exc_info=True)

            # Last resort: return the basic degraded result.
            return self._get_fallback_result(f"AI模型失败 + 规则引擎失败")


class HybridEvaluator:
    """Hybrid evaluator (rule engine + AI agent).

    The rule engine gives a fast baseline screening while the AI evaluator
    provides deeper semantic analysis; every dimension score is blended as
    rule 40% + AI 60%.
    """

    # Blend weights for the two evaluators (must sum to 1.0).
    RULE_WEIGHT = 0.4
    AI_WEIGHT = 0.6

    def __init__(self, weights: Optional[DimensionWeights] = None):
        """Initialize the hybrid evaluator.

        Args:
            weights: custom dimension weights; defaults are used when omitted.
        """
        from app.core.evaluator import QualityEvaluator

        self.rule_evaluator = QualityEvaluator(weights)
        self.ai_evaluator = AIEvaluator(weights)
        self.weights = weights or DimensionWeights()

    def _blend(self, rule_score: float, ai_score: float) -> int:
        """Blend one rule-engine score with one AI score (40% / 60%)."""
        return int(rule_score * self.RULE_WEIGHT + ai_score * self.AI_WEIGHT)

    async def evaluate(self, record_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Run the hybrid evaluation.

        Strategy: rule-engine quick screening + AI deep analysis, merged
        with a 40/60 weighting.

        Args:
            record_data: medical record data

        Returns:
            Merged evaluation result dict.
        """
        app_logger.info(f"开始混合评估病历: {record_data.get('patient_name')}")

        # 1. Rule-engine evaluation (fast, baseline checks).
        rule_result = self.rule_evaluator.evaluate(record_data)

        # 2. AI evaluation (deep, semantic analysis).
        ai_result = await self.ai_evaluator.evaluate(record_data)

        # 3. Merge results (rule engine 40% + AI 60%).
        # Compute the blended overall score once and reuse it.
        overall = self._blend(rule_result['overall_score'], ai_result['overall_score'])

        # Deduplicate suggestions while preserving order: list(set(...))
        # gives a non-deterministic order under hash randomization.
        merged_suggestions = list(dict.fromkeys(
            rule_result['suggestions'] + ai_result['suggestions']
        ))[:5]

        result = {
            'overall_score': overall,
            'completeness_score': self._blend(rule_result['completeness_score'], ai_result['completeness_score']),
            'standardization_score': self._blend(rule_result['standardization_score'], ai_result['standardization_score']),
            'consistency_score': self._blend(rule_result['consistency_score'], ai_result['consistency_score']),
            'accuracy_score': self._blend(rule_result['accuracy_score'], ai_result['accuracy_score']),
            'issues': rule_result['issues'] + ai_result['issues'],
            'summary': f"规则评估: {rule_result['overall_score']}分 | AI评估: {ai_result['overall_score']}分 | 综合: {overall}分。{ai_result['summary']}",
            'suggestions': merged_suggestions
        }

        app_logger.info(f"混合评估完成，综合得分: {result['overall_score']}")
        return result
