"""
NLU (Natural Language Understanding) 自然语言理解服务
使用LLM进行任务判断，将ASR识别的文本分类为简单/隐私或复杂/非隐私任务
"""

import asyncio
import json
import time
from typing import Dict, Any, Optional
from config.settings import config
from services.llm import LLMService
from utils.logger import (
    emobot_logger,
    log_function_call,
    log_function_result,
    log_function_error,
)
from utils.performance_logger import (
    log_api_call,
    track_step
)

logger = emobot_logger.get_logger()


class NLUService:
    """NLU (natural language understanding) service, optimized.

    Classifies ASR-recognized text into task categories ("简单/隐私" vs
    "复杂/非隐私") using, in order: an in-memory result cache, a fast
    keyword pre-check for unambiguous greetings, and an LLM fallback
    (qwen-turbo).
    """

    # Upper bound on cached classifications; prevents unbounded memory
    # growth in a long-running service.  Oldest entry is evicted when full.
    _CACHE_MAX_SIZE = 1024

    def __init__(self):
        self.llm_service = LLMService()
        # Simple classification cache keyed on normalized (stripped,
        # lower-cased) input text.
        self._cache = {}
        # Keyword hint lists.  NOTE(review): these are currently not read by
        # _quick_analysis, which uses its own greeting list — kept for
        # backward compatibility with any external readers.
        self._complex_keywords = [
            "天气", "查询", "工具", "心理学", "认知", "焦虑", "抑郁", "正念", "冥想",
            "治疗", "症状", "方法", "技术", "分析", "搜索", "网络", "外部"
        ]
        self._simple_keywords = [
            "你好", "谢谢", "再见", "问候", "闲聊", "聊天", "个人", "隐私"
        ]

    def _cache_put(self, key: str, result: Dict[str, Any]) -> None:
        """Store a copy of *result* under *key*, evicting the oldest entry when full."""
        if len(self._cache) >= self._CACHE_MAX_SIZE:
            # dicts preserve insertion order (Python 3.7+), so this drops
            # the oldest cached classification.
            self._cache.pop(next(iter(self._cache)))
        self._cache[key] = result.copy()

    async def analyze_task(self, text: str, task_id: str = None, robot_id: str = None) -> Dict[str, Any]:
        """
        Analyze text content and classify the task type.

        Args:
            text: Text recognized by ASR.
            task_id: Task identifier, echoed back in the result.
            robot_id: Robot identifier, echoed back in the result.

        Returns:
            Dict with the analysis result.  On failure, "success" is False,
            "任务类型" is "未知" and "error" carries the message — the
            exception is not propagated to the caller.
        """
        start_time = time.time()
        log_function_call("NLUService.analyze_task", {
            "text": text[:100] + "..." if len(text) > 100 else text,
            "task_id": task_id,
            "robot_id": robot_id
        })

        try:
            if not text or not text.strip():
                raise ValueError("输入文本不能为空")

            logger.info(f"开始NLU任务分析: {text[:50]}...")

            # 1. Cache lookup.
            cache_key = text.strip().lower()
            if cache_key in self._cache:
                cached_result = self._cache[cache_key].copy()
                # Bug fix: stamp the *caller's* identifiers on the returned
                # result instead of the stale ids stored when the entry was
                # first created.
                cached_result["task_id"] = task_id
                cached_result["robot_id"] = robot_id
                cached_result["from_cache"] = True
                duration = (time.time() - start_time) * 1000
                logger.info(f"NLU缓存命中，耗时: {duration:.2f}ms")
                return cached_result

            # 2. Fast keyword pre-check (fires only on unambiguous greetings).
            quick_result = self._quick_analysis(text)
            if quick_result:
                # Cache before stamping ids so the stored entry stays
                # request-independent.
                self._cache_put(cache_key, quick_result)
                quick_result["task_id"] = task_id
                quick_result["robot_id"] = robot_id
                duration = (time.time() - start_time) * 1000
                logger.info(f"NLU快速分析完成，耗时: {duration:.2f}ms")
                return quick_result

            # 3. Fall back to the LLM for everything else.
            log_api_call(
                api_name="NLU_TASK_ANALYSIS",
                api_type="LLM",
                request_data={
                    "text": text,
                    "text_length": len(text),
                    "task_id": task_id,
                    "robot_id": robot_id
                }
            )

            prompt = self._build_task_analysis_prompt(text)
            messages = [
                {"role": "system", "content": "你是任务分析专家，请严格按照JSON格式返回分析结果。"},
                {"role": "user", "content": prompt},
            ]

            # NLU deliberately uses the lightweight qwen-turbo model for speed.
            # Any exception surfaces through the outer handler, which logs it
            # once (the previous inner try/except produced duplicate logs).
            result = self.llm_service.chat_completion(messages, model="qwen-turbo")
            if not result:
                raise Exception("LLM调用失败")

            analysis_result = self._parse_llm_response(result, text, task_id, robot_id)

            duration = (time.time() - start_time) * 1000
            log_function_result("NLUService.analyze_task", analysis_result, duration)
            logger.info(f"NLU任务分析完成，耗时: {duration:.2f}ms")

            return analysis_result

        except Exception as e:
            duration = (time.time() - start_time) * 1000
            log_function_error("NLUService.analyze_task", e, {
                "text": text[:100] if text else "",
                "task_id": task_id,
                "robot_id": robot_id
            })
            logger.error(f"NLU任务分析失败: {e}")

            # Degrade gracefully: callers receive an explicit error result
            # rather than an exception.
            return {
                "task_id": task_id,
                "text": text,
                "任务类型": "未知",
                "confidence": 0.0,
                "timestamp": time.time(),
                "error": str(e),
                "success": False,
                "robot_id": robot_id
            }

    def _build_task_analysis_prompt(self, text: str) -> str:
        """Build the (deliberately minimal) task-classification prompt.

        The prompt is kept very short to reduce LLM latency; the model is
        asked to answer in strict JSON.
        """
        prompt = f"""分析文本类型: "{text}"

规则:
- 天气/工具/心理学/技术问题 → 复杂/非隐私
- 问候/闲聊/隐私 → 简单/隐私

返回JSON: {{"任务类型":"复杂/非隐私","confidence":0.9,"reasoning":"包含工具调用"}}"""

        return prompt

    def _quick_analysis(self, text: str) -> Optional[Dict[str, Any]]:
        """Fast keyword analysis; returns a result only for unambiguous cases.

        Only short texts (< 20 chars) containing a clear greeting are
        classified here; everything else returns None and goes to the LLM.
        The returned task_id/robot_id are placeholders — the caller
        (analyze_task) stamps the real request identifiers.
        """
        text_lower = text.lower()

        # Only fast-path very clear, short greetings.
        simple_greetings = ["你好", "谢谢", "再见", "早上好", "晚上好", "晚安"]
        for greeting in simple_greetings:
            if greeting in text_lower and len(text.strip()) < 20:
                return {
                    "task_id": None,
                    "text": text,
                    "任务类型": "简单/隐私",
                    "confidence": 0.95,
                    "reasoning": f"明确简单问候: {greeting}",
                    "timestamp": time.time(),
                    "success": True,
                    "robot_id": None,
                    "analysis_method": "quick_keywords"
                }

        # Everything else is decided by the LLM.
        return None

    def _parse_llm_response(self, llm_result: str, original_text: str, task_id: str = None, robot_id: str = None) -> Dict[str, Any]:
        """Parse the LLM's JSON response into a normalized result dict.

        Accepts either a bare JSON object or JSON embedded in surrounding
        prose.  Raises on unparseable responses (handled by analyze_task).
        """
        try:
            # Try direct JSON first; otherwise extract the first {...} span.
            if llm_result.strip().startswith('{'):
                result_data = json.loads(llm_result.strip())
            else:
                import re
                json_match = re.search(r'\{.*\}', llm_result, re.DOTALL)
                if json_match:
                    result_data = json.loads(json_match.group())
                else:
                    raise ValueError("无法从LLM响应中提取JSON")

            # Validate and clean the fields, with safe defaults.
            task_type = result_data.get("任务类型", "未知")
            confidence = float(result_data.get("confidence", 0.5))
            reasoning = result_data.get("reasoning", "")

            # Normalize the task type; "复杂"/"非隐私" is checked first since
            # "隐私" is a substring of "非隐私".
            if "复杂" in task_type or "非隐私" in task_type:
                normalized_task_type = "复杂/非隐私"
            elif "简单" in task_type or "隐私" in task_type:
                normalized_task_type = "简单/隐私"
            else:
                normalized_task_type = "未知"

            return {
                "task_id": task_id,
                "text": original_text,
                "任务类型": normalized_task_type,
                "confidence": confidence,
                "reasoning": reasoning,
                "timestamp": time.time(),
                "success": True,
                "robot_id": robot_id,
                "raw_llm_response": llm_result
            }

        except Exception as e:
            logger.error(f"解析LLM响应失败: {e}, 原始响应: {llm_result}")
            # Bare raise preserves the original traceback (raise e would
            # reset its origin to this line).
            raise

    async def analyze_intent(self, text: str) -> Dict[str, Any]:
        """
        Analyze text intent (alias of analyze_task, kept for compatibility).

        Args:
            text: Text to analyze.

        Returns:
            Dict with the intent analysis result.
        """
        return await self.analyze_task(text)

