#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
RAG性能优化服务（统一版）
包含：普通响应、流式响应、缓存支持
"""
import asyncio
import hashlib
import json
import time
from typing import AsyncGenerator, Dict, List, Optional, Tuple

from services.llm import LLMService
# from cache.redis import CacheService  # 可选缓存支持

class RAGOptimizer:
    """Unified RAG performance optimizer.

    Implements a layered answering strategy, fastest path first:

    1. preset canned answers for frequent questions,
    2. keyword-driven quick templates,
    3. a simplified (or streaming) LLM call as the fallback.

    Optionally fronted by a Redis cache when ``enable_cache`` is True and
    the optional cache package is importable.
    """

    def __init__(self, enable_cache: bool = False):
        """Create the optimizer.

        Args:
            enable_cache: attach the optional Redis-backed ``CacheService``.
                Silently downgraded to ``False`` when the cache package
                cannot be imported, so deployments without Redis still work.
        """
        self.llm_service = LLMService()
        self.enable_cache = enable_cache

        # The cache backend is imported lazily because it is optional.
        if enable_cache:
            try:
                from cache.redis import CacheService
                self.cache_service = CacheService()
            except ImportError:
                self.cache_service = None
                self.enable_cache = False
        else:
            self.cache_service = None

        # Canned answers for frequent questions. "short" feeds the fast
        # paths, "full" feeds the detailed streaming path.
        self.preset_answers = {
            "什么是认知行为疗法": {
                "short": "认知行为疗法(CBT)是一种心理治疗方法，通过改变负面思维模式来改善情绪和行为。",
                "full": "认知行为疗法（Cognitive Behavioral Therapy，简称CBT）是一种广泛应用于心理治疗领域的、以问题为导向、结构化的心理治疗方法。它基于心理学中'认知'与'行为'之间的相互作用，旨在通过改变个体的思维模式和行为反应来改善情绪和心理状态。"
            },
            "如何缓解焦虑情绪": {
                "short": "缓解焦虑的方法包括深呼吸、正念冥想、规律运动、充足睡眠和寻求专业帮助。",
                "full": "缓解焦虑情绪需要综合方法：1)深呼吸练习 2)正念冥想 3)规律运动 4)充足睡眠 5)限制咖啡因 6)寻求专业帮助"
            },
            "抑郁症的症状有哪些": {
                "short": "抑郁症主要症状包括持续低落情绪、兴趣丧失、疲劳、睡眠问题、食欲变化、注意力不集中等。",
                "full": "抑郁症的主要症状包括：1)持续的低落情绪 2)对日常活动失去兴趣 3)疲劳和精力不足 4)睡眠问题 5)食欲变化 6)注意力不集中 7)自我价值感低 8)自杀念头"
            },
            "正念冥想的方法": {
                "short": "正念冥想包括专注呼吸、身体扫描、观察思绪等练习，建议从5-10分钟开始。",
                "full": "正念冥想的基本方法：1)专注呼吸练习 2)身体扫描 3)观察思绪 4)正念行走 5)慈心冥想。建议从5-10分钟开始，逐渐增加时间。"
            }
        }

        # Quick response templates keyed by topic category.
        self.quick_templates = {
            "心理学知识": "作为心理学专家，{question}的简要回答是：{answer}。如需详细了解，请告诉我。",
            "治疗方法": "关于{question}，建议的方法是：{answer}。需要具体指导可以继续询问。",
            "疾病症状": "关于{question}，主要症状包括：{answer}。如有疑虑请咨询专业医生。"
        }

    # ==================== Plain (non-streaming) responses ====================

    async def get_optimized_rag_response(self, query: str, user_id: Optional[str] = None) -> Dict:
        """Return an optimized answer for ``query``.

        Strategy: preset answer -> quick template -> simplified LLM call.

        Args:
            query: user question, matched by substring against preset keys
                and template keywords.
            user_id: currently unused here; kept for interface parity with
                the cache helpers.

        Returns:
            Dict with keys ``response``, ``source``, ``optimized``,
            ``duration_ms`` and ``response_length``.
        """
        start_time = time.time()
        response_data = {
            "response": "",
            "source": "LLM",
            "optimized": False,
            "duration_ms": 0.0,
            "response_length": 0
        }

        # 1. Preset answers (fastest path, no model call).
        preset = self._check_preset_answers(query)
        if preset:
            response_data["response"] = preset["response"]
            response_data["source"] = preset["source"]
            response_data["optimized"] = True
            return self._finalize(response_data, start_time)

        # 2. Hard-wired quick-template combinations.
        if "正念冥想" in query and "什么" in query:
            response_data["response"] = self.quick_templates["心理学知识"].format(
                question="什么是正念冥想",
                answer="正念冥想的基本方法：1)专注呼吸 2)身体扫描 3)观察思绪。建议从5-10分钟开始，找一个安静的地方，专注于当下。"
            )
            response_data["source"] = "quick_template"
            response_data["optimized"] = True
        elif "抑郁症" in query and "治疗" in query:
            response_data["response"] = self.quick_templates["治疗方法"].format(
                question="抑郁症的治疗",
                answer="抑郁症的主要症状包括持续低落情绪、兴趣丧失、疲劳、睡眠问题等。如果症状持续2周以上，建议寻求专业帮助。"
            )
            response_data["source"] = "quick_template"
            response_data["optimized"] = True
        elif "焦虑" in query and "缓解" in query:
            response_data["response"] = self.quick_templates["治疗方法"].format(
                question="缓解焦虑情绪",
                answer="缓解焦虑的方法包括深呼吸、正念冥想、规律运动、充足睡眠和寻求专业帮助。"
            )
            response_data["source"] = "quick_template"
            response_data["optimized"] = True

        if response_data["optimized"]:
            return self._finalize(response_data, start_time)

        # 3. Fallback: simplified (short-prompt) LLM call.
        simplified_prompt = f"作为心理学专家，请用简洁专业的语言回答以下问题：{query}"
        llm_response = await self.llm_service.chat_completion(
            messages=[{"role": "user", "content": simplified_prompt}],
            model="qwen-turbo"
        )
        response_data["response"] = (
            llm_response.choices[0].message.content if llm_response.choices else "未能获取LLM响应。"
        )
        response_data["source"] = "LLM_simplified"
        return self._finalize(response_data, start_time)

    async def get_full_rag_response(self, query: str) -> str:
        """Return the full, unoptimized LLM answer (baseline path).

        Uses the long structured prompt; no preset/template shortcuts.
        """
        prompt = f"""作为心理学专家，请回答以下问题。请提供专业、准确、有帮助的信息。

问题: {query}

请从心理学专业角度回答，包括：
1. 核心概念解释
2. 相关理论背景
3. 实际应用方法
4. 注意事项

请用专业但易懂的语言回答。"""
        llm_response = await self.llm_service.chat_completion(
            messages=[{"role": "user", "content": prompt}],
            model="qwen-turbo"
        )
        return llm_response.choices[0].message.content if llm_response.choices else "未能获取LLM响应。"

    # ==================== Streaming responses ====================

    async def get_streaming_rag_response(self, query: str, user_id: Optional[str] = None) -> AsyncGenerator[Dict, None]:
        """Stream an optimized answer chunk-by-chunk.

        Strategy: preset answer -> quick template -> streaming LLM.
        Each yielded dict has ``chunk``, ``source``, ``is_final``,
        ``duration`` (ms since call start) and ``optimized``.
        """
        start_time = time.time()

        # 1. Preset short answers, replayed with a typing effect.
        preset_result = self._check_preset_answers(query)
        if preset_result:
            async for item in self._stream_words(
                preset_result["response"], preset_result["source"], start_time, 0.05
            ):
                yield item
            return

        # 2. Quick-template answers, replayed with a faster typing effect.
        quick_result = self._get_quick_template_response(query)
        if quick_result:
            async for item in self._stream_words(
                quick_result["response"], quick_result["source"], start_time, 0.03
            ):
                yield item
            return

        # 3. Genuine streaming LLM call.
        async for chunk in self._get_streaming_llm_response(query, start_time):
            yield chunk

    async def get_full_streaming_response(self, query: str, user_id: Optional[str] = None) -> AsyncGenerator[Dict, None]:
        """Stream the detailed ("full") answer for ``query``.

        Prefers the preset "full" text when the query matches; otherwise
        streams a long structured LLM prompt. Errors are reported as a
        final chunk with ``source == "error"`` instead of raising.
        """
        start_time = time.time()

        # Preset "full" answers, replayed with a typing effect.
        answer = self._match_preset(query)
        if answer:
            async for item in self._stream_words(answer["full"], "preset_full", start_time, 0.03):
                yield item
            return

        # Fall back to the detailed structured prompt.
        full_prompt = f"""作为心理学专家，请详细回答：{query}

请提供：
1. 核心概念解释
2. 相关理论背景  
3. 实际应用方法
4. 注意事项

请用专业但易懂的语言回答。"""

        try:
            messages = [{"role": "user", "content": full_prompt}]

            async for chunk in self.llm_service.chat_completion_streaming(messages):
                yield {
                    "chunk": chunk,
                    "source": "full_streaming_llm",
                    "is_final": False,
                    "duration": (time.time() - start_time) * 1000,
                    "optimized": False
                }

            # Explicit end-of-stream marker.
            yield {
                "chunk": "",
                "source": "full_streaming_llm",
                "is_final": True,
                "duration": (time.time() - start_time) * 1000,
                "optimized": False
            }

        except Exception as e:
            yield {
                "chunk": f"抱歉，无法获取详细回答。错误：{str(e)}",
                "source": "error",
                "is_final": True,
                "duration": (time.time() - start_time) * 1000,
                "optimized": False,
                "error": str(e)
            }

    # ==================== Helpers ====================

    def _match_preset(self, query: str) -> Optional[Dict]:
        """Return the preset answer dict whose key matches ``query``, else None.

        Matching is a lowered substring test; the ``split()`` fallback is a
        no-op for the current space-free Chinese keys but keeps any future
        multi-word key working.
        """
        query_lower = query.lower().strip()
        for key, answer in self.preset_answers.items():
            if key in query_lower or any(word in query_lower for word in key.split()):
                return answer
        return None

    def _check_preset_answers(self, query: str) -> Optional[Dict]:
        """Return ``{"response": short_answer, "source": "preset"}`` or None."""
        answer = self._match_preset(query)
        if answer:
            return {
                "response": answer["short"],
                "source": "preset"
            }
        return None

    def _get_quick_template_response(self, query: str) -> Optional[Dict]:
        """Return a keyword-triggered canned response, or None if no match.

        Checks topic keyword groups in priority order against the raw
        (case-sensitive) query.
        """
        if any(word in query for word in ["认知行为", "CBT", "行为疗法"]):
            return {
                "response": "认知行为疗法(CBT)是一种通过改变负面思维模式来改善情绪的心理治疗方法。它结合认知重构和行为改变技术，帮助患者识别和修正不合理的思维模式。",
                "source": "quick_template"
            }

        if any(word in query for word in ["焦虑", "紧张", "担心"]):
            return {
                "response": "缓解焦虑的方法包括：1)深呼吸练习 2)正念冥想 3)规律运动 4)充足睡眠 5)限制咖啡因摄入。建议从简单的深呼吸开始练习。",
                "source": "quick_template"
            }

        if any(word in query for word in ["抑郁", "情绪低落", "心情不好"]):
            return {
                "response": "抑郁症的主要症状包括持续低落情绪、兴趣丧失、疲劳、睡眠问题等。如果症状持续2周以上，建议寻求专业帮助。",
                "source": "quick_template"
            }

        if any(word in query for word in ["正念", "冥想", "静心"]):
            return {
                "response": "正念冥想的基本方法：1)专注呼吸 2)身体扫描 3)观察思绪。建议从5-10分钟开始，找一个安静的地方，专注于当下。",
                "source": "quick_template"
            }

        return None

    @staticmethod
    async def _stream_words(text: str, source: str, start_time: float, delay: float) -> AsyncGenerator[Dict, None]:
        """Yield ``text`` word by word to simulate typing.

        NOTE: ``str.split()`` only breaks on whitespace, so CJK text with
        no spaces streams as a single chunk.

        Args:
            text: full response text to replay.
            source: value for the ``source`` field of each chunk.
            start_time: ``time.time()`` captured at request start.
            delay: per-word sleep in seconds.
        """
        words = text.split()
        for i, word in enumerate(words):
            await asyncio.sleep(delay)  # simulated typing effect
            yield {
                "chunk": word + " ",
                "source": source,
                "is_final": i == len(words) - 1,
                "duration": (time.time() - start_time) * 1000,
                "optimized": True
            }

    async def _get_streaming_llm_response(self, query: str, start_time: float) -> AsyncGenerator[Dict, None]:
        """Stream the LLM answer for ``query`` using the short prompt.

        Yields incremental chunks, then an empty ``is_final`` marker; on
        failure yields a single error chunk instead of raising.
        """
        # Simplified prompt keeps latency low for the optimized path.
        optimized_prompt = f"""请简洁回答：{query}

要求：
1. 回答要专业准确
2. 控制在200字以内
3. 重点突出关键信息
4. 语言通俗易懂

请直接给出答案，不要多余的解释。"""

        try:
            messages = [{"role": "user", "content": optimized_prompt}]

            async for chunk in self.llm_service.chat_completion_streaming(messages):
                yield {
                    "chunk": chunk,
                    "source": "streaming_llm",
                    "is_final": False,
                    "duration": (time.time() - start_time) * 1000,
                    "optimized": True
                }

            # Explicit end-of-stream marker.
            yield {
                "chunk": "",
                "source": "streaming_llm",
                "is_final": True,
                "duration": (time.time() - start_time) * 1000,
                "optimized": True
            }

        except Exception as e:
            yield {
                "chunk": f"抱歉，暂时无法回答您的问题。错误：{str(e)}",
                "source": "error",
                "is_final": True,
                "duration": (time.time() - start_time) * 1000,
                "optimized": False,
                "error": str(e)
            }

    # ==================== Cache support ====================

    @staticmethod
    def _cache_key(query: str, user_id: Optional[str]) -> str:
        """Build a process-stable cache key for ``(user_id, query)``.

        Uses an MD5 digest instead of the built-in ``hash()``: string
        hashing is randomized per interpreter process (PYTHONHASHSEED),
        which would make cached entries unreachable across restarts and
        across workers.
        """
        digest = hashlib.md5(query.encode("utf-8")).hexdigest()
        return f"rag:{user_id}:{digest}"

    async def get_cached_rag_response(self, query: str, user_id: Optional[str] = None) -> Optional[Dict]:
        """Return the cached response dict for ``query``, or None.

        Best-effort: cache backend errors are swallowed so a broken cache
        never breaks answering.
        """
        if not self.enable_cache or not self.cache_service:
            return None

        try:
            cached_result = await self.cache_service.get(self._cache_key(query, user_id))
            if cached_result:
                return json.loads(cached_result)
        except Exception:
            # Deliberate best-effort: fall through to a cache miss.
            pass

        return None

    async def set_cached_rag_response(self, query: str, response: Dict, user_id: Optional[str] = None, expire: int = 3600):
        """Store ``response`` for ``query`` with a TTL (seconds); best-effort."""
        if not self.enable_cache or not self.cache_service:
            return

        try:
            await self.cache_service.set(
                self._cache_key(query, user_id), json.dumps(response), expire=expire
            )
        except Exception:
            # Deliberate best-effort: a failed write is silently ignored.
            pass

    # ==================== Internal utilities / stats ====================

    @staticmethod
    def _finalize(response_data: Dict, start_time: float) -> Dict:
        """Stamp elapsed time (ms) and response length, then return the dict."""
        response_data["duration_ms"] = (time.time() - start_time) * 1000
        response_data["response_length"] = len(response_data["response"])
        return response_data

    def get_performance_stats(self) -> Dict:
        """Return a summary of the configured optimization strategies."""
        return {
            "preset_answers_count": len(self.preset_answers),
            "quick_templates_count": len(self.quick_templates),
            "streaming_enabled": True,
            "cache_enabled": self.enable_cache,
            "optimization_strategies": [
                "预设答案响应",
                "快速模板响应", 
                "流式LLM响应",
                "分层响应机制",
                "缓存支持" if self.enable_cache else "无缓存"
            ]
        }

    def get_preset_answers(self) -> Dict:
        """Return the preset answers mapping (live reference, not a copy)."""
        return self.preset_answers

    def get_quick_templates(self) -> Dict:
        """Return the quick-template mapping (live reference, not a copy)."""
        return self.quick_templates