#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
LLM服务类，用于与MindCare OpenAI服务交互
"""

import os
import json
import logging
from pyexpat import model
from typing import Dict, List, Any, Optional, Union
import asyncio
from openai import OpenAI
from config.settings import Settings
from dotenv import load_dotenv
import re
import time
import string
from collections import Counter
from typing import List, Dict, Tuple
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer

from core.persona import PersonaManager, PersonaRepository
from core.memory import MemoryStore
from core.llm import LLMClient
from core.prompts import PromptEngineer
from core.knowledge import KnowledgeManager, VectorStore
from core.models import ChatRequest, ChatResponse

from copy import deepcopy

from langchain_core.messages import HumanMessage, AIMessage
from langchain_core.chat_history import InMemoryChatMessageHistory

load_dotenv()
logger = logging.getLogger("AI-MindCare-System-LLM")
import inspect
import logging
import sys


import requests
import json


def generate_response_from_doubao(payload, api_url=None, api_key=None):
    """Call the Doubao chat-completions endpoint and return the reply text.

    Args:
        payload: JSON-serializable request body (model, messages, ...).
        api_url: Endpoint URL. Defaults to the module global
            ``doubao_api_url`` if it exists, else the DOUBAO_API_URL env var.
        api_key: Bearer token. Defaults to the module global
            ``doubao_api_key`` if it exists, else the DOUBAO_API_KEY env var.

    Returns:
        The ``content`` string of the first choice in the JSON response.

    Raises:
        requests.HTTPError: if the endpoint returns an error status code.
        KeyError: if the response body lacks the expected choices structure.
    """
    # BUG FIX: the original read module globals `doubao_api_url` /
    # `doubao_api_key` that are never defined in this file, raising
    # NameError at call time. Fall back to env vars when absent.
    if api_url is None:
        api_url = globals().get("doubao_api_url") or os.getenv("DOUBAO_API_URL", "")
    if api_key is None:
        api_key = globals().get("doubao_api_key") or os.getenv("DOUBAO_API_KEY", "")

    headers = {
        'Authorization': f'Bearer {api_key}',
        'Content-Type': 'application/json'
    }
    # json= lets requests serialize the body; a timeout prevents a hung
    # endpoint from blocking the caller forever.
    response = requests.post(api_url, headers=headers, json=payload, timeout=60)
    response.raise_for_status()
    # Parse the body once instead of calling .json() twice.
    body = response.json()
    logger.info(body)
    return body["choices"][0]["message"]["content"]


class LLMService:
    """LLM服务类，处理与Azure OpenAI的交互"""

    def __init__(
        self,
        settings: Optional[Settings] = None,
        default_model_key: str = "deepseek-ai/DeepSeek-V3",
    ):
        """Initialize the LLM service and its collaborators.

        Args:
            settings: System configuration; a fresh Settings() is created
                when omitted.
            default_model_key: Key into Settings.MODEL_CONFIGS selecting
                the default model configuration.

        Raises:
            ValueError: if ``default_model_key`` has no entry in
                Settings.MODEL_CONFIGS.
        """
        if settings is None:
            settings = Settings()

        self.settings = settings
        self.llm_config = settings.llm_config
        # Dict of keyword arguments for LLMClient. Fail fast with a clear
        # message instead of the confusing "LLMClient(**None)" TypeError
        # the original raised when the key was unknown.
        self.default_model_config = self.settings.MODEL_CONFIGS.get(
            default_model_key)
        if self.default_model_config is None:
            raise ValueError(
                f"未知的模型配置键: {default_model_key!r}（不在 Settings.MODEL_CONFIGS 中）")

        # The MindCare OpenAI client is created lazily on first use
        # (see generate_response), not here.
        # self.client = self._initialize_mindcare_client()

        self.repo = PersonaRepository()
        self.person_manager = PersonaManager(self.repo)
        self.vector_store = VectorStore()
        self.knowledge_manager = KnowledgeManager(self.vector_store)
        self.llm_engineer = PromptEngineer(self.person_manager)
        self.llm_client = LLMClient(**self.default_model_config)
        logger.info(
            f"LLMService初始化完成，使用默认模型：{self.default_model_config.get('model_name', '未指定')}"
        )
        logger.info(f"LLMService默认模型配置：{self.llm_client.model}")

    def _initialize_mindcare_client(self) -> OpenAI:
        """Create and return an OpenAI client for the MindCare endpoint.

        Reads MINDCARE_OPENAI_API_KEY / MINDCARE_OPENAI_ENDPOINT from the
        environment, prepends "https://" to a scheme-less endpoint, and
        logs a partially masked key for diagnostics.

        Returns:
            A configured OpenAI client instance.

        Raises:
            Exception: re-raises whatever the OpenAI constructor raised.
        """
        try:
            api_key = os.getenv("MINDCARE_OPENAI_API_KEY")
            endpoint = os.getenv("MINDCARE_OPENAI_ENDPOINT")

            # Normalize a scheme-less endpoint URL.
            if endpoint and not endpoint.startswith(("http://", "https://")):
                logger.warning(
                    f"MINDCARE OpenAI endpoint URL '{endpoint}' 缺少协议前缀，自动添加'https://'"
                )
                endpoint = "https://" + endpoint
                self.llm_config["endpoint"] = endpoint

            # Log initialization details without exposing the full key.
            logger.info(f"正在初始化Mindcare OpenAI客户端")
            logger.info(f"端点URL：{endpoint}")
            if not api_key:
                logger.warning("未提供API密钥")
            else:
                if len(api_key) > 8:
                    masked = api_key[:4] + "***" + api_key[-4:]
                else:
                    masked = "***"
                logger.info(f"API密钥（部分掩码）：{masked}")

            client = OpenAI(
                api_key=api_key,
                base_url=endpoint,
            )
            logger.info("OpenAI客户端初始化成功")
            return client
        except Exception as exc:
            logger.error(f"OpenAI客户端初始化失败: {str(exc)}")
            logger.error(f"错误类型: {type(exc).__name__}")
            if hasattr(exc, "__traceback__"):
                import traceback

                logger.error(f"错误详情: {traceback.format_exc()}")
            raise

    def parse_message_id(self, msg_id: str) -> int:
        """解析 message_id，返回其数字部分"""
        return int(msg_id.split('_')[-1])

    def convert_to_langchain_format(self, history: list):
        """Convert an unordered conversation history into a linear LangChain history.

        Args:
            history: list of message dicts, each with 'message_id',
                'parent_message_id', 'role' ('user' or 'ai') and 'content'.
                The list is deep-copied; the caller's data is not mutated.

        Returns:
            InMemoryChatMessageHistory with messages appended in ascending
            message-id order.
        """

        chat_history = InMemoryChatMessageHistory()
        history = deepcopy(history)

        # 1. Sort ascending by the numeric suffix of message_id.
        sorted_history = sorted(
            history, key=lambda x: self.parse_message_id(x['message_id']))

        # 2. Map message_id -> message for parent-validity lookups.
        message_map = {msg['message_id']: msg for msg in sorted_history}

        # 3. Repair parent_message_id links.
        # NOTE(review): prev_msg_id only advances on messages whose parent is
        # valid, and the rewritten parent ids are never read after this loop
        # (only the add_*_message calls below matter) — confirm this
        # relinking is intentional rather than leftover logic.
        prev_msg_id = None
        for msg in sorted_history:
            parent_id = msg['parent_message_id']
            if parent_id is None or parent_id not in message_map:
                # Invalid parent: point at the previous well-linked message.
                if prev_msg_id is not None:
                    msg['parent_message_id'] = prev_msg_id
            else:
                prev_msg_id = msg['message_id']  # track last well-linked message

            # 4. Append to the LangChain history (roles other than
            # 'ai'/'user' are silently dropped).
            if msg['role'] == 'ai':
                chat_history.add_ai_message(msg['content'])
            elif msg['role'] == 'user':
                chat_history.add_user_message(msg['content'])

        return chat_history

    def generate_response_with_rag(
            self,
            prompt: str,
            system_message: str = "你是一个专业的AI助手，提供准确、专业的咨询建议。",
            model_key: str = "Qwen2.5-VL-72B-instruct",
            max_tokens: int = 500,
            temperature: float = 0.5,
            persona: str = 'therapist',
            user_buffer: Optional[List] = None,
            long_term_memory_str: str = "",
            picture: str = "",
            ) -> str:
        """Generate a RAG-augmented LLM response (non-streaming).

        Args:
            prompt: User prompt text.
            system_message: Fallback system message; it is rebuilt from the
                prompt engineer below, so this argument is effectively unused.
            model_key: Model identifier passed to the LLM client.
            max_tokens: Maximum number of tokens to generate.
            temperature: Sampling temperature (creativity).
            persona: Persona key the persona manager is switched to.
            user_buffer: Unordered conversation history (see
                convert_to_langchain_format); defaults to empty.
            long_term_memory_str: Long-term memory text injected into the
                system message.
            picture: Optional base64-encoded JPEG attached as an image_url.

        Returns:
            The generated response text, or a fixed apology string on error.
        """
        # Avoid the mutable-default-argument pitfall of the original
        # signature (user_buffer: List = []).
        if user_buffer is None:
            user_buffer = []

        # Defined before the try so the except block can always compute
        # the elapsed time (the original probed locals() for it).
        start_time = time.time()
        try:
            # Switch persona; a failure here should not abort the request.
            if persona:
                try:
                    self.person_manager.switch_persona(persona)
                except Exception as persona_error:
                    # BUG FIX: the original referenced an undefined
                    # `error_str` here, raising NameError instead of
                    # logging the persona failure.
                    logger.error(f"切换人格失败: {persona_error}")

            # Kept for parity with the original; the value itself is unused.
            current_persona = self.person_manager.current_persona
            self.llm_client.change_model(model=model_key, temperature=temperature, max_tokens=max_tokens)
            self.llm_client.change_streaming(False)

            # Load conversation history.
            history = self.convert_to_langchain_format(user_buffer)
            logger.info(f"历史对话{history}")

            # Retrieve related knowledge for RAG.
            rag_context = self.knowledge_manager.retrieve_context(prompt)

            # Build the system message and the full prompt template.
            system_message = self.llm_engineer.build_system_message(long_term_memory_str)
            prompt_template = self.llm_engineer.build_full_prompt(rag_context)

            # Create the conversation chain.
            chain = self.llm_client.create_chain(prompt_template)
            logger.info(f"图片：{picture}")
            if picture:
                input_prompt = HumanMessage(
                    content=[
                        {"type": "text", "text": prompt},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{picture}"
                            }
                        }
                    ]
                )
            else:
                input_prompt = HumanMessage(
                    content=[
                        {"type": "text", "text": prompt}
                    ]
                )

            chain_input = {
                "system_message": system_message,
                "history": history.messages,
                "input": input_prompt
            }

            # Generate the response.
            response = chain.invoke(chain_input)
            # BUG FIX: the original called logger.info('响应', response),
            # passing response as a lazy %-argument with no placeholder.
            logger.info(f"响应: {response}")

            elapsed_time = time.time() - start_time
            logger.info(f"LLM API调用成功，耗时：{elapsed_time:.2f}秒")
            content = response
            logger.info(
                f"响应内容（前50个字符）：{content[:50]}{'...' if len(content) > 50 else ''}"
            )

            return content
        except Exception as e:
            elapsed_time = time.time() - start_time
            logger.error(f"LLM响应生成失败: {str(e)}，耗时：{elapsed_time:.2f}秒")
            logger.error(f"错误类型: {type(e).__name__}")
            if hasattr(e, "response"):
                try:
                    logger.error(
                        f"API错误响应: {e.response.text if hasattr(e.response, 'text') else e.response}"
                    )
                    logger.error(
                        f"API错误状态码: {e.response.status_code if hasattr(e.response, 'status_code') else 'Unknown'}"
                    )
                except Exception:
                    pass

            # Network errors get actionable hints.
            error_str = str(e).lower()
            if "connection" in error_str or "timeout" in error_str:
                logger.error("检测到网络连接问题，请检查您的网络连接和防火墙设置")
                if "proxy" in error_str:
                    logger.error("可能与代理设置有关，请检查您的代理配置")
            logger.error(f"错误详情: {str(e)}")
            return "很抱歉，我无法处理您的请求"
        
    def generate_response_with_rag_stream(
            self,
            prompt: str,
            system_message: str = "你是一个专业的AI助手，提供准确、专业的咨询建议。",
            model_key: str = "Qwen2.5-VL-72B-instruct",
            max_tokens: int = 500,
            temperature: float = 0.5,
            persona: str = 'therapist',
            user_buffer: Optional[List] = None,
            long_term_memory_str: str = "",
            picture: str = "",
            ):
        """Stream a RAG-augmented LLM response.

        Generator variant of generate_response_with_rag (the original's
        ``-> str`` annotation was misleading for a generator and was
        dropped).

        Args:
            prompt: User prompt text.
            system_message: Fallback system message; rebuilt from the
                prompt engineer below, so effectively unused.
            model_key: Model identifier passed to the LLM client.
            max_tokens: Maximum number of tokens to generate.
            temperature: Sampling temperature (creativity).
            persona: Persona key the persona manager is switched to.
            user_buffer: Unordered conversation history; defaults to empty.
            long_term_memory_str: Long-term memory text injected into the
                system message.
            picture: Optional base64-encoded JPEG attached as an image_url.

        Yields:
            str: successive chunks of the generated response; on failure a
            single apology string is yielded instead.
        """
        # Avoid the mutable-default-argument pitfall of the original
        # signature (user_buffer: List = []).
        if user_buffer is None:
            user_buffer = []

        # Defined before the try so the except block can always compute
        # the elapsed time (the original probed locals() for it).
        start_time = time.time()
        try:
            # Switch persona; a failure here should not abort the request.
            if persona:
                try:
                    self.person_manager.switch_persona(persona)
                except Exception as persona_error:
                    # BUG FIX: the original referenced an undefined
                    # `error_str` here, raising NameError instead of
                    # logging the persona failure.
                    logger.error(f"切换人格失败: {persona_error}")

            # Kept for parity with the original; the value itself is unused.
            current_persona = self.person_manager.current_persona
            self.llm_client.change_model(model=model_key, temperature=temperature, max_tokens=max_tokens)
            self.llm_client.change_streaming(True)

            # Load conversation history.
            history = self.convert_to_langchain_format(user_buffer)
            logger.info(f"历史对话{history}")

            # Retrieve related knowledge for RAG.
            rag_context = self.knowledge_manager.retrieve_context(prompt)

            # Build the system message and the full prompt template.
            system_message = self.llm_engineer.build_system_message(long_term_memory_str)
            prompt_template = self.llm_engineer.build_full_prompt(rag_context)

            # Create the conversation chain.
            chain = self.llm_client.create_chain(prompt_template)
            logger.info(f"图片：{picture}")
            if picture:
                input_prompt = HumanMessage(
                    content=[
                        {"type": "text", "text": prompt},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{picture}"
                            }
                        }
                    ]
                )
            else:
                input_prompt = HumanMessage(
                    content=[
                        {"type": "text", "text": prompt}
                    ]
                )

            chain_input = {
                "system_message": system_message,
                "history": history.messages,
                "input": input_prompt
            }

            # Stream the response, accumulating the full text for logging.
            response = ""
            for chunk in chain.stream(chain_input):
                response += chunk
                yield chunk

            # BUG FIXES: the original called logger.info('响应', response)
            # (stray %-argument, no placeholder) and then yielded a final
            # empty string via `content = ""` — both corrected here.
            logger.info(f"响应: {response}")
            elapsed_time = time.time() - start_time
            logger.info(f"LLM API调用成功，耗时：{elapsed_time:.2f}秒")
        except Exception as e:
            elapsed_time = time.time() - start_time
            logger.error(f"LLM响应生成失败: {str(e)}，耗时：{elapsed_time:.2f}秒")
            logger.error(f"错误类型: {type(e).__name__}")
            if hasattr(e, "response"):
                try:
                    logger.error(
                        f"API错误响应: {e.response.text if hasattr(e.response, 'text') else e.response}"
                    )
                    logger.error(
                        f"API错误状态码: {e.response.status_code if hasattr(e.response, 'status_code') else 'Unknown'}"
                    )
                except Exception:
                    pass

            # Network errors get actionable hints.
            error_str = str(e).lower()
            if "connection" in error_str or "timeout" in error_str:
                logger.error("检测到网络连接问题，请检查您的网络连接和防火墙设置")
                if "proxy" in error_str:
                    logger.error("可能与代理设置有关，请检查您的代理配置")
            logger.error(f"错误详情: {str(e)}")
            yield "很抱歉，我无法处理您的请求"

    def generate_response(
        self,
        prompt: str,
        system_message: str = "你是一个专业的AI助手，提供准确、专业的咨询建议。",
        model_key: str = "deepseek-ai/DeepSeek-V3",
        max_tokens: int = 500,
        temperature: float = 0.5,
    ) -> str:
        """Generate a plain (non-RAG) LLM response via the OpenAI-compatible API.

        Args:
            prompt: User prompt text.
            system_message: System message sent ahead of the prompt.
            model_key: Key into Settings.MODEL_CONFIGS; falls back to the
                default model config when the key is unknown.
            max_tokens: Maximum tokens; config default used when None.
            temperature: Sampling temperature; config default used when None.

        Returns:
            The generated text, or an apology message containing the error.
        """
        # Defined before the try so the except block can always compute
        # the elapsed time (the original probed locals() for it).
        start_time = time.time()
        try:
            # BUG FIX: __init__ leaves self.client unset (its creation is
            # commented out there), so the original raised AttributeError
            # on first use. Create and cache the client lazily instead.
            if getattr(self, "client", None) is None:
                self.client = self._initialize_mindcare_client()

            model_config = self.default_model_config
            if model_key:
                model_config = self.settings.MODEL_CONFIGS.get(
                    model_key, self.default_model_config)

            model_name = model_config.get("model")

            # Fall back to configured limits when the caller passes None.
            max_tokens = (max_tokens if max_tokens is not None else
                          model_config.get("max_tokens", 500))
            temperature = (temperature if temperature is not None else
                           model_config.get("temperature", 0.5))

            logger.info(
                f"正在调用LLM API，模型：{model_name}，最大令牌：{max_tokens}，温度：{temperature}"
            )
            logger.info(
                f"系统消息：{system_message[:50]}{'...' if len(system_message) > 50 else ''}"
            )
            logger.info(
                f"提示词（前50个字符）：{prompt[:50]}{'...' if len(prompt) > 50 else ''}"
            )

            # Actual API call (time is imported at module level; the
            # original re-imported it here redundantly).
            response = self.client.chat.completions.create(
                model=model_name,
                messages=[
                    {
                        "role": "system",
                        "content": system_message
                    },
                    {
                        "role": "user",
                        "content": prompt
                    },
                ],
                max_tokens=max_tokens,
                temperature=temperature,
            )

            # Log timing and a preview of the content.
            elapsed_time = time.time() - start_time
            logger.info(f"LLM API调用成功，耗时：{elapsed_time:.2f}秒")
            content = response.choices[0].message.content
            logger.info(
                f"响应内容（前50个字符）：{content[:50]}{'...' if len(content) > 50 else ''}"
            )

            return content
        except Exception as e:
            elapsed_time = time.time() - start_time
            logger.error(f"LLM响应生成失败: {str(e)}，耗时：{elapsed_time:.2f}秒")
            logger.error(f"错误类型: {type(e).__name__}")
            if hasattr(e, "response"):
                try:
                    logger.error(
                        f"API错误响应: {e.response.text if hasattr(e.response, 'text') else e.response}"
                    )
                    logger.error(
                        f"API错误状态码: {e.response.status_code if hasattr(e.response, 'status_code') else 'Unknown'}"
                    )
                except Exception:
                    pass
            logger.error(f"错误详情: {str(e)}")

            # Network errors get actionable hints.
            error_str = str(e).lower()
            if "connection" in error_str or "timeout" in error_str:
                logger.error("检测到网络连接问题，请检查您的网络连接和防火墙设置")
                if "proxy" in error_str:
                    logger.error("可能与代理设置有关，请检查您的代理配置")

            return f"很抱歉，我无法处理您的请求: {str(e)}"

    async def generate_response_async(
        self,
        prompt: str,
        system_message: str = "你是一个专业的 AI 助手，提供准确、专业的咨询建议。",
        model_key: str = "deepseek-ai/DeepSeek-V3",
        max_tokens: int = 500,
        temperature: float = 0.5,
    ) -> str:
        """
        异步生成LLM响应

        Args:
            prompt: 用户提示
            system_message: 系统消息
            max_tokens: 最大令牌数
            temperature: 温度参数(创造性)

        Returns:
            LLM生成的响应文本
        """
        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(
            None,
            lambda: self.generate_response(
                prompt=prompt,
                system_message=system_message,
                model_key=model_key,
                max_tokens=max_tokens,
                temperature=temperature,
            ),
        )

#######################################################################################################
## 方案一：基于关键词提取的简易主题生成器
#######################################################################################################

    def _initialize_topic_generation_resources(self) -> None:
        """Set up the NLP resources used for keyword-based topic generation.

        Downloads any missing NLTK data packages on demand, then prepares
        the stop-word set (English plus a small Chinese list) and the
        lemmatizer used by _preprocess_text.
        """
        # Fetch each NLTK resource only when it is not already installed.
        for locator, package in (
            ('tokenizers/punkt', 'punkt'),
            ('corpora/stopwords', 'stopwords'),
            ('corpora/wordnet', 'wordnet'),
        ):
            try:
                nltk.data.find(locator)
            except LookupError:
                nltk.download(package, quiet=True)

        # English stop words, extended with common Chinese function words.
        self.stop_words = set(stopwords.words('english'))
        self.stop_words.update({
            '的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一',
            '一个', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有',
            '看', '好', '自己', '这'
        })
        self.lemmatizer = WordNetLemmatizer()

    def _preprocess_text(self, text: str) -> List[str]:
        """Tokenize text for topic extraction.

        Lowercases the input, strips ASCII punctuation, tokenizes with
        NLTK, drops stop words and lemmatizes the remaining tokens.
        """
        # Lowercase and delete punctuation in a single translate pass.
        normalized = text.lower().translate(
            str.maketrans('', '', string.punctuation))
        lemmatize = self.lemmatizer.lemmatize
        return [
            lemmatize(tok)
            for tok in word_tokenize(normalized)
            if tok not in self.stop_words
        ]

    def _extract_keywords(self,
                          tokens: List[str],
                          top_n: int = 5) -> List[Tuple[str, int]]:
        """提取关键词及其频率"""
        counter = Counter(tokens)
        return counter.most_common(top_n)

    def _generate_topic_from_keywords(self,
                                      keywords: List[Tuple[str, int]],
                                      max_length: int = 20) -> str:
        """Assemble a topic string from (keyword, frequency) pairs.

        Args:
            keywords: (token, count) pairs, e.g. from _extract_keywords.
            max_length: Maximum topic length in characters.

        Returns:
            A short phrase built from the keywords, or "未识别主题" when no
            keyword longer than two characters is available.
        """
        # Order keywords by frequency, most frequent first.
        sorted_keywords = sorted(keywords, key=lambda x: x[1], reverse=True)

        # Combine keywords into a topic, dropping very short tokens.
        topic_words = [kw[0] for kw in sorted_keywords
                       if len(kw[0]) > 2]  # filter out short words

        # Nothing usable survived the filter.
        if not topic_words:
            return "未识别主题"

        # Try to form a meaningful phrase.
        if len(topic_words) > 1:
            # Prefer a subject-verb structure when a verb-like word exists.
            verbs = [
                'is', 'are', 'was', 'were', 'be', 'am', 'have', 'has', 'do',
                'does', 'did'
            ]
            verb_found = False
            for i, word in enumerate(topic_words):
                if word in verbs or word.endswith('ing'):
                    # NOTE(review): when the verb is first or last, this
                    # leaves a leading/trailing space in the phrase —
                    # confirm that spacing is intentional.
                    topic = f"{', '.join(topic_words[:i])} {word} {', '.join(topic_words[i+1:])}"
                    verb_found = True
                    break

            if not verb_found:
                # Default: join the keywords with a conjunction.
                if len(topic_words) > 2:
                    topic = f"{', '.join(topic_words[:-1])} and {topic_words[-1]}"
                else:
                    topic = f"{topic_words[0]} and {topic_words[1]}"
        else:
            topic = topic_words[0]

        # Cap the topic length.
        # NOTE(review): the cap is checked in characters but truncation keeps
        # max_length // 2 whitespace-separated words — verify this mixed
        # unit is intentional.
        if len(topic) > max_length:
            topic = " ".join(topic.split()[:max_length // 2]) + "..."

        return topic

    def generate_topic(self,
                       conversation: List[Dict[str, str]],
                       min_words: int = 10) -> str:
        """
        根据对话历史生成主题
        
        Args:
            conversation: 对话历史，格式为[{"role": "user", "content": "消息内容"}, ...]
            min_words: 生成主题所需的最小词数
            
        Returns:
            生成的主题字符串
        """
        # 提取对话中的所有文本
        all_text = " ".join(
            [msg["content"] for msg in conversation if msg.get("content")])

        # 检查是否有足够的文本
        if len(all_text.split()) < min_words:
            return "简短对话"

        # 预处理文本
        tokens = self._preprocess_text(all_text)

        # 提取关键词
        keywords = self._extract_keywords(tokens, top_n=5)

        # 生成主题
        return self._generate_topic_from_keywords(keywords)

#######################################################################################################
## 方案二：基于 LLM 的增强主题生成器
#######################################################################################################

    def generate_topic_with_llm(self,
                                conversation: List[Dict[str, str]],
                                max_length: int = 10) -> str:
        """
        使用LLM生成对话主题（更准确但需要额外API调用）
        
        Args:
            conversation: 对话历史，格式为[{"role": "user", "content": "消息内容"}, ...]
            max_length: 主题最大长度
            
        Returns:
            生成的主题字符串
        """
        if not conversation:
            return "无对话内容"

        # 构建主题生成提示词
        prompt = f"""
请为以下对话生成一个简洁的主题（不超过{max_length}个字符）：

对话内容:
{json.dumps(conversation, ensure_ascii=False, indent=2)}

请直接提供主题，无需额外解释。
"""

        # 使用系统消息指导LLM生成合适的主题
        system_message = "你是一个专业的对话摘要专家，擅长提炼对话的核心主题。"

        try:
            # 调用LLM生成主题
            topic = self.generate_response(
                prompt=prompt,
                system_message=system_message,
                model_key="deepseek-ai/DeepSeek-V3",  # 可配置专用模型
                max_tokens=50,
                temperature=0.3  # 低温度以保持确定性
            )

            # 清理生成的主题
            topic = topic.strip()
            # 移除可能的引号或其他多余字符
            if topic.startswith(('"', "'")) and topic.endswith(('"', "'")):
                topic = topic[1:-1]

            return topic
        except Exception as e:
            logger.error(f"主题生成失败: {e}")
            # 回退到基于关键词的方法
            return self.generate_topic(conversation)


#######################################################################################################
## 方案三：混合方法（推荐）
# 结合关键词提取和 LLM 的优势，先使用关键词生成候选主题，再用 LLM 优化
#######################################################################################################

    def generate_smart_topic(self,
                             conversation: List[Dict[str, str]],
                             max_length: int = 30) -> str:
        """
        智能生成对话主题（结合关键词提取和LLM优化）
        
        Args:
            conversation: 对话历史，格式为[{"role": "user", "content": "消息内容"}, ...]
            max_length: 主题最大长度
            
        Returns:
            生成的主题字符串
        """
        # 1. 使用关键词提取生成基础主题
        base_topic = self.generate_topic(conversation)

        # 2. 检查是否需要LLM优化
        if len(base_topic) <= max_length or len(conversation) < 3:
            return base_topic

        # 3. 使用LLM优化主题
        prompt = f"""
请优化以下对话主题，使其更简洁且能准确反映对话核心（不超过{max_length}个字符）：

原始主题: "{base_topic}"

对话内容:
{json.dumps(conversation, ensure_ascii=False, indent=2)}

请直接提供优化后的主题，无需额外解释。
"""

        try:
            optimized_topic = self.generate_response(
                prompt=prompt,
                system_message="你是一个专业的对话摘要专家，擅长提炼对话的核心主题。",
                max_tokens=50,
                temperature=0.3)

            # 清理生成的主题
            optimized_topic = optimized_topic.strip()
            if optimized_topic.startswith(
                ('"', "'")) and optimized_topic.endswith(('"', "'")):
                optimized_topic = optimized_topic[1:-1]

            return optimized_topic if len(
                optimized_topic) <= max_length else base_topic
        except Exception as e:
            logger.error(f"主题优化失败: {e}")
            return base_topic
