from typing import Any, Optional, List, Dict, Tuple
import redis
from langchain.chains.conversation.base import ConversationChain
from langchain_core.callbacks import StreamingStdOutCallbackHandler
from langchain_core.output_parsers import StrOutputParser
from langchain_core.vectorstores import VectorStoreRetriever
from langchain_redis import RedisChatMessageHistory
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
from langchain_core.prompts import MessagesPlaceholder, ChatPromptTemplate
from langchain_openai import ChatOpenAI
from operator import itemgetter
import os
import json
import hashlib
import re
from datetime import datetime
from RAG.RAGBuild import RAGBuild, format_retriever, get_index_name, logger
from langchain_core.documents import Document
from langchain_core.runnables import RunnableParallel, RunnableLambda

# Main answering prompt.
# BUG FIX: the system text must be a ("system", template) tuple — a
# SystemMessage *instance* is treated by ChatPromptTemplate as a literal
# message, so the {knowledge} placeholder was never substituted and the
# retrieved context never reached the model.
ENHANCED_PROMPT_TEMPLATE = ChatPromptTemplate.from_messages([
    ("system", (
        '你是一名经验丰富的RAG系统开发人员，能够根据用户的问题以及知识手册里面的内容:{knowledge}来给用户一个'
        '满意的答复，如果说用户的问题和知识手册上查询到的内容完全不匹配，那么就不要以知识'
        '手册的方式回复，采用你自己的知识进行回复，但是要在开头添加一个提示："以下内容并不来自知识库，请以自己的知识为准"'
        ',如果你是根据知识库内容回答的在开头给出提示:以下内容根据知识库回答'
    )),
    MessagesPlaceholder(variable_name="history"),
    ("human", "{input}"),
])

# Query-expansion prompt: asks a cheap LLM for 3 rephrasings of the user's
# query, expected back as a JSON list of strings (parsed in expand_query).
# The SystemMessage here contains no template variables, so passing a
# literal message object (rather than a ("system", ...) tuple) is fine.
QUERY_EXPANSION_PROMPT = ChatPromptTemplate.from_messages([
    SystemMessage(content=(
        "你是一个查询优化助手，需要帮助用户扩展和优化查询语句。\n"
        "根据原始查询生成3个相关的变体查询，保持专业性和相关性。\n"
        "输出格式为JSON列表，例如：[\"查询1\", \"查询2\", \"查询3\"]"
    )),
    ("human", "原始查询: {query}"),
])


class EnhancedStreamHandler(StreamingStdOutCallbackHandler):
    """Stream tokens to stdout as they arrive and keep the full response.

    BUG FIXES vs. the original:
    - Tokens were only printed in ``on_llm_end`` (all at once), defeating
      streaming; now each token is printed immediately.
    - ``get_accumulated_text`` returned ``""`` after ``on_llm_end`` cleared
      the buffer; the full text is now kept in a separate accumulator so it
      remains available after the generation finishes.
    """

    def __init__(self):
        super().__init__()
        self.buffer: List[str] = []       # tokens of the in-flight generation
        self._full_text: List[str] = []   # every token, kept after on_llm_end

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        # Emit immediately for true streaming output.
        print(token, end='', flush=True)
        self.buffer.append(token)
        self._full_text.append(token)

    def on_llm_end(self, response, **kwargs: Any) -> None:
        # Reset the per-generation buffer but keep the accumulated text.
        self.buffer = []
        print()

    def get_accumulated_text(self) -> str:
        """Return the complete generated text (valid even after on_llm_end)."""
        return ''.join(self._full_text)


class RAGChatter:
    """Conversational RAG service with Redis-backed history and answer cache.

    Per question: check the answer cache (exact key, then semantic
    similarity via RAGBuild) -> hybrid retrieval over the vector store ->
    prompt the chat LLM with conversation history -> validate the answer ->
    persist it to history and cache.
    """

    def __init__(self, api_base_url: str, api_key: str, llm_name: str = "qwen-plus",
                 temperature: float = 0.2, streaming: bool = False,
                 rag_build: RAGBuild = None, redis_url: str = "redis://localhost:6379/0",
                 expansion_model: str = "qwen-turbo"):
        """Configure models and Redis connectivity.

        Args:
            api_base_url: OpenAI-compatible endpoint base URL.
            api_key: API key for that endpoint.
            llm_name: main chat model name.
            temperature: sampling temperature of the main model.
            streaming: if True, stream tokens to stdout during chat().
            rag_build: RAGBuild helper providing the similarity cache;
                caching is disabled when None.
            redis_url: Redis URL used for both history and the answer cache.
            expansion_model: model used for query expansion (new parameter;
                defaults to the previously hard-coded "qwen-turbo").
        """
        self.api_key = api_key
        self.api_base_url = api_base_url
        self.llm_name = llm_name
        self.temperature = temperature
        self.streaming = streaming
        self.redis_url = redis_url
        self.rag_build = rag_build

        # Single shared pool; get_redis_connection() hands out clients on it.
        self.redis_pool = redis.ConnectionPool.from_url(redis_url)
        self.llm = self._create_llm()
        # Low temperature: expansions should stay close to the original query.
        self.query_expansion_llm = ChatOpenAI(
            openai_api_key=self.api_key,
            openai_api_base=self.api_base_url,
            model_name=expansion_model,
            temperature=0.1
        )

    def hybrid_retrieval(self, retriever: VectorStoreRetriever, query: str,
                         k: int = 3) -> List[Document]:
        """Retrieve using the original query plus LLM-generated variants.

        Results from all queries are deduplicated by an MD5 hash of the page
        content, keeping the copy with the highest metadata "score"; the
        top-k documents by score are returned.

        Args:
            retriever: vector-store retriever to query.
            query: the user's original question.
            k: maximum number of documents to return.
        """
        unique_docs: Dict[str, Document] = {}

        for q in self.expand_query(query):
            try:
                # Runnable API; get_relevant_documents() is deprecated.
                docs = retriever.invoke(q)
            except Exception as e:
                logger.warning(f"检索失败 {q}: {str(e)}")
                continue
            for doc in docs:
                # Dedupe by content hash; on collision keep the higher score.
                # (The original condition re-tested membership redundantly.)
                doc_hash = hashlib.md5(doc.page_content.encode()).hexdigest()
                best = unique_docs.get(doc_hash)
                if best is None or doc.metadata.get("score", 0) > best.metadata.get("score", 0):
                    unique_docs[doc_hash] = doc

        ranked = sorted(unique_docs.values(),
                        key=lambda d: d.metadata.get("score", 0),
                        reverse=True)
        return ranked[:k]

    def expand_query(self, original_query: str) -> List[str]:
        """Generate up to 3 query variants with the expansion LLM.

        Returns the original query first, followed by the variants; falls
        back to ``[original_query]`` if expansion fails or the model output
        cannot be parsed as a JSON list of strings.
        """
        try:
            chain = QUERY_EXPANSION_PROMPT | self.query_expansion_llm | StrOutputParser()
            expanded = chain.invoke({"query": original_query})

            # Models often wrap JSON in markdown fences or prose — extract
            # the first JSON-array-looking span before parsing.
            match = re.search(r'\[.*\]', expanded, re.DOTALL)
            payload = match.group(0) if match else expanded
            try:
                expanded_queries = json.loads(payload)
                if isinstance(expanded_queries, list) and expanded_queries:
                    # Keep only string variants, capped at 3.
                    variants = [q for q in expanded_queries if isinstance(q, str)][:3]
                    if variants:
                        return [original_query] + variants
            except json.JSONDecodeError:
                logger.warning(f"查询扩展返回非JSON格式: {expanded}")

        except Exception as e:
            logger.error(f"查询扩展失败: {str(e)}")

        return [original_query]  # fall back to the plain query on any failure

    def get_redis_connection(self) -> redis.Redis:
        """Return a Redis client on the shared connection pool."""
        return redis.Redis(connection_pool=self.redis_pool)

    def _create_llm(self) -> ChatOpenAI:
        """Build the main chat model from the configured settings."""
        return ChatOpenAI(
            openai_api_key=self.api_key,
            openai_api_base=self.api_base_url,
            model_name=self.llm_name,
            temperature=self.temperature,
            streaming=self.streaming,
            model_kwargs={
                "top_p": 0.8,
                "max_tokens": 2000,
                "presence_penalty": 0.2
            }
        )

    def _get_cache_key(self, user_id: int, question: str) -> str:
        """Deterministic per-user cache key: qa_cache:<user>:<md5(question)>."""
        question_hash = hashlib.md5(question.encode()).hexdigest()
        return f"qa_cache:{user_id}:{question_hash}"

    def _get_history_key(self, user_id: int, chat_id: int) -> str:
        """Session id used by RedisChatMessageHistory: '<user>:<chat>'."""
        return f"{user_id}:{chat_id}"

    def check_cache(self, user_id: int, question: str) -> Tuple[Optional[str], float]:
        """Look up a cached answer for ``question``.

        Checks the exact-match key first, then RAGBuild's semantic
        similarity cache.

        Returns:
            (answer, similarity); (None, 0.0) when no cache is usable.
            Exact hits report similarity 1.0.
        """
        if not self.rag_build:
            return None, 0.0

        # 1. Exact-match cache.
        cache_key = self._get_cache_key(user_id, question)
        r = self.get_redis_connection()

        try:
            cached_data = None
            try:
                # RedisJSON path. BUG FIX: the original gated this on
                # hasattr(r, 'json'), which is always true on modern
                # redis-py, so the plain-string fallback never ran when the
                # server lacks the RedisJSON module.
                cached_data = r.json().get(cache_key)
            except Exception:
                raw = r.get(cache_key)
                if raw:
                    cached_data = json.loads(raw)

            if cached_data and "answer" in cached_data:
                logger.info(f"直接缓存命中: {question[:50]}...")
                return cached_data["answer"], 1.0
        except Exception as e:
            logger.warning(f"直接缓存检查失败: {str(e)}")

        # 2. Semantic similarity over previously cached questions.
        return self.rag_build.get_similar_cached_answer(user_id, question, similarity_threshold=0.75)

    def save_to_cache(self, user_id: int, question: str, answer: str) -> bool:
        """Store a Q/A pair in the similarity cache (1h TTL).

        Returns True on success; False when caching is disabled or fails
        (best-effort: failures are logged, never raised).
        """
        if not self.rag_build:
            return False

        try:
            self.rag_build.set_similar_cached_answer(
                user_id=user_id,
                question=question,
                answer=answer,
                expire_time=3600
            )
            return True
        except Exception as e:
            logger.error(f"缓存保存失败: {str(e)}")
            return False

    def get_history(self, user_id: int, chat_id: int) -> RedisChatMessageHistory:
        """Return the Redis-backed message history for a session (24h TTL).

        Side effect: if more than 20 messages are stored, the history is
        compacted in Redis to the first message plus the latest 19.
        """
        history = RedisChatMessageHistory(
            session_id=self._get_history_key(user_id, chat_id),
            redis_url=self.redis_url,
            ttl=86400
        )

        # Compress long histories to bound prompt size: keep the opening
        # message (context anchor) and the most recent 19.
        if len(history.messages) > 20:
            compressed = [history.messages[0]] + history.messages[-19:]
            history.clear()
            history.add_messages(compressed)

        return history

    def create_chain(self, retriever: VectorStoreRetriever):
        """Build the RAG pipeline: retrieve -> format -> prompt -> LLM -> str.

        Note: despite the original ``ConversationChain`` annotation, this
        returns a LangChain Runnable (LCEL pipeline).
        """

        def _retrieve_and_format(inputs: Dict) -> Dict:
            # Hybrid retrieval on the user question, rendered into the
            # prompt's {knowledge} slot alongside input and history.
            docs = self.hybrid_retrieval(retriever, inputs["input"])
            return {
                "knowledge": format_retriever(docs),
                "input": inputs["input"],
                "history": inputs["history"],
            }

        return (
            RunnableLambda(_retrieve_and_format)
            | ENHANCED_PROMPT_TEMPLATE
            | self.llm
            | StrOutputParser()
        )

    def chat(self, user_question: str, user_id: int, chat_id: int,
             chain) -> str:
        """Answer ``user_question``, preferring cached answers.

        Flow: cache lookup -> LLM via the RAG chain -> validation ->
        persist to history and cache. Returns a fixed apology string when
        generation fails.
        """
        # Cached answer (exact or similar) short-circuits the LLM call but
        # is still appended to the conversation history.
        cached_answer, similarity = self.check_cache(user_id, user_question)
        if cached_answer and similarity >= 0.75:
            logger.info(f"使用缓存回答 (相似度: {similarity:.2f})")
            history = self.get_history(user_id, chat_id)
            history.add_messages([
                HumanMessage(content=user_question),
                AIMessage(content=cached_answer)
            ])
            return cached_answer

        history = self.get_history(user_id, chat_id)

        try:
            # BUG FIX: the original streaming path discarded the invoke
            # result (the full parsed answer) in favour of
            # handler.get_accumulated_text(), which was empty after
            # on_llm_end cleared its buffer — empty answers were then saved
            # to history and cache. The invoke return value is always the
            # complete answer; the handler is only attached for live output.
            config = {"callbacks": [EnhancedStreamHandler()]} if self.streaming else None
            ai_msg = chain.invoke(
                {"input": user_question, "history": history.messages},
                config=config
            )

            ai_msg = self._validate_response(ai_msg, user_question)

            history.add_messages([
                HumanMessage(content=user_question),
                AIMessage(content=ai_msg)
            ])

            self.save_to_cache(user_id, user_question, ai_msg)

            return ai_msg

        except Exception as e:
            logger.error(f"生成回答失败: {str(e)}")
            return "抱歉，回答生成时出现错误"

    def _validate_response(self, response: str, question: str) -> str:
        """Heuristic quality gate for model output.

        Replaces clearly-too-short answers with a clarification request and
        wraps low-overlap answers in a softening preamble; otherwise returns
        the response unchanged.
        """
        # BUG FIX: whitespace word-count alone misjudges CJK text — a full
        # Chinese sentence splits into one "word", so every answer was
        # flagged as too short. Require a genuinely tiny answer (few words
        # AND few characters) before rejecting.
        if len(response.split()) < 3 and len(response.strip()) < 10:
            return "抱歉，我可能没有理解您的问题。请尝试更详细地描述您的问题。"

        # Lexical-overlap check. NOTE(review): \w+ tokenization is only
        # meaningful for space-delimited languages; CJK questions become a
        # single token — consider a proper tokenizer if this fires often.
        q_words = set(re.findall(r'\w+', question.lower()))
        r_words = set(re.findall(r'\w+', response.lower()))
        if q_words and len(q_words & r_words) < len(q_words) / 3:
            return f"关于'{question}'，我找到以下信息:\n{response}\n\n(如果这不完全符合您的问题，请提供更多细节)"

        return response

    def view_chat_history(self, user_id: int, session_id: int) -> List[Dict]:
        """Return the session history as role/content/timestamp dicts.

        NOTE(review): message creation times are not stored, so every entry
        carries the *current* time — misleading for consumers; store real
        timestamps alongside messages to fix properly.
        """
        history = self.get_history(user_id, session_id)
        return [{
            "role": "user" if isinstance(msg, HumanMessage) else "ai",
            "content": msg.content,
            "timestamp": datetime.now().isoformat()
        } for msg in history.messages]

    def clear_history(self, user_id: int, chat_id: int) -> bool:
        """Delete the session's conversation history; True on success."""
        try:
            history = self.get_history(user_id, chat_id)
            history.clear()

            # Belt-and-braces raw-key delete. NOTE(review):
            # RedisChatMessageHistory stores under its own prefixed key, so
            # this bare "<user>:<chat>" delete is likely a no-op — verify
            # the actual key layout before relying on it.
            r = self.get_redis_connection()
            r.delete(self._get_history_key(user_id, chat_id))

            return True
        except Exception as e:
            logger.error(f"清除历史失败: {str(e)}")
            return False
