import json
import logging
import os
import time
from datetime import datetime

import redis.asyncio as redis
from langchain.memory import ConversationBufferWindowMemory
from langchain_core.messages import HumanMessage, AIMessage
from langchain_core.prompts import ChatPromptTemplate

from models.schemas import AddTextsRequest, DelRequest
from prompts.financial_prompt import create_prompt
from services.llm_service import DeepSeekLLM
from utils.msg_tools import get_history_msg, identify_user_category, get_category

# Module-level logger, namespaced under the application's "app" hierarchy.
logger = logging.getLogger(f"app.{__name__}")

# Read Redis connection settings from the environment, falling back to
# local-development defaults when unset.
REDIS_HOST = os.getenv("REDIS_HOST", "localhost")
REDIS_PORT = int(os.getenv("REDIS_PORT", 6379))

# Shared async Redis client; decode_responses=True makes replies str, not bytes.
r = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, db=0, decode_responses=True)

class RAGChain:
    def __init__(self, vector_store_service):
        self.vector_store = vector_store_service
        self.llm = DeepSeekLLM()


    async def query(self, msg: str, session_id: str,k=1):
        # 确保向量存储已加载
        logger.info(f"消息：{msg}")
        msg = msg.replace("{", "{{").replace("}", "}}")
        logger.info(f"new正在检查向量存储...{msg}")
        if not self.vector_store.vector_store:
            raise ValueError("向量存储未初始化，请先运行 ingest.py 脚本")

        # 获取聊天历史
        chat_history = await get_history_msg(session_id)
        # 格式化历史记录用于提示词
        formatted_history = self._format_chat_history(chat_history)
        logger.info(f"历史记录: {formatted_history}")

        # 获取用户类别
        user_category = get_category(msg, chat_history, session_id)
        filter_metadata = None
        if user_category:
            filter_metadata = {"category": user_category}
        # 检索相关文档
        docs = self.vector_store.similarity_search_qa(msg, k=k, filter_metadata=filter_metadata)
        # 构建上下文
        context = "\n\n".join(doc.page_content for doc in docs)
        logger.info(f"向量库检索上下文: {context}")

        pt = create_prompt(context, msg, formatted_history)
        prompt = ChatPromptTemplate.from_template(pt)
        s4 = time.time()

        # 生成回答
        chain = prompt | self.llm.llm
        answer = await chain.ainvoke({"context": context, "question": msg})
        s5 = time.time()
        print(f"生成回答耗时: {s5-s4}")
        chat_history.append(HumanMessage(content=msg))
        chat_history.append(AIMessage(content=answer.content))

        history_to_save = []
        for msg_obj in chat_history[-10:]:  # 只保存最近的10条消息
            if isinstance(msg_obj, HumanMessage):
                history_to_save.append({"role": "human", "content": msg_obj.content})
            elif isinstance(msg_obj, AIMessage):

                history_to_save.append({"role": "ai", "content": msg_obj.content})

        # 保存到Redis
        await r.setex(history_key, 3600, json.dumps(history_to_save))
        s6 = time.time()
        print(f"保存历史记录耗时: {s6-s5}")

        return {
            "answer": answer.content,
            "sources": [
                {"content": doc.page_content, "metadata": doc.metadata}
                for doc in docs
            ]
        }

    def _format_chat_history(self, chat_history):
        """格式化聊天历史用于提示词"""
        if not chat_history:
            return ""

        formatted = []
        for msg in chat_history[-6:]:  # 只取最近3轮对话
            if isinstance(msg, HumanMessage):
                formatted.append(f"客户: {msg.content}")
            elif isinstance(msg, AIMessage):
                formatted.append(f"顾问: {msg.content}")

        return "\n".join(formatted)

    async def add_text(self,add_request: AddTextsRequest):
        try:
            doc_ids = self.vector_store.add_texts(
                texts=add_request.texts,
                created_by=add_request.created_by,
                category=add_request.category,
                source=add_request.source,
                product_type=add_request.product_type
            )

            for i, (doc_id, text) in enumerate(zip(doc_ids, add_request.texts)):
                await r.hset("docs_text",doc_id, text)

            return {
                "result": True,
                "code":200,
                "message": f"成功添加 {len(doc_ids)} 个文档到向量库",
                "document_ids": doc_ids
            }
        except Exception as e:
            logger.exception(f"文档添加失败: {str(e)}")
            raise ValueError(f"文档添加失败: {str(e)}")

    async def add_qa(self,add_request: AddTextsRequest):
        try:
            doc_ids = self.vector_store.add_qa_pairs(
                questions=add_request.questions,
                answers=add_request.answers,
                created_by=add_request.created_by,
                category=add_request.category,
                source=add_request.source,
                product_type=add_request.product_type
            )

            for i, (doc_id, question, answer) in enumerate(zip(doc_ids, add_request.questions, add_request.answers)):
                # 创建结构化的存储内容
                qa_info = {
                    "category": add_request.category,
                    "source": add_request.source,
                    "product_type": add_request.product_type,
                    "question": question,
                    "answer": answer
                }
                # 将结构化信息转换为JSON字符串存储
                await r.hset("docs_text", doc_id, json.dumps(qa_info, ensure_ascii=False))

            return {
                "result": True,
                "code":200,
                "message": f"成功添加 {len(doc_ids)} 个文档到向量库",
                "document_ids": doc_ids
            }
        except Exception as e:
            logger.exception(f"文档添加失败: {str(e)}")
            raise ValueError(f"文档添加失败: {str(e)}")



    async def get_all_documents_as_map(self):
        """
        获取所有文档，以id-text映射的形式返回

        Args:
            limit: 限制返回的文档数量

        Returns:
            list: 包含{id: text}映射的列表
        """
        # 获取所有文档ID

        all_docs = await r.hgetall("docs_text")
        # 构建id-text映射列表
        documents_map = []
        for doc_id, text in all_docs.items():
            documents_map.append({
                "id": doc_id,
                "text": text
            })
        return documents_map

    async def delete_documents_by_ids(self, doc_ids):

        await r.hdel("docs_text", *doc_ids)

        ct = self.vector_store.delete_documents_by_ids(doc_ids)

        return ct

    async def delete_context_by_id(self, sender_id: str,rec_id):
        try:
            session_id = self.generate_session_id(sender_id, rec_id)
            history_key = f"chat:{session_id}"
            # 删除Redis中的聊天历史记录
            result = await r.delete(history_key)
            return result > 0  # 如果删除成功，返回True
        except Exception as e:
            logger.exception(f"删除会话上下文失败: {str(e)}")
            return False


    async def update_document(self, doc_id: str, update_request: AddTextsRequest):
        """
        更新文档（通过删除后重新添加实现）
        """
        try:
            # 1. 从Redis中删除旧文档
            await r.hdel("docs_text", doc_id)

            # 2. 从向量数据库中删除旧文档
            self.vector_store.delete_documents_by_ids([doc_id])

            result = await self.add_qa(update_request)
            return result
        except Exception as e:
            logger.exception(f"文档更新失败: {str(e)}")
            raise ValueError(f"文档更新失败: {str(e)}")

    def generate_session_id(self, send_id: str, receiver_id: str, is_group: bool = False) -> str:
        """生成会话ID"""
        if is_group:
            return f"group_{receiver_id}"
        else:
            # 排序ID确保唯一会话
            ids = sorted([send_id, receiver_id])
            return f"p2p_{ids[0]}_{ids[1]}"