import json
import logging
import time

from fastapi.encoders import jsonable_encoder
from langchain.retrievers import ContextualCompressionRetriever
from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

from compressor.rerank import DashScopeRerankCompressor, XinferenceRerankCompressor
from config.LLMConfig import XINFERENCE_ENDPOINT
from crud.chat_history_crud import ChatHistoryCrud
from models.chat_history_model import ChatHistory, ChatHistoryCreate
from models.chat_model import ChatStreamResponse, Chatting
from .base import chat_llm, milvus_vector_store

# Module-level CRUD accessor shared by all stream handlers to persist chat history rows.
chat_history_crud = ChatHistoryCrud()


async def generate_stream(chain, model_name, invoke_params, chat_session_id):
    """Convert a LangChain token stream into a newline-delimited JSON stream.

    Each raw chunk (including any literal think tags) is forwarded to the
    client as one JSON line; chunks between <think> and </think> are
    accumulated separately so the final assistant record stores reasoning
    ("think") and answer ("content") apart. After the stream ends a single
    done/stop marker line is yielded and the assistant turn is persisted.
    """

    think_text = ""
    answer_text = ""
    in_think = False

    async for piece in chain.astream(invoke_params):
        # A chunk carrying a think tag only toggles state; its text is not
        # accumulated (this strips the tag markers from the saved history).
        tag_chunk = False
        if "<think>" in piece:
            in_think = True
            tag_chunk = True
        if "</think>" in piece:
            in_think = False
            tag_chunk = True
        if not tag_chunk:
            if in_think:
                think_text += piece
            else:
                answer_text += piece

        payload = ChatStreamResponse(
            model=model_name,
            created_at=int(round(time.time() * 1000)),
            message=Chatting(role="assistant", content=piece),
            done=False,
        ).model_dump(exclude_none=True)
        # Newline-delimited JSON framing.
        yield json.dumps(jsonable_encoder(payload), ensure_ascii=False) + "\n"

    # One terminal marker once the upstream generator is exhausted.
    done_line = json.dumps(
        jsonable_encoder(
            ChatStreamResponse(
                model=model_name,
                created_at=int(round(time.time() * 1000)),
                message=Chatting(role="assistant", content=""),
                done=True,
                done_reason="stop",
            )
        ),
        ensure_ascii=False,
    )
    yield done_line + "\n"

    # Persist the completed assistant message into the chat history.
    assistantChat = ChatHistoryCreate(
        role="assistant",
        content=answer_text,
        think=think_text,
        chat_session_id=chat_session_id,
    )

    chat_history_crud.add_item(assistantChat)
async def graph_stream(graph, model_name, invoke_params, chat_session_id):
    """Stream a LangGraph run as newline-delimited JSON chunks.

    Only updates whose last message is a 'Final Answer' action are forwarded
    to the client; tool returns are skipped. A single done/stop marker is
    emitted after the graph finishes, then the assistant turn is persisted.
    """

    think = ""
    content = ""
    # The graph consumes a message list; seed it from the user question.
    invoke_params["messages"] = [invoke_params["question"]]

    async for chunk in graph.astream(invoke_params, stream_mode="updates"):
        for value in chunk.values():
            # Skip empty updates and tool returns (list-shaped tail message).
            if not value or isinstance(value["messages"][-1], list):
                continue
            if 'Final Answer' == value["messages"][-1].get('action'):
                content = value["messages"][-1]['answer']
                json_chunk = json.dumps(
                    jsonable_encoder(
                        ChatStreamResponse(
                            model=model_name,
                            created_at=int(round(time.time() * 1000)),
                            message=Chatting(role="assistant", content=content),
                            done=False,
                        ).model_dump(exclude_none=True)
                    ),
                    ensure_ascii=False,
                )
                # Newline-delimited JSON framing.
                yield f"{json_chunk}\n"

    # Fix: the completion marker was previously emitted inside the loop,
    # producing a done record after every graph update. Emit it exactly once
    # after the stream ends, matching generate_stream/react_agent_stream.
    done = json.dumps(
        jsonable_encoder(
            ChatStreamResponse(
                model=model_name,
                created_at=int(round(time.time() * 1000)),
                message=Chatting(role="assistant", content=""),
                done=True,
                done_reason="stop",
            )
        ),
        ensure_ascii=False,
    )
    yield f"{done}\n"

    # Persist the completed assistant message into the chat history.
    assistantChat = ChatHistoryCreate(
        role="assistant",
        content=content,
        think=think,
        chat_session_id=chat_session_id,
    )
    chat_history_crud.add_item(assistantChat)


async def react_agent_stream(react_agent, model_name, invoke_params, chat_session_id):
    """Stream a ReAct agent run as newline-delimited JSON chunks.

    Forwards each agent turn's latest message content to the client, then
    emits a single done/stop marker and persists the final assistant answer.
    The thread_id config keeps the agent's conversation state continuous
    across requests for the same chat session.
    """

    # thread_id ties checkpointed agent state to this chat session.
    config = {"configurable": {"thread_id": str(chat_session_id)}, "recursion_limit": 25}
    think = ""
    content = ""
    # The agent consumes a message list; question/chat_history are cleared
    # so only the messages channel drives the run.
    invoke_params["messages"] = [invoke_params["question"]]
    invoke_params["question"] = None
    invoke_params["chat_history"] = None

    async for event in react_agent.astream(invoke_params, config=config):
        # Use lazy logging instead of print for debug output.
        logging.debug("react agent event: %s", event)
        # Fix: tool-call events carry a "tools" key, not "agent";
        # indexing event["agent"] unconditionally raised KeyError on them.
        agent_update = event.get("agent")
        if not agent_update:
            continue
        content = agent_update["messages"][-1].content

        json_chunk = json.dumps(
            jsonable_encoder(
                ChatStreamResponse(
                    model=model_name,
                    created_at=int(round(time.time() * 1000)),
                    message=Chatting(role="assistant", content=content),
                    done=False,
                ).model_dump(exclude_none=True)
            ),
            ensure_ascii=False,
        )
        # Newline-delimited JSON framing.
        yield f"{json_chunk}\n"

    # Single completion marker once the agent run ends.
    done = json.dumps(
        jsonable_encoder(
            ChatStreamResponse(
                model=model_name,
                created_at=int(round(time.time() * 1000)),
                message=Chatting(role="assistant", content=""),
                done=True,
                done_reason="stop",
            )
        ),
        ensure_ascii=False,
    )
    yield f"{done}\n"

    # Persist the completed assistant message into the chat history.
    assistantChat = ChatHistoryCreate(
        role="assistant",
        content=content,
        think=think,
        chat_session_id=chat_session_id,
    )

    chat_history_crud.add_item(assistantChat)


def build_history_template(chat_history_list: list[ChatHistory]) -> list[BaseMessage]:
    """Convert stored chat-history rows into LangChain message objects.

    Args:
        chat_history_list: History records; each row exposes ``role``
            ("user" / "assistant") and ``content``. Rows with any other
            role are silently skipped.

    Returns:
        HumanMessage/AIMessage objects in the same order, or an empty list
        when the input is missing, empty, or not a list.
    """

    # isinstance + truthiness instead of `type(x) != list` / `len(x) == 0`.
    if not isinstance(chat_history_list, list) or not chat_history_list:
        return []

    history_messages: list[BaseMessage] = []
    # Map each stored row to the corresponding LangChain message type.
    for history in chat_history_list:
        if history.role == "user":
            history_messages.append(HumanMessage(content=history.content))
        elif history.role == "assistant":
            history_messages.append(AIMessage(content=history.content))
    return history_messages


def build_qa_chain(param: dict = None):
    """Build the RAG question-answering chain.

    Pipeline: Milvus similarity retrieval -> DashScope rerank compression ->
    chat prompt (system + history + question) -> LLM -> string output.

    Args:
        param: Optional LLM configuration dict forwarded to ``chat_llm``.

    Returns:
        A LangChain Runnable that expects a dict with ``question`` and
        ``chat_history`` keys and yields the answer as a string.
    """

    # Initialize the Milvus vector store (helper from .base).
    vector_store = milvus_vector_store()

    # Initialize the chat LLM (helper from .base).
    llm = chat_llm(param)

    # Build and configure the base retriever.

    base_retriever = vector_store.as_retriever(
        search_type="similarity",
        search_kwargs={
            "k": 5,  # number of most-similar documents to return
            "fetch_k": 20,  # candidate pool size passed to the MMR algorithm
            # "search_type": "mmr",
            # "score_threshold": 0.7,  # similarity threshold filter
             "ranker_type": "weighted",
            "ranker_params": {"weights": [0.6, 0.4]}
        }
    )
    compressor = DashScopeRerankCompressor(model="gte-rerank-v2", top_n=3)
    # compressor = XinferenceRerankCompressor(model="gte-rerank-v2", top_n=3,  endpoint=XINFERENCE_ENDPOINT)
    # Wrap the base retriever with rerank-based contextual compression.
    retriever = ContextualCompressionRetriever(
        base_compressor=compressor,
        base_retriever=base_retriever
    )

    # System prompt template (kept verbatim; it is runtime text sent to the LLM).
    system_template = """
        您是一个设计用于査询文档来回答问题的代理，您的名字是超级牛逼哄哄的小天才助手。
        您可以使用文档检索工具，并基于检索内容来回答问题。不需要说出检索文档的id。
        您可能不查询文档就知道答案，但是您仍然应该查询文档来获得答案。
        如果用户的问题与检索文档上下文的内容无关，您仍然应该查询文档来获得答案。
        结合文档内容作为答案,切记不可自由发挥,不可胡编乱造。
        如果您从文档中找不到任何信息用于回答问题，则只需返回“抱歉，这个问题我还不知道。”作为答案。
        文档内容：{context}
        """
    prompt = ChatPromptTemplate(
        [
            ("system", system_template),
            MessagesPlaceholder("chat_history"),
            ("human", "{question}"),
        ]
    )
    # tools = [searxng_search,get_weather]
    #
    # prompt = prompt.partial(
    #     tools=render_text_description(list(tools)),
    #     tool_names=", ".join([t.name for t in tools]),
    # )
    # logging.debug(prompt)
    # Assemble the retrieval Runnable pipeline.
    # retriever.invoke() fetches the documents most relevant to the question.
    # `x` is the pipeline input dict: question, chat_history, plus any other
    # LangChain-related parameters.
    logging.info("构建检索链管道 Runnable")
    return (
        {
            "context": lambda x: retriever.invoke(x["question"]),
            "chat_history": lambda x: x["chat_history"],
            "question": lambda x: x["question"],
        }
        | prompt
        | llm
        | StrOutputParser()
    )
