from langchain.memory import ConversationBufferWindowMemory

from server.agent.custom_agent.ChatGLM3Agent import initialize_glm3_agent
from server.agent.tools_select import tools, tool_names
from server.agent.callbacks import CustomAsyncIteratorCallbackHandler, Status
from langchain.agents import LLMSingleActionAgent, AgentExecutor
from server.agent.custom_template import CustomOutputParser, CustomPromptTemplate
from fastapi import Body
from sse_starlette.sse import EventSourceResponse
from configs import LLM_MODELS, TEMPERATURE, HISTORY_LEN, Agent_MODEL
from server.utils import wrap_done, get_ChatOpenAI, get_prompt_template, embedding_device
from langchain.chains import LLMChain
from typing import AsyncIterable, Optional
import asyncio
from typing import List
from server.chat.utils import History
import json
from server.agent import model_container
from server.knowledge_base.kb_service.base import get_kb_details
import os
from configs.model_config import EMBEDDING_MODEL
from configs.kb_config import CONVERSATION_SEARCH_THRESHOLD, CONVERSATION_SEARCH_TOP_K
from langchain.vectorstores.faiss import FAISS
from server.knowledge_base.kb_cache.faiss_cache import _FaissPool
from server.knowledge_base.utils import get_vs_path
from server.db.repository.message_repository import add_message_to_custom_db
from server.utils import create_chat_response


async def agent_chat(uid: str = Body(..., description="用户ID"),
                     query: str = Body(..., description="用户输入", examples=["恼羞成怒"]),
                     history: List[History] = Body([],
                                                   description="历史对话",
                                                   examples=[[
                                                       {"role": "user", "content": "请使用知识库工具查询今天北京天气"},
                                                       {"role": "assistant",
                                                        "content": "使用天气查询工具查询到今天北京多云，10-14摄氏度，东北风2级，易感冒"}]]
                                                   ),
                     stream: bool = Body(False, description="流式输出"),
                     model_name: str = Body(LLM_MODELS[0], description="LLM 模型名称。"),
                     temperature: float = Body(TEMPERATURE, description="LLM 采样温度", ge=0.0, le=1.0),
                     max_tokens: Optional[int] = Body(None, description="限制LLM生成Token数量，默认None代表模型最大值"),
                     prompt_name: str = Body("default",
                                             description="使用的prompt模板名称(在configs/prompt_config.py中配置)"),
                     # top_p: float = Body(TOP_P, description="LLM 核采样。勿与temperature同时设置", gt=0.0, lt=1.0),
                     nickname: str = Body(..., description="昵称"),
                     character: str = Body(..., description="性格"),
                     instruction: str = Body("-1", description="指令"),
                     request_id: str = Body(..., description="请求ID")
                     ):
    """Agent chat endpoint with per-user long-term conversation memory.

    Builds a LangChain agent (tool-calling) around the selected LLM and
    returns an async generator of SSE-style JSON strings produced by
    ``create_chat_response``.  Past Q/A pairs are persisted per user in a
    FAISS store under ``History/<uid>`` and, once the recent-history window
    is full, semantically similar past exchanges are retrieved and merged
    into the agent's memory.

    Returns:
        AsyncIterable[str]: the ``agent_chat_iterator`` generator; token
        chunks when ``stream`` is True, otherwise a single final response.
    """
    history = [History.from_data(h) for h in history]

    async def agent_chat_iterator(
            query: str,
            history: Optional[List[History]],
            model_name: str = LLM_MODELS[0],
            prompt_name: str = prompt_name,
    ) -> AsyncIterable[str]:
        nonlocal max_tokens
        callback = CustomAsyncIteratorCallbackHandler()
        # A non-positive limit means "use the model's maximum".
        if isinstance(max_tokens, int) and max_tokens <= 0:
            max_tokens = None

        model = get_ChatOpenAI(
            model_name=model_name,
            temperature=temperature,
            max_tokens=max_tokens,
            callbacks=[callback],
        )

        # Per-user FAISS store holding past Q/A pairs for similarity recall.
        faiss_pool = _FaissPool()
        kb_name = os.path.join("History", f"{uid}")
        vs_path = get_vs_path(knowledge_base_name=kb_name, vector_name=EMBEDDING_MODEL)
        if os.path.isfile(os.path.join(vs_path, "index.faiss")):
            embeddings = faiss_pool.load_kb_embeddings(kb_name=kb_name, embed_device=embedding_device(),
                                                       default_embed_model=EMBEDDING_MODEL)
            vector_store = FAISS.load_local(vs_path, embeddings, normalize_L2=True)
        else:
            # First conversation for this user: create an empty vector store.
            # (The original guarded this with a constant `create = True`,
            # leaving a dead "knowledge base not exist" branch.)
            os.makedirs(vs_path, exist_ok=True)
            vector_store = faiss_pool.new_vector_store()
            vector_store.save_local(vs_path)

        # Once the recent window is full, pull semantically similar past
        # exchanges from the vector store to extend the context.
        if len(history) >= HISTORY_LEN * 2:
            retriever = vector_store.as_retriever(search_type="similarity_score_threshold",
                                                  search_kwargs={'score_threshold': CONVERSATION_SEARCH_THRESHOLD,
                                                                 'k': CONVERSATION_SEARCH_TOP_K})
            retrieved_results = retriever.get_relevant_documents(query=query)
            # Each stored document is a JSON-encoded [user, assistant] pair;
            # flatten them back into a message list.
            related_history = []
            for doc in retrieved_results:
                qa_pair = json.loads(doc.page_content)
                related_history.extend(qa_pair)
            print("similar_history:", related_history)
            similar_history = [History.from_data(h) for h in related_history]
        else:
            similar_history = []

        # De-duplicate: drop retrieved messages already present in the recent
        # history (str() comparison is a cheap structural-equality proxy).
        history_set = set(str(item) for item in history)
        deduped_similar = [item for item in similar_history if str(item) not in history_set]
        history = history + deduped_similar

        # Expose knowledge-base metadata via module-level globals so agent
        # tools can resolve knowledge bases at call time.
        kb_list = {x["kb_name"]: x for x in get_kb_details()}
        model_container.DATABASE = {name: details['kb_info'] for name, details in kb_list.items()}

        if Agent_MODEL:
            # A dedicated agent model is configured: use it for tool planning
            # while `model` still produces the user-facing answer stream.
            model_container.MODEL = get_ChatOpenAI(
                model_name=Agent_MODEL,
                temperature=temperature,
                max_tokens=max_tokens,
                callbacks=[callback],
            )
        else:
            model_container.MODEL = model

        prompt_template = get_prompt_template("agent_chat", prompt_name)
        prompt_template_agent = CustomPromptTemplate(
            template=prompt_template,
            tools=tools,
            nickname=nickname,
            character=character,
            input_variables=["input", "intermediate_steps", "history", "nickname", "character"]
        )
        output_parser = CustomOutputParser()
        llm_chain = LLMChain(llm=model, prompt=prompt_template_agent)

        # Replay the (possibly augmented) history into the agent's windowed memory.
        memory = ConversationBufferWindowMemory(k=HISTORY_LEN * 4, input_key="input")
        for message in history:
            if message.role == 'user':
                memory.chat_memory.add_user_message(message.content)
            else:
                memory.chat_memory.add_ai_message(message.content)

        if "chatglm3" in model_container.MODEL.model_name:
            # GLM3 constructs its LangChain prompt internally, so it receives
            # the raw template string rather than CustomPromptTemplate.
            agent_executor = initialize_glm3_agent(
                llm=model,
                tools=tools,
                callback_manager=None,
                prompt=prompt_template,
                input_variables=["input", "intermediate_steps", "history"],
                memory=memory,
                verbose=True,
            )
        else:
            agent = LLMSingleActionAgent(
                llm_chain=llm_chain,
                output_parser=output_parser,
                stop=["\nObservation:", "Observation"],
                allowed_tools=tool_names,
            )
            agent_executor = AgentExecutor.from_agent_and_tools(agent=agent,
                                                                tools=tools,
                                                                verbose=True,
                                                                memory=memory,
                                                                )

        # BUGFIX: the original wrapped this in `while True: try/except: pass`,
        # a bare-except retry loop that could spin forever and swallowed every
        # exception (including CancelledError).  asyncio.create_task() does not
        # raise for a valid coroutine, so a single call suffices; any failure
        # inside the agent run surfaces through wrap_done/`await task` below.
        task = asyncio.create_task(wrap_done(
            agent_executor.acall({"input": query, "nickname": nickname, "character": character},
                                 callbacks=[callback], include_run_info=True),
            callback.done))

        if stream:
            async for chunk in callback.aiter():
                # Tool trace kept for debugging; its yields are intentionally
                # disabled (see commented-out lines) so clients only receive
                # LLM tokens and the final answer.
                tools_use = []
                data = json.loads(chunk)
                if data["status"] == Status.start or data["status"] == Status.complete:
                    continue
                elif data["status"] == Status.error:
                    tools_use.append("\n```\n")
                    tools_use.append("工具名称: " + data["tool_name"])
                    tools_use.append("工具状态: " + "调用失败")
                    tools_use.append("错误信息: " + data["error"])
                    tools_use.append("重新开始尝试")
                    tools_use.append("\n```\n")
                    # yield json.dumps({"tools": tools_use}, ensure_ascii=False)
                elif data["status"] == Status.tool_finish:
                    tools_use.append("\n```\n")
                    tools_use.append("工具名称: " + data["tool_name"])
                    tools_use.append("工具状态: " + "调用成功")
                    tools_use.append("工具输入: " + data["input_str"])
                    tools_use.append("工具输出: " + data["output_str"])
                    tools_use.append("\n```\n")
                    # yield json.dumps({"tools": tools_use}, ensure_ascii=False)
                elif data["status"] == Status.agent_finish:
                    final_answer = data["final_answer"]
                    print(f"request_id: {request_id}")
                    add_message_to_custom_db(user_id=uid, conversation_id=request_id, chat_type="agent_chat",
                                             query=query, response=final_answer, instruction=instruction)
                    # Persist the completed Q/A pair so future turns can
                    # retrieve it via the similarity search above.
                    if final_answer != "":
                        qa_pair = [
                            {"role": "user", "content": query},
                            {"role": "assistant", "content": final_answer}
                        ]
                        qa_pair_str = json.dumps(qa_pair)
                        vector_store.add_texts([qa_pair_str])
                        vector_store.save_local(vs_path)
                    yield create_chat_response(final_answer, instruction="-1", request_id=request_id, finish=True, data_content="")
                else:
                    # Plain LLM token chunk.
                    yield create_chat_response(data["llm_token"], instruction="-1", request_id=request_id, finish=False, data_content="")
        else:
            answer = ""
            final_answer = ""
            async for chunk in callback.aiter():
                data = json.loads(chunk)
                # BUGFIX: these were independent `if`s, so after an error or
                # tool_finish event control fell into the trailing `else` and
                # `data["llm_token"]` raised KeyError (those payloads carry
                # tool fields, not llm_token — see the stream branch).  An
                # `elif` chain matches the stream branch's dispatch.
                if data["status"] == Status.start or data["status"] == Status.complete:
                    continue
                elif data["status"] == Status.error:
                    answer += "\n```\n"
                    answer += "工具名称: " + data["tool_name"] + "\n"
                    answer += "工具状态: " + "调用失败" + "\n"
                    answer += "错误信息: " + data["error"] + "\n"
                    answer += "\n```\n"
                elif data["status"] == Status.tool_finish:
                    answer += "\n```\n"
                    answer += "工具名称: " + data["tool_name"] + "\n"
                    answer += "工具状态: " + "调用成功" + "\n"
                    answer += "工具输入: " + data["input_str"] + "\n"
                    answer += "工具输出: " + data["output_str"] + "\n"
                    answer += "\n```\n"
                elif data["status"] == Status.agent_finish:
                    final_answer = data["final_answer"]
                else:
                    answer += data["llm_token"]

            # NOTE(review): unlike the stream branch, this path neither logs
            # the message to the DB nor persists the Q/A pair to the vector
            # store — confirm whether that asymmetry is intentional.
            yield create_chat_response(final_answer, instruction="-1", request_id=request_id, finish=True, data_content="")
        await task

    return agent_chat_iterator(query=query,
                               history=history,
                               model_name=model_name,
                               prompt_name=prompt_name)
