

# --------------------------------------- 依赖包导入区 ---------------------------------------
import time

from langchain_openai import ChatOpenAI
from langchain.agents import Tool


# 网络搜索工具
from langchain.agents import tool
from langchain_community.tools.tavily_search import TavilySearchResults

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, HumanMessage
from langchain.agents.format_scratchpad.openai_tools import (
    format_to_openai_tool_messages,
)
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
from knowledge_lib import load_vector_store_bd, get_info_from_vector_store_bd

from langchain.agents import AgentExecutor

import os


# --------------------------------------- Globals ---------------------------------------

# Filesystem directory (served by nginx) where generated chart images are
# written, and the URL prefix under which those images are publicly reachable.
NGINX_PIC_PATH = os.environ["NGINX_PIC_PATH"]
NGINX_PIC_PREFIX = os.environ["NGINX_PIC_PREFIX"]


# Candidate chat-model identifiers; callers select one by list index.
__MODEL_LIST = [
    "glm-4-0520",
    "chatglm3-6b:1950518390::11jx4gpg",
    "glm-3-turbo:1950518390:jr08:fslpv2gq",
]

# One streaming client per model, all pointed at the open.bigmodel.cn
# OpenAI-compatible endpoint, with a low temperature for steadier answers.
__LLMS = []
for _model_name in __MODEL_LIST:
    __LLMS.append(
        ChatOpenAI(
            temperature=0.2,
            model=_model_name,
            openai_api_base="https://open.bigmodel.cn/api/paas/v4/",
            streaming=True,
        )
    )

print("Loading LLM tools...")



@tool
def make_pic(data):
    """当需要画一个折线图时调用。输入数据是一个字符串，例如data="{"title":"标题", "data":[(x1,y1),(x2,y2)]}"，根据这个字符串显示一个图表"""
    # NOTE: the docstring above doubles as the tool description that the LLM
    # sees at runtime, so it is kept verbatim (in Chinese) on purpose.
    #
    # Parses a dict-literal string {"title": ..., "data": [(x, y), ...]},
    # renders a line chart, saves it under NGINX_PIC_PATH, and returns a
    # markdown image link built with NGINX_PIC_PREFIX.  On any failure a
    # Chinese error string (not an exception) is returned to the agent.
    try:
        print("In tool named make_pic, data=", data)

        # literal_eval instead of eval: `data` is model-generated text, so
        # arbitrary code execution must not be possible.  literal_eval still
        # accepts the documented dict/tuple/list payload shape.
        import ast

        parsed = ast.literal_eval(data)
        table_name = parsed["title"]
        data_list = parsed["data"]

        # Sort points by x so the polyline is drawn left to right.
        data_list = sorted(data_list, key=lambda point: point[0])

        import matplotlib.pyplot as plt

        # Select a CJK-capable font BEFORE drawing so Chinese titles render.
        plt.rcParams["font.sans-serif"] = ["SimHei"]

        # Use a fresh figure per call — the previous code reused pyplot's
        # implicit global figure, so consecutive calls piled lines onto the
        # same chart and the figure was never released.
        fig = plt.figure()
        try:
            xs = [point[0] for point in data_list]
            ys = [point[1] for point in data_list]
            plt.plot(xs, ys)
            plt.title(table_name)

            # Timestamp in the filename avoids collisions between charts
            # that share a title.
            filename = table_name + str(time.time()) + ".png"
            plt.savefig(os.path.join(NGINX_PIC_PATH, filename))
        finally:
            plt.close(fig)  # release the figure even if saving fails

        markdown_line = f"![{table_name}]({NGINX_PIC_PREFIX + filename})"
        return "以下是您的图表：\n" + markdown_line
    except Exception as e:
        # Bug fix: the old message formatted `data_list`, which is undefined
        # when parsing itself fails — that raised NameError and hid the
        # original cause.  Report the raw input instead.
        return "绘图出现错误，输入={}，请检查输入是否正确。原因：{}".format(data, e)



# Wrap make_pic as a named Tool so the agent can select it by its Chinese
# name "绘制折线图" (draw line chart).
# NOTE(review): make_pic is already decorated with @tool (i.e. it is a tool
# object, not a plain function); passing it as `func` relies on that object
# being directly callable — confirm this actually invokes the underlying
# function rather than requiring .run()/.invoke().
__PIC_TOOL = Tool.from_function(
    name="绘制折线图",
    func=make_pic,
    description="""当需要画一个折线图时调用。输入数据是一个字符串，例如data="{"title":"标题", "data":[(x1,y1),(x2,y2)]}"，根据这个字符串显示一个图表""",
)

# Tavily web search, capped at 3 results, for questions that need real-time
# or web information (description is shown to the LLM, hence in Chinese).
# NOTE(review): `search_result` does not look like a documented
# TavilySearchResults field (cf. include_answer / include_raw_content) —
# verify it is not silently ignored or rejected by pydantic validation.
__TAVILY_SEARCH = TavilySearchResults(
    search_result=True,
    max_results=3,
    description="当你需要一些实时信息或者网络信息时，这个工具可以帮助你搜索网络上的信息。输入是一个询问",
)

# Toolbelt handed to every agent: web search plus the line-chart renderer.
__TOOLS = [
    __TAVILY_SEARCH,
    __PIC_TOOL,
]
print("LLM tools loaded.")


print("Loading LLM Prompt...")


# Key under which the caller-supplied conversation history is injected.
__MEMORY_KEY = "chat_history"

# Prompt layout: system instructions (with retrieved local knowledge spliced
# in), prior chat history, the current user turn, then the agent scratchpad
# that carries intermediate tool-call messages.
__PROMPT_MESSAGES = [
    (
        "system",
        "你是一个中文理财小助手，你的任务是为用户分析他的理财信息，并为其提供理财服务. 本地知识=“{local_document_info}”",
    ),
    MessagesPlaceholder(variable_name=__MEMORY_KEY),  # prior turns
    ("user", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),  # tool traces
]
__PROMPT = ChatPromptTemplate.from_messages(__PROMPT_MESSAGES)
print("LLM Prompt loaded.")


print("Loading LLM Agent...")
# Build one agent pipeline per configured model: input mapping -> prompt ->
# tool-bound LLM -> OpenAI-tools output parser.


print("LLM Agent loaded.：", "Bind tools...")
# Attach the tool schemas to every model so each can emit OpenAI-style tool
# calls.  (Renamed from the misspelled __LLM_WTIH_TOOLS; both the definition
# and its only use live in this section.)
__LLMS_WITH_TOOLS = [llm.bind_tools(__TOOLS) for llm in __LLMS]

print("LLM Agent loaded.：", "Import tools...")


print("LLM Agent loaded.：", "Load vector store...")
# Local knowledge base used for retrieval-augmented answers.
__VS = load_vector_store_bd(persist_directory="./chroma_db_1")

print("LLM Agent loaded.：", "Define agent...")
__AGENTS = [
    (
        {
            # Raw user utterance.
            "input": lambda x: x["input"],
            # Intermediate (tool call, observation) steps rendered as
            # OpenAI tool messages for the scratchpad placeholder.
            "agent_scratchpad": lambda x: format_to_openai_tool_messages(
                x["intermediate_steps"]
            ),
            # Prior conversation turns supplied by the caller.
            "chat_history": lambda x: x["chat_history"],
            # Optional RAG context: top-3 vector-store hits for the query,
            # or an empty list when the caller disables document lookup.
            "local_document_info": lambda x: (
                get_info_from_vector_store_bd(
                    vectorstore=__VS, query=x["input"], top_k=3
                )
                if x["use_document"]
                else []
            ),
        }
        | __PROMPT
        | llm_with_tools
        | OpenAIToolsAgentOutputParser()
    )
    for llm_with_tools in __LLMS_WITH_TOOLS
]


print("LLM Agent loaded.")



# Ready-to-run executors, one per agent pipeline; verbose=True echoes the
# intermediate reasoning and tool invocations to stdout.
AGENT_EXECUTORS = [
    AgentExecutor(agent=pipeline, tools=__TOOLS, verbose=True)
    for pipeline in __AGENTS
]





# --------------------------------------- 导出函数定义区 ---------------------------------------


def GetAgentExecutor(option=0):
    """Return the pre-built AgentExecutor for the model at index ``option``.

    ``option`` indexes into ``AGENT_EXECUTORS`` (defaults to the first
    configured model); an out-of-range value raises ``IndexError``.
    """
    executor = AGENT_EXECUTORS[option]
    return executor





