# -*- encoding: utf-8 -*-
'''
@文件    :llm_invoke_tools.py
@说明    :通过agent来调用不同的工具执行
@时间    :2024/10/30 21:47:54
@作者    :冯佳辉
@版本    :
'''

import sys
import os

# Make the project package importable when this file is run as a script.
# TODO(review): prefer installing the package or setting PYTHONPATH — this
# absolute path only works on the original author's machine.
sys.path.append(r'D:\gitee_project\fastapi_project')
# SECURITY: a live-looking API key was hard-coded here. setdefault() keeps the
# old behavior as a fallback while letting an externally supplied
# DASHVECTOR_API_KEY take precedence. The leaked literal should be rotated
# and removed from source control.
os.environ.setdefault("DASHVECTOR_API_KEY", "sk-97263d9d264b4e61b75cfc5bda33a6c9")
from common.llm_master import Agent_Master
from langchain_community.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores.dashvector import DashVector
from langchain_community.embeddings.dashscope import DashScopeEmbeddings
from langchain.tools.retriever import create_retriever_tool
from langchain.tools import DuckDuckGoSearchRun, tool
from langchain_core.messages import HumanMessage
from langchain.tools.render import format_tool_to_openai_function
from langchain.vectorstores import Chroma
from langchain.embeddings import OllamaEmbeddings
from langchain.agents import create_tool_calling_agent
from langchain.agents import AgentExecutor
from langchain import hub
from langsmith.client import LangSmith

# Shared LLM/agent wrapper used by every helper below.
master = Agent_Master()
# Local embedding model for the Chroma store built in build_retriever_tools().
embedding_model = OllamaEmbeddings(model="nomic-embed-text")
# SECURITY: the LangSmith key was hard-coded; prefer the environment variable
# and keep the old literal only as a fallback so existing behavior is
# preserved. TODO(review): rotate this leaked key and drop the fallback.
api_key = os.getenv("LANGSMITH_API_KEY",
                    "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")
langsmith_client = LangSmith(api_key=api_key)


@tool
def multiply(first_number: int, second_number: int) -> int:
    """
    两个整型数进行相乘
    """
    # NOTE(review): the docstring doubles as the LLM-facing tool description
    # at runtime, so it is kept verbatim (Chinese for "multiply two integers").
    return first_number * second_number


@tool
def add(first_number: int, second_number: int) -> int:
    """
    两个整型数进行相加
    """
    # NOTE(review): the docstring doubles as the LLM-facing tool description
    # at runtime, so it is kept verbatim (Chinese for "add two integers").
    return first_number + second_number


@tool
def exponentiate(base: int, exponent: int) -> int:
    """
    底数的指数次方
    """
    # NOTE(review): the docstring doubles as the LLM-facing tool description
    # at runtime, so it is kept verbatim (Chinese for "base to the power of
    # exponent"). A negative exponent makes ``**`` return a float despite the
    # int annotation — confirm callers only pass non-negative exponents.
    return base**exponent


tools = [DuckDuckGoSearchRun(), multiply, add, exponentiate]


def build_retriever_tools():
    """
    Scrape the LangSmith docs overview page, index it into a Chroma vector
    store, wrap the store's retriever as a LangChain tool, and append that
    tool to the module-level ``tools`` list.

    :return: the created retriever tool (also appended to ``tools``), so
        callers can use it directly instead of digging through the list.
    """
    # SECURITY: verify_ssl=False disables TLS certificate verification and
    # exposes the request to man-in-the-middle attacks — keep only if the
    # target host's certificate chain is genuinely broken.
    loader = WebBaseLoader("https://docs.smith.langchain.com/overview",
                           verify_ssl=False)
    docs = loader.load()
    # Overlapping chunks preserve context across split boundaries.
    splitter_docs = RecursiveCharacterTextSplitter(
        chunk_size=1000, chunk_overlap=200).split_documents(docs)
    vector = Chroma.from_documents(splitter_docs,
                                   embedding_model,
                                   collection_name="langchain")
    retriever = vector.as_retriever(search_type="similarity")
    retriever_tool = create_retriever_tool(
        retriever,
        "langsmith_search",  # tool name exposed to the agent
        "搜索有关LangSmith的信息。关于LangSmith的任何问题，您都可以使用这个工具")  # LLM-facing description (runtime string, kept verbatim)
    tools.append(retriever_tool)
    return retriever_tool


def invoke_llm_with_tools(query=None):
    """
    Ask the LLM a question, letting it combine its own knowledge with the
    OpenAI-function descriptions of the registered ``tools``.

    :param query: user question, sent as a single HumanMessage.
    :return: dict with the model's text answer and any tool calls it made.
    """
    # Loop variable renamed from ``tool`` to ``t``: the old name shadowed the
    # imported ``tool`` decorator inside the comprehension, which was
    # confusing and error-prone.
    # NOTE(review): format_tool_to_openai_function is deprecated in newer
    # LangChain releases (convert_to_openai_function replaces it) — confirm
    # the pinned version still exposes it.
    model_with_tools = master.llm.bind(
        functions=[format_tool_to_openai_function(t) for t in tools])
    response = model_with_tools.invoke([HumanMessage(content=query)])
    return {
        "content_string": response.content,
        "tool_calls": response.tool_calls
    }


def create_agent_executor():
    """
    Build an AgentExecutor wired with the shared LLM, the canonical
    OpenAI-functions prompt from LangChain Hub, and the module tool list.
    """
    # Pull the community-maintained OpenAI-functions agent prompt.
    functions_prompt = hub.pull("hwchase17/openai-functions-agent")
    tool_agent = create_tool_calling_agent(master.llm, functions_prompt, tools)
    return AgentExecutor(agent=tool_agent, tools=tools)


def query_with_agent(query=None):
    """
    Run ``query`` through the tool-calling agent and print the full result.

    :param query: user question forwarded to the agent executor.
    :return: the executor's result dict (previously the value was printed
        and then discarded; returning it is backward compatible).
    """
    res = create_agent_executor().invoke({"input": query})
    print('=======>>>>>>', res)
    return res


if __name__ == '__main__':
    # Demo entry point: register the retriever tool, show the tool registry,
    # then ask the agent a LangSmith question ("tell me about LangSmith");
    # query_with_agent prints the answer.
    build_retriever_tools()
    print(tools)
    # result=invoke_llm_with_tools("2024年奥运会是哪个国家举办的")
    # print('--->',result)
    query_with_agent('告诉我关于 LangSmith 的相关信息')
