import os

from llama_index.core.agent import ReActAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.deepseek import DeepSeek
from llama_index.llms.openai import OpenAI
from llama_index.llms.openllm import OpenLLM
from llama_index.core.agent import AgentRunner


def add(x: int, y: int) -> int:
    """Return the sum of the two integers *x* and *y*."""
    total = x + y
    return total


def mystery(x: int, y: int) -> int:
    """Mystery function that operates on top of two numbers."""
    # Equivalent to (x + y) ** 2, spelled out via an intermediate sum.
    s = x + y
    return s * s


# Wrap the plain Python functions as llama_index tools so an LLM can call them.
add_tool = FunctionTool.from_defaults(fn=add)
mystery_tool = FunctionTool.from_defaults(fn=mystery)

# Primary LLM used below for both direct tool-calling and the ReAct agent.
# NOTE(review): OpenAI() reads OPENAI_API_KEY from the environment — confirm it is set.
llm = OpenAI(model="gpt-3.5-turbo")
# Alternative DeepSeek backends; constructed here but not used in the calls
# below — presumably kept for experimentation/swapping in place of `llm`.
llm_v1 = DeepSeek(model="deepseek-chat")
llm_r1 = DeepSeek(model="deepseek-reasoner")
llm_v2 = OpenLLM(
    model="deepseek-chat", api_base="https://api.deepseek.com/beta", api_key=os.environ.get("DEEPSEEK_API_KEY")
)
# Direct (single-shot) tool calling: the LLM picks a tool from the list and
# invokes it to answer the prompt. This performs a network call.
response = llm.predict_and_call(
    [add_tool, mystery_tool],
    "Tell me the output of the mystery function on 2 and 9",
    # "Tell me 2 add 5 mystery 3",
    verbose=True
)
print(str(response))

# Use the ReAct agent.
# initialize ReAct agent
# Multi-step reasoning: the agent can chain several tool calls
# (here: add, then mystery) to resolve the compound prompt.
agent = ReActAgent.from_tools([add_tool, mystery_tool], llm=llm, verbose=True)
response = agent.chat("Tell me 2 add 5 mystery 3")
print(str(response))
