import asyncio
import os

from llama_index.core.agent import ReActAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.deepseek import DeepSeek
from llama_index.llms.openai import OpenAI
from llama_index.llms.openllm import OpenLLM
from llama_index.core.agent import AgentRunner
from llama_index.llms.ollama import Ollama


def add(x: int, y: int) -> int:
    """Return the sum of *x* and *y*."""
    total = x + y
    return total


def mystery(x: int, y: int) -> int:
    """Mystery function: returns the square of the sum of *x* and *y*."""
    s = x + y
    return s * s


# Wrap the plain Python functions as llama-index tools so an agent can call them.
add_tool = FunctionTool.from_defaults(fn=add)
mystery_tool = FunctionTool.from_defaults(fn=mystery)

# Candidate LLM backends; only `llm` (OpenAI) is actually passed to the agent below.
# NOTE(review): each client needs its own credentials or a running local server
# (OPENAI_API_KEY, DEEPSEEK_API_KEY, a local Ollama daemon) — confirm before running.
llm = OpenAI(model="gpt-3.5-turbo")
llm_v1 = DeepSeek(model="deepseek-chat")
llm_r1 = DeepSeek(model="deepseek-reasoner")
# DeepSeek accessed through the OpenAI-compatible beta endpoint via OpenLLM.
llm_v2 = OpenLLM(
    model="deepseek-chat", api_base="https://api.deepseek.com/beta", api_key=os.environ.get("DEEPSEEK_API_KEY")
)
llm_v3 = Ollama(model="qwen2.5:7b-instruct-q4_0", request_timeout=120.0)
# Use a ReAct agent explicitly:
# initialize ReAct agent
# agent = ReActAgent.from_tools([add_tool, mystery_tool], llm=llm, verbose=True)
# response = agent.chat("Tell me 2 add 5 mystery 3")
# print(str(response))

# Let llama-index auto-select the most suitable agent implementation for this LLM
# (e.g. a function-calling agent for OpenAI models, ReAct otherwise).
agent = AgentRunner.from_llm([add_tool, mystery_tool], llm=llm, verbose=True)
print(agent)


# response = agent.chat("Tell me 2 add 5 mystery 3")
# print(str(response))

async def stream_chat():
    """Stream the agent's answer token-by-token to stdout.

    Fix: the original called the synchronous ``stream_chat()`` and then
    iterated ``response.async_response_gen()``. The async token queue is
    only populated by ``astream_chat``, so that loop could hang or yield
    nothing. Use the awaitable ``astream_chat`` instead, and drop the
    unused ``response_gen`` local.
    """
    agent = AgentRunner.from_llm([add_tool, mystery_tool], llm=llm, verbose=True)
    # astream_chat is a coroutine; awaiting it returns a streaming response
    # whose async generator is actively fed with tokens.
    response = await agent.astream_chat("Tell me 2 add 5 mystery 3")
    async for token in response.async_response_gen():
        print(token, end="")


asyncio.run(stream_chat())
