from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient
# OpenAI-compatible chat client pointed at a local Ollama server.
# The api_key is a placeholder: Ollama's OpenAI-compatible endpoint ignores
# authentication, but the client requires a non-empty value.
model_client = OpenAIChatCompletionClient(
    model="qwen2.5:7b",
    # Ollama-served models are not in autogen's built-in model registry, so
    # their capabilities must be declared explicitly via model_info.
    model_info={
        "vision": False,
        "function_calling": True,
        # Fixed: the configured model is qwen2.5, not Qwen3. For a model
        # outside the built-in registry, "unknown" is the safe family
        # declaration (was inconsistently set to "Qwen3").
        "family": "unknown",
        "structured_output": True,
        "json_output": True,
    },
    api_key="ollama",  # placeholder; ignored by Ollama
    base_url="http://127.0.0.1:11434/v1",
)


# Define a simple function tool that an agent can use.
# In this example, we use a simulated weather tool for demonstration.
async def get_weather(city: str) -> str:
    """Return a canned weather report for *city* (simulated tool, no real I/O)."""
    template = "The weather in {} is 73 degrees and Sunny."
    return template.format(city)


# Define an AssistantAgent with the model, tool, system message, and reflection enabled.
# The system message instructs the agent via natural language.
# Assistant agent wired to the simulated weather tool.
agent = AssistantAgent(
    name="weather_agent",
    model_client=model_client,
    system_message="You are a helpful assistant.",
    tools=[get_weather],           # the agent may call get_weather as a tool
    model_client_stream=True,      # stream tokens from the model client
    reflect_on_tool_use=True,      # summarize tool output before replying
)


# Run the agent and stream the messages to the console.
async def main() -> None:
    """Ask the agent a sample question and stream the exchange to the console."""
    stream = agent.run_stream(task="What is the weather in New York?")
    await Console(stream)


# NOTE: if running this inside a Python script you'll need to use asyncio.run(main()).
if __name__ == "__main__":
    # Script entry point: asyncio.run() creates the event loop, drives
    # main() to completion, and tears the loop down.
    import asyncio

    asyncio.run(main())