import asyncio
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.ui import Console
from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor
from autogen_ext.tools.code_execution import PythonCodeExecutionTool

from autogen_ext.models.ollama import OllamaChatCompletionClient


async def main() -> None:
    """Run a code-executing assistant agent against a locally hosted Ollama model.

    Builds a Python code-execution tool (working dir ``coding``), connects to an
    Ollama server, and streams the agent's progress on a stock-plotting task to
    the console.
    """
    # Tool that lets the agent run generated Python code in the ./coding directory.
    tool = PythonCodeExecutionTool(LocalCommandLineCodeExecutor(work_dir="coding"))

    model_client = OllamaChatCompletionClient(
        model="hhao/qwen2.5-coder-tools:latest",
        host="http://192.168.99.142:11434",
        # model_info is required for models autogen does not recognize; keys must
        # match autogen's ModelInfo schema exactly or the flags are ignored.
        model_info={
            "vision": False,
            "function_calling": True,
            "json_output": True,
            "family": "unknown",          # fixed: was "unknow" (typo)
            "structured_output": True,    # fixed: was "structed_output" (wrong key)
        },
    )
    try:
        agent = AssistantAgent(
            "assistant",
            model_client,
            tools=[tool],
            reflect_on_tool_use=True,  # model summarizes tool output instead of returning it raw
        )
        await Console(
            agent.run_stream(
                task="Create a plot of MSFT stock prices in 2024 and save it to a file. Use yfinance and matplotlib."
            )
        )
    finally:
        # Release the HTTP connection to the Ollama server even if the run fails.
        await model_client.close()


if __name__ == "__main__":
    # Guard the entry point so importing this module does not trigger the
    # network-dependent agent run as a side effect.
    asyncio.run(main())