import asyncio
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_ext.teams.magentic_one import MagenticOne
from autogen_agentchat.ui import Console

from autogen_ext.models.ollama import OllamaChatCompletionClient
# Chat-completion client for a local Ollama server hosting a tool-capable
# Qwen2.5 coder model.
# NOTE(review): host is a LAN address — confirm it is reachable from wherever
# this script runs.
model_client = OllamaChatCompletionClient(
    model="hhao/qwen2.5-coder-tools:latest",
    host="http://192.168.99.142:11434",
    # model_info declares capabilities autogen cannot infer for custom Ollama
    # models; MagenticOne requires a function-calling-capable client.
    # Fixed key typos: "unknow" -> "unknown" (ModelFamily.UNKNOWN) and
    # "structed_output" -> "structured_output" (the ModelInfo field name) —
    # the misspelled keys would not be recognized by the library.
    model_info={
        "vision": False,
        "function_calling": True,
        "json_output": True,
        "family": "unknown",
        "structured_output": True,
    },
)

async def example_usage():
    """Run one MagenticOne task and stream its progress to the console."""
    team = MagenticOne(client=model_client)
    prompt = "Write a Python script to fetch data from an API."
    # Console consumes the team's streamed events and returns the final result.
    outcome = await Console(team.run_stream(task=prompt))
    print(outcome)


if __name__ == "__main__":
    asyncio.run(example_usage())
