import asyncio
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import MagenticOneGroupChat
from autogen_agentchat.ui import Console

from autogen_ext.models.ollama import OllamaChatCompletionClient

async def main() -> None:
    """Run a single MagenticOneGroupChat task against a local Ollama model.

    Builds an Ollama-backed chat-completion client, wraps one AssistantAgent
    in a MagenticOneGroupChat team, streams the task's progress to the
    console, and closes the client when done.
    """
    model_client = OllamaChatCompletionClient(
        model="hhao/qwen2.5-coder-tools:latest",
        host="http://192.168.99.142:11434",
        # The model is not in autogen's built-in registry, so its
        # capabilities must be declared explicitly via model_info.
        model_info={
            "vision": False,
            "function_calling": True,
            "json_output": True,
            # Fixed typo: was "unknow" — must match ModelFamily.UNKNOWN.
            "family": "unknown",
            # Fixed key typo: was "structed_output", which ModelInfo ignores,
            # leaving structured-output support undeclared.
            "structured_output": True,
        },
    )

    assistant = AssistantAgent(
        "Assistant",
        model_client=model_client,
    )
    team = MagenticOneGroupChat([assistant], model_client=model_client)
    try:
        # NOTE(review): task text is user-facing input to the team and is
        # kept byte-for-byte; it asks the team to browse a university
        # homepage and answer a question.
        await Console(team.run_stream(task="通过浏览重庆大学的主页，回答重庆大学现任小组长是谁."))
    finally:
        # Ensure the HTTP client is released even if the run fails.
        await model_client.close()


# Guard the entry point so importing this module does not trigger a run.
if __name__ == "__main__":
    asyncio.run(main())
