
import asyncio

from autogen_core.models import UserMessage
from autogen_ext.models.ollama import OllamaChatCompletionClient


# Assumes an Ollama server is reachable at the host below (11434 is Ollama's
# default port); point host at http://localhost:11434 for a local server.
async def main():

    ollama_model_client = OllamaChatCompletionClient(
        model="hhao/qwen2.5-coder-tools:latest",
        # model="llama3.1:latest",
        host="http://192.168.99.142:11434",
        # model_info is required for models autogen does not recognize; it
        # declares the capabilities the model supports.
        model_info={
            "vision": False,
            "function_calling": True,
            "json_output": True,
            "family": "unknown",
            "structured_output": True,
        },
    )

    response = await ollama_model_client.create(
        [UserMessage(content="What is the capital of France?", source="user")]
    )
    print(response.content)
    await ollama_model_client.close()


asyncio.run(main())
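

# A minimal streaming sketch, assuming the same Ollama server and model as in
# main() above. create_stream() yields incremental string chunks followed by a
# final CreateResult, so tokens can be printed as they arrive.
async def stream_example():
    client = OllamaChatCompletionClient(
        model="hhao/qwen2.5-coder-tools:latest",
        host="http://192.168.99.142:11434",
        model_info={
            "vision": False,
            "function_calling": True,
            "json_output": True,
            "family": "unknown",
            "structured_output": True,
        },
    )
    async for chunk in client.create_stream(
        [UserMessage(content="What is the capital of France?", source="user")]
    ):
        if isinstance(chunk, str):  # skip the final CreateResult object
            print(chunk, end="", flush=True)
    print()
    await client.close()

# To try streaming instead of main(), run: asyncio.run(stream_example())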
"""
from autogen_ext.models.ollama import OllamaChatCompletionClient
model_client = OllamaChatCompletionClient(
        model="hhao/qwen2.5-coder-tools:latest",
        #model="llama3.1:latest",
        host="http://192.168.99.142:11434", 
         model_info={
        "vision": False,
        "function_calling": True,
        "json_output": True,
        "family": "unknow",
        "structed_output":True,
        },
    )
"""