# Test whether a local Ollama server can be reached through the OpenAI-compatible client

from autogen_ext.models.openai import OpenAIChatCompletionClient

# OpenAI-compatible chat client pointed at a local Ollama server.
# Ollama ignores the API key, but the underlying OpenAI client requires a
# non-empty value, so a placeholder is passed.
openai_model_client = OpenAIChatCompletionClient(
    model="modelscope.cn/unsloth/Qwen3-14B-GGUF:Q5_K_M",
    api_key="ollama",
    base_url="http://127.0.0.1:11434/v1",
    # model_info declares the capabilities of a model autogen does not know
    # about. Only ModelInfo fields belong here — keys such as
    # context_length / max_tokens / stop are not part of ModelInfo and were
    # being silently ignored, so they have been removed.
    model_info={
        "vision": False,
        "function_calling": True,
        "json_output": True,
        # Expected by recent autogen versions; assumes the Ollama-served
        # GGUF build does not support OpenAI structured output — TODO confirm.
        "structured_output": False,
        # The served model is Qwen3, not Llama; use the "unknown" family
        # rather than a mismatched one.
        "family": "unknown",
    },
)


# Model smoke test
async def test_model():
    """Send one user message to the local model and print the raw result.

    Always closes the shared client afterwards so its HTTP connections are
    released even if the request fails.
    """
    from autogen_core.models import UserMessage
    try:
        result = await openai_model_client.create([UserMessage(content="法国的首都在哪里?", source="user")])
        print(result)
    finally:
        # autogen clients expose an async close(); without it the
        # connection pool is leaked until process exit.
        await openai_model_client.close()

if __name__ == "__main__":
    import asyncio

    # Drive the async smoke test from this synchronous entry point.
    asyncio.run(test_model())