from llama_stack_client import LlamaStackClient
from llama_stack_client.types import UserMessage

from ApiBase import apiBase

# Point the client at the server URL read from the project's config
# via the ApiBase helper.
client = LlamaStackClient(
    base_url=apiBase.prop_read("OPENAI_API_BASE"),
)

# Send a single-turn chat completion; with stream=False the full response
# is returned as one object.
response = client.inference.chat_completion(
    messages=[
        UserMessage(
            content="hello world",
            role="user",
        ),
    ],
    model=apiBase.prop_read("OPENAI_MODEL"),
    stream=False,
)
# The generated text lives under response.completion_message.content.
print(response)
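
# A minimal streaming sketch of the same request. Assumption: with
# stream=True, llama-stack-client yields chunks whose event.delta carries
# the incremental text, either as a plain string or wrapped in a TextDelta
# object depending on the client version; getattr covers both cases.
stream = client.inference.chat_completion(
    messages=[
        UserMessage(
            content="hello world",
            role="user",
        ),
    ],
    model=apiBase.prop_read("OPENAI_MODEL"),
    stream=True,
)
for chunk in stream:
    delta = chunk.event.delta
    # Newer client versions wrap the text in a TextDelta object.
    print(getattr(delta, "text", delta), end="", flush=True)
print()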
