from llm import getAiProxy, MODEL_TYPE


def generateText(question: str):
    """Send *question* as a single user message to the configured chat model.

    Returns the full completion object serialized to a JSON string
    (via pydantic's ``model_dump_json``), not just the reply text.
    """
    client = getAiProxy()
    response = client.chat.completions.create(
        model=MODEL_TYPE,
        messages=[{"role": "user", "content": question}],
    )
    return response.model_dump_json()
