from openai import OpenAI

from llm.SystemConfig import SystemConfig


def get_response(user_msg):
    """Stream a chat completion for user_msg, yielding each chunk as a JSON string."""
    config = SystemConfig()
    client = OpenAI(
        api_key=config.LJ_API_KEY,  # replace with your API key here if no environment variable is configured
        base_url=config.LJ_BASE_RUL,  # base_url of the DashScope service
    )
    completion = client.chat.completions.create(
        model=config.LJ_MODULE_NAME,
        messages=[
            {'role': 'system', 'content': 'You are a helpful assistant.'},
            {'role': 'user', 'content': user_msg},
        ],
        temperature=0.8,
        top_p=0.8,
        stream=True,
        # Optional: when enabled, the final streamed chunk reports token usage.
        stream_options={"include_usage": True},
    )

    # Relay each streamed chunk to the caller as a JSON string.
    for chunk in completion:
        yield chunk.model_dump_json()

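
# Minimal usage sketch (an assumption, not part of the original module): consume the
# generator and print the streamed text. Chunk parsing follows the OpenAI streaming
# schema (choices[0].delta.content, with usage on the final chunk when
# include_usage is enabled); SystemConfig is assumed to supply valid credentials.
if __name__ == "__main__":
    import json

    for raw in get_response("Hello, who are you?"):
        data = json.loads(raw)
        if data.get("choices"):
            delta = data["choices"][0].get("delta") or {}
            print(delta.get("content") or "", end="", flush=True)
        elif data.get("usage"):
            # With include_usage enabled, the last chunk carries token counts.
            print("\n[usage]", data["usage"])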