import json
import time

from openai import OpenAI

from core.conf import settings

# Connection presets for the OpenAI-compatible endpoints this script can use.
# Each entry bundles the credential, base URL and default model for one vendor.
_TONGYI_CONF = {
    "api_key": settings.TONYI_API_KEY,
    "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
    "model": "qwen-turbo",
}
_DOUBAO_CONF = {
    "api_key": settings.DOUBAO_API_KEY,
    "base_url": "https://ark.cn-beijing.volces.com/api/v3/",
    "model": "ep-20240625030303-h4jxd",
}

llm_confs = {"tongyi": _TONGYI_CONF, "doubao": _DOUBAO_CONF}

# Provider selected for this run; switch the key to "doubao" to use Volcengine Ark.
ai_conf = llm_confs["tongyi"]

# Shared OpenAI-compatible client bound to the provider chosen above.
client = OpenAI(api_key=ai_conf["api_key"], base_url=ai_conf["base_url"])

# Items to be categorised by the model.
# NOTE(review): currently empty — presumably populated upstream before this
# script is run; confirm where the data is meant to come from.
data_to_classify = []

# Example payload embedded in the system prompt so the model replies in
# exactly this JSON shape: {category_name: [{name, desc, url}, ...]}.
prompt_tmp = {
    "分类名称": [
        {
            "name": "网站名称,不要超过十个字",
            "desc": "网站描述",
            "url": "https://www.xxx.com/"
        }
    ]
}

# System prompt: classify the supplied entries by application type and emit
# strict JSON matching ``prompt_tmp``.
# Fixes: ``ensure_ascii=False`` keeps the Chinese keys of the example readable
# instead of serialising them as \uXXXX escapes (which obscures the expected
# format from the model); the f-prefixes on the placeholder-free segments were
# unnecessary and have been dropped.
SYSTEM_PROMPT = (
    "请对以下数据按照应用类型（如编程、办公、学习等）进行分类，分类名称使用中文，分类中不允许为空，并将title提取为name和desc两部分"
    "不要询问，不要回复其他文字，若提供的数据错误请返回空值，我需要将这份数据引入到 python 代码中，因此严格按照 "
    f"{json.dumps(prompt_tmp, ensure_ascii=False)} 格式进行输出为Json"
)

# Fire the classification request.  Streaming is enabled so tokens can be
# echoed as they arrive; stream_options makes the final chunk carry token
# usage statistics (that chunk arrives with an empty ``choices`` list).
_messages = [
    {'role': 'system', 'content': SYSTEM_PROMPT},
    {'role': 'user', 'content': str(data_to_classify)},
]
completion = client.chat.completions.create(
    model=ai_conf["model"],
    messages=_messages,
    stream=True,
    response_format={"type": "json_object"},
    stream_options={"include_usage": True},
)

# Consume the stream: echo content deltas as they arrive, accumulate the full
# reply, then report elapsed time and token usage from the final chunk.
full_content = ""
input_prompt_token = 0
output_prompt_token = 0
total_token = 0
begin_time = time.time()
for chunk in completion:
    if not chunk.choices:
        # Final chunk under stream_options={"include_usage": True}: it has no
        # choices and carries only usage statistics.
        print('\n')
        if chunk.usage is not None:  # guard against providers omitting usage
            input_prompt_token = chunk.usage.prompt_tokens
            output_prompt_token = chunk.usage.completion_tokens
            total_token = chunk.usage.total_tokens
        continue
    # Fix: delta.content is None on role-only/terminal deltas — coerce to ""
    # so the concatenation cannot raise TypeError and we never print "None".
    piece = chunk.choices[0].delta.content or ""
    full_content += piece
    # end="" keeps streamed output contiguous instead of one line per chunk.
    print(piece, end="", flush=True)
end_time = time.time()
print(f"总耗时为：{end_time - begin_time}")
print("输入 prompt 的 token 数量", input_prompt_token)
print("模型输出的 token 数量", output_prompt_token)
print("输入和输出的总的 token 数量", total_token)
print(f"完整内容为：{full_content}")


