import os
from openai import OpenAI
from typing import List, Dict

# Spark AI OpenAI-compatible endpoint.
url = 'https://spark-api-open.xf-yun.com/v2'
# APIPassword: prefer the environment variable; fall back to the hard-coded
# key only for local development.
# NOTE(review): a real credential committed in source is a security risk —
# rotate this key and rely solely on SPARKAI_PASSWORD in shared code.
api_token = os.getenv('SPARKAI_PASSWORD', 'dFfCIVONqOBluwQPJYpn:GYZtZstxLfEfixpIuupd')
# Demo question sent to the model.
question = '请介绍以下datawhale这个开源社区'
# OpenAI-compatible client pointed at the Spark endpoint
# (reuse `url` instead of duplicating the literal).
client = OpenAI(api_key=api_token, base_url=url)

def chat(messages: List[Dict]) -> str:
    """Stream a chat completion from the Spark model, printing it live.

    Reasoning ("chain of thought") fragments are printed as they arrive;
    once the first answer fragment appears, a separator banner is printed
    and the answer fragments follow.

    Args:
        messages: OpenAI-style chat messages, e.g.
            ``[{"role": "user", "content": "..."}]``.

    Returns:
        The concatenated final answer text. Reasoning fragments are printed
        but NOT included in the returned string.
    """
    # Call the Spark LLM with streaming enabled.
    response = client.chat.completions.create(
        model="x1",     # fixed model name for this endpoint
        messages=messages,
        stream=True,    # receive the reply chunk by chunk
    )
    full_response = ''        # accumulates the final answer text
    is_first_content = True   # prints the separator banner exactly once
    for chunk in response:
        # Hoist the repeated attribute chain — the original evaluated
        # `chunk.choices[0].delta` up to four times per chunk.
        delta = chunk.choices[0].delta
        # Reasoning fragment: print in real time, do not accumulate.
        # getattr(..., None) replaces the hasattr + second-access pattern.
        reasoning_content = getattr(delta, 'reasoning_content', None)
        if reasoning_content is not None:
            print(reasoning_content, end="", flush=True)
        # Final answer fragment: print in real time and accumulate.
        content = getattr(delta, 'content', None)
        if content is not None:
            if is_first_content:
                is_first_content = False
                print("\n*******************以上为思维链内容，模型回复内容如下********************\n")
            print(content, end="", flush=True)
            full_response += content
    return full_response

def main() -> None:
    """Send the demo question to the model and stream the reply."""
    messages = [
        {"role": "user", "content": question}
    ]
    chat(messages)


# Guard the entry point so importing this module does not fire a network
# call as a side effect (the original ran unconditionally at import time).
if __name__ == "__main__":
    main()