from g4f.client import Client
from g4f.Provider import BingCreateImages, OpenaiChat, Gemini,DeepSeek

# Shared g4f client used for every chat-completion call below.
# NOTE(review): no provider is pinned here, so g4f picks one at call time;
# uncomment `provider=DeepSeek` to force the DeepSeek backend — confirm intent.
client = Client(
    # provider=DeepSeek,
    # image_provider=DeepSeek,
    # Add any other necessary parameters
)

# stream = client.chat.completions.create(
#     model="deepseek-chat",
#     messages=[
#         {"role": "system", "content": "你会以蔡徐坤的语气回答我"},
#         {"role": "user", "content": "你好，你是谁？你的特长是什么呢？"},
#     ],
#     stream=True,
# )



# for chunk in stream:
#     if chunk.choices[0].delta.content:
#         print(chunk.choices[0].delta.content or "", end="")




# Round 1
def _stream_and_collect(response):
    """Print a streamed chat response to stdout and return the full text.

    Args:
        response: iterable of OpenAI-style stream chunks, each exposing
            ``chunk.choices[0].delta.content`` (``None`` or empty for
            keep-alive chunks, which are skipped).

    Returns:
        str: the concatenation of all non-empty content deltas, in order.
    """
    parts = []
    for chunk in response:
        delta = chunk.choices[0].delta.content
        if delta:
            print(delta, end="")
            parts.append(delta)
    # Terminate the streamed line so any following output starts cleanly.
    print()
    # join() instead of repeated += : linear, not quadratic, in total length.
    return "".join(parts)


# Round 1: seed the conversation and stream the first reply.
messages = [
    {"role": "system", "content": "你会以蔡徐坤的语气回答我"},
    {"role": "user", "content": "你好，你是谁？你的特长是什么呢？"},
]
response = client.chat.completions.create(
    model="deepseek-chat",
    messages=messages,
    stream=True,
)
# Record the assistant's reply so Round 2 carries the full history.
messages.append({"role": "assistant", "content": _stream_and_collect(response)})

# Round 2: follow-up question on the accumulated history.
messages.append({"role": "user", "content": "你会打篮球吗？"})
response = client.chat.completions.create(
    model="deepseek-chat",
    messages=messages,
    stream=True,
)
messages.append({"role": "assistant", "content": _stream_and_collect(response)})
print(f"Messages Round 2: {messages}")