import os
from langchain.chat_models import init_chat_model
from langchain_core.messages import HumanMessage, SystemMessage

# Read the DeepSeek API key from the environment.
api_key = os.getenv("DS_API_KEY")
if not api_key:
    # Fail fast: the original `str(key)` silently turned a missing key (None)
    # into the literal string "None", which was then sent as a bogus credential
    # and only failed later with an opaque authentication error.
    raise RuntimeError("DS_API_KEY environment variable is not set")

# Initialize a chat model pointed at DeepSeek's OpenAI-compatible endpoint.
model = init_chat_model(
    model="deepseek-chat",
    base_url="https://api.deepseek.com",
    api_key=api_key,
)

# Note: HumanMessage and SystemMessage come from the langchain_core package,
# not from langchain itself.
messages = [
    SystemMessage(content="Translate the following from English into Italian"),
    HumanMessage(content="hi!"),
]

# A chat model accepts several equivalent input forms:
# a plain string acts like a single-turn LLM prompt (seq-to-seq),
# while role-tagged messages drive the chat interface.
response1 = model.invoke(messages)  # explicit message objects
response2 = model.invoke("Hello")  # bare string, no role attached
response3 = model.invoke([{"role": "user", "content": "Hello"}])  # OpenAI-style role dict
response4 = model.invoke([HumanMessage(content="Hello")])  # single message object

# Stream the reply incrementally; "|" marks each chunk boundary as it arrives.
for chunk in model.stream(messages):
    print(chunk.content, end="|")