import os

from dotenv import load_dotenv
from langchain_deepseek import ChatDeepSeek

# Load environment variables from .env (override any pre-existing values),
# then read the DeepSeek API key.
load_dotenv(override=True)
deepseek_api_key = os.getenv("DEEPSEEK_API_KEY")

# Fail fast with a clear message instead of deferring a confusing
# authentication error to the first model request.
if not deepseek_api_key:
    raise RuntimeError(
        "DEEPSEEK_API_KEY is not set; add it to your environment or .env file"
    )

# Initialize the DeepSeek chat model.
# temperature=0 for deterministic answers; max_tokens/timeout left as
# provider defaults; retry transient failures up to 2 times.
model = ChatDeepSeek(
    model="deepseek-chat",
    temperature=0,
    max_tokens=None,
    timeout=None,
    max_retries=2,
    api_key=deepseek_api_key,
)

# Questions to send to the model.
questions = [
    "什么是LangChain？",
    "Python的生成器是做什么的？",
    "解释一下Docker和Kubernetes的关系"
]
# Invoke the model on all questions in one batch call.
responses = model.batch(questions)
for q, r in zip(questions, responses):
    # batch() returns AIMessage objects; print the answer text (.content)
    # rather than the full message repr. getattr keeps this robust if a
    # plain string is ever returned.
    print(f"问题：{q}\n回答：{getattr(r, 'content', r)}\n")
