import os
from dotenv import load_dotenv, find_dotenv  # find_dotenv helps locate the .env file
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

# Load environment variables from the .env file (verbose/override for easier debugging).
load_dotenv(dotenv_path=find_dotenv(usecwd=True), verbose=True, override=True)

# Read the API key and base URL from the environment.
api_key = os.getenv("OPENAI_API_KEY")
api_base = os.getenv("OPENAI_API_BASE")

# Fail fast with a clear message when the key is missing, instead of letting
# `os.environ[...] = None` raise a cryptic TypeError further down.
if not api_key:
    raise RuntimeError(
        "OPENAI_API_KEY is not set; add it to your .env file or environment"
    )
os.environ["OPENAI_API_KEY"] = api_key

# The base URL is optional — only export it when it is actually configured.
if api_base:
    os.environ["OPENAI_API_BASE"] = api_base

# Build the LLM client and a prompt template, then compose them with LCEL.
model = ChatOpenAI(model="gpt-3.5-turbo")
prompt = ChatPromptTemplate.from_template("给我讲一个关于{topic}的笑话")
chain = prompt | model

# Streaming output example:
# for s in chain.stream({"topic": "熊"}):
#     print(s.content, end="", flush=True)

# Run the same task for several inputs in one batch call.
responses = chain.batch([{"topic": "熊"}, {"topic": "猫"}])
texts = []
for message in responses:
    texts.append(message.content)
print("\n\n".join(texts))

# `max_concurrency` caps the number of parallel requests:
# chain.batch([{"topic": "熊"}, {"topic": "猫"}, {"topic": "狗"}], config={"max_concurrency": 5})