import os
from dotenv import load_dotenv, find_dotenv # 导入 find_dotenv 帮助定位
from langchain_openai.chat_models import ChatOpenAI
from langchain_community.callbacks.manager import get_openai_callback

# Load environment variables from a .env file (with extra debugging aids):
# find_dotenv(usecwd=True) searches upward starting from the current working
# directory, verbose=True logs the lookup result, and override=True lets
# values from .env replace variables already set in the process environment.
load_dotenv(dotenv_path=find_dotenv(usecwd=True), verbose=True, override=True)

# Read the API credentials and default model name from the environment.
# Each of these is None if the corresponding variable is not set.
api_key = os.getenv("OPENAI_API_KEY")
base_url = os.getenv("OPENAI_BASE_URL")
model = os.getenv("DEFAULT_MODEL")

# Build the chat model client.
# Fix: the model name was hard-coded to "gpt-3.5-turbo-instruct" — a legacy
# *completions* model that is not valid for the chat-completions endpoint
# ChatOpenAI targets — and the DEFAULT_MODEL value read from the environment
# above was silently ignored. Prefer the configured model, falling back to a
# real chat model when DEFAULT_MODEL is unset.
llm = ChatOpenAI(
    model=model or "gpt-3.5-turbo",  # DEFAULT_MODEL from .env, with a chat-model fallback
    temperature=0,  # deterministic output
    openai_api_key=api_key,
    openai_api_base=base_url,
    max_tokens=512,  # cap the response length
)

# Stream the response and print each chunk as it arrives.
# Fix: flush=True is required for incremental display — with flush=False the
# text sits in the stdout buffer until the stream finishes, which defeats the
# purpose of streaming.
for chunk in llm.stream("写一首关于秋天的诗歌"):
    print(chunk.content, end="", flush=True)
print()  # terminate the streamed line with a newline

# Monitor token consumption (uncomment to use):
# with get_openai_callback() as count:
#     print(llm.invoke("写一首关于春天的诗歌").content)
#     print("\n\n",count)

