import os
from time import time

from langchain.globals import set_llm_cache
from langchain_core.caches import InMemoryCache
from langchain_openai import ChatOpenAI

# Configuration: credentials, response cache, and the chat model.
#
# SECURITY NOTE(review): an API key is committed in source here. It should be
# treated as compromised — rotate it and load it from the environment or a
# secrets manager instead. `setdefault` at least lets an externally supplied
# OPENAI_API_KEY / OPENAI_API_BASE take precedence over the fallback values.
os.environ.setdefault("OPENAI_API_KEY", "sk-f5324346ba744ef89eda093af8f307c7")
os.environ.setdefault("OPENAI_API_BASE", "https://dashscope.aliyuncs.com/compatible-mode/v1")

# Cache LLM responses in process memory: a repeated identical prompt is served
# from the cache instead of making a second API round-trip (this is what the
# timing comparison below demonstrates).
set_llm_cache(InMemoryCache())
model = ChatOpenAI(model="deepseek-r1")

def _timed_invoke(prompt: str, label: str) -> None:
    """Invoke the model with *prompt*, printing the reply and wall-clock time.

    The elapsed time is printed prefixed with *label* so the two calls below
    can be compared: the second identical prompt should hit the in-memory
    cache and return almost instantly.
    """
    start_time = time()
    response = model.invoke(prompt)
    elapsed = time() - start_time
    print(response.content)
    print(f"{label}: {elapsed}秒")


# First call goes to the API; second identical call is served from the cache.
_timed_invoke("给我讲个一句话笑话", "第一次调用耗时")
_timed_invoke("给我讲个一句话笑话", "第二次调用耗时")