# LLM response caching demo (in-memory, SQLite, and global cache)

import langchain.globals
from langchain_core.caches import InMemoryCache
from langchain_community.cache import SQLiteCache
from openaiConfigurations import openai_api_key, openai_api_base
from langchain_openai import ChatOpenAI
import time
from langchain.globals import set_llm_cache
from langchain_ollama import OllamaLLM

# In-memory cache.
# Fix: use the public set_llm_cache() API (imported above) rather than
# assigning to the private langchain.globals._llm_cache attribute, which
# is an implementation detail and may be ignored by newer LangChain versions.
set_llm_cache(InMemoryCache())
llm = ChatOpenAI(openai_api_key=openai_api_key, openai_api_base=openai_api_base)
# First call: goes to the API and populates the cache.
s = time.time()
llm.invoke("Tell me a short story")
print(time.time() - s)
# Second call: identical prompt, answered from the cache (much faster).
s = time.time()
llm.invoke("Tell me a short story")
print(time.time() - s)
# invoke() replaces the deprecated predict(); the return value (an AIMessage
# here, a plain string for predict) is unused — only the timing matters.

# SQLite cache: persists cached responses to .langchain.db across runs.
# Fix: use the public set_llm_cache() API instead of writing to the private
# langchain.globals._llm_cache attribute; use invoke() instead of the
# deprecated predict().
set_llm_cache(SQLiteCache(database_path='.langchain.db'))
# First call with this backend: hits the API and stores the result in SQLite.
s = time.time()
llm.invoke("Tell me a short story")
print(time.time() - s)
# Second call: same prompt, served from the SQLite cache.
s = time.time()
llm.invoke("Tell me a short story")
print(time.time() - s)

# Install a fresh in-memory store as the process-wide LLM cache.
set_llm_cache(InMemoryCache())

# Ask a local Ollama model a question; identical repeated prompts
# will now be answered from the global cache.
llm = OllamaLLM(model="qwen:1.8b")
answer = llm.invoke("中国的首都是哪个城市？")
print(answer)