from langchain_ollama import ChatOllama
from langchain_openai import ChatOpenAI
from openaiConfigurations import openai_api_key, openai_api_base

# Primary model: an OpenAI-compatible endpoint (short context window).
short_llm = ChatOpenAI(api_key=openai_api_key, base_url=openai_api_base)
# Fallback model: a local Ollama deepseek-r1:7b instance (longer context).
long_llm = ChatOllama(model="deepseek-r1:7b")
# Runnable that tries short_llm first and, on error, retries with long_llm.
llm = short_llm.with_fallbacks([long_llm])

# Build an oversized prompt intended to exceed the primary model's
# context limit so the fallback path gets exercised.
prompt_prefix = "下一个数字是："
filler = "，".join(["one", "two"] * 3000)
inputs = prompt_prefix + filler

# First, try the short-context model on its own (kept for reference):
# try:
#     print(short_llm.invoke(inputs))
# except Exception as e:
#     print(e)

# Exercise the fallback chain: the oversized prompt should overflow the
# primary model and fall back to the long-context model.
try:
    response = llm.invoke(inputs)
    print(response)
except Exception as err:
    print(err)