from unittest.mock import patch
from langchain_ollama import ChatOllama
from langchain_openai import ChatOpenAI
from openai import RateLimitError
from openaiConfigurations import openai_api_key, openai_api_base

# Initialize the OpenAI chat model.
# max_retries=0 makes the client fail immediately on a rate-limit response
# instead of retrying, so the fallback below is triggered on the first error.
openai_llm = ChatOpenAI(
    max_retries=0,
    base_url=openai_api_base,
    api_key=openai_api_key,
)

# Initialize the local Ollama model (DeepSeek-R1 7B) used as the backup.
# NOTE(review): assumes an Ollama server is running locally with this model
# pulled — confirm before running.
deep_seek_r1_llm = ChatOllama(model='deepseek-r1:7b')

# Wrap the primary model with a fallback: if the OpenAI call raises, the
# request is retried against the Ollama model instead.
llm = openai_llm.with_fallbacks([deep_seek_r1_llm])

# Side-effect callable that simulates an OpenAI rate-limit failure.
def mock_rate_limit_error(*args, **kwargs):
    """Raise a simulated ``RateLimitError``, ignoring all arguments.

    Intended for use as ``side_effect`` in ``unittest.mock.patch`` so that
    the patched model call appears to hit the OpenAI rate limit.

    Fix: in openai>=1.0 (the layout this file imports from),
    ``RateLimitError`` requires the keyword-only arguments ``response``
    and ``body`` — ``RateLimitError("msg")`` raises ``TypeError`` instead
    of producing the exception, so the fallback demo never worked.

    Raises:
        RateLimitError: always.
    """
    # httpx is a required transitive dependency of the `openai` package,
    # so it is available whenever this file's imports succeed.
    import httpx

    raise RateLimitError(
        "模拟的速率限制错误",
        response=httpx.Response(
            429, request=httpx.Request("POST", "/chat/completions")
        ),
        body=None,
    )

# 使用 patch 模拟 OpenAI API 调用时抛出 RateLimitError 异常
# with patch("langchain.chat_models.openai.ChatOpenAI._generate", side_effect=mock_rate_limit_error):
#     try:
#         print(llm.invoke("你是谁？"))
#     except RateLimitError:
#         print("遇到错误")

# 使用具有回退功能的 llm
from langchain.prompts import ChatPromptTemplate

# Build the chat prompt: a flattering-assistant system persona followed by a
# templated human question about a city ({city} is filled in at invoke time).
system_message = (
    "system",
    "你真是一个贴心的助手，每次回复都会附上赞美之词。",
)
human_message = ("human", "为什么你喜欢{city}")
prompts = ChatPromptTemplate.from_messages([system_message, human_message])

# Pipe the prompt into the fallback-wrapped model to form the runnable chain.
chain = prompts | llm
# Simulate a rate-limit failure on the primary model and show the fallback.
# Fix: the original patch target "langchain.chat_models.openai.ChatOpenAI"
# is the legacy `langchain` package path; this file imports ChatOpenAI from
# `langchain_openai`, so patching the old path never intercepted the real
# call — patch the class where it is actually defined/used.
with patch("langchain_openai.ChatOpenAI._generate", side_effect=mock_rate_limit_error):
    try:
        # The OpenAI call raises RateLimitError, so with_fallbacks routes
        # the request to the Ollama model and its answer is printed.
        print(chain.invoke({"city": "巴中"}))
    except RateLimitError:
        # Only reached if the fallback itself fails to absorb the error.
        print("遇到错误")