#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: '习惯'
# @Email: 'songbing513@gmail.com'
# @Date: '2025/9/4 21:57'
# @Project: 'McpAgent'
# @File: '04.py'
# @Software: 'PyCharm'


import os
import time

from langchain_anthropic import ChatAnthropic
from langchain_core.rate_limiters import InMemoryRateLimiter
from langchain_openai import ChatOpenAI


# Token-bucket rate limiter shared by the chat model below: callers block
# until a token is available, capping sustained throughput at 1 request/s.
rate_limiter = InMemoryRateLimiter(
    requests_per_second=1, # sustained rate: one request per second
    check_every_n_seconds=0.1, # poll every 100 ms for an available token
    max_bucket_size=10, # maximum burst size before throttling kicks in
)
# Define the model.
# NOTE: ChatAnthropic is LangChain's wrapper for Anthropic models; DeepSeek
# exposes an OpenAI-compatible API, so ChatOpenAI must be used instead.
#
# SECURITY: the API key must never be hard-coded in source. Read it from the
# environment; export DEEPSEEK_API_KEY before running this script.
model = ChatOpenAI(
    model="deepseek-chat",
    temperature=0,  # deterministic output for this demo
    base_url="https://api.deepseek.com/v1",
    api_key=os.environ["DEEPSEEK_API_KEY"],  # fail fast if unset
    rate_limiter=rate_limiter,  # key point: attach the shared rate limiter
)

# Time each request to observe the rate limiter pacing the calls: after the
# initial token-bucket burst is spent, each invoke should block roughly 1 s.
for _ in range(10):
    # perf_counter is monotonic and high-resolution — the correct clock for
    # measuring elapsed intervals (time.time can jump with wall-clock changes).
    start = time.perf_counter()
    model.invoke("你叫什么名字")
    elapsed = time.perf_counter() - start
    print(f"请求时间: {elapsed:.2f} 秒")