from langchain_core.rate_limiters import InMemoryRateLimiter
from langchain.chat_models import init_chat_model
import time
import os
# Throttle outbound requests to roughly one every 10 seconds
# (requests_per_second=0.1); waiters poll for an available token
# every 100 ms instead of busy-spinning.
rate_limiter = InMemoryRateLimiter(
    max_bucket_size=10,  # maximum burst size before throttling kicks in
    requests_per_second=0.1,  # refill rate: one token per 10 seconds
    check_every_n_seconds=0.1,  # polling interval while waiting for a token
)

# Fail fast if the key is missing: str(os.getenv(...)) would otherwise
# silently send the literal string "None" as the API key.
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    raise RuntimeError("OPENAI_API_KEY environment variable is not set")

llm = init_chat_model(
    model="gpt-4o-mini",
    model_provider="openai",
    base_url="https://api.zetatechs.com/v1",
    api_key=api_key,
    # BUG FIX: the limiter was constructed but never attached to the model,
    # so invoke() was not throttled at all. It must be passed here for
    # invoke() to block until a token is available.
    rate_limiter=rate_limiter,
)

# Time repeated calls: the first calls may drain the burst bucket quickly,
# after which each invoke() waits ~10 s for the limiter to refill.
for _ in range(5):
    tic = time.time()
    llm.invoke("hello")  # same prompt each time; the delay comes from the rate limiter
    toc = time.time()
    print(toc - tic)