# LangChain's async API is built on top of asyncio

import time
import asyncio
from langchain_openai import ChatOpenAI
from openaiConfigurations import openai_api_key, openai_api_base

def generate_serially():
    """Issue 10 identical chat requests one after another, printing each reply.

    Serves as the sequential baseline for the async-vs-sync timing comparison.
    """
    llm = ChatOpenAI(
        temperature=0.9,
        openai_api_key=openai_api_key,
        openai_api_base=openai_api_base,
    )
    completed = 0
    while completed < 10:
        response = llm.generate(["Hello, how are you?"])
        # First candidate of the first (only) prompt in the batch.
        print(response.generations[0][0].text)
        completed += 1

async def async_generate(llm):
    """Send one chat request on *llm* asynchronously and print the reply text.

    Args:
        llm: a chat model exposing an awaitable ``agenerate`` method.
    """
    response = await llm.agenerate(["Hello, how are you?"])
    first_candidate = response.generations[0][0]
    print(first_candidate.text)

async def generate_cuccurently():
    """Run 10 chat requests concurrently on one shared client.

    ``asyncio.gather`` schedules all coroutines at once so their network
    waits overlap, which is where the speedup over the serial version
    comes from.

    NOTE(review): the function name contains a typo ("cuccurently"); it is
    kept as-is for backward compatibility with existing callers.
    """
    llm = ChatOpenAI(temperature=0.9, openai_api_key=openai_api_key, openai_api_base=openai_api_base)
    tasks = [async_generate(llm) for _ in range(10)]
    # Fix: the original bound the gathered results to an unused local
    # `result`; each task only prints, so there is nothing to keep.
    await asyncio.gather(*tasks)

if __name__ == '__main__':
    async def main():
        """Time the concurrent run; return elapsed wall-clock seconds."""
        # Fix: start the clock here, immediately before the measured work.
        # The original captured `s` at module level (outside the __main__
        # guard), so the async measurement included import-time work and
        # the module grew a stray global `s` as an import side effect.
        start = time.perf_counter()
        await generate_cuccurently()
        return time.perf_counter() - start

    elapsed_async = asyncio.run(main())
    print("异步运行耗时：", elapsed_async)
    print("-" * 20)
    start = time.perf_counter()
    generate_serially()
    print("同步运行耗时", time.perf_counter() - start)