"""Demo: batched text generation with LangChain's OpenAI LLM wrapper.

Sends six prompts (two distinct Chinese prompts, each repeated three
times) in a single `generate` call, then prints every generation plus
the aggregated provider metadata (token usage, model name).
"""
from langchain.llms.openai import OpenAI


def main() -> None:
    """Run the batch-generation demo against the OpenAI API.

    Requires OPENAI_API_KEY (or equivalent) to be configured in the
    environment, as expected by LangChain's `OpenAI` wrapper.
    """
    llm = OpenAI()
    # Single-prompt variant, kept for reference:
    # print(llm("给我讲一个笑话"))

    # Batch call: 2 distinct prompts x 3 repeats = 6 generations total.
    llm_result = llm.generate(["给我讲一个笑话", "给我朗读一首诗"] * 3)
    print(len(llm_result.generations))  # expect 6 — one entry per prompt
    for generations in llm_result.generations:
        print(generations)
    # Aggregated provider metadata; observed shape, e.g.:
    # {'token_usage': {'prompt_tokens': 93, 'total_tokens': 685,
    #  'completion_tokens': 592}, 'model_name': 'text-davinci-003'}
    print(llm_result.llm_output)


if __name__ == "__main__":
    main()