from open_ai_model import getOpenAiModel

# 为了支持异步调用
import asyncio
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

# Build the LCEL pipeline: prompt -> chat model -> string parser.
model = getOpenAiModel()
# Prompt asks for a joke about {topic} (in Chinese, limited to 50 characters).
prompt = ChatPromptTemplate.from_template("给我讲一个关于{topic}的笑话，50个字以内。")
output_parser = StrOutputParser()  # Formats the result, extracting the content field as a plain string

# Compose the runnables with the LCEL pipe operator.
chain = prompt | model | output_parser

# Synchronous invocation (kept for reference)
# result = chain.invoke({"topic": "喵咪"})
# print(result)


# 异步调用
async def async_stream():
    """Stream the chain's output asynchronously, printing each chunk as it arrives.

    Chunks are separated by "|" so the streaming granularity is visible.
    """
    stream = chain.astream({"topic": "喵咪"})
    async for token in stream:
        print(token, end="|", flush=True)


# Run the async streaming demo. Guarded so importing this module does not
# trigger a network call to the model — the stream only runs when the file
# is executed as a script.
if __name__ == "__main__":
    asyncio.run(async_stream())
