import asyncio
import os

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

# NOTE(security): an API key was hard-coded here. Prefer the DASHSCOPE_API_KEY
# environment variable; the original literal is kept only as a fallback so
# existing behavior is unchanged — rotate this key and remove the fallback.
llm = ChatOpenAI(
    api_key=os.environ.get("DASHSCOPE_API_KEY", "sk-a3f7718fb81f43b2915f0a6483b6661b"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    model="llama-4-scout-17b-16e-instruct",  # swap as needed; model list: https://help.aliyun.com/zh/model-studio/getting-started/models
    # other params...
)

# Prompt template (Chinese): "Please tell a joke about {input}".
promptStr = """
请讲一个关于{input}的笑话
"""
prompt = ChatPromptTemplate.from_template(promptStr)

# Convert the model's chat message output into a plain string.
outparse = StrOutputParser()

# LCEL pipeline: prompt -> LLM -> string parser.
chain = prompt | llm | outparse
async def async_stream():
    """Stream the chain's intermediate events and print each one.

    Bug fix: ``chain.astream_events`` returns an *async* iterator, so it must
    be consumed with ``async for`` — a plain ``for`` raises ``TypeError``
    ('async_generator' object is not iterable) at runtime.
    """
    async for chunk in chain.astream_events({"input": "猴子"}, version="v2"):
        print(chunk, end="|", flush=True)

asyncio.run(async_stream())