import asyncio
import os

from fastapi import FastAPI, Request
from langchain_community.callbacks.manager import get_openai_callback
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from sse_starlette import EventSourceResponse
# LLM client pointed at Alibaba DashScope's OpenAI-compatible endpoint.
# SECURITY: never commit an API key to source control — read it from the
# environment. Set DASHSCOPE_API_KEY before running; the previously
# hard-coded key should be considered leaked and rotated.
llm = ChatOpenAI(
    api_key=os.getenv("DASHSCOPE_API_KEY", ""),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    # Swap the model name as needed. Model list:
    # https://help.aliyun.com/zh/model-studio/getting-started/models
    model="llama-4-scout-17b-16e-instruct",
)

# Prompt template: "tell a joke about {input}" (Chinese).
promptStr = """
请讲一个关于{input}的笑话
"""
prompt = ChatPromptTemplate.from_template(promptStr)

outparse = StrOutputParser()

# LCEL pipeline: prompt -> model -> plain string output.
chain = prompt | llm | outparse
async def async_stream():
    """Print the chain's streamed chunks to stdout, '|'-separated.

    Uses ``astream`` with ``async for`` so the coroutine yields to the
    event loop between chunks; the original iterated the *sync*
    ``chain.stream`` inside an async def, which blocks the loop.
    """
    async for chunk in chain.astream({"input": "猴子"}):
        print(chunk, end="|", flush=True)

# asyncio.run(async_stream())

# FastAPI application
app = FastAPI()


@app.get("/stream")
async def stream():
    """SSE endpoint streaming the joke chain's output.

    Each chunk is emitted as one SSE ``data:`` event, followed by a
    final ``[DONE]`` sentinel so clients know the stream has ended.
    """

    async def event_generator():
        # Track token usage while streaming. ``astream``/``async for``
        # keeps the event loop responsive; the sync ``chain.stream``
        # iterator would block every other request per chunk.
        # NOTE(review): get_openai_callback may not record usage for
        # streamed responses unless the model reports usage — verify.
        with get_openai_callback() as cb:
            async for chunk in chain.astream({"input": "猴子"}):
                yield {"data": chunk}
            # Log usage once at the end instead of once per chunk.
            print(cb)
        yield {"data": "[DONE]"}

    return EventSourceResponse(event_generator())


if __name__ == "__main__":
    # Run the ASGI server only when executed as a script.
    import uvicorn

    server_host, server_port = "0.0.0.0", 8000
    uvicorn.run(app, host=server_host, port=server_port)