import asyncio
from asyncio import Queue

import uvicorn
from fastapi import FastAPI
from langchain_core.callbacks import BaseCallbackHandler
import os
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
# Initialize the Tongyi LLM with streaming output enabled
from langchain_community.llms.tongyi import Tongyi
from starlette.responses import StreamingResponse

# NOTE(review): hard-coded API key committed to source — this credential is
# exposed and should be rotated, then loaded from the environment or a
# secrets manager instead of being set here.
os.environ["DASHSCOPE_API_KEY"] = "sk-9d8f1914800e497f8717144e860f99bc"
## python -m uvicorn main:app --reload

# ASGI application instance served by uvicorn.
app = FastAPI()
class StreamingCallbackHandler(BaseCallbackHandler):
    """LangChain callback that forwards streamed LLM tokens to an asyncio Queue.

    The synchronous ``on_llm_new_token`` hook enqueues each raw token with
    ``put_nowait``; a consumer coroutine awaits ``queue.get()`` and formats
    the tokens (e.g. as SSE frames). ``None`` is enqueued when the LLM run
    ends, as an end-of-stream sentinel for the consumer.
    """

    def __init__(self) -> None:
        # Bug fix: the original stored the Queue *class* itself
        # (``self.queue = Queue``) instead of creating an instance.
        self.queue: Queue = Queue()

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        # Bug fix: ``asyncio.Queue.put`` is a coroutine; calling it from this
        # synchronous callback without awaiting never enqueued anything.
        # ``put_nowait`` is the correct non-blocking call here (the queue is
        # unbounded, so QueueFull cannot be raised).
        # Enqueue the raw token only — SSE framing ("data: ...") is the
        # consumer's job; the original wrapped it both here and in the
        # consumer, producing malformed frames.
        self.queue.put_nowait(token)

    def on_llm_end(self, *args, **kwargs) -> None:
        # End-of-stream sentinel: ``await queue.get()`` never raises
        # QueueEmpty (it waits), so the consumer needs an explicit signal
        # to stop draining.
        self.queue.put_nowait(None)


async def stream_data(question):
    """Yield the model's answer to *question* as SSE ``data:`` frames.

    Bug fixes vs. the original:
    - ``chain.astream(...)`` returns an async generator; it must be iterated
      with ``async for`` — ``await``-ing it (as the original did) is a
      TypeError and never produced output.
    - The old drain loop expected ``asyncio.QueueEmpty`` from
      ``await queue.get()``, which never raises it (``get`` waits forever),
      so the loop could never terminate.
    - Tokens were wrapped in the SSE ``data:`` envelope twice (once in the
      callback handler, once here), producing malformed frames.

    The intermediate callback/queue plumbing is unnecessary: the chain's
    async stream is consumed directly.
    """
    llm = Tongyi(streaming=True)
    prompt = PromptTemplate(template="Question: {question}", input_variables=["question"])
    chain = LLMChain(llm=llm, prompt=prompt)

    async for chunk in chain.astream({"question": question}):
        # LLMChain stream chunks are dicts; the generated text lives under
        # the "text" output key. Fall back to str() defensively — TODO
        # confirm the chunk shape against the installed langchain version.
        text = chunk.get("text", "") if isinstance(chunk, dict) else str(chunk)
        yield f"data: {text}\n\n"


@app.get("/")
async def read_root():
    """SSE endpoint: stream the answer to a fixed demo question."""

    async def event_generator():
        # Each chunk is an already-formatted SSE string; encode to bytes
        # for the response body.
        async for chunk in stream_data("What is the meaning of life?"):
            yield chunk.encode()

    return StreamingResponse(
        event_generator(),
        headers={'Content-Type': 'text/event-stream'},
    )


if __name__ == "__main__":
    # Launch the ASGI server when run as a script (equivalent to
    # `python -m uvicorn main:app`, without --reload).
    host, port = "0.0.0.0", 9001
    uvicorn.run(app, host=host, port=port)
