import os
from datetime import time
from typing import Any, Generator
from urllib import request

from fastapi.openapi.models import Response
from langchain_community.chat_models import ChatTongyi
from langchain_core.callbacks import StreamingStdOutCallbackHandler, CallbackManager
from langchain_core.messages import HumanMessage
from langchain_core.outputs import LLMResult

from fastapi.responses import StreamingResponse
import uvicorn
from fastapi import FastAPI

# SECURITY NOTE(review): a live-looking DashScope API key is hard-coded and
# committed to source. Move it to an environment variable / secret store and
# rotate this key — anyone with repo access can use it.
os.environ["DASHSCOPE_API_KEY"] = "sk-9d8f1914800e497f8717144e860f99bc"
app = FastAPI()

class StreamWeb(StreamingStdOutCallbackHandler):
    """LangChain callback handler that buffers streamed LLM tokens.

    Tokens pushed by the model via ``on_llm_new_token`` are appended to a
    FIFO buffer; ``generate_tokens`` drains that buffer so a
    ``StreamingResponse`` can iterate it and relay tokens to the client.
    """

    def __init__(self) -> None:
        # Original skipped the base-class initializer; call it so any state
        # the handler base sets up is in place.
        super().__init__()
        self.num = 0  # retained from original; not read within this class
        self.tokens: list[str] = []  # FIFO buffer of tokens not yet sent
        self.finish = False  # set True once the LLM run has completed

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Queue each token as the model streams it."""
        self.tokens.append(token)

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Mark the run finished so ``generate_tokens`` can drain and stop."""
        self.finish = True

    def generate_tokens(self) -> Generator[str, None, None]:
        """Yield queued tokens until the run has ended and the buffer is empty."""
        # BUG FIX: the module does `from datetime import time` (datetime.time
        # has no `sleep`), so the original crashed with AttributeError on the
        # first idle poll. Bind the real stdlib `time` module locally.
        import time as _time

        while not self.finish or self.tokens:
            if self.tokens:
                yield self.tokens.pop(0)
            else:
                # Short poll instead of the original 1 s pause so the client
                # sees tokens with low latency.
                _time.sleep(0.05)


# Module-level handler instance wired into the streaming chat model below.
# NOTE(review): `llm_streaming` is never invoked anywhere in this chunk, and
# the "/" endpoint builds its own fresh StreamWeb instead of using `sWeb` —
# confirm the intended wiring against the rest of the project.
sWeb = StreamWeb()
# Streaming GPT-style answers (original comment: 流式回答gpt)
llm_streaming = ChatTongyi(
                           streaming=True,
                           verbose=True,
                           callbacks=[sWeb])



@app.get("/")
async def read_root():
    """Stream tokens from a fresh StreamWeb handler as a chunked response.

    NOTE(review): the handler created here is never attached to any LLM
    invocation (the global ``llm_streaming`` uses a different handler and is
    never called), so as written no tokens are ever produced — the generator
    just polls. Wire this handler into an actual model call, or reuse the
    module-level handler, before relying on this endpoint.
    """
    callback_handler = StreamWeb()
    # Removed dead code: `CallbackManager([callback_handler])` was constructed
    # here but never passed to any LLM call, so it had no effect.
    # Explicit media type so clients treat the chunked body as plain text
    # (the original sent no Content-Type header at all).
    return StreamingResponse(callback_handler.generate_tokens(),
                             media_type="text/plain")


if __name__ == "__main__":
    # Development entry point: serve on all interfaces, port 9001.
    uvicorn.run(app, host="0.0.0.0", port=9001)
