import asyncio
import os
from typing import AsyncIterable, Awaitable

import uvicorn
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException
from fastapi.responses import FileResponse, StreamingResponse
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage

# Load configuration (OPENAI_API_KEY etc.) from a .env file / the environment.
# SECURITY: the API key was previously hard-coded here — a leaked secret that
# must be rotated. Never commit credentials to source; keep them in .env.
load_dotenv()

async def wait_done(fn: Awaitable, event: asyncio.Event):
    try:
        await fn
    except Exception as e:
        print(e)
        event.set()
    finally:
        event.set()

async def call_openai(question: str) -> AsyncIterable[str]:
    """Yield the model's answer to *question* one streamed token at a time.

    A background task drives the generation while this generator drains the
    callback handler's token iterator; the task is awaited afterwards so any
    late failure is observed before the stream closes.
    """
    handler = AsyncIteratorCallbackHandler()
    llm = ChatOpenAI(
        model="gpt-3.5-turbo-0613",
        temperature=0,
        streaming=True,
        verbose=True,
        callbacks=[handler],
    )

    # Kick off the generation; wait_done sets handler.done when it finishes.
    generation = llm.agenerate(messages=[[HumanMessage(content=question)]])
    producer = asyncio.create_task(wait_done(generation, handler.done))

    async for piece in handler.aiter():
        yield f"{piece}"

    await producer


# ASGI application instance; the route handlers below are registered on it.
app = FastAPI()

@app.post("/ask")
async def ask(body: dict):
    """Stream the model's answer to the posted question as server-sent events.

    Expects a JSON body with a non-empty "question" field; responds with 400
    instead of an opaque 500 (previously a bare KeyError) when it is missing.
    """
    question = body.get("question")
    if not question:
        raise HTTPException(status_code=400, detail="Missing 'question' field")
    return StreamingResponse(call_openai(question), media_type="text/event-stream")

@app.get("/")
async def homepage():
    """Serve the static landing page from the statics directory."""
    index_path = os.getcwd() + '/statics/index.html'
    return FileResponse(index_path)

if __name__ == "__main__":
    # Run the ASGI server directly, listening on all interfaces.
    uvicorn.run(app=app, host="0.0.0.0", port=8002)
