import logging
from contextlib import AsyncExitStack

from langchain_core.messages import HumanMessage

from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
from graph import init_app

# Configure root logging so INFO-level messages from this module and its
# libraries (uvicorn, langchain) are emitted to stderr.
logging.basicConfig(level=logging.INFO)

# FastAPI application instance; route handlers below register against it.
app = FastAPI()


@app.post("/chat")
async def chat(request: Request):
    """Run one non-streaming graph invocation and return the final reply text.

    Expects a JSON body with:
        query (str): the user's message.
        thread_id: conversation identifier forwarded to the graph's
            ``configurable.thread_id`` (presumably used for checkpointing —
            depends on ``graph.init_app``, not visible here).

    Returns:
        The ``.content`` of the last message produced by the graph.
    """
    # Parse into a separate name instead of rebinding the Request parameter.
    payload = await request.json()
    query = payload["query"]
    thread_id = payload["thread_id"]

    # `async with` closes the stack (and whatever resources init_app registered
    # on it) even when ainvoke raises; the original only closed it on the happy
    # path. Also avoid shadowing the module-level FastAPI `app`.
    async with AsyncExitStack() as stack:
        graph_app = await init_app(stack)
        output = await graph_app.ainvoke(
            {"messages": [HumanMessage(query)]},
            config={"configurable": {"thread_id": thread_id}},
        )
        return output["messages"][-1].content


@app.post("/achat")
async def as_chat(request: Request):
    """Streaming counterpart of ``/chat``.

    Parses the JSON body and hands it to :func:`streaming_chat`, whose async
    generator of token chunks is wrapped in a ``StreamingResponse``.
    """
    payload = await request.json()
    return StreamingResponse(streaming_chat(payload))


async def streaming_chat(request: dict):
    """Async generator yielding chat-model token chunks for one request.

    Args:
        request: parsed JSON body with ``"query"`` and ``"thread_id"`` keys.

    Yields:
        Non-empty ``content`` chunks from ``on_chat_model_stream`` events.
    """
    query = request["query"]
    thread_id = request["thread_id"]

    # `async with` guarantees the exit stack is closed even if the client
    # disconnects mid-stream (GeneratorExit) or the graph raises — the original
    # only reached aclose() after a fully consumed, error-free stream. Also
    # avoid shadowing the module-level FastAPI `app`.
    async with AsyncExitStack() as stack:
        graph_app = await init_app(stack)
        async for event in graph_app.astream_events(
            {"messages": [HumanMessage(query)]},
            config={"configurable": {"thread_id": thread_id}},
        ):
            if event["event"] == "on_chat_model_stream" and (
                chunk := event["data"]["chunk"].content
            ):
                yield chunk


if __name__ == "__main__":
    # Local import: uvicorn is only needed when running this file directly.
    import uvicorn

    # Development entry point; binds to localhost only (not externally reachable).
    uvicorn.run(app, host="localhost", port=8000)
