import json
from pydantic import BaseModel, Field
from fastapi.responses import StreamingResponse
from langchain_core.messages import HumanMessage

from app.graph import graph
from app.repo.chat_message import append_chat_message


class ChatPayload(BaseModel):
    """Request body for the chat endpoints: a thread id plus the user's message."""

    # LangGraph thread identifier; used to resume the checkpointed conversation.
    tid: str = Field(max_length=60, title="langgraph thread id")
    # The user's chat message; length-bounded to keep prompts small.
    message: str = Field(min_length=1, max_length=200, description="Chat message")


def do_chat(payload: ChatPayload):
    """Run the chat graph to completion and return all streamed chunks.

    Persists the incoming human message before invoking the graph, then
    drains the graph's custom-mode stream into a list.

    Args:
        payload: Validated thread id and chat message.

    Returns:
        list: Every chunk emitted by the graph stream, in order.
    """
    config = {"configurable": {"thread_id": payload.tid}}
    # `graph_input` instead of `input` — avoids shadowing the builtin.
    graph_input = {"messages": [HumanMessage(payload.message)]}
    append_chat_message(payload.tid, "human", payload.message)

    chunks = graph().stream(input=graph_input, config=config, stream_mode="custom")
    # list() drains the generator directly; no comprehension copy needed.
    return list(chunks)


def do_chat_sse(payload: ChatPayload):
    """Stream the chat graph's output to the client as Server-Sent Events.

    Persists the human message up front, forwards every streamed chunk as an
    SSE ``data:`` frame, and — once the stream is exhausted — persists the
    assembled AI reply (reasoning tokens wrapped in a ```reasoning fence,
    followed by the content tokens).

    Args:
        payload: Validated thread id and chat message.

    Returns:
        StreamingResponse with media type ``text/event-stream``.
    """
    tid = payload.tid
    config = {"configurable": {"thread_id": tid}}
    # `graph_input` instead of `input` — avoids shadowing the builtin.
    graph_input = {"messages": [HumanMessage(payload.message)]}
    append_chat_message(tid, "human", payload.message)

    def _gen():
        chunks = graph().stream(input=graph_input, config=config, stream_mode="custom")

        # TODO: reasoning can be produced by several nodes, in multiple parts;
        # this flat accumulation could be refined to keep the parts separate.
        reasoning_tokens: list[str] = []
        content_tokens: list[str] = []

        for chunk in chunks:
            # Assumes every custom-mode chunk is a dict carrying "type" and
            # "token" keys — TODO confirm against the graph's stream writers.
            kind = chunk["type"]  # renamed from `type` to stop shadowing the builtin
            token = chunk["token"]
            if kind == "R":
                reasoning_tokens.append(token)
            elif kind == "C":
                content_tokens.append(token)

            yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
        # end for

        # NOTE(review): if the client disconnects mid-stream this generator is
        # closed early and the AI message below is never persisted — consider
        # try/finally if partial replies should be saved.
        ai_msg = (
            f"```reasoning\n{''.join(reasoning_tokens)}\n```\n\n\n"
            if reasoning_tokens
            else ""
        )
        ai_msg = f"{ai_msg}{''.join(content_tokens)}"

        append_chat_message(tid, "ai", ai_msg)

    return StreamingResponse(_gen(), media_type="text/event-stream")
