"""流式输出处理器：封装流式响应生成、回复存储逻辑"""
from typing import AsyncGenerator, List
from fastapi.responses import StreamingResponse
from config.constants import ROLE_ASSISTANT, MEDIA_TYPE_EVENT_STREAM
from db.chat_record import save_chat_record
from db.chat_session import update_session_updated_at

async def stream_llm_response(
    llm_stream,
    session_id: str,
    user_id: str
) -> StreamingResponse:
    """
    Handle a streaming LLM response: forward content chunks to the client
    as they arrive, then persist the complete assistant reply.

    :param llm_stream: async iterable of OpenAI-style streaming chunks
                       (content read from ``chunk.choices[0].delta.content``)
    :param session_id: chat session ID the reply belongs to
    :param user_id: ID of the user who owns the session
    :return: FastAPI StreamingResponse object
    """
    # Collect every reply fragment so the full text can be stored afterwards.
    assistant_reply_chunks: List[str] = []

    async def generate_stream() -> AsyncGenerator[str, None]:
        """Streaming generator: yield the LLM reply fragment by fragment."""
        async for chunk in llm_stream:
            # Guard: some providers emit chunks with an empty `choices` list
            # (e.g. the trailing usage chunk when stream usage is requested,
            # or keep-alive chunks) — indexing [0] there would raise IndexError.
            if not chunk.choices:
                continue
            # Extract the current fragment's content (filter out None/empty deltas)
            chunk_content = chunk.choices[0].delta.content
            if chunk_content:
                assistant_reply_chunks.append(chunk_content)
                yield chunk_content  # push the fragment to the frontend

        # Stream finished: store the complete AI reply.
        # NOTE(review): if the client disconnects mid-stream, this generator is
        # cancelled and the partial reply is NOT saved — confirm that is intended.
        if assistant_reply_chunks:
            full_reply = "".join(assistant_reply_chunks)
            # Save the assistant reply to the database
            save_chat_record(
                session_id=session_id,
                user_id=user_id,
                role=ROLE_ASSISTANT,
                content=full_reply
            )
            # Bump the session's last-updated timestamp
            update_session_updated_at(session_id=session_id)

    # Return a FastAPI streaming response backed by the generator above
    return StreamingResponse(
        content=generate_stream(),
        media_type=MEDIA_TYPE_EVENT_STREAM
    )