# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
# SPDX-License-Identifier: MIT

import base64
import json
import logging
import os
from typing import Any, List, cast
from uuid import uuid4

from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import Response, StreamingResponse
from langchain_core.messages import AIMessageChunk, ToolMessage
from langgraph.types import Command

from src.graph.builder import build_graph_with_memory
from src.podcast.graph.builder import build_graph as build_podcast_graph
from src.ppt.graph.builder import build_graph as build_ppt_graph
from src.prose.graph.builder import build_graph as build_prose_graph
from src.server.chat_request import (
    ChatMessage,
    ChatRequest,
    GeneratePodcastRequest,
    GeneratePPTRequest,
    GenerateProseRequest,
    TTSRequest,
)
from src.server.mcp_request import MCPServerMetadataRequest, MCPServerMetadataResponse
from src.server.mcp_utils import load_mcp_tools
from src.tools import VolcengineTTS

# Module-level logger for this API module.
logger = logging.getLogger(__name__)

app = FastAPI(
    title="DeerFlow API",
    description="API for Deer",
    version="0.1.0",
)

# Add CORS middleware
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# maximally permissive — confirm this is intended for production deployments.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Allows all origins
    allow_credentials=True,
    allow_methods=["*"],  # Allows all methods
    allow_headers=["*"],  # Allows all headers
)

# Shared workflow graph with conversation memory, built once and reused by
# every /api/chat/stream request.
graph = build_graph_with_memory()


@app.post("/api/chat/stream")
async def chat_stream(request: ChatRequest):
    """Stream the chat workflow for this request as server-sent events."""
    # "__default__" is a client-side sentinel asking the server to mint a
    # fresh conversation thread id.
    if request.thread_id == "__default__":
        thread_id = str(uuid4())
    else:
        thread_id = request.thread_id
    event_generator = _astream_workflow_generator(
        request.model_dump()["messages"],
        thread_id,
        request.max_plan_iterations,
        request.max_step_num,
        request.auto_accepted_plan,
        request.interrupt_feedback,
        request.mcp_settings,
        request.enable_background_investigation,
    )
    return StreamingResponse(event_generator, media_type="text/event-stream")


async def _astream_workflow_generator(
    messages: List[ChatMessage],
    thread_id: str,
    max_plan_iterations: int,
    max_step_num: int,
    auto_accepted_plan: bool,
    interrupt_feedback: str,
    mcp_settings: dict,
    enable_background_investigation: bool,
):
    """Run the chat workflow graph and yield SSE-formatted event strings.

    Args:
        messages: Incoming chat messages. Note: callers pass the dicts from
            ``request.model_dump()["messages"]``, so items are plain dicts at
            runtime despite the ``List[ChatMessage]`` hint — TODO confirm.
        thread_id: Conversation thread identifier, used as the graph config key.
        max_plan_iterations: Maximum number of plan iterations.
        max_step_num: Maximum number of steps in a plan.
        auto_accepted_plan: Whether plans are accepted without user review.
        interrupt_feedback: Feedback option chosen at an interrupt
            (e.g. "accepted" or "edit_plan"); used to resume an interrupted run.
        mcp_settings: MCP server settings forwarded to the graph config.
        enable_background_investigation: Whether background investigation runs
            before planning.

    Yields:
        Strings produced by ``_make_event`` ("interrupt", "tool_calls",
        "tool_call_chunks", "tool_call_result", or "message_chunk" events).
    """
    # Initial graph state for a fresh run.
    input_ = {
        "messages": messages,
        "plan_iterations": 0,
        "final_report": "",
        "current_plan": None,
        "observations": [],
        "auto_accepted_plan": auto_accepted_plan,
        "enable_background_investigation": enable_background_investigation,
    }
    # When resuming after an interrupt, replace the state with a resume
    # command carrying the chosen feedback plus the user's latest message.
    if not auto_accepted_plan and interrupt_feedback:
        resume_msg = f"[{interrupt_feedback}]"
        if messages:
            resume_msg += f" {messages[-1]['content']}"
        input_ = Command(resume=resume_msg)
    async for agent, _, event_data in graph.astream(
        input_,
        config={
            "thread_id": thread_id,
            "max_plan_iterations": max_plan_iterations,
            "max_step_num": max_step_num,
            "mcp_settings": mcp_settings,
        },
        stream_mode=["messages", "updates"],
        subgraphs=True,
    ):
        if isinstance(event_data, dict):
            # "updates" events arrive as dicts; only interrupts are forwarded
            # to the client, everything else is skipped.
            if "__interrupt__" in event_data:
                yield _make_event(
                    "interrupt",
                    {
                        "thread_id": thread_id,
                        "id": event_data["__interrupt__"][0].ns[0],
                        "role": "assistant",
                        "content": event_data["__interrupt__"][0].value,
                        "finish_reason": "interrupt",
                        # Options the UI offers the user at the interrupt.
                        "options": [
                            {"text": "Edit plan", "value": "edit_plan"},
                            {"text": "Start research", "value": "accepted"},
                        ],
                    },
                )
            continue
        # "messages" events are (chunk, metadata) tuples; the metadata is
        # currently unused.
        message_chunk, message_metadata = cast(
            tuple[AIMessageChunk, dict[str, Any]], event_data
        )
        event_stream_message: dict[str, Any] = {
            "thread_id": thread_id,
            # agent[0] is the subgraph namespace, e.g. "researcher:<id>";
            # keep only the agent name.
            "agent": agent[0].split(":")[0],
            "id": message_chunk.id,
            "role": "assistant",
            "content": message_chunk.content,
        }
        if message_chunk.response_metadata.get("finish_reason"):
            event_stream_message["finish_reason"] = message_chunk.response_metadata.get(
                "finish_reason"
            )
        if isinstance(message_chunk, ToolMessage):
            # Tool message - return the tool call result.
            event_stream_message["tool_call_id"] = message_chunk.tool_call_id
            yield _make_event("tool_call_result", event_stream_message)
        else:
            if message_chunk.tool_calls:
                # AI message containing complete tool calls.
                event_stream_message["tool_calls"] = message_chunk.tool_calls
                event_stream_message["tool_call_chunks"] = (
                    message_chunk.tool_call_chunks
                )
                yield _make_event("tool_calls", event_stream_message)
            elif message_chunk.tool_call_chunks:
                # AI message containing partial tool-call chunks.
                event_stream_message["tool_call_chunks"] = (
                    message_chunk.tool_call_chunks
                )
                yield _make_event("tool_call_chunks", event_stream_message)
            else:
                # AI message - raw message tokens.
                yield _make_event("message_chunk", event_stream_message)


def _make_event(event_type: str, data: dict[str, any]):
    """
    构建事件流消息字符串。
    如果数据字典中的"content"键对应的值为空，则从数据字典中移除该键。
    返回一个字符串，格式为"event: <事件类型>\ndata: <数据JSON字符串>\n\n"。
    """
    if data.get("content") == "":
        data.pop("content")
    return f"event: {event_type}\ndata: {json.dumps(data, ensure_ascii=False)}\n\n"


@app.post("/api/tts")
async def text_to_speech(request: TTSRequest):
    """Convert text to speech using volcengine TTS API.

    Returns the synthesized audio as a downloadable attachment. Raises a 400
    when required credentials are missing and a 500 on synthesis failure.
    """
    try:
        app_id = os.getenv("VOLCENGINE_TTS_APPID", "")
        if not app_id:
            raise HTTPException(
                status_code=400, detail="VOLCENGINE_TTS_APPID is not set"
            )
        access_token = os.getenv("VOLCENGINE_TTS_ACCESS_TOKEN", "")
        if not access_token:
            raise HTTPException(
                status_code=400, detail="VOLCENGINE_TTS_ACCESS_TOKEN is not set"
            )
        cluster = os.getenv("VOLCENGINE_TTS_CLUSTER", "volcano_tts")
        voice_type = os.getenv("VOLCENGINE_TTS_VOICE_TYPE", "BV700_V2_streaming")

        tts_client = VolcengineTTS(
            appid=app_id,
            access_token=access_token,
            cluster=cluster,
            voice_type=voice_type,
        )
        # Call the TTS API (input text is capped at 1024 characters).
        result = tts_client.text_to_speech(
            text=request.text[:1024],
            encoding=request.encoding,
            speed_ratio=request.speed_ratio,
            volume_ratio=request.volume_ratio,
            pitch_ratio=request.pitch_ratio,
            text_type=request.text_type,
            with_frontend=request.with_frontend,
            frontend_type=request.frontend_type,
        )

        if not result["success"]:
            raise HTTPException(status_code=500, detail=str(result["error"]))

        # Decode the base64 audio data returned by the API.
        audio_data = base64.b64decode(result["audio_data"])

        # Return the audio file
        return Response(
            content=audio_data,
            media_type=f"audio/{request.encoding}",
            headers={
                "Content-Disposition": (
                    f"attachment; filename=tts_output.{request.encoding}"
                )
            },
        )
    except HTTPException:
        # Fix: the generic handler below previously caught the deliberate
        # 400/500 HTTPExceptions raised above and re-raised them as opaque
        # 500s. Propagate them unchanged instead (matches the pattern used by
        # the MCP metadata endpoint).
        raise
    except Exception as e:
        logger.exception(f"Error in TTS endpoint: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/api/podcast/generate")
async def generate_podcast(request: GeneratePodcastRequest):
    """Generate podcast audio (MP3) from the given report content.

    Runs the podcast workflow graph on the report text and returns the
    resulting audio bytes; raises a 500 on any failure.
    """
    try:
        report_content = request.content
        # Fix: use the module logger instead of a leftover debugging print().
        logger.info(report_content)
        workflow = build_podcast_graph()
        final_state = workflow.invoke({"input": report_content})
        audio_bytes = final_state["output"]
        return Response(content=audio_bytes, media_type="audio/mp3")
    except Exception as e:
        logger.exception(f"Error occurred during podcast generation: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/api/ppt/generate")
async def generate_ppt(request: GeneratePPTRequest):
    """Generate a PPTX file from the given report content.

    Runs the PPT workflow graph, reads the generated file from disk, and
    returns its bytes; raises a 500 on any failure.
    """
    try:
        report_content = request.content
        # Fix: use the module logger instead of a leftover debugging print().
        logger.info(report_content)
        workflow = build_ppt_graph()
        final_state = workflow.invoke({"input": report_content})
        generated_file_path = final_state["generated_file_path"]
        # Read the generated presentation from disk.
        # NOTE(review): the generated file is not deleted afterwards — confirm
        # whether the workflow cleans up its own output.
        with open(generated_file_path, "rb") as f:
            ppt_bytes = f.read()
        return Response(
            content=ppt_bytes,
            media_type="application/vnd.openxmlformats-officedocument.presentationml.presentation",
        )
    except Exception as e:
        logger.exception(f"Error occurred during ppt generation: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/api/prose/generate")
async def generate_prose(request: GenerateProseRequest):
    """Stream generated prose for the given prompt as server-sent events."""
    try:
        logger.info(f"Generating prose for prompt: {request.prompt}")
        workflow = build_prose_graph()
        stream = workflow.astream(
            {
                "content": request.prompt,
                "option": request.option,
                "command": request.command,
            },
            stream_mode="messages",
            subgraphs=True,
        )

        async def sse_events():
            # Each item is (namespace, (message, metadata)); forward the
            # message's token text as an SSE data line.
            async for _, event in stream:
                yield f"data: {event[0].content}\n\n"

        return StreamingResponse(sse_events(), media_type="text/event-stream")
    except Exception as e:
        logger.exception(f"Error occurred during prose generation: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/api/mcp/server/metadata", response_model=MCPServerMetadataResponse)
async def mcp_server_metadata(request: MCPServerMetadataRequest):
    """Get information about an MCP server."""
    try:
        # This endpoint defaults to a generous 300-second timeout; a custom
        # value from the request takes precedence.
        timeout = (
            request.timeout_seconds if request.timeout_seconds is not None else 300
        )

        # Load tools from the MCP server using the utility function.
        tools = await load_mcp_tools(
            server_type=request.transport,
            command=request.command,
            args=request.args,
            url=request.url,
            env=request.env,
            timeout_seconds=timeout,
        )

        # Echo the connection details back alongside the discovered tools.
        return MCPServerMetadataResponse(
            transport=request.transport,
            command=request.command,
            args=request.args,
            url=request.url,
            env=request.env,
            tools=tools,
        )
    except Exception as e:
        if not isinstance(e, HTTPException):
            logger.exception(f"Error in MCP server metadata endpoint: {str(e)}")
            raise HTTPException(status_code=500, detail=str(e))
        raise
