import asyncio
import base64
import io
import json
import logging
import os
import re
from typing import Annotated, Any, List
from uuid import uuid4
from fastapi import FastAPI, HTTPException, Query, UploadFile, File, Form, Depends
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from fastapi.responses import Response, StreamingResponse
from langchain_core.messages import AIMessageChunk, ToolMessage, SystemMessage, HumanMessage
from langgraph.types import Command

from src.config.agents import AGENT_LLM_MAP
from src.config.tools import SELECTED_RAG_PROVIDER
from src.graph.builder import build_graph_with_memory, build_graph_interview, build_graph_ogsm
from src.graph.nodes import test_reporter_node
from src.graph.talent_model import Talent
from src.html.graph.builder import build_graph as build_html_graph
from src.knowledge_base.file_processor import FileProcessor
from src.llms.llm import get_llm_by_type
from src.minio.minio_client import upload_file_to_minio
from src.podcast.graph.builder import build_graph as build_podcast_graph
from src.ppt.graph.builder import build_graph as build_ppt_graph
from src.prompts import get_prompt_template
from src.prose.graph.builder import build_graph as build_prose_graph
from src.rag.builder import build_retriever
from src.rag.retriever import Resource
from src.server.chat_request import (
    ChatMessage,
    ChatRequest,
    GeneratePodcastRequest,
    GeneratePPTRequest,
    GenerateProseRequest,
    TTSRequest, GenerateHtmlRequest,
)
from src.server.gloabl_map import set_skip_tool_call_thread, delete_skip_tool_call_thread, \
    get_skip_tool_call_thread
from src.server.mcp_request import MCPServerMetadataRequest, MCPServerMetadataResponse
from src.server.mcp_utils import load_mcp_tools
from src.server.rag_request import (
    RAGConfigResponse,
    RAGResourceRequest,
    RAGResourcesResponse,
)
from src.server.talent_request import TalentResponse, TalentRequest
from src.supabase_db.api.ChatMessagesApi import save_user_message, save_interrupt_message
from src.supabase_db.api.ProjectsApi import generate_project_name, get_project_list
from src.supabase_db.api.TalentsApi import save_tenant, get_tenants
from src.supabase_db.models.Accounts import Accounts
from src.supabase_db.supabase import DBConnection
from src.tools import VolcengineTTS
from src.utils.auth_utils import get_current_user_id_from_jwt

logger = logging.getLogger(__name__)

# Generic detail string returned for unexpected 500 errors.
INTERNAL_SERVER_ERROR_DETAIL = "Internal Server Error"

app = FastAPI(
    title="DeerFlow API",
    description="API for Deer",
    version="0.1.0",
)

# Add CORS middleware.
# NOTE(review): wildcard origins combined with allow_credentials=True is
# overly permissive (and browsers reject credentialed wildcard responses) —
# consider an explicit origin list for production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Allows all origins
    allow_credentials=True,
    allow_methods=["*"],  # Allows all methods
    allow_headers=["*"],  # Allows all headers
)

# Pre-built workflow graphs keyed by agent_code; "default" is the fallback.
GRAPH_MAP = {
    "default": build_graph_with_memory(),
    "interview": build_graph_interview(),
    "ogsm": build_graph_ogsm(),
}

# Nodes whose tool_call stream events are suppressed for the frontend.
SKIP_TOOL_CALL_NODE = ["resume_matcher", "interview_evaluator"]


def get_graph_by_agent_code(agent_code: str):
    """Resolve the compiled graph for *agent_code*, falling back to "default"."""
    fallback = GRAPH_MAP["default"]
    return GRAPH_MAP.get(agent_code, fallback)


@app.post("/api/chat/stream")
async def chat_stream(request: ChatRequest, user_id: str = Depends(get_current_user_id_from_jwt)):
    thread_id = request.thread_id
    if thread_id == "__default__":
        thread_id = str(uuid4())
    # user_id = "ddcc2c49-bb11-406c-9ece-6b868c6c9dc9"
    asyncio.create_task(generate_project_name(user_id, thread_id, request.messages[0].content, request.agent_code))
    await save_user_message(request.messages[0].content, user_id, thread_id, '')
    return StreamingResponse(
        _astream_workflow_generator(
            request.model_dump()["messages"],
            thread_id,
            request.resources,
            request.max_plan_iterations,
            request.max_step_num,
            request.max_search_results,
            request.auto_accepted_plan,
            request.interrupt_feedback,
            request.mcp_settings,
            request.enable_background_investigation,
            request.agent_code,
            user_id,
        ),
        media_type="text/event-stream",
    )


async def _astream_workflow_generator(
        messages: List[ChatMessage],
        thread_id: str,
        resources: List[Resource],
        max_plan_iterations: int,
        max_step_num: int,
        max_search_results: int,
        auto_accepted_plan: bool,
        interrupt_feedback: str,
        mcp_settings: dict,
        enable_background_investigation: bool,
        agent_code: str,
        user_id: str,
):
    """Drive the agent graph for one request and yield SSE-formatted events.

    Yields "interrupt", "tool_calls", "tool_call_chunks", "tool_call_result"
    and "message_chunk" events (see ``_make_event``). For nodes listed in
    SKIP_TOOL_CALL_NODE the tool-call stream is suppressed after the first
    tool_calls event, tracked per thread via the skip-tool-call map.
    """
    input_ = {
        "messages": messages,
        "plan_iterations": 0,
        "final_report": "",
        "current_plan": None,
        "observations": [],
        "auto_accepted_plan": auto_accepted_plan,
        "enable_background_investigation": enable_background_investigation,
        "agent_code": agent_code,
        "user_id": user_id,
    }
    if interrupt_feedback:
        # Resume an interrupted run: the payload is the feedback option in
        # brackets followed by the user's latest message, if any.
        resume_msg = f"[{interrupt_feedback}]"
        if messages:
            resume_msg += f"{messages[-1]['content']}"
        input_ = Command(resume=resume_msg)
    graph = get_graph_by_agent_code(agent_code)

    async for path, key, event_data in graph.astream(
            input_,
            config={
                "thread_id": thread_id,
                "resources": resources,
                "max_plan_iterations": max_plan_iterations,
                "max_step_num": max_step_num,
                "max_search_results": max_search_results,
                "mcp_settings": mcp_settings,
            },
            stream_mode=["messages", "updates"],
            subgraphs=True,
    ):
        if isinstance(event_data, dict):
            # "updates" events arrive as dicts; only interrupts are forwarded
            # to the client, everything else is dropped.
            if "__interrupt__" in event_data:
                message_id = event_data["__interrupt__"][0].id
                content = event_data["__interrupt__"][0].value
                await save_interrupt_message(content, message_id, user_id, thread_id)
                yield _make_event(
                    "interrupt",
                    {
                        "thread_id": thread_id,
                        "id": message_id,
                        "role": "assistant",
                        "content": content,
                        "finish_reason": "interrupt",
                        "options": [
                            {"text": "编辑", "value": "edit_plan"},
                            {"text": "开始分析", "value": "accepted"},
                        ],
                    },
                )
            continue
        message_chunk, metadata = event_data
        event_stream_message: dict[str, Any] = {
            "thread_id": thread_id,
            # Top-level events carry the node name in metadata; subgraph events
            # encode the agent in the first path segment ("agent:run_id").
            "agent": metadata.get("langgraph_node") if len(path) == 0 else path[0].split(":")[0],
            "id": message_chunk.id,
            "role": "assistant",
            "content": message_chunk.content or "",
        }

        if message_chunk.response_metadata.get("finish_reason"):
            event_stream_message["finish_reason"] = message_chunk.response_metadata.get(
                "finish_reason"
            )
        if isinstance(message_chunk, ToolMessage):
            # Tool Message - Return the result of the tool call
            event_stream_message["tool_call_id"] = message_chunk.tool_call_id
            # Suppress the result for skip-listed nodes and clear the flag.
            if event_stream_message["agent"] in SKIP_TOOL_CALL_NODE and get_skip_tool_call_thread(thread_id) is True:
                delete_skip_tool_call_thread(thread_id)
            else:
                yield _make_event("tool_call_result", event_stream_message)
        elif isinstance(message_chunk, AIMessageChunk):
            # AI Message - Raw message tokens
            if message_chunk.tool_calls:
                # AI Message - Tool Call
                event_stream_message["tool_calls"] = message_chunk.tool_calls
                event_stream_message["tool_call_chunks"] = (
                    message_chunk.tool_call_chunks
                )
                if event_stream_message["agent"] in SKIP_TOOL_CALL_NODE and get_skip_tool_call_thread(
                        thread_id) is False:
                    # First tool_calls event of a skip-listed node: emit it so
                    # the frontend knows streaming started, then arm the skip.
                    yield _make_event("tool_calls", event_stream_message)
                    set_skip_tool_call_thread(thread_id)
                elif event_stream_message["agent"] in SKIP_TOOL_CALL_NODE and get_skip_tool_call_thread(
                        thread_id) is True:
                    # Skip already armed: swallow further tool_calls events.
                    set_skip_tool_call_thread(thread_id)
                else:
                    yield _make_event("tool_calls", event_stream_message)
            elif message_chunk.tool_call_chunks:
                # AI Message - Tool Call Chunks
                event_stream_message["tool_call_chunks"] = (
                    message_chunk.tool_call_chunks
                )
                if event_stream_message["agent"] in SKIP_TOOL_CALL_NODE:
                    # Skip-listed node: arm the skip flag instead of emitting.
                    set_skip_tool_call_thread(thread_id)
                else:
                    yield _make_event("tool_call_chunks", event_stream_message)
            else:
                # Plain token stream; suppressed while the skip flag is armed.
                if get_skip_tool_call_thread(thread_id) is False:
                    yield _make_event("message_chunk", event_stream_message)


def _make_event(event_type: str, data: dict[str, any]):
    if data.get("content") == "":
        data.pop("content")
    return f"event: {event_type}\ndata: {json.dumps(data, ensure_ascii=False)}\n\n"


@app.post("/api/tts")
async def text_to_speech(request: TTSRequest):
    """Convert text to speech using volcengine TTS API."""
    try:
        app_id = os.getenv("VOLCENGINE_TTS_APPID", "")
        if not app_id:
            raise HTTPException(
                status_code=400, detail="VOLCENGINE_TTS_APPID is not set"
            )
        access_token = os.getenv("VOLCENGINE_TTS_ACCESS_TOKEN", "")
        if not access_token:
            raise HTTPException(
                status_code=400, detail="VOLCENGINE_TTS_ACCESS_TOKEN is not set"
            )
        cluster = os.getenv("VOLCENGINE_TTS_CLUSTER", "volcano_tts")
        voice_type = os.getenv("VOLCENGINE_TTS_VOICE_TYPE", "BV700_V2_streaming")

        tts_client = VolcengineTTS(
            appid=app_id,
            access_token=access_token,
            cluster=cluster,
            voice_type=voice_type,
        )
        # Call the TTS API
        result = tts_client.text_to_speech(
            text=request.text[:1024],
            encoding=request.encoding,
            speed_ratio=request.speed_ratio,
            volume_ratio=request.volume_ratio,
            pitch_ratio=request.pitch_ratio,
            text_type=request.text_type,
            with_frontend=request.with_frontend,
            frontend_type=request.frontend_type,
        )

        if not result["success"]:
            raise HTTPException(status_code=500, detail=str(result["error"]))

        # Decode the base64 audio data
        audio_data = base64.b64decode(result["audio_data"])

        # Return the audio file
        return Response(
            content=audio_data,
            media_type=f"audio/{request.encoding}",
            headers={
                "Content-Disposition": (
                    f"attachment; filename=tts_output.{request.encoding}"
                )
            },
        )
    except Exception as e:
        logger.exception(f"Error in TTS endpoint: {str(e)}")
        raise HTTPException(status_code=500, detail=INTERNAL_SERVER_ERROR_DETAIL)


@app.post("/api/podcast/generate")
async def generate_podcast(request: GeneratePodcastRequest):
    try:
        report_content = request.content
        workflow = build_podcast_graph()
        final_state = workflow.invoke({"input": report_content})
        audio_bytes = final_state["output"]
        return Response(content=audio_bytes, media_type="audio/mp3")
    except Exception as e:
        logger.exception(f"Error occurred during podcast generation: {str(e)}")
        raise HTTPException(status_code=500, detail=INTERNAL_SERVER_ERROR_DETAIL)


@app.post("/api/html/generate")
async def generate_html(request: GenerateHtmlRequest):
    try:
        report_content = request.content
        print(report_content)
        workflow = build_html_graph()
        final_state = workflow.invoke({"input": report_content, "agent_code": request.agent_code})
        html = final_state["output"]
        return Response(
            content=html,
            media_type="text/html"
        )
    except Exception as e:
        logger.exception(f"Error occurred during podcast generation: {str(e)}")
        raise HTTPException(status_code=500, detail=INTERNAL_SERVER_ERROR_DETAIL)


@app.post("/api/ppt/generate")
async def generate_ppt(request: GeneratePPTRequest):
    try:
        report_content = request.content
        print(report_content)
        workflow = build_ppt_graph()
        final_state = workflow.invoke({"input": report_content})
        generated_file_path = final_state["generated_file_path"]
        with open(generated_file_path, "rb") as f:
            ppt_bytes = f.read()
        return Response(
            content=ppt_bytes,
            media_type="application/vnd.openxmlformats-officedocument.presentationml.presentation",
        )
    except Exception as e:
        logger.exception(f"Error occurred during ppt generation: {str(e)}")
        raise HTTPException(status_code=500, detail=INTERNAL_SERVER_ERROR_DETAIL)


@app.post("/api/prose/generate")
async def generate_prose(request: GenerateProseRequest):
    try:
        sanitized_prompt = request.prompt.replace("\r\n", "").replace("\n", "")
        logger.info(f"Generating prose for prompt: {sanitized_prompt}")
        workflow = build_prose_graph()
        events = workflow.astream(
            {
                "content": request.prompt,
                "option": request.option,
                "command": request.command,
            },
            stream_mode="messages",
            subgraphs=True,
        )
        return StreamingResponse(
            (f"data: {event[0].content}\n\n" async for _, event in events),
            media_type="text/event-stream",
        )
    except Exception as e:
        logger.exception(f"Error occurred during prose generation: {str(e)}")
        raise HTTPException(status_code=500, detail=INTERNAL_SERVER_ERROR_DETAIL)


@app.post("/api/mcp/server/metadata", response_model=MCPServerMetadataResponse)
async def mcp_server_metadata(request: MCPServerMetadataRequest):
    """Get information about an MCP server."""
    try:
        # Set default timeout with a longer value for this endpoint
        timeout = 300  # Default to 300 seconds for this endpoint

        # Use custom timeout from request if provided
        if request.timeout_seconds is not None:
            timeout = request.timeout_seconds

        # Load tools from the MCP server using the utility function
        tools = await load_mcp_tools(
            server_type=request.transport,
            command=request.command,
            args=request.args,
            url=request.url,
            env=request.env,
            timeout_seconds=timeout,
        )

        # Create the response with tools
        response = MCPServerMetadataResponse(
            transport=request.transport,
            command=request.command,
            args=request.args,
            url=request.url,
            env=request.env,
            tools=tools,
        )

        return response
    except Exception as e:
        if not isinstance(e, HTTPException):
            logger.exception(f"Error in MCP server metadata endpoint: {str(e)}")
            raise HTTPException(status_code=500, detail=INTERNAL_SERVER_ERROR_DETAIL)
        raise


@app.get("/api/rag/config", response_model=RAGConfigResponse)
async def rag_config():
    """Get the config of the RAG."""
    return RAGConfigResponse(provider=SELECTED_RAG_PROVIDER)


@app.get("/api/rag/resources", response_model=RAGResourcesResponse)
async def rag_resources(request: Annotated[RAGResourceRequest, Query()]):
    """Get the resources of the RAG."""
    retriever = build_retriever()
    if retriever:
        return RAGResourcesResponse(resources=retriever.list_resources(request.query))
    return RAGResourcesResponse(resources=[])


def resize_if_needed(image, max_width=1240):
    """Scale *image* down to *max_width* preserving aspect ratio.

    Images already at or below *max_width* are returned unchanged.
    """
    if image.width <= max_width:
        return image
    scale = max_width / image.width
    return image.resize((max_width, int(image.height * scale)))


def extract_name_from_filename(filename: str) -> str:
    """Derive a candidate person name from a resume file name.

    Strips the extension, keeps the text before the first "简历" ("resume")
    marker when something precedes it, then removes everything except CJK
    characters, Latin letters and the "·" name separator.
    """
    stem = os.path.splitext(filename)[0]
    before, marker, _ = stem.partition("简历")
    # Use the prefix only when the marker exists and is not at position 0;
    # otherwise fall back to the whole stem.
    name_part = before if marker and before else stem
    return re.sub(r"[^\u4e00-\u9fa5a-zA-Z·]", "", name_part).strip()


async def handle_resume_file(thread_id: str, user_id: str, file: UploadFile = File(...)):
    """Upload a resume, extract structured fields via LLM and persist a Talent.

    Returns a JSONResponse with a success flag and, on success, the parsed
    name, extracted content, original filename and MinIO URL. All failures
    are caught and reported as {"success": False, "error": ...}.
    """
    try:
        # Read the raw bytes of the uploaded file.
        file_bytes = await file.read()

        # Prefix the object name with the thread id to avoid collisions.
        object_name = f"{thread_id}_{file.filename}"
        file_object = io.BytesIO(file_bytes)

        # 1. Upload the original file to MinIO for later retrieval.
        file_url = upload_file_to_minio(file_object, object_name, file.content_type)
        logger.info(f"文件上传到 MinIO 完成: {file_url}")

        # 2. Extract the plain-text content of the file.
        processor = FileProcessor()
        full_text = await processor.do_extract_file_content(file_bytes, object_name, file.content_type)

        # Was a bare print(); use lazy debug logging instead.
        logger.debug("full_text:%s", full_text)

        # 3. Ask the default LLM to extract structured resume fields as JSON.
        model = get_llm_by_type(AGENT_LLM_MAP["default"])
        system_content = get_prompt_template("interview/resume_extract")
        resume_content = model.invoke(
            [
                SystemMessage(content=system_content),
                HumanMessage(content=full_text),
            ],
        )
        logger.info(f"resume_content: {resume_content.content}")

        resume_dict = json.loads(resume_content.content)

        # Fall back to the file name when the model produced no name
        # (robustness: covers both "" and None, not just "").
        name = resume_dict["name"]
        if not name:
            name = extract_name_from_filename(file.filename)
        new_talent_data = Talent(
            project_id=thread_id,
            user_id=user_id,
            name=name,
            gender=resume_dict["gender"],
            age=resume_dict["age"],
            mobile=resume_dict["mobile"],
            education=resume_dict["education"],
            experience=resume_dict["experience"],
            position_applied=resume_dict["position_applied"],
            resume_url=file_url,
            resume_content=resume_dict["resume_content"]
        )

        # 4. Persist the talent record.
        await save_tenant(new_talent_data)

        uploaded_file = {
            "name": name,
            "resume_content": resume_content.content,
            "filename": file.filename,
            "resume_url": file_url
        }

        return JSONResponse(content={"success": True, "data": uploaded_file})

    except Exception as e:
        logger.exception(f"Error occurred during PDF upload: {str(e)}")
        return JSONResponse(content={"success": False, "error": str(e)})


async def handle_recording_file(thread_id: str, user_id: str, file):
    """Upload an interview recording and attach it to the matching talent.

    The talent (scoped to this thread) whose name appears in the uploaded
    file name receives the recording URL and extracted text; if none matches,
    an error response is returned.
    """
    try:
        file_bytes = await file.read()

        # Unique object name per thread to avoid collisions in MinIO.
        object_name = f"{thread_id}_{file.filename}"
        file_object = io.BytesIO(file_bytes)

        file_url = upload_file_to_minio(file_object, object_name, file.content_type)
        logger.info(f"文件上传到 MinIO 完成: {file_url}")

        # Extract the textual content of the recording file.
        processor = FileProcessor()
        full_text = await processor.do_extract_file_content(file_bytes, object_name, file.content_type)

        # Find the first talent whose name is contained in the file name.
        talents = await get_tenants(thread_id)
        matched = next((t for t in talents if t.name in file.filename), None)
        if matched is None:
            return JSONResponse(content={"success": False, "error": "上传的面试记录文件不存在对应的简历"})

        matched.interview_record_url = file_url
        matched.interview_record_content = full_text
        await save_tenant(matched)

        uploaded_file = {
            "interview_record_content": full_text,
            "filename": file.filename,
            "interview_record_url": file_url
        }
        return JSONResponse(content={"success": True, "data": uploaded_file})
    except Exception as e:
        logger.exception(f"Error occurred during recording_file upload: {str(e)}")
        return JSONResponse(content={"success": False, "error": str(e)})


def try_decode(file_bytes: bytes) -> str:
    """Decode *file_bytes*, trying common Chinese encodings in order.

    Attempts utf-8, gbk, gb2312 and big5; if none decodes cleanly, falls
    back to utf-8 with replacement characters so the call never raises.
    """
    for encoding in ('utf-8', 'gbk', 'gb2312', 'big5'):
        try:
            return file_bytes.decode(encoding)
        except UnicodeDecodeError:
            pass
    # Last resort: substitute U+FFFD for any undecodable bytes.
    return file_bytes.decode('utf-8', errors='replace')


# Resume/interview upload endpoint; upload_type: "resume" or "interview_record".
@app.post("/api/upload_interview")
async def upload_interview(thread_id: str = Form(...), upload_type: str = Form(...),
                           file: UploadFile = File(...), user_id: str = Depends(get_current_user_id_from_jwt)):
    """Dispatch an uploaded interview-related file to the matching handler."""
    handlers = {
        "resume": handle_resume_file,
        "interview_record": handle_recording_file,
    }
    handler = handlers.get(upload_type)
    if handler is None:
        return JSONResponse(content={"success": False, "error": "未知的业务类型"})
    return await handler(thread_id, user_id, file)


@app.get("/project_list")
async def project_list(user_id: str = Depends(get_current_user_id_from_jwt)):
    """最近项目列表"""
    datas = await  get_project_list(user_id)
    return JSONResponse(content={"success": True, "data": datas})


@app.get("/test")
async def test(user_id: str):
    # asyncio.create_task(generate_project_name(user_id, thread_id, message))
    datas = await  get_project_list(user_id)
    return JSONResponse(content={"success": True, "data": datas})


@app.get("/test/reporter")
async def test_reporter(agent_code: str, title: str, thought: str):
    content = await test_reporter_node(agent_code, title, thought)

    return StreamingResponse(content, media_type="text/plain")


@app.get("/api/talents", response_model=TalentResponse)
async def talent_list(request: Annotated[TalentRequest, Query()], user_id: str = Depends(get_current_user_id_from_jwt)):
    """Get the config of the RAG."""
    talents = await get_tenants(request.thread_id)

    return TalentResponse(talents=talents)


@app.post("/register")
async def register_user(user: Accounts):
    try:
        db_conn = DBConnection()
        client = await db_conn.client
        result = await client.auth.admin.create_user({
            "email": user.email,
            "password": user.password,
            "email_confirm": True,
            "user_metadata": {
                "tenant_id": user.tenant_id,
                "nick_name": user.nick_name,
                "avatar": user.avatar
            }
        })

        return {"success": True, "user": result.user.id}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
