"""API主文件 - SQL Agent API服务的入口点。"""

import os
import logging
import asyncio
from tabnanny import check
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.prebuilt import ToolNode
from typing import Optional
import uvicorn
from contextlib import asynccontextmanager
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse, StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
from langchain_core.messages.base import message_to_dict  # 添加导入
from datetime import datetime  # 添加导入

from langchain_core.runnables import RunnableConfig  # 添加这个导入
from langchain_core.prompts import PromptTemplate
from prompts.prompts import SQL_AGENT_SYSTEM
import uuid, json, time
from .models import (
    ChatCompletionRequest,
    ChatCompletionResponse,
    ChatCompletionResponseChoice,
    Message,
    StreamDelta,
    StreamChoice,
    StreamResponse,
)
from concurrent_log_handler import ConcurrentRotatingFileHandler
from utils.config import Config
from .routes import sql_router
from .auth_routes import auth_router  # 添加认证路由导入
from agents.graphs import (
    create_sql_graph_async,
    react_graph,
)
from agents.states import SqlGraphAnnotation
from utils.models import load_chat_model
from utils.logger import logger
from tools.database import list_tables_tool, get_schema_tool, run_query_tool

# API service settings
PORT = 8013  # TCP port the uvicorn server binds to

# Module-level globals, populated by the lifespan handler at startup
graph = None  # compiled LangGraph SQL agent (None until initialized)

llm = None  # chat model instance loaded via load_chat_model()
tool_names = None  # set of tool names available to the agent


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan manager: initialize on startup, clean up on shutdown.

    Startup: loads the chat model, builds the SQL graph, collects the set of
    tool names, and (best-effort) renders a PNG diagram of the graph.
    Shutdown: resets the DB connection pool and clears the module globals.
    """
    global graph, llm, tool_names

    try:
        logger.info("正在初始化SQL Agent API服务...")

        # Initialize the LLM
        llm = load_chat_model()
        # IPython imports removed — this runs in a server environment.
        # from IPython.display import display, Image

        # Initialize the SQL graph
        graph = await create_sql_graph_async()
        # graph = await create_react_sql_agent_async()
        tool_names = {
            tool.name for tool in [list_tables_tool, get_schema_tool, run_query_tool]
        }

        # Fetch the PNG bytes directly instead of using IPython's display
        try:
            png_data = graph.get_graph().draw_mermaid_png()
            # Ensure the output directory exists
            os.makedirs("output", exist_ok=True)
            # Persist the diagram
            with open("output/sql_agent.png", "wb") as f:
                f.write(png_data)
            logger.info("SQL Agent图表已保存到 output/sql_agent.png")
        except Exception as e:
            # Diagram rendering is optional; never block startup on it
            logger.warning(f"保存图表失败，但不影响服务启动: {str(e)}")

        logger.info("SQL Agent API服务初始化完成")
    except Exception as e:
        logger.error(f"初始化过程中出错: {str(e)}")
        raise

    try:
        yield
    except asyncio.CancelledError:
        logger.info("接收到取消信号，开始优雅关闭...")
    except KeyboardInterrupt:
        logger.info("接收到键盘中断信号，开始优雅关闭...")
    except Exception as e:
        logger.error(f"运行时异常: {str(e)}")
    finally:
        # Cleanup path — runs on every shutdown, graceful or not
        logger.info("正在关闭SQL Agent API服务...")
        try:
            # Imported here so module import does not require the pool
            from utils.database_pool import reset_connection_pool

            # Reset the shared connection-pool state
            await reset_connection_pool()
            logger.info("连接池已清理")

            # Clear the globals (the `global` statement above makes these
            # rebind the module-level names, not function locals)
            graph = None
            llm = None
            tool_names = None
            logger.info("全局变量已清理")

        except Exception as e:
            logger.error(f"清理过程中出错: {str(e)}")

        logger.info("SQL Agent API服务关闭完成")


# Create the FastAPI application
app = FastAPI(
    title="SQL Agent API",
    description="基于LangGraph的SQL Agent API服务",
    version="1.0.0",
    lifespan=lifespan,
)

# Add CORS middleware
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by browsers per the CORS spec — tighten origins for production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Register routers
app.include_router(sql_router)
app.include_router(auth_router)  # authentication routes


@app.get("/")
async def root():
    """Root endpoint: basic service metadata plus a pointer to the docs."""
    service_info = {
        "message": "SQL Agent API Service",
        "version": "1.0.0",
        "docs": "/docs",
    }
    return service_info


@app.get("/health")
async def health_check():
    """Health-check endpoint: reports service name and initialization state."""
    status_report = {
        "status": "healthy",
        "service": "SQL Agent API",
        "graph_initialized": graph is not None,
        "llm_initialized": llm is not None,
    }
    return status_report


import re


def format_response(response):
    """Reformat *response* text for readability.

    The text is split into paragraphs on runs of two or more newlines.
    Paragraphs containing ``` fences get their code segments re-wrapped with
    clean fence markers on their own lines; plain paragraphs have each
    ". " sentence boundary turned into a newline. Paragraphs are re-joined
    with blank lines.

    Args:
        response: The raw text to format.

    Returns:
        The formatted text with clear paragraph separation.
    """

    def _format_paragraph(paragraph):
        # One-line purpose: normalize a single paragraph (prose or mixed code).
        if "```" not in paragraph:
            # Plain prose: break sentences onto separate lines.
            return paragraph.replace(". ", ".\n").strip()
        # Splitting on the fence alternates text/code; odd indices are code.
        segments = paragraph.split("```")
        rebuilt = [
            f"\n```\n{seg.strip()}\n```\n" if idx % 2 == 1 else seg
            for idx, seg in enumerate(segments)
        ]
        return "".join(rebuilt).strip()

    # Two-or-more newlines delimit paragraphs.
    paragraphs = re.split(r"\n{2,}", response)
    return "\n\n".join(_format_paragraph(p) for p in paragraphs)


# 处理非流式响应的异步函数，生成并返回完整的响应内容
async def handle_non_stream_response(user_input, graph, config):
    """
    处理非流式响应的异步函数，生成并返回完整的响应内容。

    Args:
        user_input (str): 用户输入的内容。
        graph: 图对象，用于处理消息流。
        tool_config: 工具配置对象，包含可用工具的名称和定义。
        config (dict): 配置参数，包含线程和用户标识。

    Returns:
        JSONResponse: 包含格式化响应的 JSON 响应对象。
    """
    # 初始化 content 变量，用于存储最终响应内容
    content = None
    try:
        # 启动 graph.stream 处理用户输入，生成事件流
        events = graph.stream(
            {"messages": [{"role": "user", "content": user_input}], "rewrite_count": 0},
            config,
        )
        # 遍历事件流中的每个事件
        for event in events:
            # 遍历事件中的所有值
            for value in event.values():
                # 检查事件值是否包含有效消息列表
                if "messages" not in value or not isinstance(value["messages"], list):
                    # 记录警告日志，跳过无效消息
                    logger.warning("No valid messages in response")
                    continue

                # 获取消息列表中的最后一条消息
                last_message = value["messages"][-1]

                # 检查消息是否包含工具调用
                if hasattr(last_message, "tool_calls") and last_message.tool_calls:
                    # 遍历所有工具调用
                    for tool_call in last_message.tool_calls:
                        # 验证工具调用是否为字典且包含名称
                        if isinstance(tool_call, dict) and "name" in tool_call:
                            # 记录工具调用日志
                            logger.info(f"Calling tool: {tool_call['name']}")
                    # 跳过本次循环，继续处理下一事件
                    continue

                # 检查消息是否包含内容
                if hasattr(last_message, "content"):
                    # 将消息内容赋值给 content
                    content = last_message.content

                    # 检查是否为工具输出（基于工具名称）
                    if hasattr(last_message, "name"):
                        # 获取工具名称
                        tool_name = last_message.name
                        # 记录工具输出日志
                        logger.info(f"Tool Output [{tool_name}]: {content}")
                    # 处理大模型输出（非工具消息）
                    else:
                        # 记录最终响应日志
                        logger.info(f"Final Response is: {content}")
                else:
                    # 记录无内容的消息日志，跳过处理
                    logger.info("Message has no content, skipping")
    except ValueError as ve:
        # 捕获并记录值错误
        logger.error(f"Value error in response processing: {ve}")
    except Exception as e:
        # 捕获并记录其他未预期的异常
        logger.error(f"Error processing response: {e}")

    # 格式化响应内容，若无内容则返回默认值
    formatted_response = (
        str(format_response(content)) if content else "No response generated"
    )
    # 记录格式化后的响应日志
    logger.info(f"Results for Formatting: {formatted_response}")

    # 构造返回给客户端的响应对象
    try:
        response = ChatCompletionResponse(
            choices=[
                ChatCompletionResponseChoice(
                    index=0,
                    message=Message(role="assistant", content=formatted_response),
                    finish_reason="stop",
                )
            ]
        )
    except Exception as resp_error:
        # 捕获并记录构造响应对象时的异常
        logger.error(f"Error creating response object: {resp_error}")
        # 构造错误响应对象
        response = ChatCompletionResponse(
            choices=[
                ChatCompletionResponseChoice(
                    index=0,
                    message=Message(
                        role="assistant", content="Error generating response"
                    ),
                    finish_reason="error",
                )
            ]
        )

    # 记录发送给客户端的响应内容日志
    logger.info(f"Send response content: \n{response}")
    # 返回 JSON 格式的响应对象
    return JSONResponse(content=response.model_dump())


# 处理流式响应的异步函数，生成并返回流式数据
# Async handler for streaming responses: yields SSE chunks to the client
async def handle_stream_response(user_input, graph, config):
    """
    Handle a streaming chat request and return an SSE stream.

    Each model chunk is forwarded as an OpenAI-style `chat.completion.chunk`
    SSE event; tool messages are filtered out. The stream ends with a
    finish_reason="stop" chunk followed by the `[DONE]` sentinel.

    Args:
        user_input: The user's input (list of role/content message dicts).
        graph: Graph object used to process the message stream.
        config (dict): Run configuration, including thread and user identifiers.

    Returns:
        StreamingResponse: streaming response with media type text/event-stream.
    """

    async def generate_stream():
        # One chunk id shared by every SSE event of this completion
        chunk_id = f"chatcmpl-{uuid.uuid4().hex}"

        async for message_chunk, metadata in graph.astream(
            {"messages": user_input}, config, stream_mode="messages"
        ):
            # Tool outputs are internal agent steps — never forward them.
            # (Checked before reading .content; the original assigned the
            # content first and then dead-stored it in this branch.)
            if isinstance(message_chunk, ToolMessage):
                continue

            # Emit this model chunk as a structured SSE event
            stream_response = StreamResponse(
                id=chunk_id,
                object="chat.completion.chunk",
                created=int(time.time()),
                choices=[
                    StreamChoice(
                        index=0,
                        delta=StreamDelta(content=message_chunk.content),
                        finish_reason=None,
                    )
                ],
            )
            logger.info(f"message_chunk: {message_chunk}")
            yield f"data: {json.dumps(stream_response.model_dump())}\n\n"

        # Final chunk signalling the end of the stream
        final_response = StreamResponse(
            id=chunk_id,
            object="chat.completion.chunk",
            created=int(time.time()),
            choices=[
                StreamChoice(
                    index=0, delta=StreamDelta(content=""), finish_reason="stop"
                )
            ],
        )
        yield f"data: {json.dumps(final_response.model_dump())}\n\n"
        # SSE termination marker
        yield "data: [DONE]\n\n"

    # Build the StreamingResponse outside the async generator
    return StreamingResponse(generate_stream(), media_type="text/event-stream")


# 封装POST请求接口，与大模型进行问答
@app.post("/v1/chat/completions")
async def chat_completions(request: ChatCompletionRequest):
    # 判断初始化是否完成
    if not graph:
        logger.error("服务未初始化")
        raise HTTPException(status_code=500, detail="服务未初始化")

    try:
        logger.info(f"收到聊天完成请求: {request}")

        query_prompt = request.messages[-1].content
        logger.info(f"用户问题是: {query_prompt}")

        # 处理可能为None的userId和conversationId
        user_id = request.userId or "anonymous"
        # user_id="test_user"
        conversation_id = request.conversationId or "default"
        thread_id = f"{user_id}@@{conversation_id}"

        config = {
            "configurable": {"thread_id": thread_id, "user_id": user_id},
            "recursion_limit": 25,
        }
        logger.info(f"用户当前会话信息: {config}")

        prompt = [
            {"role": "system", "content": SQL_AGENT_SYSTEM},
            {"role": "user", "content": query_prompt},
        ]
        # sql_graph = await create_sql_graph_async()
        # 处理流式响应
        if request.stream:
            return await handle_stream_response(prompt, graph, config)

        # 处理非流式响应处理
        return await handle_non_stream_response(prompt, graph, config)

    except Exception as e:
        logger.error(f"处理聊天完成时出错:\n\n {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


# 在现有路由基础上添加线程管理路由
@app.get("/threads")
async def get_threads_root(
    userId: Optional[str] = None, limit: int = 100, offset: int = 0
):
    """获取线程列表的根路由（兼容前端调用）"""
    from .routes import get_threads

    return await get_threads(userId=userId, limit=limit, offset=offset)


@app.delete("/threads/{thread_id}")
async def delete_thread_root(thread_id: str):
    """Root-level thread-deletion route (kept for frontend compatibility).

    Delegates to the implementation in `.routes`.
    """
    from .routes import delete_thread as delete_impl

    result = await delete_impl(thread_id=thread_id)
    return result


if __name__ == "__main__":
    # Run the service directly with uvicorn when executed as a script.
    # NOTE(review): this module uses relative imports (`from .models import
    # ...`), so it must be run as a package module (python -m ...); executing
    # the file directly will fail at import time — confirm intended entry point.
    logger.info(f"在端口 {PORT} 上启动SQL Agent API服务器")
    uvicorn.run(app, host="0.0.0.0", port=PORT)
