"""API路由定义 - 定义具体的API端点。"""

import logging
from typing import Optional
from fastapi import APIRouter, HTTPException
from fastapi.responses import JSONResponse
from langchain_core.runnables import RunnableConfig
from langchain_core.messages import HumanMessage  # message type for user input

from .models import SqlQueryRequest, SqlQueryResponse
from agents.graphs import create_sql_graph_async
from agents.states import SqlGraphAnnotation
from .models import ThreadListResponse, ThreadInfo, ThreadRequest
from utils.database_pool import get_async_connection
import sqlite3
from datetime import datetime
from utils.logger import logger
import json
from langgraph.checkpoint.sqlite.aio import AsyncSqliteSaver

# The module logger comes from utils.logger; stdlib fallback kept for reference:
# logger = logging.getLogger(__name__)

# Router grouping all SQL-agent endpoints under the /sql prefix.
sql_router = APIRouter(prefix="/sql", tags=["SQL Agent"])


def safe_json_loads(data, default=None):
    """Parse JSON safely, returning a fallback value on any failure.

    Args:
        data: JSON payload as ``bytes``, ``str``, or any object whose
            ``str()`` form is valid JSON. Falsy input yields the fallback.
        default: Value returned when parsing fails or input is empty.
            ``None`` maps to ``{}``.

    Returns:
        The parsed JSON value, or the fallback on failure.
    """
    # Respect falsy-but-not-None defaults such as [] or 0
    # (the previous `default or {}` silently replaced them with {}).
    fallback = default if default is not None else {}

    if not data:
        return fallback

    try:
        if isinstance(data, bytes):
            # Try a few encodings; fall through on decode *or* parse errors
            # so a later encoding can still yield valid JSON.
            for encoding in ("utf-8", "latin-1", "cp1252"):
                try:
                    return json.loads(data.decode(encoding))
                except (UnicodeDecodeError, json.JSONDecodeError):
                    continue
            logger.warning("所有编码方式都失败，返回默认值")
            return fallback
        return json.loads(str(data))
    except json.JSONDecodeError as e:
        logger.warning(f"JSON解析失败: {e}")
        return fallback


def safe_decode_data(data):
    """Best-effort decode of *data* into ``str``, trying several encodings.

    Args:
        data: Value to decode; ``str`` is returned unchanged, ``bytes`` are
            decoded with the first encoding that succeeds, anything else is
            stringified via ``str()``.

    Returns:
        str: Decoded text, or ``str(data)`` when every attempt fails.
    """
    if isinstance(data, str):
        return data

    # Encodings to try, most likely first (Chinese encodings included;
    # latin1 never fails on bytes, so it acts as a terminal fallback).
    encodings = ["utf-8", "gbk", "gb2312", "latin1", "cp1252"]

    for encoding in encodings:
        try:
            if isinstance(data, bytes):
                return data.decode(encoding)
            elif hasattr(data, "encode"):
                # NOTE(review): only reachable for exotic non-str objects
                # exposing ``encode`` — plain str returns early above.
                try:
                    return data.encode("latin1").decode(encoding)
                except (UnicodeDecodeError, UnicodeEncodeError):
                    # was a bare ``except:``; narrowed to codec errors so
                    # unrelated bugs are no longer silently swallowed
                    continue
        except (UnicodeDecodeError, UnicodeEncodeError, AttributeError):
            continue

    # Every encoding failed (or data has no decode path): stringify as-is.
    return str(data)


@sql_router.post("/query", response_model=SqlQueryResponse)
async def sql_query(request: SqlQueryRequest):
    """执行SQL查询或回答数据库相关问题。

    Args:
        request: SQL查询请求

    Returns:
        SqlQueryResponse: 查询结果
    """
    try:
        logger.info(f"收到SQL查询请求: {request.query}")

        # 创建SQL Agent的初始状态 - 确保正确的消息格式
        initial_state = {"messages": [HumanMessage(content=request.query)]}

        # 创建配置
        config = RunnableConfig(
            configurable={
                "database_uri": request.database_uri,
                "thread_id": (
                    f"{request.userId}@@{request.conversationId}"
                    if request.userId and request.conversationId
                    else "default"
                ),
            }
        )

        # 调用SQL Agent (同步)
        result = await create_sql_graph_async().ainvoke(initial_state, config)

        # 提取最后的消息作为结果
        if result and "messages" in result and result["messages"]:
            last_message = result["messages"][-1]
            if hasattr(last_message, "content"):
                response_content = str(last_message.content)
            else:
                response_content = "SQL查询执行完成，但没有返回结果。"
        else:
            response_content = "SQL查询执行完成，但没有返回结果。"

        logger.info(f"SQL查询结果: {response_content}")

        return SqlQueryResponse(result=response_content, success=True)

    except Exception as e:
        logger.error(f"SQL查询执行出错: {str(e)}")
        return SqlQueryResponse(result="", success=False, error=str(e))


@sql_router.get("/health")
async def health_check():
    """健康检查端点。"""
    return {
        "status": "healthy",
        "service": "SQL Agent API",
        "graph_initialized": True,
        "llm_initialized": True,
    }


# Thread-management routes
@sql_router.get("/threads", response_model=ThreadListResponse)
async def get_threads(userId: Optional[str] = None, limit: int = 100, offset: int = 0):
    """Return the list of conversation threads stored in the checkpointer.

    Args:
        userId: Optional user id; when given, only threads whose id matches
            it (prefix/suffix/substring/exact) are returned.
        limit: Maximum number of checkpoints kept after filtering.
        offset: Number of filtered checkpoints to skip.

    Returns:
        ThreadListResponse: One entry per thread id (with its messages),
        or an empty list with ``success=False`` on error.
    """
    try:
        logger.info(
            f"获取线程列表请求 - userId: {userId}, limit: {limit}, offset: {offset}"
        )

        # Acquire the shared async database connection
        conn = await get_async_connection()
        # logger.info("database connection acquired")

        saver = AsyncSqliteSaver(conn)
        await saver.setup()
        # logger.info("AsyncSqliteSaver initialized")

        # First scan without any filter to see all stored data
        # logger.info("scanning all checkpoints...")
        all_checkpoints = []
        total_count = 0

        # NOTE(review): alist() is capped at 1000 checkpoints, so very large
        # stores are truncated before filtering — confirm this is intended.
        async for checkpoint_tuple in saver.alist(config=None, limit=1000):
            total_count += 1
            thread_id = checkpoint_tuple.config["configurable"]["thread_id"]
            # logger.info(f"found checkpoint #{total_count}: thread_id={thread_id}")

            # When a userId is given, check whether this thread matches it
            if userId:
                # Try several matching strategies
                matches_prefix = thread_id.startswith(f"{userId}@@")
                matches_suffix = thread_id.endswith(f"@@{userId}")
                matches_contains = userId in thread_id
                matches_exact = thread_id == userId

                # logger.info(f"  match check - prefix: {matches_prefix}, suffix: {matches_suffix}, contains: {matches_contains}, exact: {matches_exact}")

                # Keep the checkpoint if any strategy matches
                if (
                    matches_prefix
                    or matches_suffix
                    or matches_contains
                    or matches_exact
                ):
                    all_checkpoints.append(checkpoint_tuple)
                #     logger.info(f"  included this checkpoint")
                # else:
                #     logger.info(f"  skipped this checkpoint")
            else:
                # No userId filter: keep every checkpoint
                all_checkpoints.append(checkpoint_tuple)
                # logger.info(f"  included this checkpoint (no userId filter)")

        # logger.info(f"found {total_count} checkpoints, {len(all_checkpoints)} remain after filtering")

        # Apply offset and limit; note pagination happens on raw checkpoints,
        # before grouping by thread id
        checkpoints = all_checkpoints[offset : offset + limit]
        # logger.info(f"after offset {offset} and limit {limit}, processing {len(checkpoints)} checkpoints")

        # Group by thread id; the first checkpoint seen per thread wins
        thread_map = {}
        for i, checkpoint_tuple in enumerate(checkpoints):
            thread_id = checkpoint_tuple.config["configurable"]["thread_id"]
            # logger.info(f"processing checkpoint {i+1}/{len(checkpoints)}: thread_id={thread_id}")

            if thread_id not in thread_map:
                # Extract every message in this checkpoint
                all_messages = []
                last_message = None

                if (
                    checkpoint_tuple.checkpoint
                    and "channel_values" in checkpoint_tuple.checkpoint
                ):
                    messages = checkpoint_tuple.checkpoint["channel_values"].get(
                        "messages", []
                    )
                    # logger.info(f"  found {len(messages)} messages")

                    for idx, msg in enumerate(messages):
                        message_dict = {}

                        # Extract the message content, capped in length below
                        content = ""
                        if hasattr(msg, "content"):
                            content = str(msg.content)
                        elif isinstance(msg, dict) and "content" in msg:
                            content = str(msg["content"])
                        else:
                            content = str(msg)

                        # Truncate content to at most 1000 characters
                        if len(content) > 1000:
                            content = content[:997] + "..."

                        message_dict["content"] = content

                        # Determine the message role
                        if hasattr(msg, "role"):
                            message_dict["role"] = msg.role
                        elif isinstance(msg, dict) and "role" in msg:
                            message_dict["role"] = msg["role"]
                        elif hasattr(msg, "__class__"):
                            # Infer the role from the message class name
                            class_name = msg.__class__.__name__
                            logger.debug({"class_name": class_name, "message": msg})
                            if "Human" in class_name or "User" in class_name:
                                message_dict["role"] = "user"
                            elif "AI" in class_name or "Assistant" in class_name:
                                message_dict["role"] = "assistant"
                            elif "System" in class_name:
                                message_dict["role"] = "system"
                            elif "Tool" in class_name:
                                message_dict["role"] = "tool"
                            else:
                                message_dict["role"] = "assistant"  # default role
                        else:
                            message_dict["role"] = "assistant"  # default role

                        # Determine the message type
                        if hasattr(msg, "type"):
                            message_dict["type"] = msg.type
                        elif isinstance(msg, dict) and "type" in msg:
                            message_dict["type"] = msg["type"]
                        elif hasattr(msg, "__class__"):
                            message_dict["type"] = msg.__class__.__name__
                        else:
                            message_dict["type"] = "unknown"

                        # Determine the message id (synthesized when absent)
                        if hasattr(msg, "id"):
                            message_dict["id"] = msg.id
                        elif isinstance(msg, dict) and "id" in msg:
                            message_dict["id"] = msg["id"]
                        else:
                            message_dict["id"] = f"msg_{idx}"

                        # Determine the timestamp (None when absent)
                        if hasattr(msg, "timestamp"):
                            message_dict["timestamp"] = msg.timestamp
                        elif isinstance(msg, dict) and "timestamp" in msg:
                            message_dict["timestamp"] = msg["timestamp"]
                        else:
                            message_dict["timestamp"] = None

                        # Copy any remaining dict attributes verbatim
                        if isinstance(msg, dict):
                            for key, value in msg.items():
                                if key not in ["content", "type", "id", "timestamp"]:
                                    message_dict[key] = value

                        all_messages.append(message_dict)
                        # logger.info(f"    message {idx+1}: type={message_dict['type']}, content={message_dict['content'][:50]}...")

                    # Record a preview of the last message (kept for
                    # backward compatibility with older clients)
                    if all_messages:
                        last_msg_content = all_messages[-1]["content"]
                        last_message = (
                            last_msg_content[:100] + "..."
                            if len(last_msg_content) > 100
                            else last_msg_content
                        )
                        # logger.info(f"  last message: {last_message}")
                else:
                    logger.info(f"  没有找到消息数据")

                # NOTE(review): created_at/updated_at are filled with the
                # checkpoint_id, not real timestamps — confirm downstream use.
                thread_map[thread_id] = ThreadInfo(
                    thread_id=thread_id,
                    created_at=checkpoint_tuple.config["configurable"]["checkpoint_id"],
                    updated_at=checkpoint_tuple.config["configurable"]["checkpoint_id"],
                    message_count=len(all_messages),
                    messages=all_messages,  # full message list
                    last_message=last_message,  # kept for compatibility
                    metadata=checkpoint_tuple.metadata or {},
                )
                # logger.info(f"  created thread info: {thread_id} with {len(all_messages)} messages")
            else:
                logger.info(f"  线程 {thread_id} 已存在，跳过")

        threads = list(thread_map.values())
        total = len(all_checkpoints)  # total after filtering, before pagination

        logger.info(f"最终结果: 成功获取 {len(threads)} 个线程，总计 {total} 个")

        return ThreadListResponse(threads=threads, total=total, success=True)

    except Exception as e:
        logger.error(f"获取线程列表失败: {str(e)}")
        import traceback

        logger.error(f"错误堆栈: {traceback.format_exc()}")
        return ThreadListResponse(threads=[], total=0, success=False)


@sql_router.delete("/threads/{thread_id}")
async def delete_thread(thread_id: str):
    """删除指定线程

    Args:
        thread_id: 线程ID

    Returns:
        dict: 删除结果
    """
    try:
        logger.info(f"删除线程请求 - thread_id: {thread_id}")

        # 获取数据库连接
        conn = await get_async_connection()

        # 删除线程相关的checkpoints
        delete_query = "DELETE FROM checkpoints WHERE thread_id = ?"

        async with conn.execute(delete_query, [thread_id]) as cursor:
            affected_rows = cursor.rowcount

        await conn.commit()

        logger.info(f"成功删除线程 {thread_id}，影响 {affected_rows} 行")

        return {
            "success": True,
            "message": f"线程 {thread_id} 删除成功",
            "affected_rows": affected_rows,
        }

    except Exception as e:
        logger.error(f"删除线程失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


# End of file; previously orphaned code was removed.
