from flask import Blueprint, request, current_app
from app.utils.mysql_db import get_db_connection, get_db_cursor, DatabaseLogger
from app.utils.logger import FlinkLogger, log_api_call, log_database_operation


bp = Blueprint("savepoints", __name__)


@bp.get("")
@log_api_call("savepoints.list_savepoints")
def list_savepoints():
    """List savepoint history records with filtering, sorting, and pagination.

    Query params:
        job_name (str, optional): substring filter on job_name (LIKE match).
        trigger_reason (str, optional): exact-match filter.
        page (int): 1-based page number; values < 1 are clamped to 1.
        size (int): page size; values < 1 are clamped to the default of 20.
        sort (str): sort column, validated against a whitelist
            (defaults to creation_time).
        order (str): "asc" or "desc" (defaults to desc).
        fields (repeatable): columns to return; unknown names are dropped.

    Returns:
        dict: {"success", "message", "data": {rows, total, total_all,
        page, size, fields}} — total is the filtered count, total_all the
        unfiltered count (for stat cards).
    """
    current_app.logger.info("Listing savepoints")
    job_name = request.args.get("job_name")
    trigger_reason = request.args.get("trigger_reason")
    page = request.args.get("page", 1, type=int)
    size = request.args.get("size", 20, type=int)

    # Clamp pagination inputs: page=0 or negative values would otherwise
    # produce a negative LIMIT/OFFSET and a MySQL syntax error (HTTP 500).
    if page < 1:
        page = 1
    if size < 1:
        size = 20

    # Sorting parameters (default: creation_time descending).
    sort_field = request.args.get("sort", "creation_time")
    sort_order = request.args.get("order", "desc").upper()

    all_fields = [
        "id","job_id","job_name","savepoint_path","creation_time","trigger_reason","status"
    ]
    # Default to every known column; reuse the whitelist so the two lists
    # cannot drift apart.
    fields = request.args.getlist("fields") or list(all_fields)
    display_fields = [f for f in fields if f in all_fields]
    # Always query "id" (deduplicated, order-preserving) so rows stay addressable.
    query_fields = list(dict.fromkeys(display_fields + ["id"]))
    select_clause = ", ".join(query_fields)

    where = []
    params = []
    if job_name:
        where.append("job_name LIKE %s")
        params.append(f"%{job_name}%")
    if trigger_reason:
        where.append("trigger_reason = %s")
        params.append(trigger_reason)
    where_sql = (" WHERE " + " AND ".join(where)) if where else ""

    # Whitelist-validate the sort column and direction: these are interpolated
    # into the SQL string, so they must never come straight from user input.
    if sort_field not in all_fields:
        sort_field = "creation_time"
    if sort_order not in ["ASC", "DESC"]:
        sort_order = "DESC"

    order_by_clause = f"ORDER BY {sort_field} {sort_order}"

    # Defaults in case the queries return nothing.
    total = 0
    total_all = 0
    rows = []

    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            # Filtered total (honors the WHERE clause above).
            count_sql = f"SELECT COUNT(*) as total FROM flink_cluster_savepoint_history {where_sql}"
            cur.execute(count_sql, params)
            total_result = cur.fetchone()
            total = total_result['total'] if total_result else 0

            # Global total (no filters) — used by the stats cards in the UI.
            cur.execute("SELECT COUNT(*) as total FROM flink_cluster_savepoint_history")
            total_all_result = cur.fetchone()
            total_all = total_all_result['total'] if total_all_result else 0

            # Fetch one page of rows.
            offset = (page - 1) * size
            cur.execute(
                f"""
                SELECT {select_clause}
                FROM flink_cluster_savepoint_history
                {where_sql}
                {order_by_clause}
                LIMIT %s OFFSET %s
                """,
                (*params, size, offset),
            )
            rows = cur.fetchall()

            # Serialize datetimes as local strings to avoid client-side
            # timezone conversion surprises.
            for row in rows:
                if row.get('creation_time') and hasattr(row['creation_time'], 'strftime'):
                    row['creation_time'] = row['creation_time'].strftime('%Y-%m-%d %H:%M:%S')

        return {
            "success": True,
            "message": "ok",
            "data": {
                "rows": rows, 
                "total": total,  # count under current filters
                "total_all": total_all,  # global count (for stat cards)
                "page": page, 
                "size": size, 
                "fields": display_fields
            }
        }
    finally:
        conn.close()