from datetime import datetime
import time
import threading
from flask import Blueprint, request, current_app
from app.utils.mysql_db import get_db_connection, get_db_cursor, DatabaseLogger
from app.utils.flink import flink_request, find_latest_jar_id_by_name, get_flink_api_url, set_flink_api_url
from app.utils.logger import FlinkLogger, log_api_call, log_database_operation, log_business_logic, log_flink_operation, log_step
from app.services.task_service import task_manager


bp = Blueprint("jobs", __name__)


@log_flink_operation("verify_job_stopped")
def verify_job_stopped(job_id: str) -> tuple[bool, str]:
    """Check whether a Flink job has actually stopped.

    Returns a ``(stopped, message)`` tuple; *message* is a human-readable
    (Chinese) description of the job state or of the failure to query it.
    """
    current_app.logger.info(f"Verifying job stopped status | job_id: {job_id}")

    # States in which the job no longer needs monitoring/recovery.
    terminal_states = ("CANCELED", "CANCELLING", "FINISHED", "FAILED")

    try:
        current_app.logger.debug(f"Requesting job status from Flink | job_id: {job_id}")
        ok, result = flink_request("GET", f"/jobs/{job_id}")

        if not ok:
            current_app.logger.warning(f"Failed to get job status from Flink | job_id: {job_id} | error: {result}")
            return False, f"无法获取作业状态: {result}"

        job_state = result.get('state', 'UNKNOWN')
        current_app.logger.debug(f"Got job state from Flink | job_id: {job_id} | state: {job_state}")

        if job_state in terminal_states:
            current_app.logger.info(f"Job is stopped | job_id: {job_id} | state: {job_state}")
            return True, f"作业已停止，状态: {job_state}"

        current_app.logger.info(f"Job is still running | job_id: {job_id} | state: {job_state}")
        return False, f"作业仍在运行，状态: {job_state}"

    except Exception as e:
        current_app.logger.error(f"Exception while verifying job status | job_id: {job_id} | error: {str(e)}")
        return False, f"验证作业状态时出错: {str(e)}"


@log_flink_operation("find_job_id_by_name")
def find_job_id_by_name(job_name: str) -> str | None:
    """Find the id of a currently RUNNING job by name.

    An exact name match wins; otherwise a case-insensitive substring
    (fuzzy) match is attempted. Returns ``None`` when nothing matches.
    """
    current_app.logger.info(f"Finding job ID by name | job_name: {job_name}")

    try:
        # The overview endpoint gives a fuller snapshot of all jobs.
        current_app.logger.debug("Requesting jobs overview from Flink")
        ok, jobs_resp = flink_request("GET", "/jobs/overview")

        if not ok or not isinstance(jobs_resp, dict):
            current_app.logger.warning(f"Failed to get jobs overview | ok: {ok} | response_type: {type(jobs_resp)}")
            return None

        jobs = jobs_resp.get("jobs", []) or []
        current_app.logger.debug(f"Got {len(jobs)} jobs from Flink overview")

        target_lower = (job_name or "").lower()

        # Pass 1: exact name match on a RUNNING job.
        current_app.logger.debug(f"Attempting exact match for job name: {job_name}")
        for entry in jobs:
            state = entry.get("state")
            if state != "RUNNING":
                continue
            if (entry.get("name") or "").strip() == job_name:
                job_id = entry.get("jid") or entry.get("id")
                current_app.logger.info(f"Found exact match | job_name: {job_name} | job_id: {job_id} | state: {state}")
                return job_id

        # Pass 2: case-insensitive substring match on a RUNNING job.
        current_app.logger.debug(f"Attempting fuzzy match for job name: {job_name}")
        for entry in jobs:
            state = entry.get("state")
            if state != "RUNNING":
                continue
            name_lower = (entry.get("name") or "").lower()
            if target_lower and target_lower in name_lower:
                job_id = entry.get("jid") or entry.get("id")
                current_app.logger.info(f"Found fuzzy match | job_name: {job_name} | matched_name: {entry.get('name')} | job_id: {job_id} | state: {state}")
                return job_id

        current_app.logger.info(f"No running job found for name: {job_name}")
        return None

    except Exception as e:
        current_app.logger.error(f"Exception while finding job by name | job_name: {job_name} | error: {str(e)}")
        return None


@bp.get("/field-config")
def get_field_config():
    """Return the field configuration: per-group field definitions, a
    flat list of all fields, and display titles for each group."""

    def descriptor(value, label, **flags):
        # One field descriptor; flags carry e.g. required=True / default=True.
        return {"value": value, "label": label, **flags}

    field_config = {
        "basic": [
            descriptor("job_name", "作业名称", required=True, default=True),
            descriptor("job_id", "作业ID", default=True),
            descriptor("job_description", "作业描述"),
            descriptor("jar_name", "JAR包", default=True),
            descriptor("jar_id", "JAR ID"),
            descriptor("init_jar_path", "初始化JAR路径"),
            descriptor("last_jar_path", "最新JAR路径"),
            descriptor("entry_class", "入口类", default=True),
            descriptor("program_args", "程序参数"),
        ],
        "status": [
            descriptor("last_status", "状态", default=True),
            descriptor("auto_recovery", "自动恢复", default=True),
            descriptor("recovery_attempts", "恢复次数", default=True),
            descriptor("max_recovery_attempts", "最大恢复次数"),
            descriptor("monitor_status", "监控状态"),
            descriptor("total_failures", "总失败次数"),
            descriptor("consecutive_failures", "连续失败次数"),
            descriptor("manual_stopped", "手动停止"),
            descriptor("is_alert_enabled", "告警开关"),
            descriptor("health_status", "健康状态"),
        ],
        "time": [
            descriptor("registration_time", "注册时间"),
            descriptor("last_update_time", "更新时间", default=True),
            descriptor("last_check_time", "最后检查时间"),
            descriptor("last_running_time", "最后运行时间"),
            descriptor("last_stop_time", "最后停止时间"),
            descriptor("last_successful_check", "最后成功检查时间"),
            descriptor("last_health_check", "最后健康检查时间"),
        ],
        "savepoint": [
            descriptor("savepoint_interval", "保存点间隔(小时)"),
            descriptor("last_savepoint_path", "最后保存点路径"),
            descriptor("last_savepoint_time", "最后保存点时间"),
        ],
        "error": [
            descriptor("error_message", "错误信息"),
            descriptor("last_error_type", "最后错误类型"),
            descriptor("last_error_time", "最后错误时间"),
        ],
        "system": [
            descriptor("version", "版本号"),
        ],
    }

    # Flatten the grouped config, attaching the owning group to each field.
    all_fields = [
        {**field, "group": group}
        for group, fields in field_config.items()
        for field in fields
    ]

    return {
        "field_config": field_config,
        "all_fields": all_fields,
        "group_titles": {
            "basic": "基础信息",
            "status": "状态信息",
            "time": "时间信息",
            "savepoint": "保存点信息",
            "error": "错误信息",
            "system": "系统信息",
        },
    }


@bp.post("/field-settings")
def save_field_settings():
    """Validate and echo back the user's field selection and ordering.

    Persistence currently lives in the frontend's localStorage; this
    endpoint only filters out unknown field names.
    """
    payload = request.get_json() or {}

    selected_fields = payload.get("selected_fields", [])
    field_order = payload.get("field_order", [])
    user_id = payload.get("user_id", "default")  # reserved for future multi-user support

    # Known column names; used purely as a membership whitelist.
    known_fields = {
        "job_id", "job_name", "job_description", "jar_name", "init_jar_path", "last_jar_path",
        "entry_class", "program_args", "registration_time", "last_check_time", "last_running_time",
        "last_stop_time", "last_status", "recovery_attempts", "total_failures", "auto_recovery",
        "max_recovery_attempts", "manual_stopped", "is_alert_enabled", "savepoint_interval", "last_savepoint_path",
        "last_savepoint_time", "last_update_time", "jar_id", "error_message", "version",
        "last_successful_check", "consecutive_failures", "last_error_type", "last_error_time",
        "health_status", "last_health_check", "monitor_status",
    }

    # Drop anything the client sent that is not a real column.
    valid_selected_fields = [name for name in selected_fields if name in known_fields]
    valid_field_order = [name for name in field_order if name in known_fields]

    # Settings could be persisted server-side later; for now the frontend
    # stores them in localStorage and we just confirm the cleaned lists.
    return {
        "message": "Field settings saved successfully",
        "selected_fields": valid_selected_fields,
        "field_order": valid_field_order
    }


@bp.get("")
@log_api_call("jobs.list_jobs")
def list_jobs():
    """List jobs with filtering, sorting, server-side pagination and
    user-selectable display fields.

    Query params: ``status``, ``job_name`` (LIKE match), ``monitor_status``
    (filters); ``page`` / ``size`` (or ``pageSize``); ``sort`` / ``order``;
    ``fields[]`` or ``fields`` (columns to return); ``field_order``
    (JSON-encoded list controlling column display order).
    Returns rows plus global status statistics for the dashboard cards.
    """
    current_app.logger.info("API Call: jobs.list_jobs started")
    status = request.args.get("status")
    job_name = request.args.get("job_name")
    monitor_status = request.args.get("monitor_status")
    # Pagination parameters (server-side paging).
    try:
        page = int(request.args.get("page", 1))
        size = int(request.args.get("size", request.args.get("pageSize", 20)))
    except ValueError:
        page, size = 1, 20
    page = max(1, page)
    # Clamp page size to [1, 200] to bound query cost.
    size = max(1, min(size, 200))
    
    # Sort parameters (default: last_update_time descending).
    sort_field = request.args.get("sort", "last_update_time")
    sort_order = request.args.get("order", "desc").upper()
    
    # Optional column-order parameter: a JSON-encoded list of field names.
    field_order = request.args.get("field_order")
    if field_order:
        try:
            import json
            field_order_list = json.loads(field_order)
        except (json.JSONDecodeError, TypeError):
            field_order_list = None
    else:
        field_order_list = None

    # All selectable database columns (in table-definition order); doubles
    # as the whitelist for both SELECT fields and the ORDER BY column.
    all_fields = [
        "job_id","job_name","job_description","jar_name","init_jar_path","last_jar_path",
        "entry_class","program_args","registration_time","last_check_time","last_running_time",
        "last_stop_time","last_status","recovery_attempts","total_failures","auto_recovery",
        "max_recovery_attempts","manual_stopped","is_alert_enabled","savepoint_interval","last_savepoint_path",
        "last_savepoint_time","last_update_time","jar_id","error_message","version",
        "last_successful_check","consecutive_failures","last_error_type","last_error_time",
        "health_status","last_health_check","monitor_status",
    ]
    
    # Resolve the columns the user asked for.
    # Accept both "fields[]" (jQuery-style) and "fields" parameter names.
    fields = []
    if "fields[]" in request.args:
        fields = request.args.getlist("fields[]")
    elif "fields" in request.args:
        fields = request.args.getlist("fields")
    else:
        fields = [
        "job_name","job_id","jar_name","entry_class","last_status","auto_recovery",
        "recovery_attempts","last_update_time"
    ]
    
    # Keep only whitelisted columns (prevents SQL injection via field names).
    display_fields = [f for f in fields if f in all_fields]
    
    # If a column ordering was supplied, rearrange the selected fields.
    if field_order_list:
        # Take fields in the user-specified order, restricted to the
        # fields actually selected.
        ordered_fields = []
        for field in field_order_list:
            if field in display_fields and field not in ordered_fields:
                ordered_fields.append(field)
        # Append selected fields missing from the ordering list.
        for field in display_fields:
            if field not in ordered_fields:
                ordered_fields.append(field)
        display_fields = ordered_fields
    
    # De-duplicate while preserving order for the SELECT clause.
    query_fields = list(dict.fromkeys(display_fields))
    
    # Fall back to the default column set if nothing valid remains.
    if not query_fields:
        query_fields = [
            "job_name","job_id","jar_name","entry_class","last_status","auto_recovery",
            "recovery_attempts","last_update_time"
        ]
        display_fields = query_fields.copy()
    
    # Safe: query_fields only contains whitelisted column names.
    select_clause = ", ".join(query_fields)

    where = []
    params = []
    if status:
        where.append("last_status=%s")
        params.append(status)
    if job_name:
        where.append("job_name LIKE %s")
        like = f"%{job_name}%"
        params.append(like)
    if monitor_status:
        where.append("monitor_status=%s")
        params.append(monitor_status)
    where_sql = (" WHERE " + " AND ".join(where)) if where else ""
    
    # Whitelist-validate the sort column/direction to prevent SQL injection.
    if sort_field not in all_fields:
        sort_field = "last_update_time"
    if sort_order not in ["ASC", "DESC"]:
        sort_order = "DESC"
    
    # Build the ORDER BY clause from validated pieces only.
    order_by_clause = f"ORDER BY {sort_field} {sort_order}"

    # Defaults in case the queries return nothing.
    total = 0
    status_stats = {}
    total_all = 0
    rows = []
    
    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            # Total row count under the current filters (for pagination).
            cur.execute(
                f"""
                SELECT COUNT(*) as total
                FROM flink_cluster_job_metadata
                {where_sql}
                """,
                (*params,),
            )
            total_result = cur.fetchone() or {"total": 0}
            total = int(total_result.get("total", 0))
            
            # Global per-status counts (unfiltered — feeds the stat cards).
            cur.execute(
                """
                SELECT last_status, COUNT(*) as count
                FROM flink_cluster_job_metadata
                GROUP BY last_status
                """
            )
            status_stats_rows = cur.fetchall()
            status_stats = {row['last_status']: int(row['count']) for row in status_stats_rows}
            total_all = sum(status_stats.values())

            # Paged data query.
            offset = (page - 1) * size
            cur.execute(
                f"""
                SELECT {select_clause}
                FROM flink_cluster_job_metadata
                {where_sql}
                {order_by_clause}
                LIMIT %s OFFSET %s
                """,
                (*params, size, offset),
            )
            rows = cur.fetchall()
    finally:
        conn.close()
    
    # Post-process rows: NULL handling and datetime formatting.
    processed_rows = []
    
    # Columns holding datetime values that need string formatting.
    time_fields = [
        'registration_time', 'last_check_time', 'last_running_time',
        'last_stop_time', 'last_update_time', 'last_successful_check',
        'last_health_check', 'last_savepoint_time', 'last_error_time'
    ]
    
    for row in rows:
        processed_row = {}
        for field in display_fields:
            # Rows come back as dicts (dict cursor).
            value = row.get(field)
            
            # Render None / empty string as "-" for the UI.
            if value is None or value == "":
                processed_row[field] = "-"
            # Format datetimes server-side (avoids client timezone shifts).
            elif field in time_fields and hasattr(value, 'strftime'):
                processed_row[field] = value.strftime('%Y-%m-%d %H:%M:%S')
            else:
                processed_row[field] = value
        processed_rows.append(processed_row)
    
    # Unified response envelope: everything under "data".
    return {
        "success": True,
        "message": "ok",
        "data": {
            "rows": processed_rows,
            "fields": display_fields,
            "all_fields": all_fields,  # every available column, for the frontend picker
            "field_order": field_order_list,  # echo of the requested column ordering
            "total": total,  # row count under the current filters
            "total_all": total_all,  # global row count (for stat cards)
            "status_stats": status_stats,  # global per-status counts (for stat cards)
            "page": page,
            "size": size
        }
    }


@bp.post("/register")
def register_job():
    """Register a new job.

    The client must supply ``jar_name`` (a bare file name); the backend
    joins it with the configured base path to form the full jar path.
    Returns 400 on missing/invalid input, 500 on database errors.
    """
    import os
    data = request.get_json() or request.form
    job_name = (data.get("job_name") or "").strip()
    jar_name_input = (data.get("jar_name") or data.get("jar_path") or "").strip()
    entry_class = (data.get("entry_class") or "").strip()
    program_args = data.get("program_args") or ""
    auto_recovery = 1 if str(data.get("auto_recovery", "1")) in ("1", "true", "True") else 0
    # Validate numeric inputs up front: previously a non-numeric value
    # raised an unhandled ValueError and surfaced as a 500 instead of 400.
    try:
        max_recovery_attempts = int(data.get("max_recovery_attempts", 3))
        savepoint_interval = int(data.get("savepoint_interval", 24))
    except (TypeError, ValueError):
        return {"success": False, "error": "max_recovery_attempts and savepoint_interval must be integers", "code": "BAD_REQUEST"}, 400
    job_id = (data.get("job_id") or "").strip()
    job_description = data.get("job_description") or None

    if not job_name or not jar_name_input or not entry_class:
        return {"success": False, "error": "job_name, jar_name, entry_class are required", "code": "BAD_REQUEST"}, 400

    # Normalize jar_name: bare file name, with the .jar suffix stripped.
    jar_file = os.path.basename(jar_name_input)
    jar_basename = jar_file.replace('.jar', '') if jar_file.endswith('.jar') else jar_file

    # Base-path resolution priority: request body -> env var -> default.
    jar_base_path = (data.get('jar_base_path') or os.environ.get('JAR_BASE_PATH') 
                     or '/opt/flink/userjars/flink-web-upload')
    jar_base_path = str(jar_base_path).strip()
    if jar_base_path and not jar_base_path.endswith('/'):
        jar_base_path = jar_base_path + '/'
    jar_path = f"{jar_base_path}{jar_file}"

    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            fields = [
                'job_name','jar_name','init_jar_path','jar_id','entry_class','program_args',
                'auto_recovery','max_recovery_attempts','savepoint_interval','manual_stopped',
                'last_status','recovery_attempts','total_failures','job_description'
            ]
            values = [
                job_name, jar_basename, jar_path, None, entry_class, program_args,
                auto_recovery, max_recovery_attempts, savepoint_interval, 0,
                'REGISTERED', 0, 0, job_description
            ]
            # job_id is optional at registration time (filled in once running).
            if job_id:
                fields.append('job_id')
                values.append(job_id)
            placeholders = ','.join(['%s'] * len(values))
            sql = f"INSERT INTO flink_cluster_job_metadata ({','.join(fields)}) VALUES ({placeholders})"
            cur.execute(sql, values)
            conn.commit()
        return {"success": True, "message": "作业注册成功"}
    except Exception as e:
        conn.rollback()
        return {"success": False, "error": str(e), "code": "SERVER_ERROR"}, 500
    finally:
        conn.close()


@bp.post("/<string:job_name>/update-job")
def update_job(job_name: str):
    """Update a job's configuration.

    Accepts any subset of: jar_path, entry_class, program_args,
    auto_recovery, manual_stopped, max_recovery_attempts,
    savepoint_interval, job_description. When the monitoring flags
    (manual_stopped / auto_recovery) change, monitor_status is
    recomputed so the three columns stay consistent.
    """
    data = request.get_json() or request.form

    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            # Make sure the job exists before building the UPDATE.
            cur.execute("SELECT id FROM flink_cluster_job_metadata WHERE job_name=%s", (job_name,))
            if not cur.fetchone():
                return {"success": False, "error": "作业不存在", "code": "NOT_FOUND"}, 404

            # Accumulate SET clauses / values only for the fields provided.
            update_fields = []
            values = []

            if 'jar_path' in data:
                jar_path = data['jar_path'].strip()
                if jar_path:
                    # Derive the bare jar name (same convention as register_job).
                    jar_basename = jar_path.split('/')[-1].replace('.jar', '') if jar_path.endswith('.jar') else jar_path.split('/')[-1]
                    update_fields.extend(["jar_name=%s", "init_jar_path=%s"])
                    values.extend([jar_basename, jar_path])

            if 'entry_class' in data:
                entry_class = data['entry_class'].strip()
                if entry_class:
                    update_fields.append("entry_class=%s")
                    values.append(entry_class)

            if 'program_args' in data:
                update_fields.append("program_args=%s")
                values.append(data['program_args'])

            if 'auto_recovery' in data:
                auto_recovery = 1 if str(data['auto_recovery']) in ("1", "true", "True") else 0
                update_fields.append("auto_recovery=%s")
                values.append(auto_recovery)

            if 'manual_stopped' in data:
                manual_stopped = 1 if str(data['manual_stopped']) in ("1", "true", "True") else 0
                update_fields.append("manual_stopped=%s")
                values.append(manual_stopped)

            if 'max_recovery_attempts' in data:
                # Validate numeric input: previously a bad value raised
                # ValueError and surfaced as a 500 instead of a 400.
                try:
                    max_attempts = int(data['max_recovery_attempts'])
                except (TypeError, ValueError):
                    return {"success": False, "error": "max_recovery_attempts must be an integer", "code": "BAD_REQUEST"}, 400
                update_fields.append("max_recovery_attempts=%s")
                values.append(max_attempts)

            if 'savepoint_interval' in data:
                try:
                    savepoint_interval = int(data['savepoint_interval'])
                except (TypeError, ValueError):
                    return {"success": False, "error": "savepoint_interval must be an integer", "code": "BAD_REQUEST"}, 400
                update_fields.append("savepoint_interval=%s")
                values.append(savepoint_interval)

            if 'job_description' in data:
                update_fields.append("job_description=%s")
                values.append(data['job_description'])

            if not update_fields:
                return {"success": False, "error": "没有提供要更新的字段", "code": "BAD_REQUEST"}, 400

            # Always refresh the update timestamp.
            update_fields.append("last_update_time=%s")
            values.append(datetime.now())

            # WHERE parameter goes last.
            values.append(job_name)

            sql = f"UPDATE flink_cluster_job_metadata SET {', '.join(update_fields)} WHERE job_name=%s"
            cur.execute(sql, values)

            # Keep monitor_status in sync when the monitoring flags changed.
            if 'manual_stopped' in data or 'auto_recovery' in data:
                # Re-read the (possibly just-updated) flag values.
                cur.execute("SELECT manual_stopped, auto_recovery FROM flink_cluster_job_metadata WHERE job_name=%s", (job_name,))
                result = cur.fetchone()
                if result:
                    # BUG FIX: cursors in this module return dict rows (see the
                    # .get() usages in the sibling handlers); the previous
                    # tuple-unpacking of the row yielded the column NAMES
                    # (truthy strings), corrupting the monitor_status logic.
                    db_manual_stopped = result.get("manual_stopped")
                    db_auto_recovery = result.get("auto_recovery")

                    # Prefer freshly submitted values, fall back to DB state.
                    if 'manual_stopped' in data:
                        current_manual_stopped = 1 if str(data['manual_stopped']) in ("1", "true", "True") else 0
                    else:
                        current_manual_stopped = db_manual_stopped

                    if 'auto_recovery' in data:
                        current_auto_recovery = 1 if str(data['auto_recovery']) in ("1", "true", "True") else 0
                    else:
                        current_auto_recovery = db_auto_recovery

                    # Derive the combined monitoring state.
                    if current_manual_stopped and not current_auto_recovery:
                        monitor_status = 'DISABLED'
                    elif current_manual_stopped:
                        monitor_status = 'DISMONITOR'
                    elif not current_auto_recovery:
                        monitor_status = 'DISRECOVER'
                    else:
                        monitor_status = 'ACTIVE'

                    cur.execute("UPDATE flink_cluster_job_metadata SET monitor_status=%s WHERE job_name=%s", (monitor_status, job_name))

            conn.commit()

        return {"success": True, "message": "作业更新成功"}
    except Exception as e:
        conn.rollback()
        return {"success": False, "error": str(e), "code": "SERVER_ERROR"}, 500
    finally:
        conn.close()


@bp.post("/<string:job_name>/toggle-manual-stopped")
def toggle_manual_stopped(job_name: str):
    """Set the manual-stopped flag of a job and refresh its monitor status."""
    payload = request.get_json() or request.form
    manual_stopped = 1 if str(payload.get("manual_stopped", "0")) in ("1", "true", "True") else 0

    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            # The job must exist first.
            cur.execute("SELECT id FROM flink_cluster_job_metadata WHERE job_name=%s", (job_name,))
            if cur.fetchone() is None:
                return {"success": False, "error": "作业不存在", "code": "NOT_FOUND"}, 404

            # Stopping monitoring (manual_stopped=1) also force-disables alerting.
            if manual_stopped == 1:
                cur.execute("UPDATE flink_cluster_job_metadata SET manual_stopped=%s, is_alert_enabled=0, last_update_time=%s WHERE job_name=%s", 
                           (manual_stopped, datetime.now(), job_name))
            else:
                cur.execute("UPDATE flink_cluster_job_metadata SET manual_stopped=%s, last_update_time=%s WHERE job_name=%s", 
                           (manual_stopped, datetime.now(), job_name))

            # Combine with auto_recovery to derive the monitor status.
            cur.execute("SELECT auto_recovery FROM flink_cluster_job_metadata WHERE job_name=%s", (job_name,))
            row = cur.fetchone()
            if row:
                auto_recovery = int(row.get("auto_recovery", 0))

                if manual_stopped and not auto_recovery:
                    monitor_status = 'DISABLED'
                elif manual_stopped:
                    monitor_status = 'DISMONITOR'
                elif not auto_recovery:
                    monitor_status = 'DISRECOVER'
                else:
                    monitor_status = 'ACTIVE'

                cur.execute("UPDATE flink_cluster_job_metadata SET monitor_status=%s WHERE job_name=%s", 
                           (monitor_status, job_name))

            conn.commit()
        return {"success": True, "message": "手动停止状态已更新", "manual_stopped": manual_stopped}
    except Exception as e:
        conn.rollback()
        return {"success": False, "error": str(e), "code": "SERVER_ERROR"}, 500
    finally:
        conn.close()


@bp.post("/<string:job_name>/toggle-auto-recovery")
def toggle_auto_recovery(job_name: str):
    """Set the auto-recovery flag of a job and refresh its monitor status."""
    payload = request.get_json() or request.form
    auto_recovery = 1 if str(payload.get("auto_recovery", "0")) in ("1", "true", "True") else 0

    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            # The job must exist first.
            cur.execute("SELECT id FROM flink_cluster_job_metadata WHERE job_name=%s", (job_name,))
            if cur.fetchone() is None:
                return {"success": False, "error": "作业不存在", "code": "NOT_FOUND"}, 404

            # Persist the new flag.
            cur.execute("UPDATE flink_cluster_job_metadata SET auto_recovery=%s, last_update_time=%s WHERE job_name=%s", 
                       (auto_recovery, datetime.now(), job_name))

            # Combine with manual_stopped to derive the monitor status.
            cur.execute("SELECT manual_stopped FROM flink_cluster_job_metadata WHERE job_name=%s", (job_name,))
            row = cur.fetchone()
            if row:
                manual_stopped = int(row.get("manual_stopped", 0))

                # (manual_stopped?, auto_recovery?) -> combined monitoring state
                status_table = {
                    (True, False): 'DISABLED',
                    (True, True): 'DISMONITOR',
                    (False, False): 'DISRECOVER',
                    (False, True): 'ACTIVE',
                }
                monitor_status = status_table[(bool(manual_stopped), bool(auto_recovery))]

                cur.execute("UPDATE flink_cluster_job_metadata SET monitor_status=%s WHERE job_name=%s", 
                           (monitor_status, job_name))

            conn.commit()
        return {"success": True, "message": "自动恢复状态已更新", "auto_recovery": auto_recovery}
    except Exception as e:
        conn.rollback()
        return {"success": False, "error": str(e), "code": "SERVER_ERROR"}, 500
    finally:
        conn.close()


@bp.post("/<string:job_name>/toggle-alert-enabled")
def toggle_alert_enabled(job_name: str):
    """Toggle per-job alerting; refuses to enable alerts on an unmonitored job."""
    payload = request.get_json() or request.form
    is_alert_enabled = 1 if str(payload.get("is_alert_enabled", "0")) in ("1", "true", "True") else 0

    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            # Fetch existence and the monitoring flag in a single query.
            cur.execute("SELECT id, manual_stopped FROM flink_cluster_job_metadata WHERE job_name=%s", (job_name,))
            row = cur.fetchone()
            if row is None:
                return {"success": False, "error": "作业不存在", "code": "NOT_FOUND"}, 404

            # A job whose monitoring is stopped cannot have alerts enabled.
            manual_stopped = int(row.get("manual_stopped", 0))
            if manual_stopped == 1 and is_alert_enabled == 1:
                return {"success": False, "error": "作业已停止监控，无法开启告警。请先开启监控", "code": "INVALID_STATE"}, 400

            cur.execute("UPDATE flink_cluster_job_metadata SET is_alert_enabled=%s, last_update_time=%s WHERE job_name=%s", 
                       (is_alert_enabled, datetime.now(), job_name))

            conn.commit()
        return {"success": True, "message": "告警开关状态已更新", "is_alert_enabled": is_alert_enabled}
    except Exception as e:
        conn.rollback()
        return {"success": False, "error": str(e), "code": "SERVER_ERROR"}, 500
    finally:
        conn.close()


@bp.post("/<string:job_name>/update-args")
def update_args(job_name: str):
    """Persist new program arguments for a job (JSON body or form field)."""
    body = request.json or {}
    program_args = body.get("program_args") or request.form.get("program_args", "")
    with get_db_cursor(autocommit=True) as cur:
        cur.execute(
            """
            UPDATE flink_cluster_job_metadata
            SET program_args=%s, last_update_time=%s
            WHERE job_name=%s
            """,
            (program_args, datetime.now(), job_name),
        )
    return {"success": True, "message": "作业参数已更新"}


@bp.post("/<string:job_name>/toggle-monitor")
def toggle_monitor(job_name: str):
    """Set a job's monitor status (ACTIVE or DISABLED)."""
    monitor_status = (request.json or {}).get("monitor_status") or request.form.get("monitor_status", "ACTIVE")
    # Whitelist the two supported states; anything else falls back to ACTIVE.
    if monitor_status not in ("ACTIVE", "DISABLED"):
        monitor_status = "ACTIVE"
    with get_db_cursor(autocommit=True) as cur:
        cur.execute(
            """
            UPDATE flink_cluster_job_metadata
            SET monitor_status=%s, last_update_time=%s
            WHERE job_name=%s
            """,
            (monitor_status, datetime.now(), job_name),
        )
    # BUG FIX: the previous message said "入口类已更新" ("entry class updated"),
    # copy-pasted from update_entry_class; this endpoint updates monitor status.
    return {"success": True, "message": "监控状态已更新"}


@bp.post("/<string:job_name>/stop-monitor")
def stop_monitor_job(job_name: str):
    """Stop monitoring a job (leaves last_status untouched)."""
    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            # Existence check doubles as a read of the auto_recovery flag.
            cur.execute("SELECT auto_recovery FROM flink_cluster_job_metadata WHERE job_name=%s", (job_name,))
            row = cur.fetchone()
            if row is None:
                return {"error": "作业不存在"}, 404

            auto_recovery = int(row.get("auto_recovery", 0))

            # Recovery also off -> fully DISABLED; otherwise only monitoring is off.
            monitor_status = 'DISMONITOR' if auto_recovery else 'DISABLED'

            cur.execute(
                """
                UPDATE flink_cluster_job_metadata
                SET manual_stopped=1, monitor_status=%s, 
                    last_check_time=%s, last_update_time=%s
                WHERE job_name=%s
                """,
                (monitor_status, datetime.now(), datetime.now(), job_name),
            )
            conn.commit()
            return {"success": True, "message": "作业监控已停止"}
    except Exception as e:
        conn.rollback()
        return {"success": False, "error": str(e), "code": "SERVER_ERROR"}, 500
    finally:
        conn.close()


@bp.post("/<string:job_name>/start-monitor")
def start_monitor_job(job_name: str):
    """Resume monitoring a job (leaves last_status untouched)."""
    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            # Existence check doubles as a read of the auto_recovery flag.
            cur.execute("SELECT auto_recovery FROM flink_cluster_job_metadata WHERE job_name=%s", (job_name,))
            row = cur.fetchone()
            if row is None:
                return {"error": "作业不存在"}, 404

            auto_recovery = int(row.get("auto_recovery", 0))

            # Recovery still off -> monitoring only; otherwise fully ACTIVE.
            monitor_status = 'ACTIVE' if auto_recovery else 'DISRECOVER'

            cur.execute(
                """
                UPDATE flink_cluster_job_metadata
                SET manual_stopped=0, monitor_status=%s, 
                    last_check_time=%s, last_update_time=%s
                WHERE job_name=%s
                """,
                (monitor_status, datetime.now(), datetime.now(), job_name),
            )
            conn.commit()
            return {"success": True, "message": "作业监控已恢复"}
    except Exception as e:
        conn.rollback()
        return {"success": False, "error": str(e), "code": "SERVER_ERROR"}, 500
    finally:
        conn.close()


@bp.post("/<string:job_name>/update-attempts")
def update_attempts(job_name: str):
    """Update a job's recovery-attempt counters.

    Body (JSON or form):
        recovery_attempts (int, default 0)
        max_recovery_attempts (int, default 3)

    Returns 400 when either value cannot be interpreted as an integer.
    """
    # silent=True: a form-encoded POST must fall through to request.form
    # instead of raising UnsupportedMediaType inside get_json().
    data = request.get_json(silent=True) or request.form
    try:
        recovery_attempts = int(data.get("recovery_attempts", 0))
        max_recovery_attempts = int(data.get("max_recovery_attempts", 3))
    except (TypeError, ValueError):
        # int() raises TypeError for JSON null/list values and ValueError for
        # non-numeric strings; both are client errors, not server errors.
        return {"success": False, "error": "invalid integers", "code": "BAD_REQUEST"}, 400
    with get_db_cursor(autocommit=True) as cur:
        cur.execute(
            """
            UPDATE flink_cluster_job_metadata
            SET recovery_attempts=%s, max_recovery_attempts=%s, last_update_time=%s
            WHERE job_name=%s
            """,
            (recovery_attempts, max_recovery_attempts, datetime.now(), job_name),
        )
    return {"success": True, "message": "恢复次数更新成功"}


@bp.post("/<int:job_pk>/toggle-manual-stopped-by-id")
def toggle_manual_stopped_by_id(job_pk: int):
    """Set the manual_stopped flag on a job row identified by primary key.

    Accepts JSON or form data; a value of "1"/"true"/"True" sets the flag,
    anything else clears it.
    """
    # get_json(silent=True) tolerates non-JSON bodies; the request.json
    # property would raise UnsupportedMediaType for form posts before the
    # request.form fallback could ever run.
    payload = request.get_json(silent=True) or {}
    raw = payload.get("manual_stopped") or request.form.get("manual_stopped", "0")
    value = 1 if str(raw) in ("1", "true", "True") else 0
    with get_db_cursor(autocommit=True) as cur:
        cur.execute(
            """
            UPDATE flink_cluster_job_metadata
            SET manual_stopped=%s, last_update_time=%s
            WHERE id=%s
            """,
            (value, datetime.now(), job_pk),
        )
    return {"success": True, "message": "字段已更新"}


@bp.post("/<int:job_pk>/update-entry-class")
def update_entry_class(job_pk: int):
    """Update the entry_class of a job row identified by primary key.

    Returns 400 when entry_class is missing or blank.
    """
    # get_json(silent=True) tolerates non-JSON bodies; the request.json
    # property would raise UnsupportedMediaType for form posts before the
    # request.form fallback could ever run.
    payload = request.get_json(silent=True) or {}
    entry_class = (payload.get("entry_class") or request.form.get("entry_class") or "").strip()
    if not entry_class:
        return {"success": False, "error": "entry_class required", "code": "BAD_REQUEST"}, 400
    with get_db_cursor(autocommit=True) as cur:
        cur.execute(
            """
            UPDATE flink_cluster_job_metadata
            SET entry_class=%s, last_update_time=%s
            WHERE id=%s
            """,
            (entry_class, datetime.now(), job_pk),
        )
    return {"success": True, "message": "字段已更新"}


@bp.post("/<int:job_pk>/delete")
def delete_job(job_pk: int):
    """Delete a job row by primary key, cascading to alert/savepoint history."""
    connection = get_db_connection()
    try:
        with connection.cursor() as cursor:
            cursor.execute("SELECT job_id, job_name FROM flink_cluster_job_metadata WHERE id=%s", (job_pk,))
            record = cursor.fetchone()
            if not record:
                return {"success": False, "error": "not found", "code": "NOT_FOUND"}, 404
            linked_job_id = record.get("job_id")
            # Remove the metadata row first, then any history keyed by job_id.
            cursor.execute("DELETE FROM flink_cluster_job_metadata WHERE id=%s", (job_pk,))
            if linked_job_id:
                for history_table in ("flink_cluster_alert_history", "flink_cluster_savepoint_history"):
                    cursor.execute(f"DELETE FROM {history_table} WHERE job_id=%s", (linked_job_id,))
            connection.commit()
        return {"success": True, "message": "deleted"}
    except Exception as exc:
        connection.rollback()
        return {"success": False, "error": str(exc), "code": "SERVER_ERROR"}, 500
    finally:
        connection.close()


@bp.post("/<string:job_name>/delete")
def delete_job_by_name(job_name: str):
    """Delete a job identified by its name, cascading to related history rows."""
    connection = get_db_connection()
    try:
        with connection.cursor() as cursor:
            # Resolve the metadata row for this job name first.
            cursor.execute("SELECT id, job_id, job_name FROM flink_cluster_job_metadata WHERE job_name=%s", (job_name,))
            record = cursor.fetchone()
            if not record:
                return {"success": False, "error": f"Job '{job_name}' not found", "code": "NOT_FOUND"}, 404

            job_pk = record.get("id")
            linked_job_id = record.get("job_id")

            # Delete the main metadata row.
            cursor.execute("DELETE FROM flink_cluster_job_metadata WHERE id=%s", (job_pk,))

            # Cascade: remove alert and savepoint history tied to the Flink job id.
            if linked_job_id:
                for history_table in ("flink_cluster_alert_history", "flink_cluster_savepoint_history"):
                    cursor.execute(f"DELETE FROM {history_table} WHERE job_id=%s", (linked_job_id,))

            connection.commit()
        return {"success": True, "message": f"Job '{job_name}' deleted successfully"}
    except Exception as exc:
        connection.rollback()
        return {"success": False, "error": str(exc), "code": "SERVER_ERROR"}, 500
    finally:
        connection.close()


@bp.post("/<string:job_name>/stop")
@log_api_call("jobs.stop_job")
@log_business_logic("stop_single_job", log_params=False)
def stop_job(job_name: str):
    """Stop a single job, fully mirroring the flink_restart_all_jobs.py script logic.

    Body/query parameters:
        savepoint: truthy ("1"/"true"/"True") -> use /yarn-cancel instead of /stop.
        flink_env: optional alternate Flink REST base URL, restored afterwards.
        max_retries / retry_interval: retry policy for the stop call.
        update_db: truthy -> also mark the job manually stopped in the metadata DB.

    Matching is fuzzy: every RUNNING job whose display name contains
    *job_name* is stopped. Returns a summary with per-job stop results.
    """
    import time  # NOTE(review): shadows the module-level import; kept as-is
    
    data = request.get_json() or request.form
    savepoint = str(data.get("savepoint", "0")) in ("1","true","True")
    flink_env = data.get("flink_env") or request.args.get("flink_env")
    max_retries = int(data.get("max_retries", 3))
    retry_interval = int(data.get("retry_interval", 3))
    update_db = str(data.get("update_db", "0")) in ("1", "true", "True")  # default: do not touch the database

    current_app.logger.info(f"Starting job stop operation | job_name: {job_name} | savepoint: {savepoint} | max_retries: {max_retries}")

    # Temporarily point the Flink client at another environment if requested;
    # the original URL is restored in the finally block below.
    original_url = None
    if flink_env:
        current_app.logger.info(f"Using custom Flink environment | flink_env: {flink_env}")
        original_url = get_flink_api_url()
        set_flink_api_url(flink_env)

    try:
        # Step 1: fetch all running jobs (mirrors the script's get_running_jobs)
        current_app.logger.info(f"Step 1: Getting running jobs for job_name: {job_name}")
        ok, jobs_resp = flink_request("GET", "/jobs/overview")
        if not ok:
            current_app.logger.error(f"Failed to get jobs overview | error: {jobs_resp}")
            return {"success": False, "error": f"获取作业列表失败: {jobs_resp}", "code": "FLINK_ERROR"}, 500
        
        running_jobs = jobs_resp.get("jobs", [])
        if not running_jobs:
            running_jobs = []
        
        current_app.logger.debug(f"Found {len(running_jobs)} total running jobs")
        
        # Step 2: find matching RUNNING jobs (substring / fuzzy match)
        current_app.logger.info(f"Step 2: Finding matching jobs to stop | job_name: {job_name}")
        jobs_to_stop = []
        for job in running_jobs:
            job_state = job.get('state', '')
            job_display_name = job.get('name', '')
            if job_state == 'RUNNING' and job_name in job_display_name:
                current_app.logger.debug(f"Found matching job | display_name: {job_display_name} | job_id: {job.get('jid')} | state: {job_state}")
                jobs_to_stop.append(job)
        
        if not jobs_to_stop:
            current_app.logger.info(f"No running jobs found for name: {job_name} - treating as already stopped")
            return {
                "success": True,
                "message": "作业未运行，视为已停止",
                "data": {
                    "job_name": job_name,
                    "stopped_jobs": [],
                    "noop": True
                }
            }
        
        current_app.logger.info(f"Found {len(jobs_to_stop)} jobs to stop | job_name: {job_name}")
        
        # Step 3: stop the matched jobs (mirrors the script's stop_job_with_retry)
        stop_results = []
        
        for job in jobs_to_stop:
            job_id = job.get('jid') or job.get('id')
            job_display_name = job.get('name', '')
            
            current_app.logger.info(f"正在停止作业: {job_display_name} ({job_id})")
            
            # Stop the job, retrying up to max_retries times.
            stop_success = False
            stop_message = ""
            
            for attempt in range(max_retries + 1):
                if attempt > 0:
                    current_app.logger.info(f"第{attempt}次重试停止作业: {job_display_name} ({job_id})")
                    time.sleep(retry_interval)
                
                # Stop the job using the same API endpoints as the script.
                if savepoint:
                    endpoint = f"/jobs/{job_id}/yarn-cancel"  # mirrors the script's savepoint path
                else:
                    endpoint = f"/jobs/{job_id}/stop"
                
                stop_ok, stop_result = flink_request("POST", endpoint)
                
                if stop_ok:
                    stop_success = True
                    stop_message = "停止成功"
                    current_app.logger.info(f"作业停止成功: {job_display_name} ({job_id})")
                    break
                else:
                    stop_message = stop_result
                    if attempt < max_retries:
                        current_app.logger.info(f"作业停止失败: {job_display_name} ({job_id}), 错误: {stop_result}, {retry_interval}秒后重试...")
                    else:
                        current_app.logger.info(f"作业停止失败(已达最大重试次数): {job_display_name} ({job_id}), 错误: {stop_result}")
            
            stop_results.append({
                "job_id": job_id,
                "job_name": job_display_name,
                "success": stop_success,
                "message": stop_message
            })
        
        # Tally the per-job outcomes.
        success_count = sum(1 for r in stop_results if r["success"])
        failed_count = len(stop_results) - success_count
        
        # Optionally persist the manual-stop state once at least one job stopped.
        # DB failures here are logged but never fail the stop operation itself.
        if update_db and success_count > 0:
            current_app.logger.info(f"正在更新数据库中作业 {job_name} 的手动停止状态...")
            try:
                conn = get_db_connection()
                try:
                    with conn.cursor() as cur:
                        update_sql = """
                            UPDATE flink_cluster_job_metadata 
                            SET manual_stopped = 1,
                                monitor_status = CASE 
                                    WHEN auto_recovery = 0 THEN 'DISABLED'
                                    ELSE 'DISMONITOR'
                                END,
                                last_update_time = CURRENT_TIMESTAMP
                            WHERE job_name = %s
                        """
                        cur.execute(update_sql, [job_name])
                        conn.commit()
                        current_app.logger.info(f"✅ 数据库状态更新成功: manual_stopped=1, monitor_status=DISABLED/DISMONITOR")
                except Exception as update_error:
                    current_app.logger.warning(f"⚠️ 数据库状态更新失败（不影响停止结果）: {update_error}")
                    try:
                        conn.rollback()
                    except:  # best-effort rollback; the connection may already be dead
                        pass
                finally:
                    if conn:
                        try:
                            conn.close()
                        except:  # best-effort close
                            pass
            except Exception as conn_error:
                current_app.logger.warning(f"⚠️ 数据库连接失败: {conn_error}")
        elif not update_db:
            current_app.logger.info(f"⚠️ 跳过数据库更新（update_db=False，多环境场景）")
        
        return {
            "success": True,
            "message": f"停止完成: 成功 {success_count} 个, 失败 {failed_count} 个",
            "data": {
                "job_name": job_name,
                "stopped_jobs": stop_results,
                "success_count": success_count,
                "failed_count": failed_count
            }
        }
    except Exception as e:
        return {"success": False, "error": f"停止作业时发生异常: {str(e)}", "code": "SERVER_ERROR"}, 500
    finally:
        # Always restore the original Flink URL when an override was applied.
        if original_url:
            set_flink_api_url(original_url)


@bp.get("/<string:job_name>/status")
def job_status(job_name: str):
    """Return the Flink state of the running job matching *job_name*."""
    # Resolve the running Flink job id from the (possibly fuzzy) name.
    job_id = find_job_id_by_name(job_name)
    if not job_id:
        return {"success": False, "error": f"Job {job_name} not found or not running", "code": "NOT_FOUND"}, 404

    ok, result = flink_request("GET", f"/jobs/{job_id}")
    if not ok:
        return {"success": False, "error": result, "code": "SERVER_ERROR"}, 500
    if isinstance(result, dict):
        return {"state": result.get("state", "UNKNOWN"), "raw": result}
    # Non-dict payload: pass it through untouched.
    return {"raw": result}


@bp.post("/bulk-stop")
def bulk_stop():
    """Bulk-stop jobs, fully mirroring the flink_restart_all_jobs.py script logic.

    Body/form parameters:
        job_names (list[str], required): names to stop; matching is fuzzy.
        savepoint: truthy -> use /yarn-cancel instead of /stop.
        flink_env: optional alternate Flink REST base URL, restored afterwards.
        max_retries / retry_interval: per-job retry policy.
    """
    import time  # NOTE(review): shadows the module-level import; kept as-is
    
    data = request.get_json() or request.form
    job_names = data.get("job_names", [])
    savepoint = str(data.get("savepoint", "0")) in ("1","true","True")
    flink_env = data.get("flink_env")
    max_retries = int(data.get("max_retries", 3))
    retry_interval = int(data.get("retry_interval", 3))

    # Temporarily switch Flink environment if requested; restored in finally.
    original_url = None
    if flink_env:
        original_url = get_flink_api_url()
        set_flink_api_url(flink_env)

    try:
        if not job_names:
            return {"success": False, "error": "no jobs selected", "code": "BAD_REQUEST"}, 400
        
        current_app.logger.info(f"开始批量停止 {len(job_names)} 个作业")
        
        # Step 2: fetch all running jobs (mirrors the script's get_running_jobs)
        ok, jobs_resp = flink_request("GET", "/jobs/overview")
        if not ok:
            return {"success": False, "error": f"获取作业列表失败: {jobs_resp}", "code": "FLINK_ERROR"}, 500
        
        running_jobs = jobs_resp.get("jobs", [])
        if not running_jobs:
            running_jobs = []
        
        # Step 3: for each requested name, stop every matching job
        # (mirrors the script's stop_job_with_retry logic)
        stop_results = []
        
        for job_name in job_names:
            current_app.logger.info(f"正在处理作业: {job_name}")
            
            # Find matching RUNNING jobs (substring / fuzzy match)
            jobs_to_stop = []
            for job in running_jobs:
                job_state = job.get('state', '')
                job_display_name = job.get('name', '')
                if job_state == 'RUNNING' and job_name in job_display_name:
                    jobs_to_stop.append(job)
            
            if not jobs_to_stop:
                stop_results.append({
                    "job_name": job_name,
                    "job_id": None,
                    "success": True,
                    "message": "作业未运行，视为已停止",
                    "noop": True
                })
                current_app.logger.info(f"作业 {job_name} 未运行，跳过")
                continue
            
            # Stop all matching jobs
            for job in jobs_to_stop:
                job_id = job.get('jid') or job.get('id')
                job_display_name = job.get('name', '')
                
                current_app.logger.info(f"正在停止作业: {job_display_name} ({job_id})")
                
                # Stop with retries (mirrors the script's stop_job_with_retry)
                stop_success = False
                stop_message = ""
                
                for attempt in range(max_retries + 1):
                    if attempt > 0:
                        current_app.logger.info(f"第{attempt}次重试停止作业: {job_display_name} ({job_id})")
                        time.sleep(retry_interval)
                    
                    # Stop the job using the same API endpoints as the script
                    if savepoint:
                        endpoint = f"/jobs/{job_id}/yarn-cancel"
                    else:
                        endpoint = f"/jobs/{job_id}/stop"
                    
                    stop_ok, stop_result = flink_request("POST", endpoint)
                    
                    if stop_ok:
                        stop_success = True
                        stop_message = "停止成功"
                        current_app.logger.info(f"作业停止成功: {job_display_name} ({job_id})")
                        break
                    else:
                        stop_message = stop_result
                        if attempt < max_retries:
                            current_app.logger.info(f"作业停止失败: {job_display_name} ({job_id}), 错误: {stop_result}, {retry_interval}秒后重试...")
                        else:
                            current_app.logger.info(f"作业停止失败(已达最大重试次数): {job_display_name} ({job_id}), 错误: {stop_result}")
                
                stop_results.append({
                    "job_name": job_display_name,
                    "job_id": job_id,
                    "success": stop_success,
                    "message": stop_message
                })
        
        # Tally the per-job outcomes
        success_count = sum(1 for r in stop_results if r["success"])
        failed_count = len(stop_results) - success_count
        
        current_app.logger.info(f"批量停止完成: 成功 {success_count} 个, 失败 {failed_count} 个")
        
        return {
            "success": True,
            "message": f"批量停止完成: 成功 {success_count} 个, 失败 {failed_count} 个",
            "data": {
                "stopped_jobs": stop_results,
                "success_count": success_count,
                "failed_count": failed_count,
                "total_count": len(stop_results)
            }
        }
    except Exception as e:
        current_app.logger.info(f"批量停止作业时发生异常: {str(e)}")
        return {"success": False, "error": str(e), "code": "SERVER_ERROR"}, 500
    finally:
        # Always restore the original Flink URL when an override was applied.
        if original_url:
            set_flink_api_url(original_url)


@bp.post("/bulk-stop-monitor")
def bulk_stop_monitor():
    """Stop monitoring for the named jobs, or for every job when none are given.

    Stopping monitoring (manual_stopped=1) also disables alerting
    (is_alert_enabled=0); monitor_status is derived from auto_recovery.
    """
    payload = request.get_json() or {}
    job_names = payload.get("job_names", [])  # empty list -> update all jobs

    connection = get_db_connection()
    try:
        with connection.cursor() as cursor:
            if job_names:
                # Targeted update for the selected job names only.
                placeholders = ','.join(['%s'] * len(job_names))
                sql = f"""
                    UPDATE flink_cluster_job_metadata 
                    SET manual_stopped=1,
                        is_alert_enabled=0,
                        monitor_status = CASE 
                            WHEN auto_recovery = 0 THEN 'DISABLED'
                            ELSE 'DISMONITOR'
                        END,
                        last_check_time=%s, 
                        last_update_time=%s
                    WHERE job_name IN ({placeholders})
                """
                cursor.execute(sql, (datetime.now(), datetime.now(), *job_names))
            else:
                # No names given: apply to the whole table.
                cursor.execute(
                    """
                    UPDATE flink_cluster_job_metadata 
                    SET manual_stopped=1,
                        is_alert_enabled=0,
                        monitor_status = CASE 
                            WHEN auto_recovery = 0 THEN 'DISABLED'
                            ELSE 'DISMONITOR'
                        END,
                        last_check_time=%s, 
                        last_update_time=%s
                    """,
                    (datetime.now(), datetime.now())
                )
            affected_rows = cursor.rowcount
            connection.commit()

            return {
                "success": True,
                "message": f"已停止 {affected_rows} 个作业的监控",
                "data": {"affected_rows": affected_rows}
            }
    except Exception as exc:
        connection.rollback()
        return {"success": False, "error": str(exc), "code": "SERVER_ERROR"}, 500
    finally:
        connection.close()


@bp.post("/bulk-stop-recovery")
def bulk_stop_recovery():
    """Bulk-disable auto recovery.

    Body: {"job_names": [...]}; when job_names is absent or empty, every job
    is updated. monitor_status becomes DISABLED for manually-stopped jobs and
    DISRECOVER otherwise; manual_stopped itself is not touched.
    """
    data = request.get_json() or {}
    job_names = data.get("job_names", [])  # when omitted, update all jobs
    
    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            # Build different SQL depending on whether job_names was provided
            if job_names:
                # Update only the named jobs
                placeholders = ','.join(['%s'] * len(job_names))
                sql = f"""
                    UPDATE flink_cluster_job_metadata 
                    SET auto_recovery=0,
                        monitor_status = CASE 
                            WHEN manual_stopped = 1 THEN 'DISABLED'
                            ELSE 'DISRECOVER'
                        END,
                        last_update_time=%s
                    WHERE job_name IN ({placeholders})
                """
                cur.execute(sql, (datetime.now(), *job_names))
            else:
                # Update every job
                cur.execute(
                    """
                    UPDATE flink_cluster_job_metadata 
                    SET auto_recovery=0,
                        monitor_status = CASE 
                            WHEN manual_stopped = 1 THEN 'DISABLED'
                            ELSE 'DISRECOVER'
                        END,
                        last_update_time=%s
                    """,
                    (datetime.now(),)
                )
            affected_rows = cur.rowcount
            conn.commit()
            
            return {
                "success": True,
                "message": f"已停止 {affected_rows} 个作业的自动恢复",
                "data": {"affected_rows": affected_rows}
            }
    except Exception as e:
        conn.rollback()
        return {"success": False, "error": str(e), "code": "SERVER_ERROR"}, 500
    finally:
        conn.close()


@bp.post("/bulk-start-monitor")
def bulk_start_monitor():
    """Bulk-resume monitoring.

    Body: {"job_names": [...]}; when job_names is absent or empty, every job
    is updated. Clears manual_stopped; monitor_status becomes ACTIVE unless
    auto_recovery is off, in which case it becomes DISRECOVER.
    """
    data = request.get_json() or {}
    job_names = data.get("job_names", [])  # when omitted, update all jobs
    
    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            # Build different SQL depending on whether job_names was provided
            if job_names:
                # Update only the named jobs
                placeholders = ','.join(['%s'] * len(job_names))
                sql = f"""
                    UPDATE flink_cluster_job_metadata 
                    SET manual_stopped=0,
                        monitor_status = CASE 
                            WHEN auto_recovery = 0 THEN 'DISRECOVER'
                            ELSE 'ACTIVE'
                        END,
                        last_check_time=%s, 
                        last_update_time=%s
                    WHERE job_name IN ({placeholders})
                """
                cur.execute(sql, (datetime.now(), datetime.now(), *job_names))
            else:
                # Update every job
                cur.execute(
                    """
                    UPDATE flink_cluster_job_metadata 
                    SET manual_stopped=0,
                        monitor_status = CASE 
                            WHEN auto_recovery = 0 THEN 'DISRECOVER'
                            ELSE 'ACTIVE'
                        END,
                        last_check_time=%s, 
                        last_update_time=%s
                    """,
                    (datetime.now(), datetime.now())
                )
            affected_rows = cur.rowcount
            conn.commit()
            
            return {
                "success": True,
                "message": f"已开始 {affected_rows} 个作业的监控",
                "data": {"affected_rows": affected_rows}
            }
    except Exception as e:
        conn.rollback()
        return {"success": False, "error": str(e), "code": "SERVER_ERROR"}, 500
    finally:
        conn.close()


@bp.post("/bulk-start-recovery")
def bulk_start_recovery():
    """Bulk-enable auto recovery.

    Body: {"job_names": [...]}; when job_names is absent or empty, every job
    is updated. monitor_status becomes DISMONITOR for manually-stopped jobs
    and ACTIVE otherwise; manual_stopped itself is not touched.
    """
    data = request.get_json() or {}
    job_names = data.get("job_names", [])  # when omitted, update all jobs
    
    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            # Build different SQL depending on whether job_names was provided
            if job_names:
                # Update only the named jobs
                placeholders = ','.join(['%s'] * len(job_names))
                sql = f"""
                    UPDATE flink_cluster_job_metadata 
                    SET auto_recovery=1,
                        monitor_status = CASE 
                            WHEN manual_stopped = 1 THEN 'DISMONITOR'
                            ELSE 'ACTIVE'
                        END,
                        last_update_time=%s
                    WHERE job_name IN ({placeholders})
                """
                cur.execute(sql, (datetime.now(), *job_names))
            else:
                # Update every job
                cur.execute(
                    """
                    UPDATE flink_cluster_job_metadata 
                    SET auto_recovery=1,
                        monitor_status = CASE 
                            WHEN manual_stopped = 1 THEN 'DISMONITOR'
                            ELSE 'ACTIVE'
                        END,
                        last_update_time=%s
                    """,
                    (datetime.now(),)
                )
            affected_rows = cur.rowcount
            conn.commit()
            
            return {
                "success": True,
                "message": f"已开始 {affected_rows} 个作业的自动恢复",
                "data": {"affected_rows": affected_rows}
            }
    except Exception as e:
        conn.rollback()
        return {"success": False, "error": str(e), "code": "SERVER_ERROR"}, 500
    finally:
        conn.close()


@bp.post("/bulk-disable-alert")
def bulk_disable_alert():
    """Disable alerting for the named jobs, or for every job when none are given."""
    payload = request.get_json() or {}
    job_names = payload.get("job_names", [])  # empty list -> update all jobs

    connection = get_db_connection()
    try:
        with connection.cursor() as cursor:
            if job_names:
                # Targeted update for the selected job names only.
                placeholders = ','.join(['%s'] * len(job_names))
                sql = f"""
                    UPDATE flink_cluster_job_metadata 
                    SET is_alert_enabled=0,
                        last_update_time=%s
                    WHERE job_name IN ({placeholders})
                """
                cursor.execute(sql, (datetime.now(), *job_names))
            else:
                # No names given: disable alerting across the board.
                cursor.execute(
                    """
                    UPDATE flink_cluster_job_metadata 
                    SET is_alert_enabled=0,
                        last_update_time=%s
                    """,
                    (datetime.now(),)
                )
            affected_rows = cursor.rowcount
            connection.commit()

            return {
                "success": True,
                "message": f"已关闭 {affected_rows} 个作业的告警",
                "data": {"affected_rows": affected_rows}
            }
    except Exception as exc:
        connection.rollback()
        return {"success": False, "error": str(exc), "code": "SERVER_ERROR"}, 500
    finally:
        connection.close()


@bp.post("/bulk-enable-alert")
def bulk_enable_alert():
    """Bulk-enable alerting (only affects jobs with manual_stopped=0).

    Body: {"job_names": [...]}; when job_names is absent or empty, every
    non-manually-stopped job is updated. Named jobs that are manually stopped
    are skipped and counted separately in the response.
    """
    data = request.get_json() or {}
    job_names = data.get("job_names", [])  # when omitted, update all jobs
    
    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            # Build different SQL depending on whether job_names was provided
            if job_names:
                # Update only the named jobs, and only those with manual_stopped=0
                placeholders = ','.join(['%s'] * len(job_names))
                sql = f"""
                    UPDATE flink_cluster_job_metadata 
                    SET is_alert_enabled=1,
                        last_update_time=%s
                    WHERE job_name IN ({placeholders})
                      AND manual_stopped=0
                """
                cur.execute(sql, (datetime.now(), *job_names))
                affected_rows = cur.rowcount
                
                # Count the skipped jobs (those with manual_stopped=1)
                skip_sql = f"""
                    SELECT COUNT(*) as count
                    FROM flink_cluster_job_metadata
                    WHERE job_name IN ({placeholders})
                      AND manual_stopped=1
                """
                cur.execute(skip_sql, (*job_names,))
                skip_result = cur.fetchone()
                skipped_rows = int(skip_result.get('count', 0)) if skip_result else 0
            else:
                # Update every job with manual_stopped=0
                cur.execute(
                    """
                    UPDATE flink_cluster_job_metadata 
                    SET is_alert_enabled=1,
                        last_update_time=%s
                    WHERE manual_stopped=0
                    """,
                    (datetime.now(),)
                )
                affected_rows = cur.rowcount
                skipped_rows = 0
            
            conn.commit()
            
            message = f"已开启 {affected_rows} 个作业的告警"
            if skipped_rows > 0:
                message += f"，跳过 {skipped_rows} 个已停止监控的作业"
            
            return {
                "success": True,
                "message": message,
                "data": {
                    "affected_rows": affected_rows,
                    "skipped_rows": skipped_rows
                }
            }
    except Exception as e:
        conn.rollback()
        return {"success": False, "error": str(e), "code": "SERVER_ERROR"}, 500
    finally:
        conn.close()


@bp.post("/bulk-restart")
def bulk_restart():
    """Bulk-restart jobs, fully mirroring the flink_restart_all_jobs.py script logic.

    Jobs are restarted serially via restart_job_by_name_core_logic
    (defined elsewhere in this module); per-job failures are collected
    in the response rather than aborting the whole batch.
    """
    import time  # NOTE(review): both shadow the module-level imports; kept as-is
    from datetime import datetime
    
    data = request.get_json() or request.form
    job_names = data.get("job_names", [])
    program_args = data.get("program_args", "")
    savepoint = str(data.get("savepoint", "0")) in ("1","true","True")
    flink_env = data.get("flink_env")
    max_retries = int(data.get("max_retries", 3))
    retry_interval = int(data.get("retry_interval", 3))

    # Temporarily switch Flink environment if requested; restored in finally.
    original_url = None
    if flink_env:
        original_url = get_flink_api_url()
        set_flink_api_url(flink_env)

    try:
        if not job_names:
            return {"success": False, "error": "no jobs selected", "code": "BAD_REQUEST"}, 400
        
        current_app.logger.info(f"开始批量重启 {len(job_names)} 个作业")
        
        restart_results = []
        
        # Step 1: restart each named job one at a time (serial, mirrors the script)
        for i, job_name in enumerate(job_names):
            current_app.logger.info(f"重启第 {i+1}/{len(job_names)} 个作业: {job_name}")
            
            try:
                # Delegate to the single-job restart core logic, which mirrors
                # the flink_restart_all_jobs.py script
                restart_result = restart_job_by_name_core_logic(job_name, program_args, max_retries, retry_interval, savepoint)
                
                restart_results.append({
                    "job_name": job_name,
                    "success": restart_result.get("success", False),
                    "message": restart_result.get("message", ""),
                    "data": restart_result.get("data", {})
                })
                
                if restart_result.get("success"):
                    current_app.logger.info(f"作业 {job_name} 重启成功")
                else:
                    current_app.logger.info(f"作业 {job_name} 重启失败: {restart_result.get('message', '')}")
                    
            except Exception as e:
                current_app.logger.info(f"作业 {job_name} 重启时发生异常: {str(e)}")
                restart_results.append({
                    "job_name": job_name,
                    "success": False,
                    "message": f"重启时发生异常: {str(e)}",
                    "data": {}
                })
        
        # Tally the per-job outcomes
        success_count = sum(1 for r in restart_results if r["success"])
        failed_count = len(restart_results) - success_count
        
        current_app.logger.info(f"批量重启完成: 成功 {success_count} 个, 失败 {failed_count} 个")
        
        return {
            "success": True,
            "message": f"批量重启完成: 成功 {success_count} 个, 失败 {failed_count} 个",
            "data": {
                "restarted_jobs": restart_results,
                "success_count": success_count,
                "failed_count": failed_count,
                "total_count": len(restart_results)
            }
        }
    except Exception as e:
        current_app.logger.info(f"批量重启作业时发生异常: {str(e)}")
        return {"success": False, "error": str(e), "code": "SERVER_ERROR"}, 500
    finally:
        # Always restore the original Flink URL when an override was applied.
        if original_url:
            set_flink_api_url(original_url)


def restart_single_job_internal(job_name: str, jar_name: str, entry_class: str, program_args: str, max_retries: int = 3, retry_interval: int = 3, savepoint: bool = False):
    """Restart a single Flink job by name, mimicking the flink_restart_all_jobs.py script.

    Stops every RUNNING job whose display name contains ``job_name`` (with
    retries), then resubmits the newest matching JAR with the given entry
    class and program arguments.  The database is deliberately not touched.

    Args:
        job_name: Substring used to match running job names.
        jar_name: Substring used to match uploaded JAR file names.
        entry_class: Fully qualified main class for the new submission.
        program_args: Raw program-argument string; placeholder values such
            as "-" or "null" are treated as empty.
        max_retries: Extra stop attempts after the first failure.
        retry_interval: Seconds to wait between stop attempts.
        savepoint: Selects the stop endpoint (see NOTE below).

    Returns:
        dict with ``success`` and ``message`` keys; on success ``data``
        contains the new job id, per-job stop results and the JAR used.
    """
    try:
        # Step 1: list all jobs currently known to the cluster.
        ok, jobs_resp = flink_request("GET", "/jobs/overview")
        if not ok:
            return {"success": False, "message": f"获取作业列表失败: {jobs_resp}"}

        running_jobs = jobs_resp.get("jobs") or []

        # Fuzzy match: any RUNNING job whose name contains job_name.
        jobs_to_stop = [
            job for job in running_jobs
            if job.get('state', '') == 'RUNNING' and job_name in job.get('name', '')
        ]

        # Step 2: stop each matched job, retrying on failure.
        stop_results = []
        for job in jobs_to_stop:
            job_id = job.get('jid') or job.get('id')
            job_display_name = job.get('name', '')

            # NOTE(review): savepoint=True maps to yarn-cancel and
            # savepoint=False to /stop; in the Flink REST API /stop is the
            # savepoint-draining call -- confirm this mapping is intended.
            endpoint = f"/jobs/{job_id}/yarn-cancel" if savepoint else f"/jobs/{job_id}/stop"

            for attempt in range(max_retries + 1):
                stop_ok, stop_result = flink_request("POST", endpoint)
                if stop_ok:
                    stop_results.append({"job_id": job_id, "job_name": job_display_name, "success": True})
                    break
                if attempt >= max_retries:
                    # Out of retries: record the failure and move on.
                    stop_results.append({"job_id": job_id, "job_name": job_display_name, "success": False, "message": stop_result})
                else:
                    time.sleep(retry_interval)

        # Step 3: give stopped jobs a moment to terminate completely.
        if jobs_to_stop:
            time.sleep(5)

        # Step 4: list the JAR packages uploaded to the cluster.
        ok, jars_resp = flink_request("GET", "/jars")
        if not ok:
            return {"success": False, "message": f"获取JAR包列表失败: {jars_resp}"}

        jars = jars_resp.get("files", [])

        # Step 5: find JARs matching jar_name; fall back to a looser match
        # on the base name (version/suffix stripped) when nothing matched.
        matching_jars = [jar for jar in jars if jar_name and jar_name in jar.get('name', '')]
        if not matching_jars:
            jar_base_name = jar_name.replace('-1.0-SNAPSHOT.jar', '').replace('.jar', '') if jar_name else ''
            matching_jars = [jar for jar in jars if jar_base_name and jar_base_name in jar.get('name', '')]

        if not matching_jars:
            return {"success": False, "message": f"未找到匹配的JAR包: {jar_name}"}

        # Prefer the most recently uploaded matching JAR.
        latest_jar = max(matching_jars, key=lambda x: x.get('uploaded', 0))
        jar_id = latest_jar.get('id')
        actual_jar_name = latest_jar.get('name')

        # Step 6: submit the new job.  Only forward program_args when it is
        # a meaningful value, not a "-"/"null"-style placeholder.
        processed_program_args = ""
        if program_args and program_args.strip() and program_args not in ["-", "null", "NULL", "none", "None"]:
            processed_program_args = program_args.strip()

        submit_data = {
            'entryClass': entry_class,
            'programArgs': processed_program_args
        }

        submit_ok, submit_result = flink_request("POST", f"/jars/{jar_id}/run", json=submit_data)

        if not submit_ok:
            return {"success": False, "message": f"提交作业失败: {submit_result}"}

        new_job_id = submit_result.get('jobid') or submit_result.get('jobId') or str(submit_result)

        # Database is intentionally not updated here (cross-environment use,
        # job_id/jar_id would not apply to the caller's environment).

        return {
            "success": True,
            "message": "作业重启成功",
            "data": {
                "job_name": job_name,
                "new_job_id": new_job_id,
                "stopped_jobs": stop_results,
                "jar_used": {"id": jar_id, "name": actual_jar_name}
            }
        }

    except Exception as e:
        return {"success": False, "message": f"重启作业时发生异常: {str(e)}"}


@bp.post("/<string:job_name>/restart")
def restart_job_by_name(job_name: str):
    """Restart a job by name, fully mirroring the flink_restart_all_jobs.py script.

    Stops every RUNNING job whose name contains ``job_name``, reads the job's
    jar/entry-class/argument configuration from the database, picks the newest
    matching JAR on the cluster and resubmits it.

    Request fields (JSON or form):
        program_args: Optional override for the stored program arguments.
        flink_env: Optional Flink REST endpoint to use for this request only.
        update_db: "1"/"true" to write the new job id back to the database
            (off by default for multi-environment usage).

    Returns:
        JSON payload; failures return ``(dict, status_code)`` tuples.
    """
    data = request.get_json() or request.form
    program_args = data.get("program_args") or ""
    flink_env = data.get("flink_env") or request.args.get("flink_env")
    update_db = str(data.get("update_db", "0")) in ("1", "true", "True")  # DB update is opt-in

    # Temporarily switch the Flink endpoint; restored in the finally block.
    original_url = None
    if flink_env:
        original_url = get_flink_api_url()
        set_flink_api_url(flink_env)

    try:
        # Step 1: list every job the cluster knows (script: get_running_jobs).
        current_app.logger.info(f"开始重启作业: {job_name}")
        ok, jobs_resp = flink_request("GET", "/jobs/overview")
        if not ok:
            return {"success": False, "error": f"获取作业列表失败: {jobs_resp}", "code": "FLINK_ERROR"}, 500

        running_jobs = jobs_resp.get("jobs") or []

        # Fuzzy match: any RUNNING job whose display name contains job_name.
        jobs_to_stop = [
            job for job in running_jobs
            if job.get('state', '') == 'RUNNING' and job_name in job.get('name', '')
        ]

        current_app.logger.info(f"找到 {len(jobs_to_stop)} 个需要停止的运行中作业")

        stop_results = []

        # Step 2: stop each matched job (script: stop_job).
        for job in jobs_to_stop:
            job_id = job.get('jid') or job.get('id')
            job_display_name = job.get('name', '')

            current_app.logger.info(f"正在停止作业: {job_display_name} ({job_id})")

            # Same stop API the script uses.
            stop_ok, stop_result = flink_request("POST", f"/jobs/{job_id}/stop")
            stop_results.append({
                "job_id": job_id,
                "job_name": job_display_name,
                "success": stop_ok,
                "message": stop_result if not stop_ok else "停止成功"
            })

            if stop_ok:
                current_app.logger.info(f"作业停止成功: {job_display_name} ({job_id})")
            else:
                # Stop failures deserve warning level so they stand out.
                current_app.logger.warning(f"作业停止失败: {job_display_name} ({job_id}), 错误: {stop_result}")

        # Step 3: give stopped jobs time to terminate completely.
        if jobs_to_stop:
            current_app.logger.info("等待5秒确保作业完全停止...")
            time.sleep(5)

        # Step 4: read the job configuration from the database.
        conn = get_db_connection()
        try:
            with conn.cursor() as cur:
                cur.execute(
                    "SELECT jar_name, entry_class, jar_id, program_args FROM flink_cluster_job_metadata WHERE job_name=%s",
                    (job_name,)
                )
                job_info = cur.fetchone()
                if not job_info:
                    return {"success": False, "error": "数据库中未找到作业配置", "code": "NOT_FOUND"}, 404

            jar_name = job_info.get("jar_name")
            entry_class = job_info.get("entry_class")
            db_program_args = job_info.get("program_args", "")

            # Fall back to the stored arguments when the caller supplied none.
            if not program_args or program_args.strip() == "":
                program_args = db_program_args or ""
                current_app.logger.info(f"使用数据库中的程序参数: {program_args}")

            current_app.logger.info(f"从数据库获取作业配置: jar_name={jar_name}, entry_class={entry_class}, program_args={program_args}")
        finally:
            conn.close()

        # Step 5: list the JAR packages on the cluster (script: get_jars).
        current_app.logger.info("获取Flink集群中的JAR包列表...")
        ok, jars_resp = flink_request("GET", "/jars")
        if not ok:
            return {"success": False, "error": f"获取JAR包列表失败: {jars_resp}", "code": "FLINK_ERROR"}, 500

        jars = jars_resp.get("files") or []

        current_app.logger.info(f"集群中共有 {len(jars)} 个JAR包")

        # Step 6: find JARs whose file name contains jar_name; if none match,
        # retry with the base name (version suffix stripped).
        matching_jars = [jar for jar in jars if jar_name and jar_name in jar.get('name', '')]

        if not matching_jars:
            jar_base_name = jar_name.replace('-1.0-SNAPSHOT.jar', '').replace('.jar', '') if jar_name else ''
            matching_jars = [jar for jar in jars if jar_base_name and jar_base_name in jar.get('name', '')]

        if not matching_jars:
            available_jars = [jar.get('name', 'unknown') for jar in jars]
            return {
                "success": False, 
                "error": f"未找到匹配的JAR包: {jar_name}", 
                "code": "JAR_NOT_FOUND",
                "data": {"available_jars": available_jars}
            }, 404

        # Pick the most recently uploaded matching JAR.
        latest_jar = max(matching_jars, key=lambda x: x.get('uploaded', 0))
        jar_id = latest_jar.get('id')
        actual_jar_name = latest_jar.get('name')

        current_app.logger.info(f"选择最新的JAR包: {actual_jar_name} (ID: {jar_id})")

        # Step 7: submit the new job (script: submit_jar).  Only forward
        # program_args when it is a meaningful value, not a placeholder.
        current_app.logger.info(f"提交新作业，入口类: {entry_class}, 参数: {program_args}")

        processed_program_args = ""
        if program_args and program_args.strip() and program_args not in ["-", "null", "NULL", "none", "None"]:
            processed_program_args = program_args.strip()

        submit_data = {
            'entryClass': entry_class,
            'programArgs': processed_program_args
        }

        submit_ok, submit_result = flink_request("POST", f"/jars/{jar_id}/run", json=submit_data)

        if not submit_ok:
            return {"success": False, "error": f"提交作业失败: {submit_result}", "code": "SUBMIT_FAILED"}, 500

        # New job id; fall back to the raw response if the key is missing.
        new_job_id = submit_result.get('jobid') or submit_result.get('jobId') or str(submit_result)
        current_app.logger.info(f"作业提交成功，新作业ID: {new_job_id}")

        # Optionally write the new state back to the database.
        if update_db:
            current_app.logger.info(f"正在更新数据库中作业 {job_name} 的状态...")
            update_conn = None
            try:
                update_conn = get_db_connection()
                with update_conn.cursor() as cur:
                    update_sql = """
                        UPDATE flink_cluster_job_metadata 
                        SET job_id = %s,
                            last_status = 'RUNNING',
                            recovery_attempts = 0,
                            consecutive_failures = 0,
                            last_running_time = CURRENT_TIMESTAMP,
                            last_update_time = CURRENT_TIMESTAMP,
                            error_message = NULL,
                            last_error_type = NULL,
                            last_error_time = NULL,
                            health_status = 'HEALTHY',
                            last_health_check = CURRENT_TIMESTAMP,
                            program_args = %s,
                            jar_id = %s,
                            jar_name = %s,
                            last_jar_path = %s
                        WHERE job_name = %s
                    """
                    cur.execute(update_sql, [
                        new_job_id, 
                        processed_program_args,  # the arguments actually submitted
                        jar_id, 
                        actual_jar_name,  # the JAR actually used
                        f"/opt/flink/jars/{actual_jar_name}",
                        job_name
                    ])
                    update_conn.commit()
                    current_app.logger.info(f"✅ 数据库状态更新成功: job_id={new_job_id}, status=RUNNING, program_args={processed_program_args}")
            except Exception as update_error:
                # A failed DB update must not fail the restart; log and roll back.
                current_app.logger.warning(f"⚠️ 数据库状态更新失败（不影响重启结果）: {update_error}")
                if update_conn:
                    try:
                        update_conn.rollback()
                    except Exception:
                        pass
            finally:
                if update_conn:
                    try:
                        update_conn.close()
                    except Exception:
                        pass
        else:
            current_app.logger.info("⚠️ 跳过数据库更新（update_db=False，多环境场景）")

        current_app.logger.info("作业重启完成")

        return {
            "success": True,
            "message": "作业重启成功",
            "data": {
                "job_name": job_name,
                "new_job_id": new_job_id,
                "stopped_jobs": stop_results,
                "jar_used": {
                    "id": jar_id,
                    "name": actual_jar_name
                },
                "entry_class": entry_class,
                "program_args": program_args
            }
        }

    except Exception as e:
        # Log with traceback; the previous info-level log hid failures.
        current_app.logger.exception(f"重启作业时发生异常: {str(e)}")
        return {"success": False, "error": str(e), "code": "SERVER_ERROR"}, 500
    finally:
        # Restore the original Flink endpoint if it was overridden.
        if original_url:
            set_flink_api_url(original_url)


def restart_job_by_name_core_logic(job_name: str, program_args: str, max_retries: int = 3, retry_interval: int = 3, savepoint: bool = False, update_db: bool = False):
    """Core restart logic mirroring the flink_restart_all_jobs.py script.

    Reads the job's jar/entry-class/argument configuration from the
    ``flink_cluster_job_metadata`` table, stops every RUNNING job whose name
    contains ``job_name``, then resubmits the newest matching JAR.

    Args:
        job_name: Job name (substring match against running jobs).
        program_args: Program arguments; falls back to the stored DB value
            when empty.
        max_retries: Extra stop attempts after the first failure.
        retry_interval: Seconds to wait between stop attempts.
        savepoint: Selects the stop endpoint (see NOTE below).
        update_db: When True, write the new job id back to the database
            (keep False in multi-environment scenarios).

    Returns:
        dict with ``success``/``message`` and, on success, a ``data`` payload.
    """
    conn = None
    try:
        # Step 1: list every job the cluster knows (script: get_running_jobs).
        current_app.logger.info(f"开始重启作业: {job_name}")
        ok, jobs_resp = flink_request("GET", "/jobs/overview")
        if not ok:
            return {"success": False, "message": f"获取作业列表失败: {jobs_resp}"}

        running_jobs = jobs_resp.get("jobs") or []

        # Fuzzy match: any RUNNING job whose display name contains job_name.
        jobs_to_stop = [
            job for job in running_jobs
            if job.get('state', '') == 'RUNNING' and job_name in job.get('name', '')
        ]

        current_app.logger.info(f"找到 {len(jobs_to_stop)} 个需要停止的运行中作业")

        stop_results = []

        # Step 2: stop each matched job (script: stop_job).  The savepoint
        # flag and retry settings are honored the same way as in
        # restart_single_job_internal (previously these parameters were
        # accepted but silently ignored).
        for job in jobs_to_stop:
            job_id = job.get('jid') or job.get('id')
            job_display_name = job.get('name', '')

            current_app.logger.info(f"正在停止作业: {job_display_name} ({job_id})")

            # NOTE(review): savepoint=True maps to yarn-cancel, False to
            # /stop; in the Flink REST API /stop is the savepoint-draining
            # call -- confirm this mapping is intended.
            endpoint = f"/jobs/{job_id}/yarn-cancel" if savepoint else f"/jobs/{job_id}/stop"
            stop_ok, stop_result = False, None
            for attempt in range(max_retries + 1):
                stop_ok, stop_result = flink_request("POST", endpoint)
                if stop_ok or attempt >= max_retries:
                    break
                time.sleep(retry_interval)

            stop_results.append({
                "job_id": job_id,
                "job_name": job_display_name,
                "success": stop_ok,
                "message": stop_result if not stop_ok else "停止成功"
            })

            if stop_ok:
                current_app.logger.info(f"作业停止成功: {job_display_name} ({job_id})")
            else:
                current_app.logger.warning(f"作业停止失败: {job_display_name} ({job_id}), 错误: {stop_result}")

        # Step 3: give stopped jobs time to terminate completely.
        if jobs_to_stop:
            current_app.logger.info("等待5秒确保作业完全停止...")
            time.sleep(5)

        # Step 4: read the job configuration from the database.
        conn = get_db_connection()
        try:
            with conn.cursor() as cur:
                cur.execute(
                    "SELECT jar_name, entry_class, jar_id, program_args FROM flink_cluster_job_metadata WHERE job_name=%s",
                    (job_name,)
                )
                job_info = cur.fetchone()
                if not job_info:
                    return {"success": False, "message": "数据库中未找到作业配置"}

            jar_name = job_info.get("jar_name")
            entry_class = job_info.get("entry_class")
            db_program_args = job_info.get("program_args", "")

            # Fall back to the stored arguments when the caller supplied none.
            if not program_args or program_args.strip() == "":
                program_args = db_program_args or ""
                current_app.logger.info(f"使用数据库中的程序参数: {program_args}")
        finally:
            conn.close()
            conn = None  # mark closed so the outer finally does not re-close

        current_app.logger.info(f"从数据库获取作业配置: jar_name={jar_name}, entry_class={entry_class}, program_args={program_args}")

        # Step 5: list the JAR packages on the cluster (script: get_jars).
        current_app.logger.info("获取Flink集群中的JAR包列表...")
        ok, jars_resp = flink_request("GET", "/jars")
        if not ok:
            return {"success": False, "message": f"获取JAR包列表失败: {jars_resp}"}

        jars = jars_resp.get("files") or []

        current_app.logger.info(f"集群中共有 {len(jars)} 个JAR包")

        # Step 6: find JARs whose file name contains jar_name; if none match,
        # retry with the base name (version suffix stripped).
        matching_jars = [jar for jar in jars if jar_name and jar_name in jar.get('name', '')]

        if not matching_jars:
            jar_base_name = jar_name.replace('-1.0-SNAPSHOT.jar', '').replace('.jar', '') if jar_name else ''
            matching_jars = [jar for jar in jars if jar_base_name and jar_base_name in jar.get('name', '')]

        if not matching_jars:
            available_jars = [jar.get('name', 'unknown') for jar in jars]
            return {
                "success": False, 
                "message": f"未找到匹配的JAR包: {jar_name}",
                "data": {"available_jars": available_jars}
            }

        # Pick the most recently uploaded matching JAR.
        latest_jar = max(matching_jars, key=lambda x: x.get('uploaded', 0))
        jar_id = latest_jar.get('id')
        actual_jar_name = latest_jar.get('name')

        current_app.logger.info(f"选择最新的JAR包: {actual_jar_name} (ID: {jar_id})")

        # Step 7: submit the new job (script: submit_jar).  Only forward
        # program_args when it is a meaningful value, not a placeholder.
        current_app.logger.info(f"提交新作业，入口类: {entry_class}, 参数: {program_args}")

        processed_program_args = ""
        if program_args and program_args.strip() and program_args not in ["-", "null", "NULL", "none", "None"]:
            processed_program_args = program_args.strip()

        submit_data = {
            'entryClass': entry_class,
            'programArgs': processed_program_args
        }

        submit_ok, submit_result = flink_request("POST", f"/jars/{jar_id}/run", json=submit_data)

        if not submit_ok:
            return {"success": False, "message": f"提交作业失败: {submit_result}"}

        # New job id; fall back to the raw response if the key is missing.
        new_job_id = submit_result.get('jobid') or submit_result.get('jobId') or str(submit_result)
        current_app.logger.info(f"作业提交成功，新作业ID: {new_job_id}")

        # Optionally write the new state back to the database.
        if update_db:
            current_app.logger.info(f"正在更新数据库中作业 {job_name} 的状态...")
            # Fresh connection: the lookup connection was already closed.
            update_conn = get_db_connection()
            try:
                with update_conn.cursor() as cur:
                    update_sql = """
                        UPDATE flink_cluster_job_metadata 
                        SET job_id = %s,
                            last_status = 'RUNNING',
                            recovery_attempts = 0,
                            consecutive_failures = 0,
                            last_running_time = CURRENT_TIMESTAMP,
                            last_update_time = CURRENT_TIMESTAMP,
                            error_message = NULL,
                            last_error_type = NULL,
                            last_error_time = NULL,
                            health_status = 'HEALTHY',
                            last_health_check = CURRENT_TIMESTAMP,
                            program_args = %s,
                            jar_id = %s,
                            jar_name = %s,
                            last_jar_path = %s
                        WHERE job_name = %s
                    """
                    cur.execute(update_sql, [
                        new_job_id, 
                        processed_program_args,  # the arguments actually submitted
                        jar_id, 
                        actual_jar_name,  # the JAR actually used
                        f"/opt/flink/jars/{actual_jar_name}",
                        job_name
                    ])
                    update_conn.commit()
                    current_app.logger.info(f"✅ 数据库状态更新成功: job_id={new_job_id}, status=RUNNING, program_args={processed_program_args}")
            except Exception as update_error:
                # A failed DB update must not fail the restart; log and roll back.
                current_app.logger.warning(f"⚠️ 数据库状态更新失败（不影响重启结果）: {update_error}")
                try:
                    update_conn.rollback()
                except Exception:
                    pass
            finally:
                if update_conn:
                    update_conn.close()
        else:
            current_app.logger.info("⚠️ 跳过数据库更新（update_db=False，多环境场景）")

        current_app.logger.info("作业重启完成")

        return {
            "success": True,
            "message": "作业重启成功",
            "data": {
                "job_name": job_name,
                "new_job_id": new_job_id,
                "stopped_jobs": stop_results,
                "jar_used": {
                    "id": jar_id,
                    "name": actual_jar_name
                },
                "entry_class": entry_class,
                "program_args": program_args
            }
        }

    except Exception as e:
        # Log with traceback; the previous info-level log hid failures.
        current_app.logger.exception(f"重启作业时发生异常: {str(e)}")
        return {"success": False, "message": f"重启作业时发生异常: {str(e)}"}
    finally:
        if conn:
            try:
                conn.close()
            except Exception:
                pass  # the connection may already be closed


@bp.get("/<int:job_pk>/restart-form")
def restart_form(job_pk: int):
    """Placeholder endpoint for the restart form of job ``job_pk``."""
    payload = {"success": True, "message": "Restart form endpoint"}
    return payload


def _match_jar_for_job(job_name, available_jars):
    """根据作业名称智能匹配最合适的 JAR 包和入口类
    
    Args:
        job_name: 作业名称，例如 "Talent_Job_Kafka2SR_TDS"
        available_jars: 可用的 JAR 包列表
    
    Returns:
        dict: {'jar_name': str, 'entry_class': str} 或 {'jar_name': None, 'entry_class': None}
    """
    if not job_name or not available_jars:
        return {'jar_name': None, 'entry_class': None}
    
    # 特殊映射表：处理无法通过规则自动匹配的作业
    SPECIAL_MAPPINGS = {
        # 作业名前缀 -> (JAR包关键词, 入口类后缀)
        'TrackTalentNote': ('xhs-analysis', 'XhsAnalysisJob'),
        'TrackTalentVideo': ('talent-analysis', 'TalentAnalysisJob'),
        'DyCommentFilter': ('dy-comment', 'DyCommentJob'),
        'XhsCommentFilter': ('xhs-comment', 'XhsCommentJob'),
        'CollectLogs': ('spring-log', 'CollectLogsJob'),
        # 可以继续添加更多特殊映射
    }
    
    # 提取作业名称的前缀（例如：Talent_Job_Kafka2SR_TDS -> Talent）
    name_parts = job_name.split('_')
    job_prefix = name_parts[0] if name_parts else ''
    
    current_app.logger.info(f"🔍 匹配作业: {job_name}, 提取前缀: {job_prefix}, 可用JAR数: {len(available_jars)}")
    
    # 优先检查特殊映射表
    if job_prefix in SPECIAL_MAPPINGS:
        current_app.logger.info(f"找到特殊映射: {job_prefix} -> {SPECIAL_MAPPINGS[job_prefix]}")
        jar_keyword, entry_suffix = SPECIAL_MAPPINGS[job_prefix]
        
        # 在可用的 JAR 包中查找匹配的
        for jar in available_jars:
            jar_name = jar.get('name', '')
            entry_classes = jar.get('entry', [])
            
            if not entry_classes:
                continue
            
            entry_class = entry_classes[0].get('name', '')
            
            # 检查 JAR 包名是否包含关键词，入口类是否匹配
            if jar_keyword in jar_name.lower() and entry_class.endswith(entry_suffix):
                current_app.logger.info(
                    f"特殊映射匹配成功: {job_prefix} -> JAR: {jar_name}, 入口类: {entry_class}"
                )
                return {
                    'jar_name': jar_name,
                    'entry_class': entry_class
                }
        
        # 如果特殊映射没找到，记录日志继续使用通用规则
        current_app.logger.warning(
            f"特殊映射未找到匹配的 JAR: {job_prefix} -> 关键词: {jar_keyword}"
        )
    
    # 尝试匹配：通过作业前缀匹配 JAR 包名或入口类
    best_match = None
    best_score = 0
    
    for jar in available_jars:
        jar_name = jar.get('name', '')
        entry_classes = jar.get('entry', [])
        
        if not entry_classes:
            continue
        
        entry_class = entry_classes[0].get('name', '')
        
        # 计算匹配分数
        score = 0
        
        # 规则1：JAR 包名包含作业前缀（不区分大小写）
        if job_prefix.lower() in jar_name.lower():
            score += 10
        
        # 规则2：入口类名包含作业前缀
        if job_prefix in entry_class:
            score += 15
        
        # 规则3：精确匹配（例如：TalentJob 匹配 com.jcm.TalentJob）
        if entry_class.endswith(f'.{job_prefix}Job'):
            score += 20
        
        # 规则4：通用 job JAR（例如 yc-kafka2sr-job）
        if '_Job_Kafka2SR_' in job_name and 'kafka2sr' in jar_name.lower():
            score += 8
        
        if score > best_score:
            best_score = score
            best_match = {
                'jar_name': jar_name,
                'entry_class': entry_class
            }
    
    return best_match or {'jar_name': None, 'entry_class': None}


@bp.get("/discover-unregistered")
@log_api_call("jobs.discover_unregistered")
def discover_unregistered_jobs():
    """Discover Flink jobs that are not registered in the database.

    Fetches all jobs from the Flink REST API, compares them with the
    ``flink_cluster_job_metadata`` table (matching by job id and job name),
    and returns the unregistered ones together with a suggested JAR package,
    entry class and program arguments.
    """
    current_app.logger.info("开始发现未注册的作业")

    flink_env = request.args.get("flink_env")

    # Remember the current endpoint so it can be restored afterwards;
    # previously the URL was reset to None, discarding the default
    # (the restart endpoints in this module save/restore the same way).
    original_url = None

    try:
        if flink_env:
            current_app.logger.info(f"使用指定的 Flink 环境: {flink_env}")
            original_url = get_flink_api_url()
            set_flink_api_url(flink_env)
        else:
            current_app.logger.info(f"使用默认 Flink 环境: {get_flink_api_url()}")

        # 1. Fetch every job from the Flink API.
        current_app.logger.debug("正在从 Flink API 获取作业列表")
        ok, jobs_resp = flink_request("GET", "/jobs/overview")

        if not ok:
            current_app.logger.error(f"从 Flink 获取作业列表失败: {jobs_resp}")
            return {"success": False, "error": f"无法获取 Flink 作业列表: {jobs_resp}"}, 500

        flink_jobs = jobs_resp.get("jobs", [])
        current_app.logger.info(f"从 Flink 获取到 {len(flink_jobs)} 个作业")

        # 2. Fetch the JAR inventory for JAR/entry-class matching.
        current_app.logger.debug("正在从 Flink API 获取 JAR 包列表")
        jars_ok, jars_resp = flink_request("GET", "/jars")
        available_jars = []
        if jars_ok and isinstance(jars_resp, dict):
            available_jars = jars_resp.get("files", [])
            current_app.logger.info(f"从 Flink 获取到 {len(available_jars)} 个 JAR 包")
        else:
            current_app.logger.warning(f"无法获取 JAR 包列表: {jars_resp}")

        # 3. Load the registered job names/ids from the database.
        conn = get_db_connection()
        try:
            with conn.cursor() as cur:
                cur.execute("SELECT job_name, job_id FROM flink_cluster_job_metadata")
                registered_jobs = cur.fetchall()
                registered_job_names = {job['job_name'] for job in registered_jobs}
                registered_job_ids = {job['job_id'] for job in registered_jobs if job['job_id']}

                current_app.logger.info(f"数据库中已注册 {len(registered_job_names)} 个作业")
        finally:
            conn.close()

        # 4. Anything not matched by id or name is unregistered.
        unregistered_jobs = []

        for job in flink_jobs:
            job_id = job.get('jid')
            job_name = job.get('name', '')
            job_state = job.get('state', 'UNKNOWN')
            start_time = job.get('start-time', 0)
            duration = job.get('duration', 0)
            end_time = job.get('end-time', 0)

            # Skip jobs already registered (by job_id or job_name).
            if job_id in registered_job_ids:
                current_app.logger.debug(f"作业 {job_name} (ID: {job_id}) 已通过 job_id 注册，跳过")
                continue

            if job_name in registered_job_names:
                current_app.logger.debug(f"作业 {job_name} (ID: {job_id}) 已通过 job_name 注册，跳过")
                continue

            # Fetch job detail for task/parallelism info.
            current_app.logger.debug(f"获取作业 {job_id} 的详细信息")
            detail_ok, detail_resp = flink_request("GET", f"/jobs/{job_id}")

            # Fetch the job config; its user-config holds the program args.
            config_ok, config_resp = flink_request("GET", f"/jobs/{job_id}/config")
            user_config = {}
            program_args = ""
            if config_ok and isinstance(config_resp, dict):
                exec_config = config_resp.get('execution-config', {})
                user_config = exec_config.get('user-config', {})
                # Render user-config entries as command-line arguments.
                if user_config:
                    args_parts = []
                    for key, value in user_config.items():
                        if key not in ['parallelism']:  # parallelism is not a program argument
                            args_parts.append(f"--{key} {value}")
                    program_args = " ".join(args_parts)

            # Default arguments when the config exposes none.
            if not program_args:
                program_args = "--env dev --parallelism 1"

            # Suggest a JAR package and entry class for this job name.
            jar_match = _match_jar_for_job(job_name, available_jars)

            if detail_ok:
                vertices = detail_resp.get('vertices', [])

                unregistered_job = {
                    'job_id': job_id,
                    'job_name': job_name or f"未命名作业-{job_id[:8]}",
                    'state': job_state,
                    'start_time': datetime.fromtimestamp(start_time / 1000).strftime('%Y-%m-%d %H:%M:%S') if start_time > 0 else None,
                    'end_time': datetime.fromtimestamp(end_time / 1000).strftime('%Y-%m-%d %H:%M:%S') if end_time > 0 else None,
                    'duration': duration,
                    'tasks_total': detail_resp.get('tasks', {}).get('total', 0),
                    'vertices_count': len(vertices),
                    'parallelism': detail_resp.get('parallelism', 1),
                    # suggested JAR and entry class from the matcher
                    'suggested_jar_name': jar_match.get('jar_name'),
                    'suggested_entry_class': jar_match.get('entry_class'),
                    # program arguments derived from the config API
                    'suggested_program_args': program_args,
                    'user_config': user_config,
                    # suggested human-readable description
                    'suggested_description': f"从 Flink 自动发现的作业 ({job_state})",
                }

                current_app.logger.info(
                    f"发现未注册作业: {unregistered_job['job_name']} (ID: {job_id}, "
                    f"状态: {job_state}, JAR: {jar_match.get('jar_name')}, "
                    f"入口类: {jar_match.get('entry_class')})"
                )
                unregistered_jobs.append(unregistered_job)
            else:
                current_app.logger.warning(f"无法获取作业 {job_id} 的详细信息: {detail_resp}")
                # Detail fetch failed: still record the basic info plus the
                # suggested match so the job can be registered manually.
                unregistered_jobs.append({
                    'job_id': job_id,
                    'job_name': job_name or f"未命名作业-{job_id[:8]}",
                    'state': job_state,
                    'start_time': datetime.fromtimestamp(start_time / 1000).strftime('%Y-%m-%d %H:%M:%S') if start_time > 0 else None,
                    'end_time': datetime.fromtimestamp(end_time / 1000).strftime('%Y-%m-%d %H:%M:%S') if end_time > 0 else None,
                    'duration': duration,
                    'suggested_jar_name': jar_match.get('jar_name'),
                    'suggested_entry_class': jar_match.get('entry_class'),
                    'suggested_program_args': program_args,
                    'user_config': user_config,
                    'suggested_description': f"从 Flink 自动发现的作业 ({job_state})",
                })

        current_app.logger.info(f"共发现 {len(unregistered_jobs)} 个未注册作业")

        return {
            "success": True,
            "data": {
                "unregistered_jobs": unregistered_jobs,
                "total_flink_jobs": len(flink_jobs),
                "registered_jobs_count": len(registered_job_names),
                "unregistered_count": len(unregistered_jobs)
            }
        }

    except Exception as e:
        current_app.logger.error(f"发现未注册作业时发生异常: {str(e)}", exc_info=True)
        return {"success": False, "error": f"发现未注册作业时发生异常: {str(e)}"}, 500
    finally:
        # Restore the Flink API URL so other requests are unaffected.
        if flink_env:
            set_flink_api_url(original_url)


# ========== 异步任务接口 ==========

def _mark_job_manually_stopped(task_id: str, job_name: str) -> None:
    """Best-effort metadata update marking *job_name* as manually stopped.

    Sets manual_stopped=1 and flips monitor_status to DISABLED (when
    auto_recovery is off) or DISMONITOR. Failures are logged as warnings
    and never propagate — a DB hiccup must not fail the stop operation.
    """
    current_app.logger.info(f"[任务{task_id}] 正在更新数据库中作业 {job_name} 的手动停止状态...")
    try:
        update_conn = get_db_connection()
    except Exception as conn_error:
        current_app.logger.warning(f"[任务{task_id}] ⚠️ 数据库连接失败: {conn_error}")
        return
    try:
        with update_conn.cursor() as cur:
            update_sql = """
                UPDATE flink_cluster_job_metadata 
                SET manual_stopped = 1,
                    monitor_status = CASE 
                        WHEN auto_recovery = 0 THEN 'DISABLED'
                        ELSE 'DISMONITOR'
                    END,
                    last_update_time = CURRENT_TIMESTAMP
                WHERE job_name = %s
            """
            cur.execute(update_sql, [job_name])
            update_conn.commit()
            current_app.logger.info(f"[任务{task_id}] ✅ 数据库状态更新成功: manual_stopped=1, monitor_status=DISABLED/DISMONITOR")
    except Exception as update_error:
        current_app.logger.warning(f"[任务{task_id}] ⚠️ 数据库状态更新失败（不影响停止结果）: {update_error}")
        try:
            update_conn.rollback()
        except Exception:
            pass
    finally:
        try:
            update_conn.close()
        except Exception:
            pass


@bp.post("/bulk-stop-async")
@log_api_call
def bulk_stop_async():
    """Bulk-stop Flink jobs asynchronously; returns a task ID immediately.

    Request body (JSON or form):
        job_names: list[str] -- job name substrings to stop (required).
        savepoint: "1"/"true" -- use /yarn-cancel instead of /stop.
        flink_env: optional Flink API base-URL override for this task only.
        max_retries: stop-retry attempts per job (default 3).
        retry_interval: seconds between retries (default 3).
        update_db: "1"/"true" -- also mark jobs manually stopped in the
            metadata table (off by default).

    Returns 200 with ``task_id`` for progress polling, or 400 when no
    job names were supplied. The actual work runs in a daemon thread and
    reports progress through ``task_manager``.
    """
    data = request.get_json() or request.form
    job_names = data.get("job_names", [])
    savepoint = str(data.get("savepoint", "0")) in ("1", "true", "True")
    flink_env = data.get("flink_env")
    max_retries = int(data.get("max_retries", 3))
    retry_interval = int(data.get("retry_interval", 3))
    # DB update is opt-in: stopping a job does not touch metadata by default.
    update_db = str(data.get("update_db", "0")) in ("1", "true", "True")

    if not job_names:
        return {"success": False, "error": "没有选择作业", "code": "BAD_REQUEST"}, 400

    task_id = task_manager.create_task(
        task_type='bulk_stop',
        total_items=len(job_names),
        metadata={
            'job_names': job_names,
            'savepoint': savepoint,
            'flink_env': flink_env
        }
    )

    # Capture the concrete app object: the current_app proxy is unusable
    # inside a thread that has no request/app context of its own.
    app = current_app._get_current_object()

    def worker():
        with app.app_context():
            original_url = None
            try:
                if flink_env:
                    original_url = get_flink_api_url()
                    set_flink_api_url(flink_env)

                # Snapshot the currently running jobs once up front.
                ok, jobs_resp = flink_request("GET", "/jobs/overview")
                if not ok:
                    task_manager.complete_task(task_id, 'failed', f"获取作业列表失败: {jobs_resp}")
                    return

                running_jobs = jobs_resp.get("jobs", []) or []

                for i, job_name in enumerate(job_names):
                    current_app.logger.info(f"[任务{task_id}] 正在处理作业 {i+1}/{len(job_names)}: {job_name}")

                    # Substring match against RUNNING jobs only.
                    jobs_to_stop = [
                        job for job in running_jobs
                        if job.get('state') == 'RUNNING' and job_name in job.get('name', '')
                    ]

                    if not jobs_to_stop:
                        # Nothing running under this name counts as already stopped.
                        task_manager.update_progress(task_id, i + 1, {
                            'job_name': job_name,
                            'success': True,
                            'message': '作业未运行，视为已停止',
                            'noop': True
                        })
                        continue

                    stop_success_count = 0
                    for job in jobs_to_stop:
                        job_id = job.get('jid') or job.get('id')
                        job_display_name = job.get('name', '')

                        stop_success = False
                        stop_message = ""

                        for attempt in range(max_retries + 1):
                            if attempt > 0:
                                time.sleep(retry_interval)

                            # savepoint mode goes through YARN cancel; plain stop otherwise.
                            endpoint = f"/jobs/{job_id}/yarn-cancel" if savepoint else f"/jobs/{job_id}/stop"
                            stop_ok, stop_result = flink_request("POST", endpoint)

                            if stop_ok:
                                stop_success = True
                                stop_message = "停止成功"
                                stop_success_count += 1
                                break
                            stop_message = stop_result

                        task_manager.update_progress(task_id, i + 1, {
                            'job_name': job_display_name,
                            'job_id': job_id,
                            'success': stop_success,
                            'message': stop_message
                        })

                    # Only touch the metadata table when at least one instance stopped.
                    if update_db and stop_success_count > 0:
                        _mark_job_manually_stopped(task_id, job_name)

                task_manager.complete_task(task_id, 'completed')

            except Exception as e:
                current_app.logger.error(f"[任务{task_id}] 批量停止异常: {str(e)}", exc_info=True)
                task_manager.complete_task(task_id, 'failed', str(e))
            finally:
                # BUGFIX: restore the Flink API URL even when the worker fails,
                # so an env override cannot leak into unrelated requests.
                if original_url:
                    set_flink_api_url(original_url)

    threading.Thread(target=worker, daemon=True).start()

    # Return immediately; the caller polls /task/<task_id> for progress.
    return {
        "success": True,
        "task_id": task_id,
        "message": f"批量停止任务已启动，共{len(job_names)}个作业"
    }


@bp.post("/bulk-restart-async")
@log_api_call
def bulk_restart_async():
    """Bulk-restart Flink jobs asynchronously; returns a task ID immediately.

    Request body (JSON or form):
        job_names: list[str] -- job names to restart (required).
        program_args: str -- program arguments passed to every restart.
        savepoint: "1"/"true" -- restart from savepoint.
        flink_env: optional Flink API base-URL override for this task only.
        max_retries / retry_interval: per-job restart retry policy.
        update_db: "1"/"true" -- also update metadata table (off by default).

    Returns 200 with ``task_id`` for progress polling, or 400 when no
    job names were supplied. The restart work runs in a daemon thread,
    delegating each job to ``restart_job_by_name_core_logic``.
    """
    data = request.get_json() or request.form
    job_names = data.get("job_names", [])
    program_args = data.get("program_args", "")
    savepoint = str(data.get("savepoint", "0")) in ("1", "true", "True")
    flink_env = data.get("flink_env")
    max_retries = int(data.get("max_retries", 3))
    retry_interval = int(data.get("retry_interval", 3))
    # DB update is opt-in: restarting a job does not touch metadata by default.
    update_db = str(data.get("update_db", "0")) in ("1", "true", "True")

    if not job_names:
        return {"success": False, "error": "没有选择作业", "code": "BAD_REQUEST"}, 400

    task_id = task_manager.create_task(
        task_type='bulk_restart',
        total_items=len(job_names),
        metadata={
            'job_names': job_names,
            'program_args': program_args,
            'savepoint': savepoint,
            'flink_env': flink_env
        }
    )

    # Capture the concrete app object: the current_app proxy is unusable
    # inside a thread that has no request/app context of its own.
    app = current_app._get_current_object()

    def worker():
        with app.app_context():
            original_url = None
            try:
                if flink_env:
                    original_url = get_flink_api_url()
                    set_flink_api_url(flink_env)

                for i, job_name in enumerate(job_names):
                    current_app.logger.info(f"[任务{task_id}] 正在处理作业 {i+1}/{len(job_names)}: {job_name}")

                    try:
                        # Delegate the stop/verify/resubmit cycle to the shared core logic.
                        restart_result = restart_job_by_name_core_logic(
                            job_name, program_args, max_retries, retry_interval, savepoint, update_db
                        )

                        task_manager.update_progress(task_id, i + 1, {
                            'job_name': job_name,
                            'success': restart_result.get('success', False),
                            'message': restart_result.get('message', ''),
                            'data': restart_result.get('data', {})
                        })

                    except Exception as e:
                        # One failed job must not abort the rest of the batch.
                        current_app.logger.error(f"[任务{task_id}] 重启作业 {job_name} 异常: {str(e)}")
                        task_manager.update_progress(task_id, i + 1, {
                            'job_name': job_name,
                            'success': False,
                            'message': f"重启异常: {str(e)}"
                        })

                task_manager.complete_task(task_id, 'completed')

            except Exception as e:
                current_app.logger.error(f"[任务{task_id}] 批量重启异常: {str(e)}", exc_info=True)
                task_manager.complete_task(task_id, 'failed', str(e))
            finally:
                # BUGFIX: restore the Flink API URL even when the worker fails,
                # so an env override cannot leak into unrelated requests.
                if original_url:
                    set_flink_api_url(original_url)

    threading.Thread(target=worker, daemon=True).start()

    # Return immediately; the caller polls /task/<task_id> for progress.
    return {
        "success": True,
        "task_id": task_id,
        "message": f"批量重启任务已启动，共{len(job_names)}个作业"
    }


@bp.get("/task/<string:task_id>")
@log_api_call
def get_task_status(task_id: str):
    """Return the current state of a single async task, or 404 if unknown."""
    found = task_manager.get_task(task_id)
    if found is None:
        return {"success": False, "error": "任务不存在", "code": "NOT_FOUND"}, 404
    return {"success": True, "data": found}


@bp.get("/tasks")
@log_api_call
def get_all_tasks():
    """List every tracked async task together with the total count."""
    task_list = task_manager.get_all_tasks()
    payload = {"tasks": task_list, "total": len(task_list)}
    return {"success": True, "data": payload}


@bp.delete("/task/<string:task_id>")
@log_api_call
def delete_task(task_id: str):
    """Remove an async task record; 404 when the ID is unknown."""
    if not task_manager.get_task(task_id):
        return {"success": False, "error": "任务不存在", "code": "NOT_FOUND"}, 404
    task_manager.delete_task(task_id)
    return {"success": True, "message": "任务已删除"}
