from flask import Blueprint, request, current_app
from app.utils.mysql_db import get_db_connection
from app.utils.logger import log_api_call
from datetime import datetime, timedelta


# Blueprint exposing StarRocks cluster-monitor endpoints:
# config CRUD (/configs...), restart history (/restart-history), and stats (/stats).
bp = Blueprint("starrocks_cluster_monitor", __name__)


@bp.get("/configs")
@log_api_call("starrocks_cluster_monitor.list_configs")
def list_configs():
    """List StarRocks cluster monitor configs with pagination, search and filters.

    Query parameters:
        cluster_tag: exact match on the cluster tag.
        config_name: substring (LIKE) match on the config name.
        is_monitor / is_alert: integer flag filters ("0"/"1"); non-numeric
            values are ignored instead of raising.
        status: exact match on the status column.
        page / size: 1-based pagination; both are clamped to >= 1.

    Returns a dict with rows (timestamps formatted as strings), total count,
    and the effective page/size.
    """
    current_app.logger.info("Listing StarRocks cluster configs")

    cluster_tag = request.args.get("cluster_tag")
    config_name = request.args.get("config_name")
    is_monitor = request.args.get("is_monitor")
    is_alert = request.args.get("is_alert")
    status = request.args.get("status")
    # Clamp so a page/size of 0 or below can never yield a negative OFFSET,
    # which MySQL rejects as a syntax error.
    page = max(request.args.get("page", 1, type=int), 1)
    size = max(request.args.get("size", 20, type=int), 1)

    # Build the WHERE clause from parameterized fragments (no value ever
    # interpolated into SQL text).
    where = []
    params = []

    if cluster_tag:
        where.append("cluster_tag = %s")
        params.append(cluster_tag)

    if config_name:
        where.append("config_name LIKE %s")
        params.append(f"%{config_name}%")

    # Flag filters arrive as strings; apply them only when they parse as int
    # instead of letting int() raise ValueError and turn the request into a 500.
    for flag_column, flag_value in (("is_monitor", is_monitor), ("is_alert", is_alert)):
        if flag_value is not None and flag_value != '':
            try:
                flag_int = int(flag_value)
            except ValueError:
                continue  # ignore unparsable flag filter rather than fail the request
            # flag_column comes from the fixed tuple above, never from user input.
            where.append(f"{flag_column} = %s")
            params.append(flag_int)

    if status:
        where.append("status = %s")
        params.append(status)

    where_sql = (" WHERE " + " AND ".join(where)) if where else ""

    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            # Total row count for the same filter set (drives pagination UI).
            count_sql = f"SELECT COUNT(*) as total FROM starrocks_cluster_monitor_config {where_sql}"
            cur.execute(count_sql, params)
            total_result = cur.fetchone()
            total = total_result['total'] if total_result else 0

            # Page of rows; deterministic ordering by (cluster_tag, config_name).
            offset = (page - 1) * size
            cur.execute(
                f"""
                SELECT 
                    id, cluster_tag, config_name, config_desc, fe_master_node,
                    fe_slave_nodes, has_fe_slaves, be_nodes, metrics_port,
                    fe_http_port, be_http_port, fe_log_port, starrocks_home,
                    control_script, starrocks_user, fe_log_dir, be_log_dir,
                    log_backup_dir, log_keep_days, log_backup_days,
                    prometheus_url, fe_query, be_query, enable_prometheus,
                    prometheus_required, webhook_url, is_alert, alert_count,
                    alert_interval, max_alert_count, is_restart, restart_count,
                    max_restart_count, restart_timeout, ssh_user, ssh_timeout,
                    is_monitor, status, created_at, updated_at
                FROM starrocks_cluster_monitor_config
                {where_sql}
                ORDER BY cluster_tag, config_name
                LIMIT %s OFFSET %s
                """,
                (*params, size, offset),
            )
            rows = cur.fetchall()

            # Render datetime columns as strings for JSON serialization.
            for row in rows:
                for ts_field in ('created_at', 'updated_at'):
                    value = row.get(ts_field)
                    if value and hasattr(value, 'strftime'):
                        row[ts_field] = value.strftime('%Y-%m-%d %H:%M:%S')

            return {
                "success": True,
                "message": "ok",
                "data": {
                    "rows": rows,
                    "total": total,
                    "page": page,
                    "size": size
                }
            }
    finally:
        conn.close()


@bp.get("/configs/cluster-tags")
@log_api_call("starrocks_cluster_monitor.get_cluster_tags")
def get_cluster_tags():
    """Return every distinct cluster tag with its config count.

    Each row is {cluster_tag, count}, ordered by tag. Used to populate the
    cluster filter dropdown.
    """
    current_app.logger.info("Getting cluster tags")

    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            # GROUP BY already yields one row per cluster_tag; the original
            # DISTINCT was redundant and has been dropped.
            cur.execute("""
                SELECT cluster_tag, COUNT(*) as count
                FROM starrocks_cluster_monitor_config
                GROUP BY cluster_tag
                ORDER BY cluster_tag
            """)
            tags = cur.fetchall()

            return {
                "success": True,
                "message": "ok",
                "data": tags
            }
    finally:
        conn.close()


@bp.get("/configs/<int:config_id>")
@log_api_call("starrocks_cluster_monitor.get_config")
def get_config(config_id):
    """Return the full detail row for one cluster monitor config, or 404."""
    current_app.logger.info(f"Getting config detail | id: {config_id}")

    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            cur.execute("""
                SELECT 
                    id, cluster_tag, config_name, config_desc, fe_master_node,
                    fe_slave_nodes, has_fe_slaves, be_nodes, metrics_port,
                    fe_http_port, be_http_port, fe_log_port, starrocks_home,
                    control_script, starrocks_user, fe_log_dir, be_log_dir,
                    log_backup_dir, log_keep_days, log_backup_days,
                    prometheus_url, fe_query, be_query, enable_prometheus,
                    prometheus_required, webhook_url, is_alert, alert_count,
                    alert_interval, max_alert_count, is_restart, restart_count,
                    max_restart_count, restart_timeout, ssh_user, ssh_timeout,
                    is_monitor, status, created_at, updated_at
                FROM starrocks_cluster_monitor_config
                WHERE id = %s
            """, (config_id,))
            detail = cur.fetchone()

            # Guard clause: unknown id -> 404 envelope.
            if not detail:
                return {
                    "success": False,
                    "message": "配置不存在",
                    "data": None
                }, 404

            # Stringify datetime columns so the payload is JSON-serializable.
            for ts_key in ('created_at', 'updated_at'):
                ts_value = detail.get(ts_key)
                if ts_value and hasattr(ts_value, 'strftime'):
                    detail[ts_key] = ts_value.strftime('%Y-%m-%d %H:%M:%S')

            return {
                "success": True,
                "message": "ok",
                "data": detail
            }
    finally:
        conn.close()


@bp.post("/configs")
@log_api_call("starrocks_cluster_monitor.create_config")
def create_config():
    """Create a new cluster monitor config.

    Required JSON fields: cluster_tag, config_name, fe_master_node, be_nodes.
    The (cluster_tag, config_name) pair must be unique. Optional fields fall
    back to the defaults shown in the INSERT below. Returns the new row id.
    """
    # silent=True + {} fallback: a missing or malformed JSON body now fails the
    # required-field check with a clean 400 instead of raising AttributeError
    # on None (which surfaced as a 500).
    data = request.get_json(silent=True) or {}
    current_app.logger.info(f"Creating config | cluster: {data.get('cluster_tag')} | name: {data.get('config_name')}")

    # Validate required fields before touching the database.
    required_fields = ['cluster_tag', 'config_name', 'fe_master_node', 'be_nodes']
    for field in required_fields:
        if not data.get(field):
            return {
                "success": False,
                "message": f"缺少必填字段: {field}",
                "data": None
            }, 400

    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            # Enforce uniqueness of (cluster_tag, config_name) at the app level.
            cur.execute("""
                SELECT id FROM starrocks_cluster_monitor_config 
                WHERE cluster_tag = %s AND config_name = %s
            """, (data['cluster_tag'], data['config_name']))
            if cur.fetchone():
                return {
                    "success": False,
                    "message": f"集群标识 {data['cluster_tag']} 的配置 {data['config_name']} 已存在",
                    "data": None
                }, 400

            # Insert the new config. alert_count/restart_count are omitted —
            # presumably they take DB defaults; confirm against the schema.
            cur.execute("""
                INSERT INTO starrocks_cluster_monitor_config (
                    cluster_tag, config_name, config_desc, fe_master_node,
                    fe_slave_nodes, has_fe_slaves, be_nodes, metrics_port,
                    fe_http_port, be_http_port, fe_log_port, starrocks_home,
                    control_script, starrocks_user, fe_log_dir, be_log_dir,
                    log_backup_dir, log_keep_days, log_backup_days,
                    prometheus_url, fe_query, be_query, enable_prometheus,
                    prometheus_required, webhook_url, is_alert, alert_interval,
                    max_alert_count, is_restart, max_restart_count, restart_timeout,
                    ssh_user, ssh_timeout, is_monitor, status
                ) VALUES (
                    %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
                    %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s
                )
            """, (
                data['cluster_tag'],
                data['config_name'],
                data.get('config_desc'),
                data['fe_master_node'],
                data.get('fe_slave_nodes', ''),
                data.get('has_fe_slaves', 1),
                data['be_nodes'],
                data.get('metrics_port', 9010),
                data.get('fe_http_port', 8060),
                data.get('be_http_port', 8071),
                data.get('fe_log_port', 9010),
                data.get('starrocks_home', '/opt/datasophon/StarRocks-3.3.16'),
                data.get('control_script'),
                data.get('starrocks_user', 'starrocks'),
                data.get('fe_log_dir'),
                data.get('be_log_dir'),
                data.get('log_backup_dir', '/opt/starrocks_log_backup'),
                data.get('log_keep_days', 2),
                data.get('log_backup_days', 7),
                data.get('prometheus_url'),
                data.get('fe_query'),
                data.get('be_query'),
                data.get('enable_prometheus', 1),
                data.get('prometheus_required', 0),
                data.get('webhook_url'),
                data.get('is_alert', 1),
                data.get('alert_interval', 360),
                data.get('max_alert_count', 3),
                data.get('is_restart', 1),
                data.get('max_restart_count', 3),
                data.get('restart_timeout', 120),
                data.get('ssh_user', 'root'),
                data.get('ssh_timeout', 30),
                data.get('is_monitor', 1),
                # NOTE(review): default status 'enAll' matches the default
                # switches (monitor=1, alert=1, restart=1) used above.
                data.get('status', 'enAll')
            ))
            conn.commit()

            new_id = cur.lastrowid
            current_app.logger.info(f"Config created | id: {new_id} | cluster: {data['cluster_tag']}")

            return {
                "success": True,
                "message": "配置创建成功",
                "data": {"id": new_id}
            }
    except Exception as e:
        # Roll back the transaction and surface the error as a 500 envelope.
        conn.rollback()
        current_app.logger.error(f"Failed to create config | error: {str(e)}")
        return {
            "success": False,
            "message": f"创建配置失败: {str(e)}",
            "data": None
        }, 500
    finally:
        conn.close()


@bp.put("/configs/<int:config_id>")
@log_api_call("starrocks_cluster_monitor.update_config")
def update_config(config_id):
    """Update a cluster monitor config and recompute its derived status.

    `status` is derived from the three switches (merged request/DB values):
        monitor=0                     -> 'disAll'   (alert/restart forced to 0)
        monitor=1, alert=1, restart=1 -> 'enAll'
        monitor=1, alert=1, restart=0 -> 'enAlert'
        monitor=1, alert=0, restart=1 -> 'disAlert'
        monitor=1, alert=0, restart=0 -> 'running'

    cluster_tag is intentionally not updatable (read-only after creation).
    Returns the refreshed switch/status columns so the UI stays consistent.
    """
    # silent=True + {} fallback: a missing/invalid JSON body now produces the
    # "nothing to update" 400 path instead of AttributeError on None (500).
    data = request.get_json(silent=True) or {}
    current_app.logger.info(f"Updating config | id: {config_id}")

    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            # Read current switch values so absent request keys keep old state.
            cur.execute("""
                SELECT is_monitor, is_alert, is_restart 
                FROM starrocks_cluster_monitor_config 
                WHERE id = %s
            """, (config_id,))
            current = cur.fetchone()

            if not current:
                return {
                    "success": False,
                    "message": "配置不存在",
                    "data": None
                }, 404

            # Merge requested values over the stored ones.
            is_monitor = data.get('is_monitor', current['is_monitor'])
            is_alert = data.get('is_alert', current['is_alert'])
            is_restart = data.get('is_restart', current['is_restart'])

            # Debug trace of raw values and types. NOTE(review): the == 0 / == 1
            # comparisons below assume integer switches; a client sending string
            # "0"/"1" would skip both branches and leave status unchanged.
            current_app.logger.info(
                f"Status calculation START | config_id: {config_id} | "
                f"current in DB: monitor={current['is_monitor']} (type:{type(current['is_monitor'])}), "
                f"alert={current['is_alert']} (type:{type(current['is_alert'])}), "
                f"restart={current['is_restart']} (type:{type(current['is_restart'])}) | "
                f"request data: {data} | "
                f"merged values: monitor={is_monitor} (type:{type(is_monitor)}), "
                f"alert={is_alert} (type:{type(is_alert)}), "
                f"restart={is_restart} (type:{type(is_restart)})"
            )

            # Linkage rule: turning monitoring off also turns off alert/restart.
            if is_monitor == 0:
                data['status'] = 'disAll'
                data['is_alert'] = 0
                data['is_restart'] = 0
                current_app.logger.info("Status calculation | monitor=0 → status=disAll, forcing alert=0, restart=0")
            elif is_monitor == 1:
                # Monitoring on: status depends on the alert/restart switches.
                current_app.logger.info(f"Status calculation | is_monitor==1, checking is_alert: {is_alert} == 1? {is_alert == 1}")
                if is_alert == 1:
                    current_app.logger.info(f"Status calculation | is_alert==1, checking is_restart: {is_restart} == 1? {is_restart == 1}")
                    if is_restart == 1:
                        data['status'] = 'enAll'  # everything enabled
                        current_app.logger.info("Status calculation | monitor=1, alert=1, restart=1 → status=enAll")
                    else:
                        data['status'] = 'enAlert'  # monitor + alert only
                        current_app.logger.info("Status calculation | monitor=1, alert=1, restart=0 → status=enAlert")
                else:
                    current_app.logger.info(f"Status calculation | is_alert==0, checking is_restart: {is_restart} == 1? {is_restart == 1}")
                    if is_restart == 1:
                        data['status'] = 'disAlert'  # monitor + restart, no alert
                        current_app.logger.info("Status calculation | monitor=1, alert=0, restart=1 → status=disAlert")
                    else:
                        data['status'] = 'running'  # monitoring only
                        current_app.logger.info("Status calculation | monitor=1, alert=0, restart=0 → status=running")

            current_app.logger.info(
                f"Status calculation END | config_id: {config_id} | final status: {data.get('status')}"
            )

            # Build the UPDATE statement from the whitelisted fields only.
            update_fields = []
            params = []

            # Updatable columns (cluster_tag excluded — read-only when editing).
            allowed_fields = {
                'config_name', 'config_desc', 'fe_master_node', 'fe_slave_nodes',
                'has_fe_slaves', 'be_nodes', 'metrics_port', 'fe_http_port',
                'be_http_port', 'fe_log_port', 'starrocks_home', 'control_script',
                'starrocks_user', 'fe_log_dir', 'be_log_dir', 'log_backup_dir',
                'log_keep_days', 'log_backup_days', 'prometheus_url', 'fe_query',
                'be_query', 'enable_prometheus', 'prometheus_required', 'webhook_url',
                'is_alert', 'alert_interval', 'max_alert_count', 'is_restart',
                'max_restart_count', 'restart_timeout', 'ssh_user', 'ssh_timeout',
                'is_monitor', 'status'
            }

            current_app.logger.info(f"Building UPDATE statement | data dict keys: {list(data.keys())}, data dict: {data}")

            # Column names come from the whitelist above, never from the request,
            # so the f-string below cannot inject SQL; values stay parameterized.
            for field in allowed_fields:
                if field in data:
                    update_fields.append(f"{field} = %s")
                    params.append(data[field])

            current_app.logger.info(f"UPDATE fields: {update_fields}, params: {params}")

            if not update_fields:
                return {
                    "success": False,
                    "message": "没有需要更新的字段",
                    "data": None
                }, 400

            params.append(config_id)

            cur.execute(
                f"UPDATE starrocks_cluster_monitor_config SET {', '.join(update_fields)} WHERE id = %s",
                params
            )
            conn.commit()

            current_app.logger.info(f"Config updated | id: {config_id}, status: {data.get('status', 'N/A')}")

            # Re-read the persisted switches/status so the frontend renders the
            # authoritative state rather than what it optimistically sent.
            with conn.cursor() as cur:
                cur.execute("""
                    SELECT id, cluster_tag, config_name, is_monitor, is_alert, is_restart, status
                    FROM starrocks_cluster_monitor_config
                    WHERE id = %s
                """, (config_id,))
                updated_config = cur.fetchone()

            current_app.logger.info(f"Returning updated config to frontend: {updated_config}")

            return {
                "success": True,
                "message": "配置更新成功",
                "data": updated_config
            }
    except Exception as e:
        conn.rollback()
        current_app.logger.error(f"Failed to update config | id: {config_id} | error: {str(e)}")
        return {
            "success": False,
            "message": f"更新配置失败: {str(e)}",
            "data": None
        }, 500
    finally:
        conn.close()


@bp.delete("/configs/<int:config_id>")
@log_api_call("starrocks_cluster_monitor.delete_config")
def delete_config(config_id):
    """Delete one cluster monitor config by id; 404 if it does not exist."""
    current_app.logger.info(f"Deleting config | id: {config_id}")

    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            # Look the row up first so the log line can name what was removed.
            cur.execute("""
                SELECT cluster_tag, config_name 
                FROM starrocks_cluster_monitor_config 
                WHERE id = %s
            """, (config_id,))
            existing = cur.fetchone()
            if existing is None:
                return {
                    "success": False,
                    "message": "配置不存在",
                    "data": None
                }, 404

            cur.execute("DELETE FROM starrocks_cluster_monitor_config WHERE id = %s", (config_id,))
            conn.commit()

            cluster_tag = existing['cluster_tag']
            config_name = existing['config_name']
            current_app.logger.info(f"Config deleted | id: {config_id} | cluster: {cluster_tag} | name: {config_name}")

            return {
                "success": True,
                "message": "配置删除成功",
                "data": None
            }
    except Exception as e:
        # Undo any partial work and report the failure as a 500 envelope.
        conn.rollback()
        current_app.logger.error(f"Failed to delete config | id: {config_id} | error: {str(e)}")
        return {
            "success": False,
            "message": f"删除配置失败: {str(e)}",
            "data": None
        }, 500
    finally:
        conn.close()


@bp.get("/restart-history")
@log_api_call("starrocks_cluster_monitor.list_restart_history")
def list_restart_history():
    """List StarRocks restart history with pagination and filters.

    Query parameters:
        node_name: substring (LIKE) match.
        component / cluster_tag / restart_result / status: exact matches.
        start_time / end_time: inclusive bounds applied to start_time.
        page / size: 1-based pagination; both are clamped to >= 1.

    Returns rows (timestamps formatted as strings), total count, page, size.
    """
    current_app.logger.info("Listing StarRocks restart history")

    node_name = request.args.get("node_name")
    component = request.args.get("component")
    cluster_tag = request.args.get("cluster_tag")
    restart_result = request.args.get("restart_result")
    status = request.args.get("status")
    start_time = request.args.get("start_time")
    end_time = request.args.get("end_time")
    # Clamp so a page/size of 0 or below can never yield a negative OFFSET,
    # which MySQL rejects as a syntax error.
    page = max(request.args.get("page", 1, type=int), 1)
    size = max(request.args.get("size", 20, type=int), 1)

    # Build a parameterized WHERE clause from the provided filters.
    where = []
    params = []

    if node_name:
        where.append("node_name LIKE %s")
        params.append(f"%{node_name}%")

    if component:
        where.append("component = %s")
        params.append(component)

    if cluster_tag:
        where.append("cluster_tag = %s")
        params.append(cluster_tag)

    if restart_result:
        where.append("restart_result = %s")
        params.append(restart_result)

    if status:
        where.append("status = %s")
        params.append(status)

    # Both time bounds filter on start_time (the restart's begin timestamp).
    if start_time:
        where.append("start_time >= %s")
        params.append(start_time)

    if end_time:
        where.append("start_time <= %s")
        params.append(end_time)

    where_sql = (" WHERE " + " AND ".join(where)) if where else ""

    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            # Total row count for the same filter set.
            count_sql = f"SELECT COUNT(*) as total FROM starrocks_restart_history {where_sql}"
            cur.execute(count_sql, params)
            total_result = cur.fetchone()
            total = total_result['total'] if total_result else 0

            # Page of rows, newest restarts first.
            offset = (page - 1) * size
            cur.execute(
                f"""
                SELECT 
                    id, restart_key, node_name, component, cluster_tag,
                    restart_command, restart_result, error_message,
                    execution_count, start_time, end_time, duration_seconds,
                    status, total_attempts, successful_restarts,
                    created_at, updated_at
                FROM starrocks_restart_history
                {where_sql}
                ORDER BY start_time DESC
                LIMIT %s OFFSET %s
                """,
                (*params, size, offset),
            )
            rows = cur.fetchall()

            # Render all datetime columns as strings for JSON serialization.
            for row in rows:
                for ts_field in ('start_time', 'end_time', 'created_at', 'updated_at'):
                    value = row.get(ts_field)
                    if value and hasattr(value, 'strftime'):
                        row[ts_field] = value.strftime('%Y-%m-%d %H:%M:%S')

            return {
                "success": True,
                "message": "ok",
                "data": {
                    "rows": rows,
                    "total": total,
                    "page": page,
                    "size": size
                }
            }
    finally:
        conn.close()


@bp.get("/stats")
@log_api_call("starrocks_cluster_monitor.get_stats")
def get_stats():
    """Return aggregate stats: config totals plus restart counts (all / last 7 days)."""
    current_app.logger.info("Getting StarRocks monitor stats")

    conn = get_db_connection()
    try:
        with conn.cursor() as cur:
            def count_of(sql, args=()):
                # Every stats query yields a single row aliased as `total`.
                cur.execute(sql, args)
                return cur.fetchone()['total']

            # Total number of configured clusters.
            total_configs = count_of("SELECT COUNT(*) as total FROM starrocks_cluster_monitor_config")

            # Configs currently active (status 'running' or 'enAll').
            running_configs = count_of("""
                SELECT COUNT(*) as total 
                FROM starrocks_cluster_monitor_config 
                WHERE status IN ('running', 'enAll')
            """)

            # All-time restart count.
            total_restarts = count_of("SELECT COUNT(*) as total FROM starrocks_restart_history")

            # Restarts within the trailing 7-day window.
            seven_days_ago = (datetime.now() - timedelta(days=7)).strftime('%Y-%m-%d %H:%M:%S')
            recent_restarts = count_of("""
                SELECT COUNT(*) as total 
                FROM starrocks_restart_history 
                WHERE start_time >= %s
            """, (seven_days_ago,))

            return {
                "success": True,
                "message": "ok",
                "data": {
                    "total_configs": total_configs,
                    "running_configs": running_configs,
                    "total_restarts": total_restarts,
                    "recent_restarts": recent_restarts
                }
            }
    finally:
        conn.close()

