#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Kafka Topic监控管理模块
提供Kafka Topic监控数据的查询、更新、删除等功能
"""

import json
import logging
import time
import random
from flask import Blueprint, request, jsonify
from app.utils.logger import log_business_logic
from app.utils.mysql_db import get_db_cursor, get_db_connection
from app.utils.kafka_utils import delete_consumer_group_safe

bp = Blueprint('kafka_topic_monitor', __name__, url_prefix='/api/kafka-topic-monitor')
logger = logging.getLogger(__name__)


@bp.route('/kafka-configs', methods=['GET'])
@log_business_logic("获取Kafka配置列表")
def get_kafka_configs():
    """Return all active, non-deleted Kafka service configs.

    Feeds the frontend environment dropdown; rows are ordered by
    sort_order then id.
    """
    try:
        with get_db_cursor() as cursor:
            cursor.execute("""
                SELECT id, config_name, host, port, username, description
                FROM service_config 
                WHERE service_type = 'kafka' AND is_deleted = 0 AND is_active = 1
                ORDER BY sort_order, id
            """)
            rows = cursor.fetchall()
            payload = {"success": True, "data": rows}
            return jsonify(payload)
    except Exception as exc:
        logger.error(f"获取Kafka配置列表失败: {exc}")
        return jsonify({"error": str(exc)}), 500


@bp.route('/topics-list', methods=['GET'])
@log_business_logic("获取Topic列表")
def get_topics_list():
    """Return the distinct topic names recorded for one Kafka environment.

    Requires ?config_id=<int>; used to populate the topic dropdown.
    """
    try:
        config_id = request.args.get('config_id', type=int)
        if not config_id:
            return jsonify({"error": "缺少参数: config_id"}), 400

        with get_db_cursor() as cursor:
            cursor.execute("""
                SELECT DISTINCT topic_name
                FROM kafka_topic_monitor
                WHERE config_id = %s
                ORDER BY topic_name
            """, (config_id,))
            rows = cursor.fetchall()
            # Flatten the row dicts into a plain list of topic-name strings.
            names = [row['topic_name'] for row in rows]
            return jsonify({"success": True, "data": names})
    except Exception as exc:
        logger.error(f"获取Topic列表失败: {exc}")
        return jsonify({"error": str(exc)}), 500


@bp.route('/list', methods=['GET'])
@log_business_logic("获取Kafka监控列表")
def get_kafka_monitors():
    """List Kafka topic monitor records with pagination, sorting and filters.

    Query params:
        config_id (required): service_config id of the Kafka environment.
        page / pageSize: pagination (clamped to >= 1).
        topic_name: exact topic filter; takes precedence over `search`.
        search: fuzzy match on topic_name or partition_id.
        status / is_monitored / is_alert_enabled / alert_status: filters.
        sort / order: sort column (whitelisted) and direction.

    Alert status values:
    - normal:   no alert
    - alerting: message delay exceeded the threshold
    Only these two states are used; the legacy 'recovered' state is retired.
    """
    from datetime import timedelta  # hoisted out of the per-row loop

    try:
        config_id = request.args.get('config_id', type=int)
        if not config_id:
            return jsonify({"error": "缺少参数: config_id"}), 400

        # Clamp paging values so a zero/negative input cannot produce a
        # negative OFFSET (a SQL error).
        page = max(request.args.get('page', 1, type=int), 1)
        page_size = max(request.args.get('pageSize', 20, type=int), 1)
        search = request.args.get('search', '').strip()
        topic_name = request.args.get('topic_name', '').strip()  # exact topic filter
        status = request.args.get('status', '').strip()
        is_monitored = request.args.get('is_monitored', '').strip()
        is_alert_enabled = request.args.get('is_alert_enabled', '').strip()
        alert_status = request.args.get('alert_status', '').strip()
        sort = request.args.get('sort', 'last_alert_time').strip()
        order = request.args.get('order', 'desc').strip()

        # Whitelist the sort column: it is interpolated into the SQL below,
        # so this check is what prevents ORDER BY injection.
        allowed_sort_fields = [
            'topic_name', 'partition_id', 'latest_offset', 'message_timestamp',
            'consumer_lag', 'topic_size_bytes', 'is_monitored', 'is_alert_enabled',
            'alert_threshold_hours', 'status', 'alert_status', 'alert_count',
            'last_alert_time', 'first_alert_time', 'created_at', 'updated_at'
        ]
        if sort not in allowed_sort_fields:
            sort = 'last_alert_time'

        # Normalize the sort direction.
        order = order.lower()
        if order not in ('asc', 'desc'):
            order = 'desc'

        with get_db_connection() as conn:
            with conn.cursor() as cursor:
                # Temporarily enlarge the session sort buffer to avoid MySQL
                # "Out of sort memory" errors on wide rows.
                try:
                    cursor.execute("SET SESSION sort_buffer_size = 4194304")  # 4MB
                except Exception:
                    # Best-effort tuning only; the query still works with the
                    # server default (narrowed from a bare `except:`).
                    pass

                # Build the WHERE clause from the supplied filters.
                where_conditions = ["config_id = %s"]
                params = [config_id]

                # Exact topic filter takes precedence over fuzzy search.
                if topic_name:
                    where_conditions.append("topic_name = %s")
                    params.append(topic_name)
                elif search:
                    where_conditions.append("(topic_name LIKE %s OR partition_id LIKE %s)")
                    params.extend([f'%{search}%', f'%{search}%'])

                if status:
                    where_conditions.append("status = %s")
                    params.append(status)

                if is_monitored != '' and is_monitored is not None:
                    where_conditions.append("is_monitored = %s")
                    params.append(int(is_monitored))

                if is_alert_enabled != '' and is_alert_enabled is not None:
                    where_conditions.append("is_alert_enabled = %s")
                    params.append(int(is_alert_enabled))

                if alert_status:
                    where_conditions.append("alert_status = %s")
                    params.append(alert_status)

                where_clause = " AND ".join(where_conditions)

                # Total row count for the pagination footer.
                count_sql = f"SELECT COUNT(*) as total FROM kafka_topic_monitor WHERE {where_clause}"
                cursor.execute(count_sql, params)
                total = cursor.fetchone()['total']

                offset = (page - 1) * page_size

                # For nullable timestamp columns, sort NULL rows last in either
                # direction: (col IS NULL) is 0 for non-NULL rows, 1 for NULL.
                nullable_timestamp_fields = ['message_timestamp', 'last_alert_time', 'first_alert_time']
                if sort in nullable_timestamp_fields:
                    order_clause = f"({sort} IS NULL), {sort} {order.upper()}"
                else:
                    order_clause = f"{sort} {order.upper()}"

                sql = f"""
                    SELECT 
                        id, config_id, topic_name, partition_id, latest_offset, 
                        latest_message, message_timestamp, message_key, consumer_lag, 
                        topic_size_bytes, is_monitored, is_alert_enabled, 
                        alert_threshold_hours, webhook_url, status, 
                        alert_first_send_time, alert_second_interval, alert_third_interval, 
                        max_alert_count, alert_status, alert_count, last_alert_time, 
                        first_alert_time, alert_history_id, created_at, updated_at
                    FROM kafka_topic_monitor
                    WHERE {where_clause}
                    ORDER BY {order_clause}
                    LIMIT %s OFFSET %s
                """
                params.extend([page_size, offset])
                cursor.execute(sql, params)
                monitors = cursor.fetchall()

                # Serialize datetime / TIME / JSON columns for the response.
                datetime_fields = ('message_timestamp', 'last_alert_time',
                                   'first_alert_time', 'created_at', 'updated_at')
                for monitor in monitors:
                    for field in datetime_fields:
                        if monitor.get(field):
                            monitor[field] = monitor[field].strftime('%Y-%m-%d %H:%M:%S')

                    # MySQL TIME columns arrive as timedelta; render HH:MM:SS.
                    if isinstance(monitor.get('alert_first_send_time'), timedelta):
                        total_seconds = int(monitor['alert_first_send_time'].total_seconds())
                        hours, remainder = divmod(total_seconds, 3600)
                        minutes, seconds = divmod(remainder, 60)
                        monitor['alert_first_send_time'] = f"{hours:02d}:{minutes:02d}:{seconds:02d}"

                    # latest_message is stored as JSON text; return it parsed
                    # when possible, raw otherwise (narrowed from bare except).
                    if monitor.get('latest_message'):
                        try:
                            monitor['latest_message'] = json.loads(monitor['latest_message'])
                        except (json.JSONDecodeError, TypeError, ValueError):
                            pass

                # Aggregate stats for the whole environment (unfiltered).
                stats_sql = """
                    SELECT 
                        COUNT(*) as total_count,
                        SUM(CASE WHEN status = 'active' THEN 1 ELSE 0 END) as active_count,
                        SUM(CASE WHEN is_monitored = 1 THEN 1 ELSE 0 END) as monitored_count,
                        SUM(CASE WHEN is_alert_enabled = 1 THEN 1 ELSE 0 END) as alert_enabled_count,
                        SUM(CASE WHEN alert_status = 'alerting' THEN 1 ELSE 0 END) as alerting_count
                    FROM kafka_topic_monitor
                    WHERE config_id = %s
                """
                cursor.execute(stats_sql, [config_id])
                stats = cursor.fetchone()

                return jsonify({
                    "success": True,
                    "data": {
                        "monitors": monitors,
                        "total": total,
                        "page": page,
                        "pageSize": page_size,
                        "stats": stats
                    }
                })

    except Exception as e:
        logger.error(f"获取Kafka监控列表失败: {e}")
        logger.exception(e)
        return jsonify({"error": f"获取监控列表失败: {str(e)}"}), 500


@bp.route('/detail/<int:monitor_id>', methods=['GET'])
@log_business_logic("获取Kafka监控详情")
def get_monitor_detail(monitor_id):
    """Return one Kafka topic monitor record, serialized for JSON.

    Datetimes become '%Y-%m-%d %H:%M:%S' strings, the TIME column becomes
    'HH:MM:SS', and latest_message is parsed from JSON when possible.
    Responds 404 when the id does not exist.
    """
    from datetime import timedelta  # for MySQL TIME column handling

    try:
        with get_db_cursor() as cursor:
            cursor.execute("""
                SELECT 
                    id, config_id, topic_name, partition_id, latest_offset, 
                    latest_message, message_timestamp, message_key, consumer_lag, 
                    topic_size_bytes, is_monitored, is_alert_enabled, 
                    alert_threshold_hours, webhook_url, status, 
                    alert_first_send_time, alert_second_interval, alert_third_interval, 
                    max_alert_count, alert_status, alert_count, last_alert_time, 
                    first_alert_time, alert_history_id, created_at, updated_at
                FROM kafka_topic_monitor
                WHERE id = %s
            """, (monitor_id,))
            monitor = cursor.fetchone()

            if not monitor:
                return jsonify({"error": "监控记录不存在"}), 404

            # Serialize datetime columns.
            for field in ('message_timestamp', 'last_alert_time', 'first_alert_time',
                          'created_at', 'updated_at'):
                if monitor.get(field):
                    monitor[field] = monitor[field].strftime('%Y-%m-%d %H:%M:%S')

            # MySQL TIME columns arrive as timedelta; render as HH:MM:SS.
            if isinstance(monitor.get('alert_first_send_time'), timedelta):
                total_seconds = int(monitor['alert_first_send_time'].total_seconds())
                hours, remainder = divmod(total_seconds, 3600)
                minutes, seconds = divmod(remainder, 60)
                monitor['alert_first_send_time'] = f"{hours:02d}:{minutes:02d}:{seconds:02d}"

            # latest_message is stored as JSON text; return it parsed when
            # possible, raw otherwise (narrowed from a bare `except:`).
            if monitor.get('latest_message'):
                try:
                    monitor['latest_message'] = json.loads(monitor['latest_message'])
                except (json.JSONDecodeError, TypeError, ValueError):
                    pass

            return jsonify({
                "success": True,
                "data": monitor
            })

    except Exception as e:
        logger.error(f"获取监控详情失败: {e}")
        return jsonify({"error": str(e)}), 500


@bp.route('/update/<int:monitor_id>', methods=['PUT'])
@log_business_logic("更新Kafka监控配置")
def update_monitor(monitor_id):
    """Update one monitor's config fields, auto-deriving `status`.

    Derivation (after merging request values over the stored flags):
    - is_monitored=0                     -> inactive, alerts forced off
    - is_monitored=1, is_alert_enabled=0 -> monitored_only
    - is_monitored=1, is_alert_enabled=1 -> active
    Responds 404 for an unknown id, 400 for an empty body or no
    updatable fields.
    """
    try:
        # silent=True: a missing/invalid JSON body yields None; reject it
        # with 400 instead of crashing with a TypeError -> 500 below.
        data = request.get_json(silent=True)
        if not data:
            return jsonify({"error": "请求体不能为空"}), 400

        with get_db_connection() as conn:
            with conn.cursor() as cursor:
                # Current flags are needed to derive the new status.
                cursor.execute("""
                    SELECT is_monitored, is_alert_enabled 
                    FROM kafka_topic_monitor 
                    WHERE id = %s
                """, (monitor_id,))
                current = cursor.fetchone()

                if not current:
                    return jsonify({"error": "记录不存在"}), 404

                # Effective flags = request value if supplied, else stored.
                is_monitored = current['is_monitored']
                is_alert_enabled = current['is_alert_enabled']
                if 'is_monitored' in data:
                    is_monitored = data['is_monitored']
                if 'is_alert_enabled' in data:
                    is_alert_enabled = data['is_alert_enabled']

                # Derive status. Monitoring off with alerts on is meaningless,
                # so disabling monitoring also force-disables alerts.
                if is_monitored == 0:
                    data['status'] = 'inactive'
                    data['is_alert_enabled'] = 0
                elif is_monitored == 1:
                    if is_alert_enabled == 1:
                        data['status'] = 'active'
                    else:
                        data['status'] = 'monitored_only'

                # Build the SET clause from whitelisted fields only.
                update_fields = []
                params = []

                updatable_fields = [
                    'is_monitored', 'is_alert_enabled', 'alert_threshold_hours',
                    'webhook_url', 'status', 'alert_first_send_time',
                    'alert_second_interval', 'alert_third_interval', 'max_alert_count'
                ]

                for field in updatable_fields:
                    if field in data:
                        update_fields.append(f"{field} = %s")
                        params.append(data[field])

                if not update_fields:
                    return jsonify({"error": "没有可更新的字段"}), 400

                params.append(monitor_id)

                sql = f"""
                    UPDATE kafka_topic_monitor
                    SET {', '.join(update_fields)}, updated_at = CURRENT_TIMESTAMP
                    WHERE id = %s
                """
                cursor.execute(sql, params)
                conn.commit()

                return jsonify({
                    "success": True,
                    "message": "更新成功"
                })

    except Exception as e:
        logger.error(f"更新监控配置失败: {e}")
        return jsonify({"error": str(e)}), 500


@bp.route('/delete/<int:monitor_id>', methods=['DELETE'])
@log_business_logic("删除Kafka监控记录")
def delete_monitor(monitor_id):
    """Hard-delete one kafka_topic_monitor row by primary key."""
    try:
        with get_db_connection() as conn:
            with conn.cursor() as cursor:
                cursor.execute(
                    "DELETE FROM kafka_topic_monitor WHERE id = %s",
                    (monitor_id,),
                )
                conn.commit()
                return jsonify({"success": True, "message": "删除成功"})
    except Exception as exc:
        logger.error(f"删除监控记录失败: {exc}")
        return jsonify({"error": str(exc)}), 500


@bp.route('/batch-update', methods=['POST'])
@log_business_logic("批量更新Kafka监控配置")
def batch_update_monitors():
    """Batch-update monitor config fields.

    Body: either `monitor_ids` (explicit list) or `config_id` (all records
    of that environment), plus `update_data` with the fields to set.
    `status` is re-derived per record from the effective flags:
    is_monitored=0 -> inactive (alerts forced off); is_monitored=1 ->
    active or monitored_only depending on is_alert_enabled.
    """
    try:
        # silent=True: a missing/invalid JSON body becomes {} and falls into
        # the 400 below instead of crashing with a 500.
        data = request.get_json(silent=True) or {}
        monitor_ids = data.get('monitor_ids', [])
        config_id = data.get('config_id')  # optional: update a whole environment
        update_data = data.get('update_data', {})

        # Resolve config_id into the full id list when no explicit ids given.
        if config_id and not monitor_ids:
            with get_db_connection() as conn:
                with conn.cursor() as cursor:
                    cursor.execute("""
                        SELECT id FROM kafka_topic_monitor 
                        WHERE config_id = %s
                    """, (config_id,))
                    records = cursor.fetchall()
                    monitor_ids = [r['id'] for r in records]

        if not monitor_ids:
            return jsonify({"error": "未指定监控记录ID或config_id"}), 400

        with get_db_connection() as conn:
            with conn.cursor() as cursor:
                # Process each record individually so the derived status
                # reflects that record's (possibly unchanged) stored flags.
                success_count = 0

                for monitor_id in monitor_ids:
                    cursor.execute("""
                        SELECT is_monitored, is_alert_enabled 
                        FROM kafka_topic_monitor 
                        WHERE id = %s
                    """, (monitor_id,))
                    current = cursor.fetchone()

                    if not current:
                        # Skip ids that no longer exist.
                        continue

                    # Effective flags = request value if supplied, else stored.
                    is_monitored = current['is_monitored']
                    is_alert_enabled = current['is_alert_enabled']
                    if 'is_monitored' in update_data:
                        is_monitored = update_data['is_monitored']
                    if 'is_alert_enabled' in update_data:
                        is_alert_enabled = update_data['is_alert_enabled']

                    # Derive status; disabling monitoring also disables alerts.
                    record_update_data = update_data.copy()
                    if is_monitored == 0:
                        record_update_data['status'] = 'inactive'
                        record_update_data['is_alert_enabled'] = 0
                    elif is_monitored == 1:
                        if is_alert_enabled == 1:
                            record_update_data['status'] = 'active'
                        else:
                            record_update_data['status'] = 'monitored_only'

                    # Build the SET clause from whitelisted fields only.
                    update_fields = []
                    params = []

                    updatable_fields = [
                        'is_monitored', 'is_alert_enabled', 'alert_threshold_hours',
                        'webhook_url', 'status', 'alert_first_send_time',
                        'alert_second_interval', 'alert_third_interval', 'max_alert_count'
                    ]

                    for field in updatable_fields:
                        if field in record_update_data:
                            update_fields.append(f"{field} = %s")
                            params.append(record_update_data[field])

                    if update_fields:
                        params.append(monitor_id)
                        sql = f"""
                            UPDATE kafka_topic_monitor
                            SET {', '.join(update_fields)}, updated_at = CURRENT_TIMESTAMP
                            WHERE id = %s
                        """
                        cursor.execute(sql, params)
                        success_count += 1

                conn.commit()

                return jsonify({
                    "success": True,
                    "message": f"成功更新{success_count}条记录"
                })

    except Exception as e:
        logger.error(f"批量更新失败: {e}")
        return jsonify({"error": str(e)}), 500


@bp.route('/batch-delete', methods=['POST'])
@log_business_logic("批量删除Kafka监控记录")
def batch_delete_monitors():
    """Batch-delete monitor records by id list (body: {"monitor_ids": [...]})."""
    try:
        # silent=True: a missing/invalid JSON body becomes {} and is rejected
        # with 400 below instead of crashing with a 500.
        data = request.get_json(silent=True) or {}
        monitor_ids = data.get('monitor_ids', [])

        if not monitor_ids:
            return jsonify({"error": "未指定监控记录ID"}), 400

        # One %s placeholder per id keeps the IN clause fully parameterized.
        placeholders = ','.join(['%s'] * len(monitor_ids))

        with get_db_connection() as conn:
            with conn.cursor() as cursor:
                sql = f"DELETE FROM kafka_topic_monitor WHERE id IN ({placeholders})"
                cursor.execute(sql, monitor_ids)
                conn.commit()

                return jsonify({
                    "success": True,
                    "message": f"成功删除{len(monitor_ids)}条记录"
                })

    except Exception as e:
        logger.error(f"批量删除失败: {e}")
        return jsonify({"error": str(e)}), 500


@bp.route('/reset-alert/<int:monitor_id>', methods=['POST'])
@log_business_logic("重置告警状态")
def reset_alert_status(monitor_id):
    """Reset one monitor's alert state back to 'normal'.

    Clears the alert counter and both alert timestamps, moving the record
    out of the 'alerting' state.
    """
    try:
        with get_db_connection() as conn:
            with conn.cursor() as cursor:
                reset_sql = """
                    UPDATE kafka_topic_monitor
                    SET alert_status = 'normal', 
                        alert_count = 0, 
                        last_alert_time = NULL, 
                        first_alert_time = NULL,
                        updated_at = CURRENT_TIMESTAMP
                    WHERE id = %s
                """
                cursor.execute(reset_sql, (monitor_id,))
                conn.commit()
                return jsonify({"success": True, "message": "告警状态已重置为正常"})
    except Exception as exc:
        logger.error(f"重置告警状态失败: {exc}")
        return jsonify({"error": str(exc)}), 500


@bp.route('/batch-reset-alert', methods=['POST'])
@log_business_logic("批量重置告警状态")
def batch_reset_alert_status():
    """Batch-reset alert state to 'normal' (body: {"monitor_ids": [...]}).

    Clears alert_count and both alert timestamps for every given id.
    """
    try:
        # silent=True: a missing/invalid JSON body becomes {} and is rejected
        # with 400 below instead of crashing with a 500.
        data = request.get_json(silent=True) or {}
        monitor_ids = data.get('monitor_ids', [])

        if not monitor_ids:
            return jsonify({"error": "未指定监控记录ID"}), 400

        # One %s placeholder per id keeps the IN clause fully parameterized.
        placeholders = ','.join(['%s'] * len(monitor_ids))

        with get_db_connection() as conn:
            with conn.cursor() as cursor:
                sql = f"""
                    UPDATE kafka_topic_monitor
                    SET alert_status = 'normal', 
                        alert_count = 0, 
                        last_alert_time = NULL, 
                        first_alert_time = NULL,
                        updated_at = CURRENT_TIMESTAMP
                    WHERE id IN ({placeholders})
                """
                cursor.execute(sql, monitor_ids)
                conn.commit()

                return jsonify({
                    "success": True,
                    "message": f"成功重置{len(monitor_ids)}条记录的告警状态"
                })

    except Exception as e:
        logger.error(f"批量重置告警状态失败: {e}")
        return jsonify({"error": str(e)}), 500


@bp.route('/query-now', methods=['POST'])
@log_business_logic("立即查询Kafka Topic数据")
def query_topic_now():
    """Fetch the newest message of one topic partition from Kafka right now
    and upsert the result into kafka_topic_monitor.

    Request JSON: config_id (required), topic_name (required),
    partition_id (optional, default 0).
    Uses a throwaway consumer group that is deleted in the `finally` block.
    """
    try:
        # silent=True: a missing/invalid JSON body becomes {} and is caught
        # by the parameter check below (400) instead of crashing with a 500.
        data = request.get_json(silent=True) or {}
        config_id = data.get('config_id')
        topic_name = data.get('topic_name')
        partition_id = data.get('partition_id', 0)

        if not config_id or not topic_name:
            return jsonify({"success": False, "message": "缺少必要参数"}), 400

        # Load the Kafka connection settings for this environment.
        with get_db_connection() as conn:
            with conn.cursor() as cursor:
                cursor.execute("""
                    SELECT id, config_name, host, port, username, password, protocol
                    FROM service_config
                    WHERE id = %s AND service_type = 'kafka'
                """, (config_id,))
                kafka_config = cursor.fetchone()

                if not kafka_config:
                    return jsonify({"success": False, "message": "Kafka配置不存在"}), 404

        # Imported lazily so the module loads even without kafka-python.
        from kafka import KafkaConsumer
        from kafka.structs import TopicPartition
        from datetime import datetime, timedelta

        bootstrap_servers = f"{kafka_config['host']}:{kafka_config['port']}"
        # Random, throwaway group id: this is a one-shot query with no need
        # to persist consumer offsets.
        consumer_group_id = f"kafka_query_{int(time.time())}_{random.randint(1000, 9999)}"
        consumer_config = {
            'bootstrap_servers': bootstrap_servers,
            'group_id': consumer_group_id,
            'auto_offset_reset': 'latest',
            'enable_auto_commit': False,
            'consumer_timeout_ms': 5000,
            'max_poll_records': 1,
            'value_deserializer': lambda v: v.decode('utf-8', errors='ignore') if v else None,
            'key_deserializer': lambda k: k.decode('utf-8', errors='ignore') if k else None,
            'request_timeout_ms': 30000,
        }

        # Optional SASL/PLAIN authentication.
        if kafka_config.get('protocol') == 'SASL_PLAINTEXT':
            consumer_config.update({
                'security_protocol': 'SASL_PLAINTEXT',
                'sasl_mechanism': 'PLAIN',
                'sasl_plain_username': kafka_config['username'],
                'sasl_plain_password': kafka_config['password'],
            })

        consumer = None
        try:
            consumer = KafkaConsumer(**consumer_config)

            tp = TopicPartition(topic_name, partition_id)

            # Offset range of the target partition.
            beginning_offsets = consumer.beginning_offsets([tp])
            end_offsets = consumer.end_offsets([tp])

            start_offset = beginning_offsets[tp]
            end_offset = end_offsets[tp]

            logger.info(f"查询Topic [{topic_name}] 分区 [{partition_id}]: offset范围 {start_offset} -> {end_offset}")

            # Lag/size are not computed by this endpoint (stored as 0).
            consumer_lag = 0
            topic_size_bytes = 0

            message_data = None
            message_key = None
            message_timestamp = None

            if end_offset > 0:
                # Seek to the last record in the partition and read it.
                consumer.assign([tp])
                consumer.seek(tp, max(start_offset, end_offset - 1))

                records = consumer.poll(timeout_ms=5000, max_records=1)

                if records and tp in records and len(records[tp]) > 0:
                    msg = records[tp][0]
                    # Truncate value/key to keep the DB row size bounded.
                    message_data = msg.value[:1000] if msg.value else None
                    message_key = msg.key[:100] if msg.key else None
                    message_timestamp = datetime.fromtimestamp(msg.timestamp / 1000.0) if msg.timestamp else None
                    logger.info(f"成功获取最新消息: offset={end_offset}, 时间={message_timestamp}")
                else:
                    # No message readable: keep the timestamp as None rather
                    # than faking "now", which would mask real staleness.
                    message_timestamp = None
                    logger.info(f"Topic [{topic_name}] 分区 [{partition_id}] 读取不到消息，时间戳为None")
            else:
                # Partition has never had a message.
                message_timestamp = None
                logger.info(f"Topic [{topic_name}] 分区 [{partition_id}] 没有任何消息，时间戳为None")

        # Store valid JSON verbatim; wrap anything else as a JSON string
            # so the column always holds parseable JSON.
            message_to_store = None
            if message_data:
                try:
                    json.loads(message_data)
                    message_to_store = message_data
                    logger.debug("消息是有效JSON，直接存储原始内容")
                except (json.JSONDecodeError, ValueError):
                    message_to_store = json.dumps(message_data, ensure_ascii=False)
                    logger.debug("消息不是JSON，转换为JSON字符串存储")

            # UPSERT so repeated queries never hit the unique-key conflict.
            with get_db_connection() as conn:
                with conn.cursor() as cursor:
                    # INSERT-only default timestamp: the real message time if
                    # known, otherwise 7 days ago so a brand-new row does not
                    # immediately trip the staleness alert. On UPDATE, a NULL
                    # message_timestamp keeps the stored value (see the IF()).
                    insert_timestamp = message_timestamp if message_timestamp else (datetime.now() - timedelta(days=7))

                    upsert_sql = """
                        INSERT INTO kafka_topic_monitor (
                            config_id, topic_name, partition_id,
                            latest_offset, latest_message, message_timestamp, message_key,
                            consumer_lag, topic_size_bytes,
                            is_monitored, is_alert_enabled, alert_threshold_hours,
                            webhook_url, status,
                            created_at, updated_at
                        ) VALUES (
                            %s, %s, %s,
                            %s, %s, %s, %s,
                            %s, %s,
                            1, 0, 24,
                            '', 'active',
                            NOW(), NOW()
                        )
                        ON DUPLICATE KEY UPDATE
                            latest_offset = VALUES(latest_offset),
                            message_key = VALUES(message_key),
                            consumer_lag = VALUES(consumer_lag),
                            topic_size_bytes = VALUES(topic_size_bytes),
                            updated_at = NOW(),
                            latest_message = IF(%s IS NOT NULL, %s, latest_message),
                            message_timestamp = IF(%s IS NOT NULL, %s, message_timestamp)
                    """

                    cursor.execute(upsert_sql, (
                        # INSERT values
                        config_id, topic_name, partition_id,
                        end_offset, message_to_store, insert_timestamp, message_key,
                        consumer_lag, topic_size_bytes,
                        # UPDATE-branch IF() guards and values
                        message_to_store, message_to_store,
                        message_timestamp, message_timestamp
                    ))

                    # MySQL rowcount semantics: 1 = inserted, 2 = updated,
                    # 0 = duplicate key but nothing changed.
                    affected_rows = cursor.rowcount
                    if affected_rows == 1:
                        logger.info(f"插入新的Topic监控记录: {topic_name} 分区 {partition_id}")
                    elif affected_rows == 2:
                        logger.info(f"更新已有Topic监控记录: {topic_name} 分区 {partition_id}")
                    else:
                        logger.info(f"Topic监控记录无变化: {topic_name} 分区 {partition_id}")

                    conn.commit()

            logger.info(f"成功更新Topic [{topic_name}] 分区 [{partition_id}] 的数据")

            return jsonify({
                "success": True,
                "message": "查询成功，数据已更新",
                "data": {
                    "topic_name": topic_name,
                    "partition_id": partition_id,
                    "latest_offset": end_offset,
                    "message_timestamp": message_timestamp.strftime('%Y-%m-%d %H:%M:%S') if message_timestamp else None,
                    "message_key": message_key,
                    "consumer_lag": consumer_lag,
                    "topic_size_bytes": topic_size_bytes,
                    "has_message": message_to_store is not None,
                    "has_timestamp": message_timestamp is not None
                }
            })

        finally:
            if consumer:
                consumer.close()
                logger.debug("Kafka消费者已关闭")

                # Drop the throwaway consumer group right away.
                delete_consumer_group_safe(kafka_config, consumer_group_id)

    except Exception as e:
        # logger.exception logs the traceback through the logging system
        # instead of printing it to stderr (was traceback.print_exc()).
        logger.exception(f"立即查询Topic失败: {e}")
        return jsonify({
            "success": False,
            "message": f"查询失败: {str(e)}"
        }), 500