#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Kafka监听服务模块
提供纯Kafka数据监听、环境切换、Topic选择、智能匹配功能
"""

import asyncio
import json
import time
import logging
import random
import string
import uuid
from datetime import datetime
from typing import Optional, Dict, Any, List
from concurrent.futures import ThreadPoolExecutor
import threading

import pymysql
from flask import Blueprint, request, jsonify
from kafka import KafkaConsumer, TopicPartition
from kafka.coordinator.assignors.range import RangePartitionAssignor

from app.utils.logger import log_api_call, log_database_operation, log_business_logic
from app.utils.mysql_db import get_db_connection
from app.utils.kafka_utils import delete_consumer_group_safe

# 创建Blueprint（url_prefix在__init__.py中指定为/api/kafka-listener）
kafka_monitor_bp = Blueprint('kafka_monitor', __name__)

# 配置日志
logger = logging.getLogger(__name__)

# 全局任务管理变量 - 采用与api_listener一致的模式
CURRENT_MONITOR_TASK = None
TASK_LOCK = threading.Lock()
TASK_CANCELLED = threading.Event()  # 任务取消事件
CURRENT_KAFKA_CONSUMER = None  # 当前的Kafka消费者实例

# 全局消息存储（用于前端显示）
RECENT_MESSAGES = []
MESSAGE_LOCK = threading.Lock()
MAX_MESSAGES = 100  # 最多保存100条最新消息

# 全局消息统计
MESSAGE_STATS = {
    'total_consumed': 0,      # 总消费消息数
    'total_matched': 0        # 匹配消息数
}
STATS_LOCK = threading.Lock()

# 消息存储管理函数
def add_message_to_recent(message_data: Dict, is_matched: bool = False):
    """Append one consumed message to the shared recent-message buffer.

    Args:
        message_data: the deserialized Kafka message payload.
        is_matched: whether the message satisfied the active match conditions.

    The buffer is capped at MAX_MESSAGES entries. Trimming is done in place
    with `del` (instead of rebinding the global to a slice copy) so the list
    object stays the same one clear_recent_messages() mutates — any code
    holding a reference under MESSAGE_LOCK never sees a stale list.
    """
    with MESSAGE_LOCK:
        RECENT_MESSAGES.append({
            'timestamp': datetime.now().isoformat(),
            'data': message_data,
            'is_matched': is_matched
        })
        # Drop the oldest entries beyond the cap, mutating in place.
        if len(RECENT_MESSAGES) > MAX_MESSAGES:
            del RECENT_MESSAGES[:-MAX_MESSAGES]

def get_recent_messages() -> List[Dict]:
    """Return a shallow snapshot of the recent-message buffer."""
    with MESSAGE_LOCK:
        return list(RECENT_MESSAGES)

def clear_recent_messages():
    """Empty the recent-message buffer in place."""
    with MESSAGE_LOCK:
        del RECENT_MESSAGES[:]

def update_message_stats(consumed: int = 0, matched: int = 0):
    """Atomically add the given deltas to the global message counters."""
    with STATS_LOCK:
        for counter, delta in (('total_consumed', consumed), ('total_matched', matched)):
            MESSAGE_STATS[counter] += delta

def reset_message_stats():
    """Zero both global message counters."""
    with STATS_LOCK:
        MESSAGE_STATS.update(total_consumed=0, total_matched=0)

def get_message_stats():
    """Return a snapshot copy of the global message counters."""
    with STATS_LOCK:
        return dict(MESSAGE_STATS)

# 任务管理函数
def is_any_task_running() -> bool:
    """Report whether a monitor task is currently registered."""
    with TASK_LOCK:
        running = CURRENT_MONITOR_TASK is not None
    return running

def register_current_task(task_id: str, thread: threading.Thread, topic: str, match_conditions: List, kafka_config: Dict = None):
    """Record the newly started monitor task as the single active task.

    Only one task may run at a time; this overwrites any previous record.
    """
    global CURRENT_MONITOR_TASK
    task_record = {
        'task_id': task_id,
        'thread': thread,
        'topic': topic,
        'match_conditions': match_conditions,
        'start_time': time.time(),
        'kafka_config': kafka_config or {}
    }
    with TASK_LOCK:
        CURRENT_MONITOR_TASK = task_record

def unregister_current_task():
    """Clear the active-task record so a new monitor task can be started."""
    global CURRENT_MONITOR_TASK
    with TASK_LOCK:
        CURRENT_MONITOR_TASK = None

def get_current_task_info():
    """Return a shallow copy of the active task record, or None if idle."""
    with TASK_LOCK:
        if CURRENT_MONITOR_TASK is None:
            return None
        return dict(CURRENT_MONITOR_TASK)

def cancel_current_task():
    """Flag the active monitor task for cooperative cancellation.

    The worker thread observes the flag via is_task_cancelled(); nothing is
    forcibly stopped here.
    """
    with TASK_LOCK:
        task = CURRENT_MONITOR_TASK
        if task is None:
            logger.info("没有正在运行的任务需要取消")
            return
        TASK_CANCELLED.set()
        logger.info(f"任务 {task['task_id']} 已被标记为取消")

def is_task_cancelled() -> bool:
    """Return True once cancel_current_task() has flagged the task to stop."""
    return TASK_CANCELLED.is_set()

def cleanup_existing_kafka_consumer():
    """Close and forget any leftover Kafka consumer from a previous run.

    Best-effort: a failing close() is logged but the global reference is
    dropped regardless.
    """
    global CURRENT_KAFKA_CONSUMER
    with TASK_LOCK:
        stale_consumer = CURRENT_KAFKA_CONSUMER
        if not stale_consumer:
            return
        try:
            logger.info("🧹 发现残留的Kafka消费者，正在清理...")
            stale_consumer.close()
            logger.info("✅ 残留的Kafka消费者已清理")
        except Exception as e:
            logger.warning(f"清理Kafka消费者时出错: {e}")
        finally:
            CURRENT_KAFKA_CONSUMER = None

def register_kafka_consumer(consumer: KafkaConsumer):
    """Publish the running consumer so cleanup/cancel paths can close it."""
    global CURRENT_KAFKA_CONSUMER
    with TASK_LOCK:
        CURRENT_KAFKA_CONSUMER = consumer

def unregister_kafka_consumer():
    """Drop the global reference to the (already closed) Kafka consumer."""
    global CURRENT_KAFKA_CONSUMER
    with TASK_LOCK:
        CURRENT_KAFKA_CONSUMER = None

def success_response(data: Any = None, message: str = "操作成功") -> Dict:
    """Build the standard success envelope used by every API route."""
    envelope = {"success": True, "message": message}
    envelope["data"] = data
    envelope["timestamp"] = datetime.now().isoformat()
    return envelope

def error_response(message: str, error_code: str = "UNKNOWN_ERROR") -> Dict:
    """Build the standard error envelope used by every API route."""
    envelope = {"success": False, "error_code": error_code}
    envelope["message"] = message
    envelope["timestamp"] = datetime.now().isoformat()
    return envelope

@log_database_operation
def get_kafka_environments() -> List[Dict]:
    """Return the active Kafka environments from the service_config table.

    Each row carries id, config_name, host, port, the security_protocol
    extracted from the JSON extra_config column, and description.
    Errors are logged and re-raised to the caller.
    """
    try:
        with get_db_connection() as conn:
            # DictCursor so rows come back as dicts the API can jsonify directly.
            cursor = conn.cursor(pymysql.cursors.DictCursor)
            
            sql = """
                SELECT id, config_name, host, port, 
                       JSON_EXTRACT(extra_config, '$.security_protocol') as security_protocol,
                       description
                FROM service_config 
                WHERE service_type = 'kafka' AND is_deleted = 0 AND is_active = 1
                ORDER BY sort_order
            """
            cursor.execute(sql)
            return cursor.fetchall()
            
    except Exception as e:
        logger.error(f"获取Kafka环境失败: {e}")
        raise

@log_database_operation
def get_kafka_config_by_id(kafka_config_id: int) -> Dict:
    """Load one Kafka connection config row by primary key.

    The JSON extra_config column, when present, is parsed and merged into
    the returned dict (its keys overwrite same-named columns).

    Raises:
        ValueError: when the id does not resolve to a non-deleted kafka row.
        Exception: DB/JSON errors are logged and re-raised.
    """
    try:
        with get_db_connection() as conn:
            cursor = conn.cursor(pymysql.cursors.DictCursor)
            
            sql = """
                SELECT id, config_name, host, port, username, password, 
                       protocol, extra_config
                FROM service_config 
                WHERE id = %s AND service_type = 'kafka' AND is_deleted = 0
            """
            cursor.execute(sql, (kafka_config_id,))
            config = cursor.fetchone()
            
            if not config:
                raise ValueError(f"Kafka配置 {kafka_config_id} 不存在")
                
            # Merge the parsed extra_config JSON into the flat config dict.
            if config['extra_config']:
                extra = json.loads(config['extra_config'])
                config.update(extra)
            
            return config
            
    except Exception as e:
        logger.error(f"获取Kafka配置失败: {e}")
        raise

@log_business_logic("get_kafka_topics", log_params=False)
def get_kafka_topics(kafka_config: Dict) -> List[str]:
    """Return the sorted list of topic names for the given Kafka config.

    Tries three strategies in order:
      1. KafkaAdminClient.list_topics() (preferred),
      2. the consumer's cluster metadata via private kafka-python API,
      3. a hard-coded list of commonly used topics as a last resort.
    Internal topics (names starting with '__') are filtered out.
    """
    consumer = None
    try:
        # Parse extra_config for connection details (may be a JSON string).
        extra_config = kafka_config.get('extra_config', {})
        if isinstance(extra_config, str):
            extra_config = json.loads(extra_config)
        
        # Build bootstrap_servers, falling back to the row's host:port.
        bootstrap_servers = extra_config.get('bootstrap_servers', [f"{kafka_config['host']}:{kafka_config['port']}"])
        if isinstance(bootstrap_servers, str):
            bootstrap_servers = [bootstrap_servers]
        
        consumer_config = {
            'bootstrap_servers': bootstrap_servers,
            # Throwaway group id — this consumer is only used for metadata.
            'group_id': f'topic_list_group_{int(time.time())}_{random.randint(1000, 9999)}',
            'request_timeout_ms': 30000,
            'session_timeout_ms': 10000,
            'auto_offset_reset': 'latest',
            'enable_auto_commit': False,
            'api_version': (2, 6, 0)
        }
        
        # Add SASL authentication when a security protocol is configured.
        protocol = kafka_config.get('protocol') or extra_config.get('security_protocol')
        if protocol:
            consumer_config.update({
                'security_protocol': protocol,
                'sasl_mechanism': kafka_config.get('sasl_mechanism', 'PLAIN'),
                'sasl_plain_username': kafka_config['username'],
                'sasl_plain_password': kafka_config['password']
            })
        
        logger.info(f"创建Kafka消费者以获取Topic列表: {bootstrap_servers}")
        consumer = KafkaConsumer(**consumer_config)
        
        # Preferred path: fetch the topic list via the admin client.
        from kafka.admin import KafkaAdminClient
        
        # AdminClient accepts a narrower set of config keys than the consumer.
        admin_config = {
            'bootstrap_servers': bootstrap_servers,
            'request_timeout_ms': 30000,
            'api_version': (2, 6, 0)
        }
        
        # Same authentication settings for the admin client.
        if protocol:
            admin_config.update({
                'security_protocol': protocol,
                'sasl_mechanism': kafka_config.get('sasl_mechanism', 'PLAIN'),
                'sasl_plain_username': kafka_config['username'],
                'sasl_plain_password': kafka_config['password']
            })
        
        logger.info(f"创建Kafka AdminClient以获取Topic列表")
        admin_client = KafkaAdminClient(**admin_config)
        
        # List all topics, dropping Kafka-internal ones (e.g. __consumer_offsets).
        topics_metadata = admin_client.list_topics()
        topics = [topic for topic in topics_metadata if not topic.startswith('__')]  # filter internal topics
        
        logger.info(f"通过AdminClient获取到 {len(topics)} 个Topic")
        admin_client.close()
        return sorted(topics)
        
    except Exception as e:
        logger.warning(f"AdminClient方法失败: {e}")
        
        # Fallback: poll cluster metadata through the consumer.
        # NOTE(review): consumer._client is private kafka-python API and may
        # break across library versions — confirm before upgrading kafka-python.
        try:
            if consumer is None:
                logger.info("尝试用Consumer方法获取Topic")
                consumer = KafkaConsumer(**consumer_config)
            
            # Wait up to 10s for the consumer to connect and fetch metadata.
            max_wait_time = 10
            start_time = time.time()
            
            while time.time() - start_time < max_wait_time:
                try:
                    consumer._client.poll(timeout_ms=1000)
                    cluster_metadata = consumer._client.cluster
                    if cluster_metadata.topics():
                        topics_set = cluster_metadata.topics()
                        topics = [str(topic) for topic in topics_set if topic and not str(topic).startswith('__')]
                        logger.info(f"通过Consumer方法获取到 {len(topics)} 个Topic")
                        return sorted(topics)
                except Exception as inner_e:
                    logger.warning(f"获取元数据时出错: {inner_e}")
                time.sleep(0.5)
                
        except Exception as fallback_e:
            # NOTE(review): if json.loads above failed, consumer_config is
            # unbound here; the resulting NameError lands in this handler too.
            logger.warning(f"Consumer方法也失败: {fallback_e}")
        
        # Last resort: return a static list of commonly used topics.
        logger.warning("所有方法都失败，返回常用Topic列表")
        fallback_topics = [
            "ods_dy_video_details_one",
            "ods_dy_comment_details", 
            "ods_dy_user_details"
        ]
        return fallback_topics
        
    finally:
        if consumer:
            try:
                consumer.close()
            except:  # best-effort close; ignore all cleanup errors
                pass

def extract_field_value(data: dict, path: str):
    """Resolve a dot-separated *path* (e.g. "a.b.c") inside nested dicts.

    Returns the value found, or None when any segment is missing, a
    non-dict is encountered mid-path, or *path* is not a string.
    """
    try:
        value = data
        for key in path.split('.'):
            # Only descend through dicts; anything else ends the walk.
            if isinstance(value, dict) and key in value:
                value = value[key]
            else:
                return None
        return value
    except (AttributeError, TypeError):
        # Narrowed from a bare `except:` — this only guards a non-string
        # path and no longer swallows KeyboardInterrupt/SystemExit.
        return None

def _evaluate_match_condition(message: dict, condition: dict, idx: int):
    """Evaluate one match condition against *message*.

    Helper extracted from apply_match_conditions (the per-condition logic
    was a long inline block doing two distinct jobs).

    Returns (skipped, matched, detail):
      - skipped: True when the condition has an empty value and must not
        count toward the AND/OR result at all;
      - matched: whether the condition matched;
      - detail:  a dict describing the match, or None when not matched.
    """
    condition_type = condition.get("type")
    key = condition.get("key", "")
    value = condition.get("value", "")

    logger.debug(f"📋 条件{idx+1}: type={condition_type}, key='{key}', value='{value}'")

    if not value:  # empty value → condition is ignored entirely
        logger.debug(f"⚠️ 条件{idx+1}跳过：value为空")
        return True, False, None

    if condition_type == "key_value":
        # Exact match on a (possibly nested, dot-separated) field.
        if key:
            field_value = extract_field_value(message, key)
            logger.debug(f"🔎 条件{idx+1}提取字段值: key='{key}' -> field_value='{field_value}'")
            if field_value is not None and str(field_value) == str(value):
                logger.info(f"✅ 条件{idx+1}匹配成功: {key}={field_value}")
                return False, True, {
                    "type": "key_value",
                    "key": key,
                    "expected_value": value,
                    "actual_value": field_value
                }
            logger.debug(f"❌ 条件{idx+1}不匹配: expected='{value}' != actual='{field_value}'")
        return False, False, None

    if condition_type == "fuzzy":
        # Substring search over the whole serialized message.
        message_str = json.dumps(message, ensure_ascii=False)
        search_value_str = str(value)

        if search_value_str in message_str:
            logger.info(f"✅ 条件{idx+1}模糊匹配成功: '{search_value_str}' 存在于消息中")
            return False, True, {
                "type": "fuzzy",
                "search_value": value,
                "found_in": "message_content"
            }

        logger.warning(f"❌ 条件{idx+1}模糊匹配失败: '{search_value_str}' (长度:{len(search_value_str)}) 不存在于消息中")
        # Debug aid: look for a partial hit to help diagnose near-misses.
        if len(search_value_str) > 10:
            partial = search_value_str[:10]
            if partial in message_str:
                logger.warning(f"💡 提示: 找到部分匹配 '{partial}'，请检查完整值是否正确")
                # Show surrounding characters to help the user fix the value.
                idx_pos = message_str.find(partial)
                if idx_pos >= 0:
                    context = message_str[max(0, idx_pos-20):min(len(message_str), idx_pos+len(search_value_str)+20)]
                    logger.warning(f"💡 周边内容: {context}")
        return False, False, None

    # Unknown condition type counts as a non-match (but is not skipped).
    return False, False, None

def apply_match_conditions(message: dict, enable_matching: bool, logic_operator: str, match_conditions: list) -> dict:
    """Apply the multi-condition matching logic to one Kafka message.

    Args:
        message: the deserialized message payload.
        enable_matching: when False, every message is reported as matched.
        logic_operator: "AND" (all conditions must hit) or anything else
            for OR semantics (any condition hits).
        match_conditions: list of {"type","key","value"} dicts; entries
            with an empty value are skipped.

    Returns:
        A dict with original_message, is_matched, matched_conditions and
        timestamp (plus logic_operator when conditions were evaluated).
    """
    logger.debug(f"🔍 开始匹配: enable_matching={enable_matching}, logic_operator={logic_operator}, conditions_count={len(match_conditions) if match_conditions else 0}")
    
    if not enable_matching:
        # Matching disabled — every message is shown.
        logger.debug("✅ 匹配未启用，显示所有消息")
        return {
            "original_message": message,
            "is_matched": True,
            "matched_conditions": [],
            "timestamp": datetime.now().isoformat()
        }
    
    if not match_conditions:
        # Matching enabled but no conditions: match nothing (deliberate).
        logger.debug("❌ 启用匹配但无有效条件，不匹配任何消息")
        return {
            "original_message": message,
            "is_matched": False,
            "matched_conditions": [],
            "timestamp": datetime.now().isoformat()
        }
    
    matched_conditions = []
    condition_results = []
    
    # Evaluate each condition; skipped (empty-value) ones contribute nothing.
    for idx, condition in enumerate(match_conditions):
        skipped, hit, detail = _evaluate_match_condition(message, condition, idx)
        if skipped:
            continue
        condition_results.append(hit)
        if hit and detail:
            matched_conditions.append(detail)
    
    # Combine individual results; an all-skipped list never matches.
    if logic_operator == "AND":
        is_matched = len(condition_results) > 0 and all(condition_results)
        logger.debug(f"🔗 AND逻辑: condition_results={condition_results}, is_matched={is_matched}")
    else:  # OR
        is_matched = len(condition_results) > 0 and any(condition_results)
        logger.debug(f"🔗 OR逻辑: condition_results={condition_results}, is_matched={is_matched}")
    
    if is_matched:
        logger.info(f"🎯 最终结果: 匹配成功! 匹配条件数={len(matched_conditions)}")
    else:
        logger.debug(f"💔 最终结果: 不匹配")
    
    return {
        "original_message": message,
        "is_matched": is_matched,
        "matched_conditions": matched_conditions,
        "logic_operator": logic_operator,
        "timestamp": datetime.now().isoformat()
    }

def apply_match_rules(message: dict, match_rules: list) -> dict:
    """Legacy rule-based matcher kept for backward compatibility.

    A rule matches when its first hitting keyword is found in the value at
    rule["field_path"]; disabled rules and missing fields are skipped.
    """
    matched_rules = []

    for rule in match_rules:
        if not rule.get("enabled", True):
            continue

        field_path = rule["field_path"]
        match_type = rule["match_type"]
        keywords = rule["keywords"]

        field_value = extract_field_value(message, field_path)
        if field_value is None:
            continue

        # Record only the first keyword that hits for this rule.
        hit = next(
            (kw for kw in keywords if is_match(field_value, kw, match_type)),
            None,
        )
        if hit is not None:
            matched_rules.append({
                "field_path": field_path,
                "field_value": field_value,
                "keyword": hit,
            })

    return {
        "original_message": message,
        "is_matched": bool(matched_rules),
        "matched_rules": matched_rules,
        "timestamp": datetime.now().isoformat(),
    }

def is_match(field_value: Any, keyword: str, match_type: str) -> bool:
    """Test whether *field_value* matches *keyword* under *match_type*.

    Supported match types:
      - "exact":    string equality after str() conversion
      - "contains": substring test
      - "regex":    re.search (a malformed pattern propagates re.error)
    Any other match_type returns False.

    Fixes: the annotation used the builtin `any` function instead of
    typing.Any (imported at module top); the branch-local `import re` is
    hoisted to the function top.
    """
    import re  # kept function-local to avoid touching module-level imports

    field_str = str(field_value)

    if match_type == "exact":
        return field_str == keyword
    if match_type == "contains":
        return keyword in field_str
    if match_type == "regex":
        return bool(re.search(keyword, field_str))
    return False

def get_kafka_consumer_config(kafka_config: Dict, topic: str, consume_strategy: str, start_timestamp: Optional[int] = None, enable_matching: bool = False) -> Dict:
    """Build a display-only summary of the Kafka consumer configuration.

    Mirrors the settings start_kafka_consumer() would use. Note the group
    id shown here is freshly generated for display, not the id of any
    running consumer. Credentials are never exposed — only a masked
    username is included. On failure a minimal dict with an 'error' key
    is returned instead of raising.
    """
    try:
        extra_config = kafka_config.get('extra_config', {})
        if isinstance(extra_config, str):
            extra_config = json.loads(extra_config)
        
        bootstrap_servers = extra_config.get('bootstrap_servers', [f"{kafka_config['host']}:{kafka_config['port']}"])
        if isinstance(bootstrap_servers, str):
            bootstrap_servers = [bootstrap_servers]
        
        consumer_group_id = f"kafka_monitor_{int(time.time())}_{random.randint(1000, 9999)}"
        
        # Batch size tuned to the strategy / matching mode.
        optimal_poll_records = calculate_optimal_poll_records(enable_matching, consume_strategy)
        
        config_info = {
            'topic': topic,
            'consume_strategy': consume_strategy,
            'group_id': consumer_group_id,
            'bootstrap_servers': bootstrap_servers,
            'auto_offset_reset': consume_strategy if consume_strategy != 'timestamp' else 'latest',
            'start_timestamp': start_timestamp if consume_strategy == 'timestamp' else None,
            'enable_auto_commit': True,
            'auto_commit_interval_ms': 1000,
            'max_poll_records': optimal_poll_records,  # dynamically tuned
            'request_timeout_ms': 120000,
            'session_timeout_ms': 60000,
            'consumer_timeout_ms': 10000,
        }
        
        # Authentication summary (no secrets included).
        protocol = kafka_config.get('protocol') or extra_config.get('security_protocol')
        if protocol:
            username = kafka_config.get('username')
            # FIX: the previous `username.replace(username[:3], '***')` masked
            # every occurrence of the 3-char prefix anywhere in the name
            # (e.g. "abcabc" -> "******"); mask only the leading prefix.
            masked_username = ('***' + username[3:]) if username else None
            config_info.update({
                'security_protocol': protocol,
                'sasl_mechanism': kafka_config.get('sasl_mechanism', 'PLAIN'),
                'has_authentication': True,
                'username': masked_username
            })
        else:
            config_info['has_authentication'] = False
            
        return config_info
        
    except Exception as e:
        logger.error(f"获取Kafka配置信息失败: {e}")
        return {
            'topic': topic,
            'consume_strategy': consume_strategy,
            'error': str(e)
        }

def calculate_optimal_poll_records(enable_matching: bool = False, consume_strategy: str = 'latest') -> int:
    """
    Pick a max_poll_records value suited to the consumption strategy.

    Heuristics:
      - 'latest'    -> 100 (real-time monitoring, moderate batches)
      - 'earliest'  -> 500 (history scan, large batches)
      - 'timestamp' -> 300 (time-window queries, medium batches)
      - matching enabled: +50% (more records must be scanned to find hits)
    The result is clamped to [50, 1000].
    """
    strategy_defaults = {'latest': 100, 'earliest': 500, 'timestamp': 300}
    poll_records = strategy_defaults.get(consume_strategy, 100)

    if enable_matching:
        poll_records = int(poll_records * 1.5)

    poll_records = min(1000, max(poll_records, 50))

    logger.info(f"📊 智能拉取策略: strategy={consume_strategy}, matching={enable_matching} → {poll_records}条/次")
    return poll_records

def start_kafka_consumer(kafka_config: Dict, topic: str, consume_strategy: str, start_timestamp: Optional[int] = None, enable_matching: bool = False):
    """Start a Kafka consumer — mirrors the api_listener module's logic.

    Creates a consumer with a fresh throwaway group id, subscribes to
    *topic*, waits up to 30s for partition assignment, then positions the
    partitions per *consume_strategy* ('latest', 'earliest', or 'timestamp'
    with *start_timestamp* — assumed to be epoch milliseconds, TODO confirm).

    Returns:
        tuple: (consumer, group_id) on success, (None, None) on failure.
    """
    try:
        extra_config = kafka_config.get('extra_config', {})
        if isinstance(extra_config, str):
            extra_config = json.loads(extra_config)
        
        bootstrap_servers = extra_config.get('bootstrap_servers', [f"{kafka_config['host']}:{kafka_config['port']}"])
        if isinstance(bootstrap_servers, str):
            bootstrap_servers = [bootstrap_servers]
        
        consumer_group_id = f"kafka_monitor_{int(time.time())}_{random.randint(1000, 9999)}"
        
        # Robust JSON deserializer: returns None on parse failure so the
        # consume loop's `if not record.value` check silently skips non-JSON.
        def safe_json_deserializer(x):
            if not x:
                return None
            try:
                return json.loads(x.decode('utf-8'))
            except (json.JSONDecodeError, UnicodeDecodeError, AttributeError) as e:
                # Parse failed — log and drop instead of crashing the poll loop.
                logger.warning(f"⚠️ Kafka消息反序列化失败，跳过: {str(e)[:100]}")
                return None
        
        # Size max_poll_records to the strategy / matching mode.
        optimal_poll_records = calculate_optimal_poll_records(enable_matching, consume_strategy)
        
        consumer_config = {
            'bootstrap_servers': bootstrap_servers,
            'auto_offset_reset': consume_strategy if consume_strategy != 'timestamp' else 'latest',
            'enable_auto_commit': True,
            'auto_commit_interval_ms': 1000,
            'value_deserializer': safe_json_deserializer,  # safe deserializer (see above)
            'group_id': consumer_group_id,
            'max_poll_records': optimal_poll_records,  # dynamically tuned batch size
            'request_timeout_ms': 120000,      # raised to 120s
            'session_timeout_ms': 60000,        # raised to 60s
            'heartbeat_interval_ms': 10000,     # 10s heartbeat
            'max_poll_interval_ms': 300000,     # at most 5min between polls
            'connections_max_idle_ms': 540000,  # connections idle at most 9min
            'consumer_timeout_ms': 10000,
            'partition_assignment_strategy': [RangePartitionAssignor]
        }
        
        # Add SASL authentication when a security protocol is configured.
        protocol = kafka_config.get('protocol') or extra_config.get('security_protocol')
        if protocol:
            consumer_config.update({
                'security_protocol': protocol,
                'sasl_mechanism': kafka_config.get('sasl_mechanism', 'PLAIN'),
                'sasl_plain_username': kafka_config['username'],
                'sasl_plain_password': kafka_config['password']
            })
        
        logger.info(f"创建Kafka消费者: topic={topic}, group_id={consumer_group_id}, strategy={consume_strategy}")
        
        # Create the consumer.
        consumer = KafkaConsumer(**consumer_config)
        
        # Subscribe to the topic.
        consumer.subscribe([topic])
        
        # Wait for partition assignment (group join/rebalance) to complete.
        max_wait_seconds = 30
        start_time = time.time()
        while time.time() - start_time < max_wait_seconds:
            consumer.poll(timeout_ms=100)
            assignment = consumer.assignment()
            if assignment:
                logger.info(f"✅ Kafka消费者分区分配成功: {assignment}")
                
                # Position the partitions according to the consume strategy.
                if consume_strategy == 'timestamp' and start_timestamp:
                    timestamp_ms = start_timestamp
                    for tp in assignment:
                        try:
                            offset_dict = consumer.offsets_for_times({tp: timestamp_ms})
                            if offset_dict[tp]:
                                consumer.seek(tp, offset_dict[tp].offset)
                                logger.info(f"设置分区 {tp.partition} 到时间戳 {start_timestamp}")
                        except Exception as e:
                            logger.warning(f"设置时间戳偏移量失败: {e}")
                elif consume_strategy == 'latest':
                    consumer.seek_to_end()
                elif consume_strategy == 'earliest':
                    consumer.seek_to_beginning()
                
                # Confirm each partition has a resolved position before returning.
                try:
                    for tp in assignment:
                        position = consumer.position(tp)
                        logger.info(f"分区 {tp.partition} 当前位置: {position}")
                    logger.info("Kafka消费者完全就绪，所有分区位置已确认")
                    register_kafka_consumer(consumer)  # expose for external cleanup
                    return consumer, consumer_group_id
                except Exception as e:
                    # Position lookup failing is non-fatal; consumer is usable.
                    logger.warning(f"获取分区位置时出错: {e}，但消费者已就绪")
                    register_kafka_consumer(consumer)  # expose for external cleanup
                    return consumer, consumer_group_id
            time.sleep(1)
        
        # Assignment never arrived — release the consumer and report failure.
        consumer.close()
        logger.error("Kafka消费者分区分配超时")
        return None, None
        
    except Exception as e:
        logger.error(f"启动Kafka消费者失败: {e}")
        return None, None

def monitor_kafka_data(kafka_config: Dict, topic: str, consume_strategy: str, 
                      enable_matching: bool, logic_operator: str, match_conditions: List,
                      start_timestamp: Optional[int] = None) -> Dict:
    """Consume *topic* until the task is cancelled — background-task mode.

    Resets the shared message buffer/stats, starts a consumer via
    start_kafka_consumer(), then polls in a loop: every record is counted,
    run through apply_match_conditions(), and stored in the recent-message
    buffer (all records when matching is off, only hits when it is on).

    Returns:
        dict with 'success', 'message', and a 'stats' sub-dict
        (total_messages, matched_messages, duration_seconds).
    """
    consumer = None
    consumer_group_id = None
    messages_received = 0
    matched_messages = 0
    
    try:
        logger.info(f"开始监听Kafka数据: topic={topic}, enable_matching={enable_matching}")
        
        # Reset the shared buffer and counters from any previous run.
        clear_recent_messages()
        reset_message_stats()
        
        # Start the consumer (enable_matching tunes the poll batch size).
        consumer, consumer_group_id = start_kafka_consumer(kafka_config, topic, consume_strategy, start_timestamp, enable_matching)
        if not consumer:
            return {
                'success': False,
                'message': 'Kafka消费者启动失败'
            }
        
        # Main consume loop — runs until cancel_current_task() sets the flag.
        start_time = time.time()
        # NOTE(review): last_message_time is updated but never read — dead state?
        last_message_time = start_time
        
        while not is_task_cancelled():
            try:
                messages = consumer.poll(timeout_ms=1000)
                
                if messages:
                    last_message_time = time.time()
                    
                    for topic_partition, records in messages.items():
                        for record in records:
                            # Check cancellation per record so stop is prompt
                            # even inside a large batch.
                            if is_task_cancelled():
                                logger.info("🚫 任务已被取消，停止监听")
                                return {
                                    'success': False,
                                    'message': '任务已被取消',
                                    'stats': {
                                        'total_messages': messages_received,
                                        'matched_messages': matched_messages,
                                        'duration_seconds': int(time.time() - start_time)
                                    }
                                }
                            
                            # None/empty value = deserialization failure; skip.
                            if not record.value:
                                continue
                            
                            messages_received += 1
                            # Global stats: one more message consumed.
                            update_message_stats(consumed=1)
                            
                            # Debug: dump the raw payload for the first 5 messages.
                            if messages_received <= 5:
                                logger.warning(f"🔍 DEBUG 消息#{messages_received} 原始类型: {type(record.value)}")
                                if isinstance(record.value, bytes):
                                    logger.warning(f"🔍 DEBUG 原始bytes: {record.value[:200]}")
                                elif isinstance(record.value, dict):
                                    logger.warning(f"🔍 DEBUG 原始dict keys: {list(record.value.keys())}")
                                    logger.warning(f"🔍 DEBUG 原始dict: {json.dumps(record.value, ensure_ascii=False)[:500]}")
                                else:
                                    logger.warning(f"🔍 DEBUG 原始value: {str(record.value)[:200]}")
                            
                            # The deserializer guarantees dict/list or None here.
                            message_data = record.value
                            
                            # Debug: preview the first 200 chars of selected messages.
                            if messages_received <= 5 or messages_received % 100 == 0:
                                message_preview = json.dumps(message_data, ensure_ascii=False)[:200]
                                logger.info(f"📨 消息#{messages_received} 内容预览: {message_preview}...")
                            
                            # Apply the configured match conditions.
                            result = apply_match_conditions(
                                record.value, 
                                enable_matching, 
                                logic_operator, 
                                match_conditions
                            )
                            
                            is_matched = result["is_matched"]
                            
                            # Store according to the matching mode.
                            if enable_matching:
                                # Matching on: keep only messages that hit.
                                if is_matched:
                                    add_message_to_recent(message_data, is_matched)
                                    matched_messages += 1
                                    # Global stats: one more match.
                                    update_message_stats(matched=1)
                                    logger.info(f"✅ 找到匹配的消息 #{matched_messages}: {result.get('matched_conditions', [])}")
                            else:
                                # Matching off: keep every message.
                                add_message_to_recent(message_data, is_matched)
                            
                            # Hook point for further processing
                            # (e.g. persist to DB, forward to other systems).
                
                # Periodic status log.
                # NOTE(review): this modulo check can fire several times within
                # the same 30s boundary second — harmless but noisy.
                current_time = time.time()
                if int(current_time - start_time) % 30 == 0:  # roughly every 30s
                    duration = int(current_time - start_time)
                    logger.info(f"监听状态: 总消息={messages_received}, 匹配消息={matched_messages}, 持续时间={duration}秒")
                
                # Sleep only when the poll came back empty; otherwise keep
                # consuming at full speed.
                if not messages:
                    time.sleep(0.1)  # short nap to avoid a busy loop
                
            except Exception as poll_error:
                logger.error(f"轮询Kafka消息时出错: {poll_error}")
                time.sleep(1)
                continue
        
        # Normal exit (cancellation observed at loop top).
        duration = int(time.time() - start_time)
        logger.info(f"Kafka监听结束: 总消息={messages_received}, 匹配消息={matched_messages}, 持续时间={duration}秒")
        
        return {
            'success': True,
            'message': f'监听完成，共处理{messages_received}条消息，匹配{matched_messages}条',
            'stats': {
                'total_messages': messages_received,
                'matched_messages': matched_messages,
                'duration_seconds': duration
            }
        }
        
    except Exception as e:
        logger.error(f"监听Kafka数据时出错: {e}")
        return {
            'success': False,
            'message': f'监听出错: {str(e)}',
            'stats': {
                'total_messages': messages_received,
                'matched_messages': matched_messages,
                # start_time may not exist if the failure happened early.
                'duration_seconds': int(time.time() - start_time) if 'start_time' in locals() else 0
            }
        }
    finally:
        # Release the consumer and its temporary group on every exit path.
        if consumer:
            try:
                consumer.close()
                unregister_kafka_consumer()
                logger.info("🔒 Kafka消费者已安全关闭")
                
                # Delete the throwaway consumer group right away.
                if consumer_group_id:
                    delete_consumer_group_safe(kafka_config, consumer_group_id)
            except Exception as e:
                logger.error(f"❌ 关闭Kafka消费者时出错: {e}")

# API路由
@kafka_monitor_bp.route('/environments', methods=['GET'], endpoint='get_kafka_environments')
@log_api_call
def get_environments_list():
    """Return the list of configured Kafka environments.

    Returns:
        JSON success payload wrapping the environment list, or a
        500 JSON error payload when the lookup fails.
    """
    try:
        env_list = get_kafka_environments()
        payload = success_response(env_list, "获取Kafka环境成功")
        return jsonify(payload)
    except Exception as e:
        logger.error(f"获取Kafka环境失败: {e}")
        return jsonify(error_response(f"获取Kafka环境失败: {str(e)}")), 500

@kafka_monitor_bp.route('/topics', methods=['GET'], endpoint='get_kafka_topics')
@log_api_call
def get_topics_list():
    """Return the topic names available in a Kafka environment.

    Query params:
        kafka_config_id (int, required): id of the Kafka config to query.

    Returns:
        JSON success payload whose data is a plain list[str] of topic
        names; 400 when the parameter is missing; 500 on lookup failure.
    """
    try:
        kafka_config_id = request.args.get('kafka_config_id', type=int)
        if not kafka_config_id:
            return jsonify(error_response("缺少参数: kafka_config_id")), 400

        logger.info(f"开始获取Kafka配置: {kafka_config_id}")
        kafka_config = get_kafka_config_by_id(kafka_config_id)
        logger.info(f"Kafka配置获取成功: {kafka_config.get('config_name', 'unknown')}")

        logger.info("开始获取Topic列表")
        topics = get_kafka_topics(kafka_config)
        logger.info(f"Topic列表获取成功: {len(topics)} 个")

        # Coerce every entry to str so the payload is JSON-safe even if the
        # Kafka client hands back non-string topic objects.
        safe_topics = [t if isinstance(t, str) else str(t) for t in topics]

        logger.info(f"返回安全的Topic列表: {len(safe_topics)} 个")
        return jsonify(success_response(safe_topics, "获取Topic列表成功"))

    except Exception as e:
        logger.error(f"获取Topic列表失败: {e}")
        import traceback
        logger.error(f"详细错误信息: {traceback.format_exc()}")
        return jsonify(error_response(f"获取Topic列表失败: {str(e)}")), 500

@kafka_monitor_bp.route('/start-listen', methods=['POST'], endpoint='start_kafka_listen')
@log_api_call
def start_kafka_listen():
    """Start a background Kafka listening task.

    Expects a JSON body with:
        kafka_config_id (int, required), topic (str, required),
        consume_strategy (str, default 'latest'), start_timestamp (optional),
        enable_matching (bool, default False), logic_operator ('AND'/'OR'),
        match_conditions (list of condition dicts).

    Any already-running task and consumer is cancelled first. Returns a
    JSON payload describing the newly started task, 400 on missing
    parameters, or 500 on failure.
    """
    try:
        # BUGFIX: get_json() returns None for a missing/non-JSON body, which
        # previously raised AttributeError and surfaced as a 500. Fall back
        # to an empty dict so the missing-parameter check returns 400.
        data = request.get_json(silent=True) or {}
        kafka_config_id = data.get('kafka_config_id')
        topic = data.get('topic')
        consume_strategy = data.get('consume_strategy', 'latest')
        start_timestamp = data.get('start_timestamp')

        # Matching parameters
        enable_matching = data.get('enable_matching', False)
        logic_operator = data.get('logic_operator', 'AND')
        match_conditions = data.get('match_conditions', [])

        if not kafka_config_id or not topic:
            return jsonify(error_response("缺少必要参数: kafka_config_id, topic")), 400

        # Step 1: tear down any existing Kafka consumer.
        cleanup_existing_kafka_consumer()

        # Step 2: cancel a running task, if any, and give it time to stop.
        if is_any_task_running():
            current_task = get_current_task_info()
            logger.warning(f"检测到有任务正在执行，正在取消: {current_task.get('task_id', 'unknown')}")
            cancel_current_task()
            time.sleep(2)  # wait for the cancellation to take effect

        # Resolve the Kafka configuration for this environment.
        kafka_config = get_kafka_config_by_id(kafka_config_id)

        # Unique task id: timestamp + short random suffix.
        task_id = f"monitor_{int(time.time())}_{str(uuid.uuid4())[:8]}"

        # Reset the cancellation flag for the new task.
        TASK_CANCELLED.clear()

        # Drop stale messages so the UI only shows data from this run.
        clear_recent_messages()
        logger.info("🧹 清空旧消息，准备接收新消息")

        def monitor_task():
            """Background thread body: run the blocking listener loop."""
            try:
                logger.info(f"启动Kafka监听任务: {task_id}")
                result = monitor_kafka_data(
                    kafka_config=kafka_config,
                    topic=topic,
                    consume_strategy=consume_strategy,
                    enable_matching=enable_matching,
                    logic_operator=logic_operator,
                    match_conditions=match_conditions,
                    start_timestamp=start_timestamp
                )
                logger.info(f"监听任务完成: {result}")
            except Exception as e:
                logger.error(f"监听任务执行失败: {e}")
            finally:
                unregister_current_task()

        # Consumer config for display (enable_matching may tune it).
        kafka_consumer_config = get_kafka_consumer_config(kafka_config, topic, consume_strategy, start_timestamp, enable_matching)

        # BUGFIX: register the task BEFORE starting the thread. The original
        # started the thread first; a fast-finishing task could run its
        # finally-block unregister_current_task() before register_current_task()
        # executed, leaving a stale "running" task entry behind.
        thread = threading.Thread(target=monitor_task, daemon=True)
        register_current_task(task_id, thread, topic, match_conditions, kafka_consumer_config)
        thread.start()

        return jsonify(success_response({
            "task_id": task_id,
            "status": "running",
            "topic": topic,
            "consume_strategy": consume_strategy,
            "enable_matching": enable_matching,
            "logic_operator": logic_operator,
            "match_conditions_count": len(match_conditions),
            "kafka_config": kafka_consumer_config
        }, "Kafka监听任务已启动"))

    except Exception as e:
        logger.error(f"启动Kafka监听失败: {e}")
        return jsonify(error_response(f"启动监听失败: {str(e)}")), 500

@kafka_monitor_bp.route('/stop-listen', methods=['POST'], endpoint='stop_kafka_listen')
@log_api_call
def stop_kafka_listen():
    """Stop the currently running Kafka listening task.

    Returns:
        JSON payload with the stopped task id; 404 when no task is
        running; 500 when shutdown fails.
    """
    try:
        task_info = get_current_task_info()
        if not task_info:
            return jsonify(error_response("没有正在运行的监听任务")), 404

        # Cancel the background task, then release the consumer it owned.
        cancel_current_task()
        cleanup_existing_kafka_consumer()

        logger.info(f"监听任务 {task_info['task_id']} 已停止")

        payload = {
            "task_id": task_info['task_id'],
            "status": "stopped"
        }
        return jsonify(success_response(payload, "监听任务已停止"))

    except Exception as e:
        logger.error(f"停止Kafka监听失败: {e}")
        return jsonify(error_response(f"停止监听失败: {str(e)}")), 500

@kafka_monitor_bp.route('/status', methods=['GET'], endpoint='get_kafka_monitor_status')
@log_api_call
def get_kafka_monitor_status():
    """Report whether a Kafka listening task is running, with its counters.

    Returns:
        JSON payload: status "running" plus task details and consume/match
        counters when a task exists, otherwise status "idle".
    """
    try:
        task_info = get_current_task_info()

        # Guard clause: nothing running.
        if not task_info:
            return jsonify(success_response({
                "status": "idle"
            }, "当前没有运行的任务"))

        elapsed = int(time.time() - task_info['start_time'])
        counters = get_message_stats()  # consumed/matched message counters
        payload = {
            "task_id": task_info['task_id'],
            "topic": task_info['topic'],
            "status": "running",
            "duration_seconds": elapsed,
            "match_conditions_count": len(task_info.get('match_conditions', [])),
            "kafka_config": task_info.get('kafka_config', {}),
            "stats": {
                "total_consumed": counters['total_consumed'],
                "total_matched": counters['total_matched']
            }
        }
        return jsonify(success_response(payload, "有任务正在运行"))

    except Exception as e:
        logger.error(f"获取监听状态失败: {e}")
        return jsonify(error_response(f"获取状态失败: {str(e)}")), 500

@kafka_monitor_bp.route('/messages', methods=['GET'], endpoint='get_kafka_messages')
@log_api_call
def get_kafka_messages():
    """Return the buffered recent Kafka messages for frontend display."""
    try:
        recent = get_recent_messages()
        response_body = {
            'success': True,
            'message': '获取消息列表成功',
            'data': {
                'messages': recent,
                'total_count': len(recent)
            },
            'timestamp': datetime.now().isoformat()
        }
        return jsonify(response_body)

    except Exception as e:
        logger.error(f"获取消息列表失败: {e}")
        return jsonify(error_response(f"获取消息失败: {str(e)}")), 500

@kafka_monitor_bp.route('/messages/clear', methods=['POST'], endpoint='clear_kafka_messages')
@log_api_call
def clear_kafka_messages():
    """Empty the recent-message buffer on behalf of the frontend."""
    try:
        clear_recent_messages()
        response_body = {
            'success': True,
            'message': '消息列表已清空',
            'timestamp': datetime.now().isoformat()
        }
        return jsonify(response_body)

    except Exception as e:
        logger.error(f"清空消息列表失败: {e}")
        return jsonify(error_response(f"清空消息失败: {str(e)}")), 500

@kafka_monitor_bp.route('/test-match', methods=['POST'], endpoint='test_match_conditions')
@log_api_call
def test_match_conditions():
    """Dry-run the matching rules against a caller-supplied message (debug aid)."""
    try:
        data = request.get_json()
        test_message = data.get('test_message', {})
        enable_matching = data.get('enable_matching', True)
        logic_operator = data.get('logic_operator', 'OR')
        match_conditions = data.get('match_conditions', [])

        # Log the exact inputs so a failing match can be reproduced.
        logger.info(f"🧪 测试匹配条件:")
        logger.info(f"   enable_matching: {enable_matching}")
        logger.info(f"   logic_operator: {logic_operator}")
        logger.info(f"   match_conditions: {json.dumps(match_conditions, ensure_ascii=False)}")
        logger.info(f"   test_message keys: {list(test_message.keys())}")

        result = apply_match_conditions(test_message, enable_matching, logic_operator, match_conditions)

        # Describe the message shape to help debug path-based conditions.
        has_campaign_item = 'campaignItem' in test_message
        structure_info = {
            'top_level_keys': list(test_message.keys()),
            'has_campaignItem': has_campaign_item,
            'campaignItem_keys': list(test_message.get('campaignItem', {}).keys()) if has_campaign_item else []
        }

        return jsonify({
            'success': True,
            'message': '测试完成',
            'data': {
                'is_matched': result['is_matched'],
                'matched_conditions': result.get('matched_conditions', []),
                'logic_operator': result.get('logic_operator', logic_operator),
                'test_message_structure': structure_info
            },
            'timestamp': datetime.now().isoformat()
        })

    except Exception as e:
        logger.error(f"测试匹配条件失败: {e}")
        import traceback
        logger.error(f"详细错误: {traceback.format_exc()}")
        return jsonify(error_response(f"测试失败: {str(e)}")), 500

# NOTE: this module no longer uses WebSocket; listeners run as background tasks.
# For near-real-time updates, poll the /status endpoint to read task state.
