"""
Kafka转发服务 - 从源Topic消费消息并转发到目标Topic
采用单任务模式，启动新任务时自动停止之前的任务
"""

import functools
import json
import logging
import threading
import time
from datetime import datetime
from typing import Dict, List, Optional, Any

from flask import Blueprint, request, jsonify
from kafka import KafkaConsumer, KafkaProducer, TopicPartition
from kafka.errors import KafkaError

from app.utils.kafka_utils import delete_consumer_group_safe

# 响应工具函数
def success_response(data: Any = None, message: str = "操作成功") -> Dict:
    """Build the unified success payload shared by all endpoints.

    Args:
        data: payload body to return to the client (may be None).
        message: human-readable status message.

    Returns:
        Dict with ``success``/``message``/``data``/``timestamp`` keys.
    """
    stamped_at = datetime.now().isoformat()
    payload = {"success": True, "message": message}
    payload["data"] = data
    payload["timestamp"] = stamped_at
    return payload

def error_response(message: str, error_code: str = "UNKNOWN_ERROR") -> Dict:
    """Build the unified failure payload shared by all endpoints.

    Args:
        message: human-readable failure description.
        error_code: machine-readable error identifier.

    Returns:
        Dict with ``success=False``, ``data=None`` and an ISO timestamp.
    """
    return dict(
        success=False,
        message=message,
        error_code=error_code,
        data=None,
        timestamp=datetime.now().isoformat(),
    )

# 日志装饰器
def log_api_call(func):
    """Decorator that logs entry, success and failure of an API handler.

    Fix: the original copied only ``__name__`` by hand, losing ``__doc__``,
    ``__qualname__``, ``__module__`` and ``__wrapped__``. ``functools.wraps``
    preserves all wrapper metadata (important for Flask endpoint naming
    and for introspection/debugging).

    Exceptions raised by the wrapped handler are logged and re-raised.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        logger.info(f"API调用: {func.__name__}")
        try:
            result = func(*args, **kwargs)
            logger.info(f"API调用成功: {func.__name__}")
            return result
        except Exception as e:
            logger.error(f"API调用失败: {func.__name__} - {e}")
            raise
    return wrapper

def log_business_logic(operation_name: str):
    """Decorator factory that logs start/completion/failure of *operation_name*.

    Fix: replaced the manual ``__name__`` copy with ``functools.wraps`` so
    the wrapped function keeps all of its metadata (``__doc__``,
    ``__qualname__``, ``__wrapped__``), consistent with ``log_api_call``.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            logger.info(f"业务逻辑开始: {operation_name}")
            try:
                result = func(*args, **kwargs)
                logger.info(f"业务逻辑完成: {operation_name}")
                return result
            except Exception as e:
                logger.error(f"业务逻辑失败: {operation_name} - {e}")
                raise
        return wrapper
    return decorator

# 匹配逻辑函数（从Kafka监听复制）
def extract_field_value(data: dict, path: str):
    """Resolve a dotted *path* (e.g. ``"a.b.c"``) against nested dicts.

    Fix: narrowed the bare ``except:`` (which also swallowed
    ``KeyboardInterrupt``/``SystemExit``) to the lookup failures that
    actually mean "field absent".

    Returns:
        The value found, or None when any segment is missing or an
        intermediate value is not indexable by a string key.
    """
    value = data
    try:
        for key in path.split('.'):
            value = value[key]
    except (KeyError, TypeError, IndexError):
        # Missing key, non-subscriptable intermediate, or bad index →
        # treat as "no such field", matching the original contract.
        return None
    return value

def apply_match_conditions(message: dict, enable_matching: bool, logic_operator: str, match_conditions: list) -> dict:
    """Evaluate the configured match conditions against *message*.

    Mirrors the Kafka-listening feature's semantics:
    - matching disabled → every message matches;
    - matching enabled with no usable conditions → nothing matches
      (consistent with "condition present but value empty");
    - otherwise conditions are combined with AND (all must hold) or,
      for any other operator value, OR (any may hold).

    Returns:
        Result dict carrying the original message, the match verdict,
        details of satisfied conditions and an ISO timestamp.
    """
    def _result(matched: bool, details: list, with_operator: bool) -> dict:
        # Preserve the exact key layout of the public result shape.
        out = {
            "original_message": message,
            "is_matched": matched,
            "matched_conditions": details,
        }
        if with_operator:
            out["logic_operator"] = logic_operator
        out["timestamp"] = datetime.now().isoformat()
        return out

    if not enable_matching:
        # Matching disabled: pass every message through.
        return _result(True, [], False)

    if not match_conditions:
        # Filter strategy without any conditions matches nothing.
        return _result(False, [], False)

    details = []
    verdicts = []

    for condition in match_conditions:
        condition_kind = condition.get("type")
        key = condition.get("key", "")
        value = condition.get("value", "")

        if not value:
            # Empty expected value → condition is a no-op, skip it.
            continue

        hit = False
        detail = None

        if condition_kind == "key_value":
            if not key:
                # key missing → invalid condition, skip it.
                continue
            actual = extract_field_value(message, key)
            if actual is not None and str(actual) == str(value):
                hit = True
                detail = {
                    "type": "key_value",
                    "key": key,
                    "expected_value": value,
                    "actual_value": actual,
                }
        elif condition_kind == "fuzzy":
            # Substring search over the whole serialized message.
            if str(value) in json.dumps(message, ensure_ascii=False):
                hit = True
                detail = {
                    "type": "fuzzy",
                    "search_value": value,
                    "found_in": "message_content",
                }

        verdicts.append(hit)
        if hit and detail:
            details.append(detail)

    if logic_operator == "AND":
        # AND: every evaluated condition must hold (and at least one was evaluated).
        matched = bool(verdicts) and all(verdicts)
    else:
        # Anything other than AND is treated as OR: any hit suffices.
        matched = bool(verdicts) and any(verdicts)

    return _result(matched, details, True)

def set_field_value(data: dict, path: str, value: Any) -> dict:
    """Write *value* at the dotted *path* inside *data*, creating
    intermediate dicts as needed.

    Path segments are whitespace-trimmed (``" a . b "`` → ``a.b``).
    Mutates *data* in place and also returns it; when an existing
    non-dict value blocks nesting, or anything raises, the error is
    logged and the (possibly partially modified) dict is returned.
    """
    try:
        segments = [part.strip() for part in path.strip().split('.')]
        node = data
        # Descend through every segment but the last, building dicts on demand.
        for segment in segments[:-1]:
            if segment not in node:
                node[segment] = {}
            if not isinstance(node[segment], dict):
                # A scalar already occupies this slot; cannot nest further.
                logger.error(f"字段 {segment} 的值不是字典，无法设置嵌套字段")
                return data
            node = node[segment]
        # The final segment receives the value.
        node[segments[-1]] = value
        return data
    except Exception as e:
        logger.error(f"设置字段值失败: {path} = {value}, 错误: {e}")
        return data

def apply_replace_rules(message: dict, replace_rules: list) -> dict:
    """Return a deep copy of *message* with each replace rule applied.

    Each rule carries ``fieldPath`` (dotted), ``newValue`` and
    ``valueType`` (string/number/boolean/null); the new value is coerced
    to the declared type before being written via ``set_field_value``.
    Rules with an empty path, or whose conversion fails, are skipped so
    one bad rule never aborts the batch. With no rules, the original
    message object is returned untouched.
    """
    if not replace_rules:
        return message

    # Deep-copy so the caller's original message is never mutated.
    import copy
    updated = copy.deepcopy(message)

    for rule in replace_rules:
        path = rule.get('fieldPath', '')
        raw_value = rule.get('newValue', '')
        declared_type = rule.get('valueType', 'string')

        if not path:
            continue

        try:
            # Coerce the raw value to the declared type.
            if declared_type == 'number':
                coerced = float(raw_value) if '.' in str(raw_value) else int(raw_value)
            elif declared_type == 'boolean':
                coerced = str(raw_value).lower() in ['true', '1', 'yes']
            elif declared_type == 'null':
                coerced = None
            else:  # string
                coerced = str(raw_value)

            updated = set_field_value(updated, path, coerced)
            logger.info(f"字段替换成功: {path} = {coerced} ({declared_type})")

        except Exception as e:
            logger.error(f"字段替换失败: {path}, 错误: {e}")
            continue

    return updated

# 第三方配置获取函数
def get_kafka_config_by_id(kafka_config_id: int) -> Dict:
    """Load one Kafka connection config from the ``service_config`` table.

    Merges the row's columns with its JSON ``extra_config`` into a flat
    dict; ``bootstrap_servers`` is always normalized to a list and
    defaults to ``["host:port"]`` when absent from ``extra_config``.

    Raises:
        ValueError: no non-deleted row with ``service_type='kafka'``
            matches *kafka_config_id*.
        Exception: any DB/JSON failure is logged and re-raised.
    """
    try:
        # Imports kept inside the function — presumably to avoid import
        # cycles / keep module import light; TODO confirm.
        from app.utils.mysql_db import get_db_connection
        import pymysql.cursors
        import json
        
        with get_db_connection() as conn:
            cursor = conn.cursor(pymysql.cursors.DictCursor)
            
            sql = """
                SELECT id, config_name, host, port, username, password, 
                       protocol, extra_config
                FROM service_config 
                WHERE id = %s AND service_type = 'kafka' AND is_deleted = 0
            """
            
            cursor.execute(sql, (kafka_config_id,))
            result = cursor.fetchone()
            
            if not result:
                raise ValueError(f"未找到ID为 {kafka_config_id} 的Kafka配置")
            
            # extra_config may arrive as a JSON string or an already-decoded dict.
            extra_config = result.get('extra_config', {})
            if isinstance(extra_config, str):
                extra_config = json.loads(extra_config)
            
            # Flatten row columns + extra_config into the shape the Kafka
            # client builders in this module expect.
            config = {
                'id': result['id'],
                'config_name': result['config_name'],
                'host': result['host'],
                'port': result['port'],
                'username': result.get('username'),
                'password': result.get('password'),
                'protocol': result.get('protocol'),
                'bootstrap_servers': extra_config.get('bootstrap_servers', [f"{result['host']}:{result['port']}"]),
                'security_protocol': extra_config.get('security_protocol'),
                'sasl_mechanism': extra_config.get('sasl_mechanism', 'PLAIN'),
                'sasl_username': result.get('username'),
                'sasl_password': result.get('password')
            }
            
            # Guarantee list form for bootstrap_servers.
            if isinstance(config['bootstrap_servers'], str):
                config['bootstrap_servers'] = [config['bootstrap_servers']]
                
            return config
            
    except Exception as e:
        logger.error(f"获取Kafka配置失败: {e}")
        raise e

# Flask blueprint for all /api/kafka-forward endpoints.
kafka_forward_bp = Blueprint('kafka_forward', __name__, url_prefix='/api/kafka-forward')

# Module-level logger.
logger = logging.getLogger(__name__)

# Single-task registry: at most one forward task runs at a time; reads and
# writes of CURRENT_TASK go through TASK_LOCK.
CURRENT_TASK = None
TASK_LOCK = threading.Lock()
# Shared run statistics, guarded by STATS_LOCK; reset whenever a new task's
# forward loop starts (see KafkaForwardTask._forward_loop).
FORWARD_STATS = {
    'processed_count': 0,
    'forwarded_count': 0,
    'filtered_count': 0,
    'error_count': 0,
    'start_time': None,
    'last_message_time': None
}
STATS_LOCK = threading.Lock()

class KafkaForwardTask:
    """Single Kafka forward task.

    Consumes messages from ``config['source_topic']`` and republishes them
    to ``config['target_topic']`` on a background daemon thread, applying
    the configured forward strategy (``all`` / ``filter`` / ``replace``).
    Lifecycle: ``start()`` spawns the thread, ``stop()`` signals it via the
    cooperative ``running`` flag and tears down the Kafka clients.
    Statistics are accumulated in the module-level FORWARD_STATS under
    STATS_LOCK. Only one instance is meant to be active at a time (see
    ``register_current_task``).
    """
    
    def __init__(self, task_id: str, config: Dict):
        self.task_id = task_id
        self.config = config
        self.consumer = None   # KafkaConsumer, created lazily in _forward_loop
        self.producer = None   # KafkaProducer, created lazily in _forward_loop
        self.thread = None     # worker thread, created in start()
        self.running = False   # cooperative stop flag read by the loop
        self.start_time = datetime.now()
        self.forwarded_count = 0  # running count of messages already forwarded
        self.max_forward_count = config.get('max_forward_count', 0)  # forward cap; 0 = unlimited
    
    def _calculate_optimal_batch_size(self) -> int:
        """
        Pick the per-poll batch size (``max_poll_records``).

        Strategy:
        1. config ``batch_size`` > 0: honour the user's value as-is.
        2. ``batch_size`` 0/unset: auto-tune —
           - base on consume strategy: latest=100, earliest=500, timestamp=300
           - ``filter`` forwarding polls +50% (some messages get dropped)
           - a small max_forward_count (<200) shrinks the batch accordingly
           - final value clamped to [50, 1000]
        """
        # User-configured batch size, if any.
        user_batch_size = self.config.get('batch_size')
        
        # A positive explicit value wins (backwards-compatible behaviour).
        if user_batch_size and user_batch_size > 0:
            logger.info(f"📊 使用用户配置的批量大小: {user_batch_size}条/次")
            return user_batch_size
        
        # Zero/unset → auto-tune below.
        logger.info(f"📊 启用智能批量调整策略")
        
        # Base batch derived from the consume strategy.
        consume_strategy = self.config.get('consume_strategy', 'latest')
        base_batch = {
            'latest': 100,      # real-time forwarding
            'earliest': 500,    # historical backfill
            'timestamp': 300    # time-ranged replay
        }.get(consume_strategy, 100)
        
        # Filtering drops messages, so poll more per round.
        forward_strategy = self.config.get('forward_strategy', 'all')
        if forward_strategy == 'filter':
            base_batch = int(base_batch * 1.5)
        
        # Respect a small forward cap by shrinking the batch.
        max_forward = self.max_forward_count
        if max_forward > 0 and max_forward < 200:
            base_batch = min(base_batch, max(50, max_forward))
        
        # Clamp to a sane range.
        optimal_batch = max(50, min(base_batch, 1000))
        
        logger.info(
            f"📊 智能批量策略: consume={consume_strategy}, "
            f"forward={forward_strategy}, limit={max_forward} → {optimal_batch}条/次"
        )
        
        return optimal_batch
        
    def start(self) -> bool:
        """Spawn the daemon worker thread running ``_forward_loop``.

        Returns True on success, False when thread creation raised.
        """
        try:
            self.running = True
            self.thread = threading.Thread(target=self._forward_loop, daemon=True)
            self.thread.start()
            logger.info(f"Kafka转发任务 {self.task_id} 已启动")
            return True
        except Exception as e:
            logger.error(f"启动转发任务失败: {e}")
            self.running = False
            return False
    
    def stop(self) -> bool:
        """Signal the loop to exit, join the thread, close the Kafka
        clients and best-effort delete this task's consumer group.

        Returns True unless teardown itself raised.
        """
        try:
            self.running = False
            if self.thread and self.thread.is_alive():
                # Bounded join: the loop polls with a 1s timeout, so 5s is ample.
                self.thread.join(timeout=5)
            
            if self.consumer:
                self.consumer.close()
            if self.producer:
                self.producer.close()
            
            # Clean up the consumer group after a long-running task stops.
            try:
                kafka_config = get_kafka_config_by_id(self.config['kafka_config_id'])
                if kafka_config:
                    group_id = f"forward_group_{self.task_id}"
                    delete_consumer_group_safe(kafka_config, group_id)
            except Exception as e:
                logger.warning(f"清理 consumer group 时出错（已忽略）: {e}")
            
            logger.info(f"Kafka转发任务 {self.task_id} 已停止")
            return True
        except Exception as e:
            logger.error(f"停止转发任务失败: {e}")
            return False
    
    def _forward_loop(self) -> None:
        """Worker-thread body: build the Kafka clients, then poll, process
        and forward until ``running`` is cleared or the forward cap is hit."""
        global FORWARD_STATS
        
        try:
            # Resolve connection settings for both consumer and producer.
            kafka_config = get_kafka_config_by_id(self.config['kafka_config_id'])
            logger.info(f"获取Kafka配置: {kafka_config['config_name']} - {kafka_config['bootstrap_servers']}")
            
            # Consume strategy: latest / earliest / timestamp.
            consume_strategy = self.config.get('consume_strategy', 'latest')
            logger.info(f"消费策略: {consume_strategy}")
            
            # Per-poll batch size (user-set or auto-tuned).
            batch_size = self._calculate_optimal_batch_size()
            
            # Robust string deserializer: returns None on decode failure so
            # broken records are skipped downstream instead of crashing poll().
            def safe_string_deserializer(x):
                if not x:
                    return None
                try:
                    return x.decode('utf-8')
                except (UnicodeDecodeError, AttributeError) as e:
                    # Decode failed — swallow and let _process_message skip it.
                    logger.warning(f"⚠️ Kafka消息解码失败，跳过: {str(e)[:100]}")
                    return None
            
            consumer_config = {
                'bootstrap_servers': kafka_config['bootstrap_servers'],
                'group_id': f"forward_group_{self.task_id}",
                'auto_offset_reset': consume_strategy if consume_strategy != 'timestamp' else 'earliest',
                'enable_auto_commit': True,
                'auto_commit_interval_ms': 1000,
                'max_poll_records': batch_size,  # upper bound on records per poll()
                'value_deserializer': safe_string_deserializer,  # decode-safe deserializer
                'request_timeout_ms': 120000,      # raised to 120s
                'session_timeout_ms': 60000,        # raised to 60s
                'heartbeat_interval_ms': 10000,     # 10s heartbeat
                'max_poll_interval_ms': 300000,     # max 5min between polls
                'connections_max_idle_ms': 540000   # max 9min idle connections
            }
            
            logger.info(f"批量大小设置: 每次poll最多获取 {batch_size} 条消息")
            
            producer_config = {
                'bootstrap_servers': kafka_config['bootstrap_servers'],
                'value_serializer': lambda x: json.dumps(x).encode('utf-8') if isinstance(x, dict) else str(x).encode('utf-8'),
                'request_timeout_ms': 120000,       # request timeout 120s
                'delivery_timeout_ms': 180000,      # delivery timeout 180s
                'max_block_ms': 60000,              # block at most 60s
                'connections_max_idle_ms': 540000,  # max 9min idle connections
                'retries': 3,                       # retry sends 3 times
                'acks': 1                           # wait for leader ack only
            }
            
            # Attach SASL auth to both clients when the config demands it.
            if kafka_config.get('security_protocol'):
                auth_config = {
                    'security_protocol': kafka_config['security_protocol'],
                    'sasl_mechanism': kafka_config.get('sasl_mechanism', 'PLAIN'),
                    'sasl_plain_username': kafka_config.get('sasl_username'),
                    'sasl_plain_password': kafka_config.get('sasl_password')
                }
                consumer_config.update(auth_config)
                producer_config.update(auth_config)
            
            logger.info(f"创建Kafka消费者，源Topic: {self.config['source_topic']}")
            logger.info(f"消费者配置: bootstrap_servers={kafka_config['bootstrap_servers']}, group_id=forward_group_{self.task_id}")
            
            # Timestamp strategy uses manual assign() + seek-by-time.
            if consume_strategy == 'timestamp':
                start_timestamp = self.config.get('start_timestamp')
                if start_timestamp:
                    logger.info(f"使用时间戳消费策略，开始时间戳: {start_timestamp}")
                    
                    # Create the consumer without subscribing to the topic.
                    self.consumer = KafkaConsumer(**consumer_config)
                    logger.info("Kafka消费者创建成功（时间戳模式）")
                    
                    # Discover partitions and assign them manually.
                    partitions = self.consumer.partitions_for_topic(self.config['source_topic'])
                    if partitions:
                        topic_partitions = [TopicPartition(self.config['source_topic'], p) for p in partitions]
                        self.consumer.assign(topic_partitions)
                        
                        # Translate the timestamp into per-partition offsets.
                        timestamp_dict = {tp: start_timestamp for tp in topic_partitions}
                        offsets = self.consumer.offsets_for_times(timestamp_dict)
                        
                        # Seek each partition to its resolved offset.
                        for tp, offset_metadata in offsets.items():
                            if offset_metadata is not None:
                                self.consumer.seek(tp, offset_metadata.offset)
                                logger.info(f"分区 {tp.partition} 设置偏移量为: {offset_metadata.offset}")
                            else:
                                # No message at/after that timestamp: start from the end.
                                self.consumer.seek_to_end(tp)
                                logger.warning(f"分区 {tp.partition} 未找到时间戳 {start_timestamp} 对应的消息，从最新位置开始")
                    else:
                        logger.error(f"无法获取Topic {self.config['source_topic']} 的分区信息")
                else:
                    # NOTE(review): when start_timestamp is missing, self.consumer
                    # stays None, so the poll() below raises AttributeError every
                    # second — consider aborting the loop here. TODO confirm.
                    logger.error("时间戳消费策略缺少start_timestamp参数")
            else:
                # Non-timestamp modes subscribe normally.
                self.consumer = KafkaConsumer(
                    self.config['source_topic'],
                    **consumer_config
                )
                logger.info("Kafka消费者创建成功（订阅模式）")
            
            logger.info(f"创建Kafka生产者，目标Topic: {self.config['target_topic']}")
            self.producer = KafkaProducer(**producer_config)
            logger.info("Kafka生产者创建成功")
            
            # Reset the shared statistics for this run.
            with STATS_LOCK:
                FORWARD_STATS = {
                    'processed_count': 0,
                    'forwarded_count': 0,
                    'filtered_count': 0,
                    'error_count': 0,
                    'start_time': datetime.now().isoformat(),
                    'last_message_time': None
                }
            
            batch_messages = []
            process_interval = self.config.get('process_interval', 1000) / 1000.0  # ms → seconds
            
            logger.info(f"开始转发消息: {self.config['source_topic']} -> {self.config['target_topic']}")
            if self.max_forward_count > 0:
                logger.info(f"转发条数限制: 最多转发 {self.max_forward_count} 条消息")
            else:
                logger.info(f"转发条数限制: 无限制")
            
            while self.running:
                # Stop once the forward cap has been reached.
                if self.max_forward_count > 0 and self.forwarded_count >= self.max_forward_count:
                    logger.info(f"已达到转发条数限制 {self.max_forward_count}，停止任务")
                    self.running = False
                    break
                try:
                    # Poll with a 1s timeout so the stop flag is re-checked often.
                    message_batch = self.consumer.poll(timeout_ms=1000)
                    
                    if message_batch:
                        logger.info(f"消费到消息批次，分区数: {len(message_batch)}")
                    
                    for topic_partition, messages in message_batch.items():
                        logger.info(f"处理分区 {topic_partition.partition}，消息数: {len(messages)}")
                        for message in messages:
                            if not self.running:
                                break
                            
                            logger.info(f"消费消息 - 分区: {message.partition}, 偏移量: {message.offset}, 时间戳: {message.timestamp}")
                            logger.info(f"消息内容预览: {str(message.value)[:200]}...")
                                
                            # Apply the forward strategy; None means "skip".
                            processed = self._process_message(message)
                            if processed:
                                batch_messages.append(processed)
                                logger.info(f"消息处理成功，添加到转发队列")
                            
                            # Book-keeping for the /stats endpoint.
                            with STATS_LOCK:
                                FORWARD_STATS['processed_count'] += 1
                                FORWARD_STATS['last_message_time'] = datetime.now().isoformat()
                    
                    # Forward everything gathered from this poll round.
                    if batch_messages:
                        # Trim the batch if it would overshoot the forward cap.
                        if self.max_forward_count > 0:
                            remaining_count = self.max_forward_count - self.forwarded_count
                            if remaining_count <= 0:
                                logger.info(f"已达到转发条数限制，跳过剩余 {len(batch_messages)} 条消息")
                                break
                            elif len(batch_messages) > remaining_count:
                                # Forward only up to the remaining quota.
                                batch_messages = batch_messages[:remaining_count]
                                logger.info(f"转发条数限制：只转发前 {remaining_count} 条消息")
                        
                        self._forward_batch(batch_messages)
                        batch_messages = []
                    
                    # Throttle between poll rounds (configured in ms, applied in s).
                    time.sleep(process_interval)
                    
                except Exception as e:
                    logger.error(f"处理消息时出错: {e}")
                    with STATS_LOCK:
                        FORWARD_STATS['error_count'] += 1
                    time.sleep(1)
            
            # Flush anything still buffered when the loop exits.
            if batch_messages:
                self._forward_batch(batch_messages)
                
        except Exception as e:
            logger.error(f"转发任务执行失败: {e}")
            with STATS_LOCK:
                FORWARD_STATS['error_count'] += 1
        finally:
            logger.info(f"转发任务 {self.task_id} 执行结束")
    
    def _process_message(self, message) -> Optional[Dict]:
        """Turn one consumed record into a forwardable payload.

        Returns the (possibly rewritten) message dict, or None when the
        record failed deserialization or was rejected by the filter
        strategy.
        """
        try:
            # The deserializer guarantees str or None here.
            if message.value is None:
                return None  # undecodable record — skip it
            
            message_str = message.value
            
            # Parse JSON; non-JSON payloads are wrapped as raw strings.
            try:
                message_data = json.loads(message_str)
            except json.JSONDecodeError:
                logger.warning(f"⚠️ 消息不是有效JSON，作为字符串处理: {message_str[:100]}...")
                message_data = {"raw_message": message_str}
            
            # Dispatch on the forward strategy: all / filter / replace.
            strategy = self.config.get('forward_strategy', 'all')
            
            if strategy == 'filter':
                # Condition-based filtering.
                enable_matching = True
                logic_operator = self.config.get('logic_operator', 'AND')
                match_conditions = self.config.get('match_conditions', [])
                
                result = apply_match_conditions(
                    message_data,
                    enable_matching,
                    logic_operator,
                    match_conditions
                )
                
                if not result["is_matched"]:
                    with STATS_LOCK:
                        FORWARD_STATS['filtered_count'] += 1
                    return None
                
                # Matched — forward the original payload.
                return message_data
                
            elif strategy == 'replace':
                # Field-rewrite strategy.
                replace_rules = self.config.get('replace_rules', [])
                if replace_rules:
                    # Apply the configured replacements on a copy.
                    replaced_message = apply_replace_rules(message_data, replace_rules)
                    return replaced_message
                else:
                    # No rules configured: pass through unchanged.
                    return message_data
            
            else:
                # 'all': forward everything unchanged.
                return message_data
            
        except Exception as e:
            logger.error(f"处理消息失败: {e}")
            return None
    
    def _forward_batch(self, messages: List[Dict]):
        """Synchronously send *messages* to the target topic.

        Each send is awaited (10s timeout) so failures surface
        immediately; on any failure the whole batch is counted as errors.
        Also clears ``running`` once the forward cap is reached.
        """
        try:
            logger.info(f"开始批量转发 {len(messages)} 条消息到目标Topic: {self.config['target_topic']}")
            
            for i, message in enumerate(messages):
                logger.info(f"转发消息 {i+1}/{len(messages)}: {json.dumps(message, ensure_ascii=False)[:300]}...")
                
                future = self.producer.send(
                    self.config['target_topic'],
                    value=message
                )
                
                # Block until the broker confirms (or 10s elapses).
                try:
                    record_metadata = future.get(timeout=10)
                    logger.info(f"消息转发成功 - 分区: {record_metadata.partition}, 偏移量: {record_metadata.offset}")
                except Exception as send_error:
                    logger.error(f"消息发送失败: {send_error}")
                    raise send_error
            
            # Flush the producer's buffer.
            self.producer.flush()
            
            # Update the per-task forward counter.
            self.forwarded_count += len(messages)
            
            with STATS_LOCK:
                FORWARD_STATS['forwarded_count'] += len(messages)
            
            logger.info(f"批量转发完成: {len(messages)} 条消息全部发送成功，累计已转发: {self.forwarded_count} 条")
            
            # Ask the loop to stop once the forward cap is reached.
            if self.max_forward_count > 0 and self.forwarded_count >= self.max_forward_count:
                logger.info(f"已达到转发条数限制 {self.max_forward_count}，任务即将停止")
                self.running = False
            
        except Exception as e:
            logger.error(f"批量转发消息失败: {e}")
            with STATS_LOCK:
                FORWARD_STATS['error_count'] += len(messages)

def register_current_task(task: KafkaForwardTask):
    """Register *task* as the single active forward task.

    Any previously registered task is stopped first; all of this happens
    under TASK_LOCK so callers never observe two live tasks.
    """
    global CURRENT_TASK
    with TASK_LOCK:
        previous = CURRENT_TASK
        if previous is not None:
            previous.stop()
        CURRENT_TASK = task

def get_current_task() -> Optional[KafkaForwardTask]:
    """Return the active forward task, or None when nothing is running."""
    with TASK_LOCK:
        task = CURRENT_TASK
    return task

@kafka_forward_bp.route('/start', methods=['POST'], endpoint='start_kafka_forward')
@log_api_call
def start_kafka_forward():
    """POST /api/kafka-forward/start — launch a new forward task.

    Validates the JSON payload, resolves the Kafka config, builds the
    task configuration and starts the background task; any previously
    running task is stopped by ``register_current_task``.
    """
    try:
        payload = request.get_json()

        # Reject requests missing any mandatory field.
        for field in ('kafka_config_id', 'source_topic', 'target_topic'):
            if not payload.get(field):
                return jsonify(error_response(f"缺少必需参数: {field}")), 400

        # Resolve the connection config up front so a bad id fails fast.
        kafka_config = get_kafka_config_by_id(payload['kafka_config_id'])
        if not kafka_config:
            return jsonify(error_response("无效的Kafka配置ID")), 400

        task_config = {
            'kafka_config_id': payload['kafka_config_id'],
            'source_topic': payload['source_topic'],
            'target_topic': payload['target_topic'],
            'consume_strategy': payload.get('consume_strategy', 'latest'),
            'start_timestamp': payload.get('start_timestamp'),
            'forward_strategy': payload.get('forward_strategy', 'all'),
            'logic_operator': payload.get('logic_operator', 'AND'),
            'match_conditions': payload.get('match_conditions', []),
            'replace_rules': payload.get('replace_rules', []),
            'batch_size': payload.get('batch_size', 100),
            'process_interval': payload.get('process_interval', 1000),
            # 0 means unlimited; >0 caps the number of forwarded messages.
            'max_forward_count': payload.get('max_forward_count', 0)
        }

        # Task ids are derived from the wall clock (second resolution).
        task_id = f"forward_{int(time.time())}"

        task = KafkaForwardTask(task_id, task_config)
        if not task.start():
            return jsonify(error_response("启动Kafka转发任务失败")), 500

        register_current_task(task)
        return jsonify(success_response(
            {
                'task_id': task_id,
                'status': 'running',
                'config': task_config
            },
            "Kafka转发任务已启动"
        ))

    except Exception as e:
        logger.error(f"启动Kafka转发任务失败: {e}")
        return jsonify(error_response(f"启动Kafka转发任务失败: {str(e)}")), 500

@kafka_forward_bp.route('/stop', methods=['POST'], endpoint='stop_kafka_forward')
@log_api_call
def stop_kafka_forward():
    """POST /api/kafka-forward/stop — stop the active forward task, if any."""
    global CURRENT_TASK
    try:
        task = get_current_task()
        if task is None:
            return jsonify(error_response("没有正在运行的转发任务")), 400

        if not task.stop():
            return jsonify(error_response("停止Kafka转发任务失败")), 500

        # Clear the singleton slot under the lock.
        with TASK_LOCK:
            CURRENT_TASK = None
        return jsonify(success_response({}, "Kafka转发任务已停止"))

    except Exception as e:
        logger.error(f"停止Kafka转发任务失败: {e}")
        return jsonify(error_response(f"停止Kafka转发任务失败: {str(e)}")), 500

@kafka_forward_bp.route('/status', methods=['GET'], endpoint='get_kafka_forward_status')
@log_api_call
def get_kafka_forward_status():
    """GET /api/kafka-forward/status — report the active task's state."""
    try:
        task = get_current_task()
        if task is None:
            # Idle is still a successful response, with a null payload.
            return jsonify(success_response(None, "没有正在运行的转发任务"))

        return jsonify(success_response({
            'task_id': task.task_id,
            'status': 'running' if task.running else 'stopped',
            'config': task.config,
            'start_time': task.start_time.isoformat()
        }, "获取任务状态成功"))

    except Exception as e:
        logger.error(f"获取任务状态失败: {e}")
        return jsonify(error_response(f"获取任务状态失败: {str(e)}")), 500

@kafka_forward_bp.route('/stats', methods=['GET'], endpoint='get_kafka_forward_stats')
@log_api_call
def get_kafka_forward_stats():
    """GET /api/kafka-forward/stats — snapshot the global forward counters."""
    try:
        # Copy under the lock so readers never see a half-updated dict.
        with STATS_LOCK:
            snapshot = dict(FORWARD_STATS)
        return jsonify(success_response(snapshot, "获取转发统计成功"))
    except Exception as e:
        logger.error(f"获取转发统计失败: {e}")
        return jsonify(error_response(f"获取转发统计失败: {str(e)}")), 500