#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
API监听服务模块 - 简化通用版本
基于 api_service.py 的成熟逻辑，实现通用的爬虫队列接口
执行流程：
1. 选择爬虫队列接口配置（从service_config加载）
2. 用户修改JSON模板适配不同业务
3. 用户手动输入监听的Kafka topic
4. 启动Kafka监听（从最新位置开始）
5. 触发第三方API接口
6. 等待Kafka数据返回并进行匹配
"""

import asyncio
import json
import time
import logging
import random
import string
import threading
import hashlib
from datetime import datetime
from typing import Optional, Dict, Any, List

import httpx
import pymysql
from flask import Blueprint, request, jsonify
from kafka import KafkaConsumer
from kafka.coordinator.assignors.range import RangePartitionAssignor

from app.utils.logger import log_api_call, log_database_operation, log_business_logic
from app.utils.kafka_utils import delete_consumer_group_safe

# Global single-task bookkeeping: at most one listener task runs at a time.
CURRENT_TASK = None  # dict describing the active task (see register_current_task), or None
TASK_LOCK = threading.Lock()  # guards CURRENT_TASK and CURRENT_KAFKA_CONSUMER
TASK_CANCELLED = threading.Event()  # set to ask the running task to stop early
CURRENT_KAFKA_CONSUMER = None  # the live KafkaConsumer instance, so leftovers can be closed
# NOTE(review): mid-file import — harmless because Python imports are idempotent,
# but it belongs in the import block at the top of the file.
from app.utils.mysql_db import get_db_connection

# Flask blueprint: every route below is mounted under /api/api-listener
api_listener_bp = Blueprint('api_listener', __name__, url_prefix='/api/api-listener')

# 任务管理函数
def is_any_task_running() -> bool:
    """Return True while a listener task is registered as active."""
    with TASK_LOCK:
        return CURRENT_TASK is not None

def register_current_task(execution_id: str, thread: threading.Thread, config_id: str, topic: str, match_value: str):
    """Record the newly started listener task as the single active task."""
    global CURRENT_TASK
    task_record = {
        'execution_id': execution_id,
        'thread': thread,
        'config_id': config_id,
        'topic': topic,
        'match_value': match_value,
        'start_time': time.time(),
    }
    with TASK_LOCK:
        CURRENT_TASK = task_record

def unregister_current_task():
    """Forget the active task and reset the cancellation flag."""
    global CURRENT_TASK
    with TASK_LOCK:
        CURRENT_TASK = None
        # A cleared flag means the next task starts un-cancelled.
        TASK_CANCELLED.clear()

def get_current_task_info() -> Dict:
    """Return a shallow-copy snapshot of the active task, or {} when idle."""
    with TASK_LOCK:
        task = CURRENT_TASK
        return dict(task) if task else {}

def cancel_current_task():
    """Flag the running task for cancellation; return True if one was running."""
    with TASK_LOCK:
        task = CURRENT_TASK
        if not task:
            return False
        logger.info(f"🚫 正在取消任务: {task['execution_id']}")
        TASK_CANCELLED.set()
        return True

def is_task_cancelled() -> bool:
    """True once cancel_current_task() has flagged the running task."""
    return TASK_CANCELLED.is_set()

def cleanup_existing_kafka_consumer():
    """Close and drop any leftover Kafka consumer from a previous run."""
    global CURRENT_KAFKA_CONSUMER
    with TASK_LOCK:
        leftover = CURRENT_KAFKA_CONSUMER
        if not leftover:
            return
        try:
            logger.info("🧹 发现残留的Kafka消费者，正在清理...")
            leftover.close()
            logger.info("✅ 残留的Kafka消费者已清理")
        except Exception as e:
            logger.warning(f"清理Kafka消费者时出错: {e}")
        finally:
            # Drop the reference even when close() failed.
            CURRENT_KAFKA_CONSUMER = None

def register_kafka_consumer(consumer):
    """Publish ``consumer`` as the module-wide active Kafka consumer."""
    global CURRENT_KAFKA_CONSUMER
    with TASK_LOCK:
        CURRENT_KAFKA_CONSUMER = consumer

def unregister_kafka_consumer():
    """Drop the module-wide reference to the active Kafka consumer."""
    global CURRENT_KAFKA_CONSUMER
    with TASK_LOCK:
        CURRENT_KAFKA_CONSUMER = None

# Module-level logger; bound at import time, so the functions defined above
# may safely reference it at call time.
logger = logging.getLogger(__name__)

def format_timestamp() -> str:
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    now = datetime.now()
    return now.strftime("%Y-%m-%d %H:%M:%S")

def success_response(data: Any = None, message: str = "操作成功") -> Dict:
    """Build the uniform success envelope shared by every endpoint."""
    envelope = {"success": True, "message": message}
    envelope["data"] = data
    envelope["timestamp"] = datetime.now().isoformat()
    return envelope

def error_response(message: str, error_code: str = "UNKNOWN_ERROR", details: Any = None) -> Dict:
    """Build the uniform error envelope shared by every endpoint."""
    envelope = {"success": False, "error_code": error_code}
    envelope["message"] = message
    envelope["details"] = details
    envelope["timestamp"] = datetime.now().isoformat()
    return envelope

# Registry of active consumers.  NOTE(review): never read or written anywhere
# in this module's visible code — possibly dead, or used by code outside this view.
active_consumers = {}

@api_listener_bp.route('/configs', methods=['GET'], endpoint='get_api_listener_configs')
@log_api_call
def get_api_listener_configs():
    """List active crawler-queue API configs (service_type='api_listener').

    Returns the shared success envelope containing the config rows, with the
    ``request_template`` column decoded from JSON per row.

    Fix: decoding is now done row-by-row with a fallback, so one malformed
    template no longer fails the entire listing with a 500 — the bad row
    keeps its raw string and a warning is logged instead.
    """
    try:
        with get_db_connection() as conn:
            with conn.cursor(pymysql.cursors.DictCursor) as cursor:
                cursor.execute("""
                    SELECT 
                        id, config_name, api_url, request_template, 
                        execution_timeout, description, is_active, sort_order
                    FROM service_config 
                    WHERE service_type = 'api_listener' 
                      AND is_deleted = 0 
                      AND is_active = 1
                    ORDER BY sort_order, config_name
                """)
                configs = cursor.fetchall()

                # Decode the JSON template column per row so one bad row
                # cannot break the whole response.
                for config in configs:
                    if config['request_template']:
                        try:
                            config['request_template'] = json.loads(config['request_template'])
                        except (json.JSONDecodeError, TypeError) as parse_err:
                            logger.warning(
                                f"配置 {config.get('id')} 的 request_template 不是合法JSON，保留原始值: {parse_err}"
                            )

                return jsonify(success_response(configs, f"获取到 {len(configs)} 个爬虫队列接口配置"))

    except Exception as e:
        logger.error(f"获取爬虫队列接口配置失败: {e}")
        return jsonify(error_response(f"获取配置失败: {str(e)}", "CONFIG_LOAD_ERROR")), 500

@api_listener_bp.route('/kafka-environments', methods=['GET'], endpoint='get_kafka_environments_for_api_listener')
@log_api_call
def get_kafka_environments():
    """List active Kafka environment configs (service_type='kafka').

    Returns the shared success envelope containing the environment rows,
    with the ``extra_config`` column decoded from JSON per row.

    Fix: decoding is now done row-by-row with a fallback, so one malformed
    extra_config no longer fails the entire listing with a 500 — the bad
    row keeps its raw string and a warning is logged instead.
    """
    try:
        with get_db_connection() as conn:
            with conn.cursor(pymysql.cursors.DictCursor) as cursor:
                cursor.execute("""
                    SELECT 
                        id, config_name, host, port, username, 
                        protocol, extra_config, description
                    FROM service_config 
                    WHERE service_type = 'kafka' 
                      AND is_deleted = 0 
                      AND is_active = 1
                    ORDER BY sort_order, config_name
                """)
                environments = cursor.fetchall()

                # Decode extra_config per row so one bad row cannot break
                # the whole response.
                for env in environments:
                    if env['extra_config']:
                        try:
                            env['extra_config'] = json.loads(env['extra_config'])
                        except (json.JSONDecodeError, TypeError) as parse_err:
                            logger.warning(
                                f"Kafka环境 {env.get('id')} 的 extra_config 不是合法JSON，保留原始值: {parse_err}"
                            )

                return jsonify(success_response(environments, f"获取到 {len(environments)} 个Kafka环境"))

    except Exception as e:
        logger.error(f"获取Kafka环境失败: {e}")
        return jsonify(error_response(f"获取Kafka环境失败: {str(e)}", "KAFKA_ENV_ERROR")), 500

@api_listener_bp.route('/execute', methods=['POST'], endpoint='execute_api_listener')
@log_api_call
def execute_api_listener():
    """Run one crawler-queue round trip (simplified generic version).

    Flow: validate the JSON body, load the API and Kafka configs from
    service_config, cancel/clean up any previously running listener task,
    then run execute_crawler_queue_flow (trigger the third-party API and
    wait for a matching Kafka message) and return its envelope as JSON.

    Expected body keys:
        config_id        -- service_config row id (service_type='api_listener')
        kafka_env_id     -- service_config row id (service_type='kafka')
        listen_topic     -- Kafka topic to listen on
        json_template    -- request body forwarded to the third-party API
        match_type       -- 'fuzzy' (default) or 'key_value'
        match_key/value  -- matching criteria, depending on match_type
        timeout_seconds  -- Kafka wait timeout; legacy alias 'timeout'; default 300

    NOTE(review): validation failures return the error envelope with HTTP
    200; only missing configs (404) set an explicit status code here.
    """
    try:
        data = request.get_json()
        if not data:
            return jsonify(error_response("请求数据不能为空"))
        
        config_id = data.get('config_id')
        kafka_env_id = data.get('kafka_env_id')
        listen_topic = data.get('listen_topic', '').strip()
        json_template = data.get('json_template', {})
        match_type = data.get('match_type', 'fuzzy')  # default: fuzzy substring matching
        match_key = data.get('match_key', '').strip()
        match_value = data.get('match_value', '').strip()
        # 'timeout' is accepted as a legacy alias for 'timeout_seconds'
        timeout_seconds = data.get('timeout_seconds', data.get('timeout', 300))
        
        if not config_id:
            return jsonify(error_response("配置ID不能为空"))
        if not kafka_env_id:
            return jsonify(error_response("Kafka环境ID不能为空"))
        if not listen_topic:
            return jsonify(error_response("监听主题不能为空"))
        if not json_template:
            return jsonify(error_response("JSON模板不能为空"))
        
        # Validate matching parameters according to the chosen match type
        if match_type == 'key_value':
            if not match_key:
                return jsonify(error_response("键值对匹配需要提供键名"))
            if not match_value:
                return jsonify(error_response("键值对匹配需要提供键值"))
        elif match_type == 'fuzzy':
            if not match_value:
                return jsonify(error_response("模糊匹配需要提供匹配值"))
        else:
            return jsonify(error_response("不支持的匹配类型"))
        
        # Load the third-party API configuration
        api_config = get_api_config_by_id(config_id)
        if not api_config:
            return jsonify(error_response("爬虫队列接口配置不存在", "CONFIG_NOT_FOUND")), 404
            
        # Load the Kafka environment configuration
        kafka_config = get_kafka_config_by_id(kafka_env_id)
        if not kafka_config:
            return jsonify(error_response("Kafka环境配置不存在", "KAFKA_CONFIG_NOT_FOUND")), 404
        
        # Fold key/value into a single "key=value" string (backward compatible:
        # check_data_match dispatches on the presence of '=')
        if match_type == 'key_value':
            full_match_value = f"{match_key}={match_value}"
        else:
            full_match_value = match_value
        
        # Step 1: close any leftover Kafka consumer from a previous run
        cleanup_existing_kafka_consumer()
        
        # Step 2: if a task is still running, ask it to cancel and wait briefly
        if is_any_task_running():
            current_task = get_current_task_info()
            logger.warning(f"检测到有任务正在执行，正在取消: {current_task.get('execution_id', 'unknown')}")
            cancel_current_task()
            time.sleep(2)  # give the cancellation time to take effect
        
        # Build a unique execution id: epoch seconds + md5 prefix of the parameters
        execution_id = f"{int(time.time())}_{hashlib.md5(f'{config_id}_{listen_topic}_{full_match_value}'.encode()).hexdigest()[:8]}"
        current_thread = threading.current_thread()
        
        # Register this request as the single active task
        register_current_task(execution_id, current_thread, str(config_id), listen_topic, full_match_value)
        logger.info(f"📝 任务已注册: {execution_id}")
        
        try:
            # Run the full trigger-and-listen flow (blocks until match/timeout)
            result = execute_crawler_queue_flow(
                api_config, kafka_config, listen_topic, 
                json_template, full_match_value, timeout_seconds
            )
            
            return jsonify(result)
        finally:
            # Always unregister, whatever the flow returned or raised
            unregister_current_task()
            logger.info(f"📝 任务已注销: {execution_id}")
        
    except Exception as e:
        logger.error(f"执行爬虫队列接口失败: {e}")
        return jsonify(error_response(f"执行失败: {str(e)}"))

def execute_crawler_queue_flow(api_config: Dict, kafka_config: Dict, listen_topic: str, 
                             json_template: Dict, match_value: str, timeout_seconds: int) -> Dict:
    """Run the full trigger-and-listen flow and return a response envelope.

    Ordering matters: the Kafka consumer is started and positioned at the
    log end BEFORE the third-party API is triggered, so the message produced
    in response cannot be missed.  Steps:
      1. start a consumer in a throwaway group (auto_offset_reset='latest'),
      2. verify partition assignment is stable (best effort), then sleep 3s,
      3. POST json_template to the configured API,
      4. poll for a message matching match_value, up to timeout_seconds.

    The consumer is always closed in the inner finally block and its
    temporary consumer group deleted, regardless of outcome.
    """
    try:
        logger.info(f"开始执行爬虫队列流程: topic={listen_topic}, match_value={match_value}")
        
        # Step 1: start the Kafka consumer (positioned at the latest offsets)
        logger.info("步骤1: 启动Kafka消费者...")
        consumer_group_id = f"crawler_queue_{int(time.time())}_{random.randint(1000, 9999)}"
        consumer = start_kafka_consumer(kafka_config, listen_topic, consumer_group_id)
        
        if not consumer:
            return error_response("Kafka消费者启动失败", "KAFKA_START_ERROR")
        
        try:
            # Step 2: make sure the listener is fully up before triggering the API
            logger.info("步骤2: 等待Kafka消费者完全就绪...")
            
            # Re-check partition assignment until it looks stable (max 10 tries)
            verification_attempts = 0
            max_verification_attempts = 10
            
            while verification_attempts < max_verification_attempts:
                try:
                    # Check partition assignment status
                    assignment = consumer.assignment()
                    if assignment:
                        # Read each partition position to confirm the connection is live
                        for tp in assignment:
                            position = consumer.position(tp)
                            logger.debug(f"验证分区 {tp.partition} 位置: {position}")
                        
                        logger.info("✅ Kafka消费者状态验证完成，连接稳定")
                        break
                    else:
                        logger.warning("分区分配丢失，等待重新分配...")
                        time.sleep(1)
                        verification_attempts += 1
                except Exception as e:
                    logger.warning(f"验证消费者状态时出错 (尝试 {verification_attempts + 1}/{max_verification_attempts}): {e}")
                    time.sleep(1)
                    verification_attempts += 1
            
            if verification_attempts >= max_verification_attempts:
                # Verification is best-effort: proceed anyway and let polling decide
                logger.error("Kafka消费者状态验证失败，但继续执行")
            
            # Extra settling time before triggering the API (mirrors the
            # behavior of the original api_service implementation)
            logger.info("等待消费者完全稳定...")
            time.sleep(3)  # 3s to make sure the consumer is fully ready
            
            # Step 3: trigger the third-party API
            logger.info("步骤3: 触发第三方API...")
            api_result = call_third_party_api(api_config['api_url'], json_template, api_config.get('execution_timeout', 300))
            
            if not api_result['success']:
                return error_response(f"API调用失败: {api_result['message']}", "API_CALL_ERROR", api_result)
            
            # Step 4: wait for the matching Kafka message
            logger.info("步骤4: 等待Kafka数据...")
            kafka_result = wait_for_matching_kafka_data(consumer, match_value, timeout_seconds)
            
            if kafka_result['success']:
                return success_response({
                    'api_response': api_result['data'],
                    'kafka_data': kafka_result['data'],
                    'match_details': kafka_result.get('match_details'),
                    'execution_summary': {
                        'listen_topic': listen_topic,
                        'match_value': match_value,
                        'api_url': api_config['api_url'],
                        'execution_time': format_timestamp()
                    }
                }, "爬虫队列接口执行成功")
            else:
                return error_response(kafka_result['message'], "KAFKA_WAIT_ERROR", {
                    'api_response': api_result['data'],
                    'kafka_error': kafka_result
                })
                
        finally:
            # Resource cleanup — runs on success, failure and timeout alike
            try:
                if consumer:
                    consumer.close()
                    unregister_kafka_consumer()  # drop the module-wide reference
                    logger.info("🔒 Kafka消费者已安全关闭，监听进程结束")
                    
                    # Delete the temporary consumer group right away
                    delete_consumer_group_safe(kafka_config, consumer_group_id)
                else:
                    logger.warning("Kafka消费者为空，无需关闭")
            except Exception as e:
                # Swallow close errors so the function can still return a result
                logger.error(f"❌ 关闭Kafka消费者时出错: {e}")
        
    except Exception as e:
        logger.error(f"执行爬虫队列流程失败: {e}")
        return error_response(f"执行流程失败: {str(e)}", "EXECUTION_ERROR")

def get_api_config_by_id(config_id: int) -> Optional[Dict]:
    """Load one active api_listener config row by id; None when missing or on error."""
    try:
        with get_db_connection() as conn:
            with conn.cursor(pymysql.cursors.DictCursor) as cursor:
                cursor.execute("""
                    SELECT 
                        id, config_name, api_url, request_template, 
                        execution_timeout, description
                    FROM service_config 
                    WHERE id = %s AND service_type = 'api_listener' 
                      AND is_deleted = 0 AND is_active = 1
                """, (config_id,))
                row = cursor.fetchone()

        # Decode the JSON template column after leaving the connection scope.
        if row and row['request_template']:
            row['request_template'] = json.loads(row['request_template'])
        return row
    except Exception as e:
        logger.error(f"获取API配置失败: {e}")
        return None

def get_kafka_config_by_id(kafka_env_id: int) -> Optional[Dict]:
    """Load one active Kafka environment row by id; None when missing or on error."""
    try:
        with get_db_connection() as conn:
            with conn.cursor(pymysql.cursors.DictCursor) as cursor:
                cursor.execute("""
                    SELECT 
                        id, config_name, host, port, username, password,
                        protocol, extra_config
                    FROM service_config 
                    WHERE id = %s AND service_type = 'kafka' 
                      AND is_deleted = 0 AND is_active = 1
                """, (kafka_env_id,))
                row = cursor.fetchone()

        # Decode the extra_config JSON after leaving the connection scope.
        if row and row['extra_config']:
            row['extra_config'] = json.loads(row['extra_config'])
        return row
    except Exception as e:
        logger.error(f"获取Kafka配置失败: {e}")
        return None

def start_kafka_consumer(kafka_config: Dict, topic: str, consumer_group_id: str) -> Optional[KafkaConsumer]:
    """Create a KafkaConsumer subscribed to ``topic``, positioned at the log end.

    The consumer joins a throwaway consumer group, waits up to 30s for
    partition assignment, seeks every assigned partition to its end so only
    messages produced after this call are seen, registers itself as the
    module-wide current consumer, and is returned ready to poll.

    Returns None when configuration, connection or partition assignment
    fails.  Fix: a failure after construction (subscribe/poll/seek raising)
    previously leaked the consumer's sockets and heartbeat thread; the
    except path now closes any partially-constructed consumer.
    """
    consumer = None
    try:
        extra_config = kafka_config.get('extra_config', {})
        # Prefer an explicit bootstrap list from extra_config, else host:port.
        bootstrap_servers = extra_config.get('bootstrap_servers', [f"{kafka_config['host']}:{kafka_config['port']}"])

        # Robust JSON deserializer: on parse failure return None so that the
        # consumer loop's `if not record.value` check skips the message.
        def safe_json_deserializer(x):
            if not x:
                return None
            try:
                return json.loads(x.decode('utf-8'))
            except (json.JSONDecodeError, UnicodeDecodeError, AttributeError) as e:
                logger.warning(f"⚠️ Kafka消息反序列化失败，跳过: {str(e)[:100]}")
                return None

        consumer_config = {
            'bootstrap_servers': bootstrap_servers,
            'auto_offset_reset': 'latest',      # start from the newest offsets
            'enable_auto_commit': True,
            'auto_commit_interval_ms': 1000,
            'value_deserializer': safe_json_deserializer,
            'security_protocol': kafka_config.get('protocol', 'SASL_PLAINTEXT'),
            'sasl_mechanism': 'PLAIN',
            'sasl_plain_username': kafka_config['username'],
            'sasl_plain_password': kafka_config['password'],
            'group_id': consumer_group_id,
            'max_poll_records': 100,            # up to 100 records per poll
            'request_timeout_ms': 120000,       # generous timeouts for slow brokers
            'session_timeout_ms': 60000,
            'heartbeat_interval_ms': 10000,     # heartbeat every 10s
            'max_poll_interval_ms': 300000,     # allow up to 5min between polls
            'connections_max_idle_ms': 540000,  # keep idle connections 9min
            'consumer_timeout_ms': 10000,
            'partition_assignment_strategy': [RangePartitionAssignor]
        }

        logger.info(f"创建Kafka消费者: topic={topic}, group_id={consumer_group_id}")

        consumer = KafkaConsumer(**consumer_config)
        consumer.subscribe([topic])

        # Poll until the group coordinator assigns partitions (max 30s).
        max_wait_seconds = 30
        start_time = time.time()
        while time.time() - start_time < max_wait_seconds:
            consumer.poll(timeout_ms=100)
            assignment = consumer.assignment()
            if assignment:
                logger.info(f"✅ Kafka消费者分区分配成功: {assignment}")
                consumer.seek_to_end()  # only consume messages produced from now on

                # Confirm per-partition positions so we know seeking took effect.
                try:
                    for tp in assignment:
                        position = consumer.position(tp)
                        logger.info(f"分区 {tp.partition} 当前位置: {position}")
                    logger.info("Kafka消费者完全就绪，所有分区位置已确认")
                    register_kafka_consumer(consumer)
                    return consumer
                except Exception as e:
                    # Position lookup is best-effort; the consumer is still usable.
                    logger.warning(f"获取分区位置时出错: {e}，但消费者已就绪")
                    register_kafka_consumer(consumer)
                    return consumer
            time.sleep(1)

        # Assignment timed out: release the consumer and report failure.
        consumer.close()
        consumer = None
        logger.error("Kafka消费者分区分配超时")
        return None

    except Exception as e:
        logger.error(f"启动Kafka消费者失败: {e}")
        # Close any partially-constructed consumer to avoid leaking sockets
        # and the background heartbeat thread.
        if consumer is not None:
            try:
                consumer.close()
            except Exception as close_err:
                logger.warning(f"关闭失败的Kafka消费者时出错: {close_err}")
        return None

def call_third_party_api(api_url: str, request_body: Dict, timeout: int = 10) -> Dict:
    """POST ``request_body`` as JSON to ``api_url`` synchronously.

    Uses requests rather than httpx/asyncio so no event loop is needed in
    the Flask worker.  Returns {'success': True, 'message', 'data'} for a
    2xx JSON response, or {'success': False, 'message'} describing the
    failure (timeout, connection error, HTTP status, anything else).
    """
    def _failure(msg: str) -> Dict:
        # Log once and wrap the message in the standard failure shape.
        logger.error(msg)
        return {'success': False, 'message': msg}

    try:
        import requests

        logger.info(f"调用API: {api_url}, 请求体: {json.dumps(request_body, ensure_ascii=False)}")

        resp = requests.post(
            api_url,
            json=request_body,
            headers={"Content-Type": "application/json"},
            timeout=timeout,
        )
        resp.raise_for_status()

        result = resp.json()
        logger.info(f"API调用成功: {api_url}, 响应: {result}")

        return {
            'success': True,
            'message': 'API调用成功',
            'data': result
        }

    except requests.exceptions.Timeout:
        return _failure(f"API调用超时: {api_url} (超时时间: {timeout}秒)")
    except requests.exceptions.ConnectionError as e:
        return _failure(f"API连接失败: {api_url}, 错误: {str(e)}")
    except requests.exceptions.HTTPError as e:
        return _failure(f"API HTTP错误: {api_url}, 状态码: {e.response.status_code}, 错误: {str(e)}")
    except Exception as e:
        return _failure(f"API调用失败: {api_url}, 错误: {str(e)}")

def wait_for_matching_kafka_data(consumer: KafkaConsumer, match_value: str, timeout_seconds: int) -> Dict:
    """Poll ``consumer`` until a message matching ``match_value`` arrives.

    Each loop iteration blocks up to 1s in poll() plus a 1s sleep, so one
    iteration takes roughly two seconds of wall time.  Deserialization
    failures arrive as record.value=None and are skipped.  The loop exits
    early when the module-level cancel flag is set.

    Returns a dict with 'success', 'message' and, on success, the matched
    'data' plus 'match_details' (topic/partition/offset/elapsed seconds).

    Fix: the periodic progress log used to report poll_count as "seconds
    waited", but one iteration takes ~2s — it now reports measured elapsed
    wall time instead.
    """
    try:
        start_time = time.time()
        poll_count = 0

        logger.info(f"开始等待Kafka数据匹配: match_value={match_value}, timeout={timeout_seconds}s")

        while time.time() - start_time < timeout_seconds:
            # Stop immediately if the task was cancelled by another request.
            if is_task_cancelled():
                logger.info("🚫 任务已被取消，停止等待Kafka数据")
                return {
                    'success': False,
                    'message': '任务已被取消'
                }

            try:
                # Periodic progress log using measured elapsed time.
                if poll_count > 0 and poll_count % 15 == 0:
                    elapsed_so_far = int(time.time() - start_time)
                    remaining_time = int(timeout_seconds - (time.time() - start_time))
                    logger.info(f"正在等待Kafka数据，已等待{elapsed_so_far}秒，剩余{remaining_time}秒...")

                messages = consumer.poll(timeout_ms=1000)
                poll_count += 1

                if messages and isinstance(messages, dict):
                    for topic_partition, records in messages.items():
                        for record in records:
                            # None/empty value means empty payload or a
                            # deserialization failure — skip it.
                            if not record.value:
                                continue

                            # The deserializer guarantees dict-or-None here.
                            message_data = record.value

                            if check_data_match(message_data, match_value):
                                elapsed_time = int(time.time() - start_time)
                                logger.info(f"✅ 找到匹配的Kafka数据，等待时间: {elapsed_time}秒")
                                logger.info("🔚 匹配成功，即将关闭Kafka消费者...")
                                return {
                                    'success': True,
                                    'message': f'成功获取匹配的Kafka数据 (等待时间: {elapsed_time}秒)',
                                    'data': message_data,
                                    'match_details': {
                                        'match_value': match_value,
                                        'topic': record.topic,
                                        'partition': record.partition,
                                        'offset': record.offset,
                                        'elapsed_time_seconds': elapsed_time
                                    }
                                }

                time.sleep(1)

            except Exception as poll_error:
                # Transient poll errors must not abort the whole wait.
                logger.error(f"轮询Kafka消息时出错: {poll_error}")
                time.sleep(1)
                continue

        elapsed_time = int(time.time() - start_time)
        logger.warning(f"⏰ 等待Kafka数据超时: {timeout_seconds}s，实际等待: {elapsed_time}秒")
        logger.info("🔚 超时结束，即将关闭Kafka消费者...")
        return {
            'success': False,
            'message': f'等待数据超时（设置: {timeout_seconds}秒，实际等待: {elapsed_time}秒），未找到匹配数据'
        }

    except Exception as e:
        logger.error(f"等待Kafka数据时出错: {e}")
        return {
            'success': False,
            'message': f'等待数据时出错: {str(e)}'
        }

def check_data_match(data: Dict, match_value: str) -> bool:
    """Dispatch matching: key=value matching when match_value contains '=',
    otherwise fuzzy substring matching."""
    try:
        matcher = check_key_value_match if '=' in match_value else check_fuzzy_match
        return matcher(data, match_value)
    except Exception as e:
        logger.error(f"检查数据匹配时出错: {e}")
        return False

def check_key_value_match(data: Dict, match_value: str) -> bool:
    """Match a 'key=value' expression against the (possibly nested) payload."""
    try:
        if '=' not in match_value:
            return False

        # Split on the FIRST '=' only; the value may itself contain '='.
        key, _, expected_value = match_value.partition('=')
        key = key.strip()
        expected_value = expected_value.strip()

        logger.info(f"键值对匹配: 查找 key='{key}', expected_value='{expected_value}'")

        # Recursive search through nested dicts and lists.
        if check_nested_key_value(data, key, expected_value):
            logger.info(f"✅ 键值对匹配成功: {key}={expected_value}")
            return True

        logger.debug(f"键值对不匹配: {key}={expected_value}")
        return False

    except Exception as e:
        logger.error(f"键值对匹配检查时出错: {e}")
        return False

def check_nested_key_value(obj: Any, target_key: str, expected_value: str) -> bool:
    """Depth-first search for ``target_key`` whose value matches ``expected_value``.

    Descends into nested dicts and lists; any single hit anywhere in the
    structure is enough.  Scalars (non-dict, non-list) never match directly.
    """
    try:
        if isinstance(obj, dict):
            for key, value in obj.items():
                # Direct hit on this level, or a hit somewhere inside the value.
                hit_here = key == target_key and value_matches(value, expected_value)
                if hit_here or check_nested_key_value(value, target_key, expected_value):
                    return True
            return False

        if isinstance(obj, list):
            return any(
                check_nested_key_value(item, target_key, expected_value)
                for item in obj
            )

        return False

    except Exception as e:
        logger.error(f"递归检查嵌套结构时出错: {e}")
        return False

def value_matches(actual_value: Any, expected_value: str) -> bool:
    """Compare a found value with the expected string.

    Tries, in order: exact string equality, numeric equality (float when
    the expectation contains a dot, int otherwise), then case-insensitive
    string equality.  Any comparison error yields False.
    """
    try:
        actual_str = str(actual_value)

        # 1) exact string comparison
        if actual_str == expected_value:
            logger.debug(f"值匹配(字符串): '{actual_str}' == '{expected_value}'")
            return True

        # 2) numeric comparison, when both sides convert cleanly
        try:
            if '.' in expected_value:
                expected_num = float(expected_value)
                actual_num = float(actual_value)
            else:
                expected_num = int(expected_value)
                actual_num = int(actual_value)

            if actual_num == expected_num:
                logger.debug(f"值匹配(数字): {actual_num} == {expected_num}")
                return True

        except (ValueError, TypeError):
            pass  # not numeric — fall through to the next strategy

        # 3) case-insensitive comparison
        if actual_str.lower() == expected_value.lower():
            logger.debug(f"值匹配(忽略大小写): '{actual_str}' == '{expected_value}'")
            return True

        logger.debug(f"值不匹配: '{actual_str}' != '{expected_value}'")
        return False

    except Exception as e:
        logger.error(f"值匹配检查时出错: {e}")
        return False

def check_fuzzy_match(data: Dict, match_value: str) -> bool:
    """Substring match of ``match_value`` against the JSON-serialized payload."""
    try:
        # Serialize with ensure_ascii=False so non-ASCII values stay literal
        # and can be matched against a non-ASCII match_value.
        serialized = json.dumps(data, ensure_ascii=False)

        if str(match_value) in serialized:
            logger.info(f"模糊匹配成功: match_value={match_value}")
            return True

        logger.debug(f"模糊匹配失败: match_value={match_value}")
        return False

    except Exception as e:
        logger.error(f"模糊匹配检查时出错: {e}")
        return False