import json
import redis
import os
import difflib
import sys
import logging
from db_connection import get_redis_connection

# Configure module-level logging (console output via basicConfig).
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger("redis_company_data")

def load_json_to_redis(file_path, update_existing=False, db_name="default"):
    """Load one JSON file of company records into Redis hashes.

    The file is expected to map company IDs to dicts of field/value pairs.
    Each company is stored as a hash under ``db:<db_name>:company:<id>``.

    Args:
        file_path (str): Path of the JSON file to load.
        update_existing (bool): When True, overwrite fields of records whose
            key already exists; when False, skip duplicate IDs.
        db_name (str): Logical database name used as the key prefix.

    Returns:
        int: Number of companies added plus number updated.
    """
    # Redis connection configured from .env
    r = get_redis_connection()

    # Read JSON file
    with open(file_path, 'r', encoding='utf-8') as f:
        company_data = json.load(f)

    loaded_count = 0
    skipped_count = 0
    updated_count = 0

    # Store data in Redis
    for company_id, details in company_data.items():
        # Keys are namespaced with the db name so several logical databases
        # can coexist in one Redis instance.
        key = f"db:{db_name}:company:{company_id}"
        if r.exists(key):
            if update_existing:
                # Prefer the incoming name; fall back to the stored one.
                existing_company = r.hgetall(key)
                company_name = details.get('公司名称', existing_company.get('公司名称', '未知'))

                # Write all fields in a single round trip instead of one
                # HSET per field. redis-py raises on an empty mapping,
                # whereas the original per-field loop was a no-op — hence
                # the guard.
                if details:
                    r.hset(key, mapping=details)

                logger.info(f"更新现有记录: {company_id}, 公司: {company_name}")
                updated_count += 1
            else:
                logger.info(f"警告: 跳过重复ID: {company_id}, 公司: {details.get('公司名称', '未知')}")
                skipped_count += 1
                continue
        else:
            # New record: create the hash in one call.
            if details:
                r.hset(key, mapping=details)
            loaded_count += 1

    # Record the database name so all databases can be enumerated later.
    r.sadd("database_list", db_name)

    if update_existing:
        result_msg = f"数据加载完成。文件: {file_path}, 新增: {loaded_count} 家公司, 更新: {updated_count} 家公司"
    else:
        result_msg = f"数据加载完成。文件: {file_path}, 成功加载: {loaded_count} 家公司, 跳过重复: {skipped_count} 家公司"

    logger.info(result_msg)
    return loaded_count + updated_count

def load_folder_json_to_redis(folder_path, update_existing=False, db_name="default"):
    """Import every ``*.json`` file found in *folder_path* into Redis.

    Each file is delegated to load_json_to_redis and the per-file results
    are aggregated. Returns 0 when the folder is missing or contains no
    JSON files; otherwise returns a summary dict with counters and a
    human-readable message.
    """
    # Bail out early when the folder itself is absent.
    if not os.path.exists(folder_path):
        logger.error(f"错误: 文件夹 {folder_path} 不存在")
        return 0

    candidates = [name for name in os.listdir(folder_path) if name.endswith('.json')]
    if not candidates:
        logger.warning(f"警告: 文件夹 {folder_path} 中没有找到JSON文件")
        return 0

    total_companies = 0
    processed_files = 0
    error_files = 0

    # Import one file at a time; a failure in one file must not stop the rest.
    for name in candidates:
        file_path = os.path.join(folder_path, name)
        try:
            total_companies += load_json_to_redis(file_path, update_existing, db_name)
            processed_files += 1
        except Exception as e:
            logger.error(f"处理文件 {file_path} 时出错: {str(e)}")
            error_files += 1

    result_msg = f"文件夹处理完成。共处理 {processed_files} 个JSON文件，错误 {error_files} 个，总计导入 {total_companies} 家公司"
    logger.info(result_msg)

    return {
        "total_companies": total_companies,
        "processed_files": processed_files,
        "error_files": error_files,
        "message": result_msg
    }

def retrieve_company_from_redis(db_name, company_id):
    """Fetch a single company's hash from Redis.

    Returns the hash as a dict, or None when no record exists for
    *company_id* in the given database.
    """
    # Redis connection configured from .env
    conn = get_redis_connection()

    # HGETALL returns an empty dict when the key does not exist.
    record = conn.hgetall(f"db:{db_name}:company:{company_id}")

    if record:
        logger.info(f"成功获取公司数据: {record.get('公司名称', '未知')} (数据库: {db_name})")
        return record

    logger.info(f"未找到公司ID: {company_id} 的数据 (数据库: {db_name})")
    return None

def list_all_companies(db_name="default"):
    """List the id and name of every company stored under *db_name*.

    Args:
        db_name (str): Logical database name used as the key prefix.

    Returns:
        list[dict]: One ``{"id": ..., "name": ...}`` entry per company
        (order unspecified, as with the underlying key scan).
    """
    # Redis connection configured from .env
    r = get_redis_connection()

    # SCAN is cursor-based and non-blocking; KEYS would scan the whole
    # keyspace in a single blocking call on the server.
    company_keys = list(r.scan_iter(match=f"db:{db_name}:company:*"))

    logger.info(f"数据库 {db_name} 中的公司总数: {len(company_keys)}")

    companies = []
    for key in company_keys:
        company_id = key.split(":")[-1]
        company_name = r.hget(key, "公司名称")
        companies.append({"id": company_id, "name": company_name})

    return companies

def fuzzy_match_company_id(db_name, partial_id, min_similarity=0.6):
    """Find company IDs that best match a partial ID string.

    Three strategies are applied in priority order, and each ID is scored
    by at most one of them:

    1. Prefix match    — score in [0.5, 1.0], proportional to how much of
                         the full ID the prefix covers.
    2. difflib ratio   — kept when the ratio reaches *min_similarity*.
    3. Substring match — score in [0.3, 0.7] for IDs containing the query
                         somewhere other than the start.

    Args:
        db_name (str): Database name (key prefix).
        partial_id (str): Partial company ID string to look up.
        min_similarity (float): Threshold for the difflib strategy, default 0.6.

    Returns:
        dict | None: ``{"total_matches": n, "matches": [...]}`` limited to
        the five best matches, or None when nothing matches.
    """
    # Redis connection configured from .env
    r = get_redis_connection()

    key_prefix = f"db:{db_name}:company:"
    # SCAN instead of KEYS so the Redis server is never blocked by a full
    # keyspace walk.
    all_ids = [key.split(":")[-1] for key in r.scan_iter(match=f"{key_prefix}*")]

    if not all_ids:
        logger.warning(f"数据库 {db_name} 中没有公司数据")
        return None

    result = []
    # IDs already claimed by a higher-priority strategy.
    processed_ids = set()

    # Strategy 1: direct prefix match (highest priority).
    for match_id in all_ids:
        if match_id in processed_ids or not match_id.startswith(partial_id):
            continue

        company_name = r.hget(f"{key_prefix}{match_id}", "公司名称")
        # Prefix matches get a 0.5 base score plus up to 0.5 depending on
        # how much of the full ID the prefix covers.
        prefix_ratio = len(partial_id) / len(match_id)
        similarity = 0.5 + (prefix_ratio * 0.5)

        # A prefix match always starts at position 0.
        match_position = f"位置: 0-{len(partial_id)-1}"

        result.append({
            "id": match_id,
            "name": company_name,
            "similarity": similarity,
            "match_type": "前缀匹配",
            "match_field": "公司ID",
            "match_position": match_position,
            "full_id": match_id
        })
        processed_ids.add(match_id)

    # Strategy 2: difflib sequence matching (second priority).
    for company_id in all_ids:
        if company_id in processed_ids:
            continue

        seq_matcher = difflib.SequenceMatcher(None, partial_id, company_id)
        similarity = seq_matcher.ratio()

        # Longest common block, used only to report where the match sits.
        matching_blocks = seq_matcher.get_matching_blocks()
        best_block = max(matching_blocks, key=lambda x: x.size) if matching_blocks else None

        # Keep only candidates reaching the minimum similarity.
        if similarity >= min_similarity:
            company_name = r.hget(f"{key_prefix}{company_id}", "公司名称")

            if best_block and best_block.size > 0:
                match_position = f"位置: {best_block.b}-{best_block.b + best_block.size - 1}"
                matched_text = company_id[best_block.b:best_block.b + best_block.size]
            else:
                match_position = "位置: 未找到明确匹配块"
                matched_text = ""

            result.append({
                "id": company_id,
                "name": company_name,
                "similarity": similarity,
                "match_type": "模糊匹配",
                "match_field": "公司ID",
                "match_position": match_position,
                "matched_text": matched_text,
                "full_id": company_id
            })
            processed_ids.add(company_id)

    # Strategy 3: substring containment (lowest priority, extra reference).
    for company_id in all_ids:
        if company_id in processed_ids:
            continue

        if partial_id in company_id:
            company_name = r.hget(f"{key_prefix}{company_id}", "公司名称")
            # Contained but not a prefix: mid-range score, capped at 0.7.
            ratio = len(partial_id) / len(company_id)
            similarity = 0.3 + (ratio * 0.4)

            start_pos = company_id.find(partial_id)
            end_pos = start_pos + len(partial_id) - 1
            match_position = f"位置: {start_pos}-{end_pos}"

            result.append({
                "id": company_id,
                "name": company_name,
                "similarity": similarity,
                "match_type": "包含匹配",
                "match_field": "公司ID",
                "match_position": match_position,
                "full_id": company_id
            })
            processed_ids.add(company_id)

    if not result:
        logger.info(f"没有找到与 '{partial_id}' 匹配的公司ID (数据库: {db_name})")
        return None

    # Best matches first.
    result.sort(key=lambda x: x["similarity"], reverse=True)

    match_count = len(result)
    logger.info(f"找到 {match_count} 个可能的匹配 (数据库: {db_name})")

    # API mode: return the top results directly (capped at five).
    return {
        "total_matches": match_count,
        "matches": result[:5]
    }

def fuzzy_search_company(db_name, query, field="公司名称", min_similarity=0.3, limit=5):
    """Fuzzy-search companies by the content of one hash field.

    A company matches when the field value either contains *query*
    (case-insensitive) or its difflib similarity to *query* reaches
    *min_similarity*.

    Args:
        db_name (str): Database name (key prefix).
        query (str): Search term.
        field (str): Hash field to search, default "公司名称".
        min_similarity (float): Minimum difflib ratio to accept.
        limit (int): Maximum number of results returned.

    Returns:
        list: Match dicts sorted best-first by similarity (may be empty).
    """
    # Redis connection configured from .env
    r = get_redis_connection()

    # SCAN instead of KEYS so the Redis server is never blocked by a full
    # keyspace walk.
    company_keys = list(r.scan_iter(match=f"db:{db_name}:company:*"))

    if not company_keys:
        logger.warning(f"数据库 {db_name} 中没有公司数据")
        return []

    result = []

    # Compare the requested field of every company against the query.
    for key in company_keys:
        company_id = key.split(":")[-1]
        field_value = r.hget(key, field)

        if field_value:
            seq_matcher = difflib.SequenceMatcher(None, query.lower(), field_value.lower())
            similarity = seq_matcher.ratio()

            # Longest common block, used only for position reporting.
            matching_blocks = seq_matcher.get_matching_blocks()
            best_block = max(matching_blocks, key=lambda x: x.size) if matching_blocks else None

            # Case-insensitive containment counts as a match even when the
            # overall ratio is low.
            contains_match = query.lower() in field_value.lower()
            match_type = "包含匹配" if contains_match else "相似度匹配"

            if similarity >= min_similarity or contains_match:
                # Avoid a second HGET when the searched field IS the name.
                company_name = field_value if field == "公司名称" else r.hget(key, "公司名称")

                # Boost containment matches whose ratio fell below the bar
                # so they still sort above the threshold.
                if contains_match and similarity < min_similarity:
                    similarity = min_similarity + 0.1

                if contains_match:
                    start_pos = field_value.lower().find(query.lower())
                    end_pos = start_pos + len(query) - 1
                    match_position = f"位置: {start_pos}-{end_pos}"
                    matched_text = field_value[start_pos:start_pos+len(query)]
                elif best_block and best_block.size > 0:
                    match_position = f"位置: {best_block.b}-{best_block.b + best_block.size - 1}"
                    matched_text = field_value[best_block.b:best_block.b + best_block.size]
                else:
                    match_position = "位置: 未找到明确匹配块"
                    matched_text = ""

                result.append({
                    "id": company_id,
                    "name": company_name,
                    "field_value": field_value,
                    "similarity": similarity,
                    "match_type": match_type,
                    "match_field": field,
                    "match_position": match_position,
                    "matched_text": matched_text
                })

    # Best matches first, then truncate to the requested count.
    result.sort(key=lambda x: x["similarity"], reverse=True)
    result = result[:limit]

    if result:
        logger.info(f"根据'{field}'字段查找'{query}'，找到 {len(result)} 个匹配结果 (数据库: {db_name})")
    else:
        logger.info(f"没有找到与'{query}'匹配的公司 (数据库: {db_name})")

    return result

def fuzzy_match_company_id_with_spaces(db_name, query_str, min_similarity=0.4, limit=10):
    """
    Multi-term fuzzy ID matching using a space-separated query; terms may
    match in any order.

    Args:
        db_name (str): Database name (key prefix)
        query_str (str): Multiple query terms separated by spaces
        min_similarity (float): Minimum similarity threshold, default 0.4
        limit (int): Maximum number of results returned, default 10

    Returns:
        dict: Dictionary containing the match results (None when empty)
    """
    # Redis connection configured from .env
    r = get_redis_connection()
    
    # Fetch all company keys
    company_keys = r.keys(f"db:{db_name}:company:*")
    
    # Extract the company IDs from the keys
    all_ids = [key.split(":")[-1] for key in company_keys]
    
    if not all_ids:
        logger.warning(f"数据库 {db_name} 中没有公司数据")
        return None
    
    # Split the query string into individual, non-empty terms
    query_terms = [term.strip() for term in query_str.split() if term.strip()]
    
    if not query_terms:
        logger.warning(f"查询字符串为空")
        return None
    
    logger.info(f"多条件查询: {query_terms}")
    
    result = []
    processed_ids = set()  # NOTE(review): never populated below — dead variable
    
    # Score every company ID against all query terms
    for company_id in all_ids:
        # Running score and the list of terms that matched this ID
        total_score = 0
        matched_terms = []
        match_details = []
        
        # Score each query term independently
        for term in query_terms:
            # Exact substring hit?
            if term in company_id:
                # Substring match score in the range 0.4 - 0.8
                term_ratio = len(term) / len(company_id)
                term_score = 0.4 + (term_ratio * 0.4)
                
                # Record where the term occurs
                start_pos = company_id.find(term)
                end_pos = start_pos + len(term) - 1
                
                matched_terms.append(term)
                match_details.append({
                    "term": term,
                    "score": term_score,
                    "match_type": "子串匹配",
                    "position": f"{start_pos}-{end_pos}"
                })
                
                total_score += term_score
            else:
                # Fall back to difflib fuzzy similarity
                seq_matcher = difflib.SequenceMatcher(None, term, company_id)
                similarity = seq_matcher.ratio()
                
                # Keep only terms reaching the minimum similarity
                if similarity >= min_similarity:
                    # Fuzzy match score in the range 0 - 0.6
                    term_score = similarity * 0.6
                    
                    # Longest matching block, for position reporting
                    matching_blocks = seq_matcher.get_matching_blocks()
                    best_block = max(matching_blocks, key=lambda x: x.size) if matching_blocks else None
                    
                    if best_block and best_block.size > 0:
                        match_position = f"{best_block.b}-{best_block.b + best_block.size - 1}"
                        matched_text = company_id[best_block.b:best_block.b + best_block.size]
                    else:
                        match_position = "未找到明确匹配块"
                        matched_text = ""
                    
                    matched_terms.append(term)
                    match_details.append({
                        "term": term,
                        "score": term_score,
                        "match_type": "模糊匹配",
                        "position": match_position,
                        "matched_text": matched_text
                    })
                    
                    total_score += term_score
        
        # Only IDs that matched at least one term are kept
        if matched_terms:
            # Weighted average: the more terms matched, the higher the score
            match_ratio = len(matched_terms) / len(query_terms)
            final_score = (total_score / len(query_terms)) * (0.5 + 0.5 * match_ratio)
            
            # Look up the company name for display
            company_name = r.hget(f"db:{db_name}:company:{company_id}", "公司名称")
            
            result.append({
                "id": company_id,
                "name": company_name,
                "similarity": final_score,
                "matched_terms_count": len(matched_terms),
                "total_terms_count": len(query_terms),
                "matched_terms": matched_terms,
                "match_details": match_details,
                "full_id": company_id
            })
    
    if result:
        # Sort by matched-term count first, then by final score
        result.sort(key=lambda x: (x["matched_terms_count"], x["similarity"]), reverse=True)
        
        # Truncate to the requested number of results
        result = result[:limit]
        
        match_count = len(result)
        logger.info(f"找到 {match_count} 个多条件匹配结果 (数据库: {db_name})")
        
        return {
            "total_matches": match_count,
            "matches": result
        }
    else:
        logger.info(f"没有找到与查询条件 '{query_str}' 匹配的公司ID (数据库: {db_name})")
        return None

def fuzzy_search_company_with_spaces(db_name, query_str, field="公司名称", min_similarity=0.3, limit=10):
    """
    Multi-term fuzzy field search using a space-separated query; terms may
    match in any order.

    Args:
        db_name (str): Database name (key prefix)
        query_str (str): Multiple query terms separated by spaces
        field (str): Hash field to search, default "公司名称"
        min_similarity (float): Minimum similarity threshold, default 0.3
        limit (int): Maximum number of results returned, default 10

    Returns:
        dict: Dictionary containing the match results (None when empty)
    """
    # Redis connection configured from .env
    r = get_redis_connection()
    
    # Fetch all company keys
    company_keys = r.keys(f"db:{db_name}:company:*")
    
    if not company_keys:
        logger.warning(f"数据库 {db_name} 中没有公司数据")
        return None
    
    # Split the query string into individual, non-empty terms
    query_terms = [term.strip() for term in query_str.split() if term.strip()]
    
    if not query_terms:
        logger.warning(f"查询字符串为空")
        return None
    
    logger.info(f"多条件字段查询: {query_terms}, 字段: {field}")
    
    result = []
    
    # Score every company against all query terms
    for key in company_keys:
        company_id = key.split(":")[-1]
        field_value = r.hget(key, field)
        
        if not field_value:
            continue
            
        # Running score and the list of terms that matched this company
        total_score = 0
        matched_terms = []
        match_details = []
        
        # Lower-cased copy for case-insensitive comparison
        field_value_lower = field_value.lower()
        
        # Score each query term independently
        for term in query_terms:
            term_lower = term.lower()
            
            # Exact substring hit?
            if term_lower in field_value_lower:
                # Substring match score in the range 0.4 - 0.8
                term_ratio = len(term) / len(field_value)
                term_score = 0.4 + (term_ratio * 0.4)
                
                # Record where the term occurs (original casing preserved)
                start_pos = field_value_lower.find(term_lower)
                end_pos = start_pos + len(term) - 1
                matched_text = field_value[start_pos:start_pos+len(term)]
                
                matched_terms.append(term)
                match_details.append({
                    "term": term,
                    "score": term_score,
                    "match_type": "子串匹配",
                    "position": f"{start_pos}-{end_pos}",
                    "matched_text": matched_text
                })
                
                total_score += term_score
            else:
                # Fall back to difflib fuzzy similarity
                seq_matcher = difflib.SequenceMatcher(None, term_lower, field_value_lower)
                similarity = seq_matcher.ratio()
                
                # Keep only terms reaching the minimum similarity
                if similarity >= min_similarity:
                    # Fuzzy match score in the range 0 - 0.6
                    term_score = similarity * 0.6
                    
                    # Longest matching block, for position reporting
                    matching_blocks = seq_matcher.get_matching_blocks()
                    best_block = max(matching_blocks, key=lambda x: x.size) if matching_blocks else None
                    
                    if best_block and best_block.size > 0:
                        match_position = f"{best_block.b}-{best_block.b + best_block.size - 1}"
                        matched_text = field_value[best_block.b:best_block.b + best_block.size]
                    else:
                        match_position = "未找到明确匹配块"
                        matched_text = ""
                    
                    matched_terms.append(term)
                    match_details.append({
                        "term": term,
                        "score": term_score,
                        "match_type": "模糊匹配",
                        "position": match_position,
                        "matched_text": matched_text
                    })
                    
                    total_score += term_score
        
        # Only companies that matched at least one term are kept
        if matched_terms:
            # Weighted average: the more terms matched, the higher the score
            match_ratio = len(matched_terms) / len(query_terms)
            final_score = (total_score / len(query_terms)) * (0.5 + 0.5 * match_ratio)
            
            # Resolve the display name (reuse the field value when it IS the name)
            company_name = field_value if field == "公司名称" else r.hget(key, "公司名称")
            
            result.append({
                "id": company_id,
                "name": company_name,
                "field": field,
                "field_value": field_value,
                "similarity": final_score,
                "matched_terms_count": len(matched_terms),
                "total_terms_count": len(query_terms),
                "matched_terms": matched_terms,
                "match_details": match_details
            })
    
    if result:
        # Sort by matched-term count first, then by final score
        result.sort(key=lambda x: (x["matched_terms_count"], x["similarity"]), reverse=True)
        
        # Truncate to the requested number of results
        result = result[:limit]
        
        match_count = len(result)
        logger.info(f"找到 {match_count} 个多条件字段匹配结果 (数据库: {db_name}, 字段: {field})")
        
        return {
            "total_matches": match_count,
            "matches": result
        }
    else:
        logger.info(f"没有找到与查询条件 '{query_str}' 匹配的 {field} (数据库: {db_name})")
        return None

# Test logic for when this file is run as a standalone script
if __name__ == "__main__":
    # Load environment variables from .env
    from dotenv import load_dotenv
    load_dotenv()
    
    # Command-line argument handling
    # Defaults
    update_mode = False
    run_tests = True
    db_name = "default"
    
    # Parse command-line arguments
    if len(sys.argv) > 1:
        if '--update' in sys.argv:
            update_mode = True
            print("运行在更新模式: 将更新已存在的公司记录")
        if '--no-tests' in sys.argv:
            run_tests = False
        for arg in sys.argv:
            if arg.startswith('--db='):
                db_name = arg.split('=')[1]
                print(f"使用数据库: {db_name}")
    
    # Make sure the database actually has data before running tests
    existing_companies = list_all_companies(db_name)
    
    if len(existing_companies) == 0 or update_mode:
        # Load every JSON file found in the folder
        folder_path = 'json_files'
        if os.path.exists(folder_path):
            load_folder_json_to_redis(folder_path, update_existing=update_mode, db_name=db_name)
        else:
            folder_path = 'json_file'  # fall back to the alternate folder name
            if os.path.exists(folder_path):
                load_folder_json_to_redis(folder_path, update_existing=update_mode, db_name=db_name)
            else:
                print(f"警告: 文件夹 {folder_path} 不存在")
    else:
        print(f"数据库 {db_name} 中已有 {len(existing_companies)} 家公司数据")
        print("如需更新现有记录，请使用 --update 参数运行脚本")
    
    if not run_tests:
        sys.exit(0)
    
    print("\n===== 基本功能测试 =====")
    # Fetch one specific company
    company_id = "91120116MA074AJDXP"
    company_data = retrieve_company_from_redis(db_name, company_id)
    
    if company_data:
        print("\n公司详细信息:")
        for field, value in company_data.items():
            print(f"{field}: {value}")
    
    # List every company in the database
    print("\n数据库中的所有公司:")
    companies = list_all_companies(db_name)
    for company in companies:
        print(f"ID: {company['id']}, 名称: {company['name']}")
    
    print("\n===== 模糊匹配功能测试 =====")
    
    # Test 1: ID fuzzy match with an exact prefix
    print("\n测试1: ID模糊匹配 - 精确前缀")
    partial_id = "91120116MA074AJ"
    match_result = fuzzy_match_company_id(db_name, partial_id)
    if match_result:
        for i, match in enumerate(match_result["matches"], 1):
            print(f"{i}. ID={match['id']}, 名称={match['name']}, 相似度={match['similarity']:.2f}")
            print(f"   匹配策略: {match['match_type']}, 匹配字段: {match['match_field']}")
            print(f"   {match['match_position']}")
            
            # For fuzzy matches, show the matched fragment of text
            if match['match_type'] == "模糊匹配" and 'matched_text' in match:
                print(f"   匹配文本: {match['matched_text']}")
                
            # Show the full ID with the matched section highlighted
            full_id = match['full_id']
            if match['match_type'] == "前缀匹配":
                highlighted = full_id[:len(partial_id)] + "|" + full_id[len(partial_id):]
            elif match['match_type'] == "包含匹配":
                start_pos = full_id.find(partial_id)
                highlighted = full_id[:start_pos] + "|" + full_id[start_pos:start_pos+len(partial_id)] + "|" + full_id[start_pos+len(partial_id):]
            else:
                highlighted = full_id
                
            print(f"   完整ID: {highlighted}")
            print("")
    
    # Test 2: ID fuzzy match with a mid-string fragment
    print("\n测试2: ID模糊匹配 - 部分ID")
    partial_id = "MA074AJ"
    match_result = fuzzy_match_company_id(db_name, partial_id)
    if match_result:
        for i, match in enumerate(match_result["matches"], 1):
            print(f"{i}. ID={match['id']}, 名称={match['name']}, 相似度={match['similarity']:.2f}")
            print(f"   匹配策略: {match['match_type']}, 匹配字段: {match['match_field']}")
            print(f"   {match['match_position']}")
            
            if match['match_type'] == "包含匹配":
                full_id = match['full_id']
                start_pos = full_id.find(partial_id)
                highlighted = full_id[:start_pos] + "|" + full_id[start_pos:start_pos+len(partial_id)] + "|" + full_id[start_pos+len(partial_id):]
                print(f"   完整ID: {highlighted}")
            print("")
    
    print("\n===== 字段搜索功能测试 =====")
    
    # Test 3: search by company name
    print("\n测试3: 公司名称搜索")
    search_results = fuzzy_search_company(db_name, "海纳")
    if search_results:
        for i, match in enumerate(search_results, 1):
            print(f"{i}. ID={match['id']}, 名称={match['name']}, 相似度={match['similarity']:.2f}")
            print(f"   匹配策略: {match['match_type']}, 匹配字段: {match['match_field']}")
            print(f"   {match['match_position']}")
            print(f"   匹配文本: {match['matched_text']}")
            
            # Show the field value with the matched section highlighted
            field_value = match['field_value']
            if match['match_type'] == "包含匹配":
                start_pos = field_value.lower().find("海纳".lower())
                highlighted = field_value[:start_pos] + "|" + field_value[start_pos:start_pos+len("海纳")] + "|" + field_value[start_pos+len("海纳"):]
                print(f"   完整字段值: {highlighted}")
            print("")