import os, glob, sys
import fitz  # PyMuPDF
from fix_torch import fix_torch_dll_path
fix_torch_dll_path()
import traceback
from utils.pdf_utils import extract_pages, extract_text_words
from utils.ner_utils import simple_rule, deduplicate_hits, filter_ner_hits, is_in_whitelist
from utils.llm_sensitive_utils import llm_sensitive_recognize, match_llm_entities_with_ocr
from utils.desense_utils import apply_text_desense, apply_image_desense
from utils.logger import logger
from config import DESENSE_METHODS
from utils.rapid_ocr_extract import RapidOCRExtractor

# Try to import the NER model module. If it fails (missing weights, broken
# torch install, ...) recognition degrades gracefully to regex + LLM only;
# MODEL_AVAILABLE gates the NER stage in combined recognition.
try:
    from utils.model import extract_sensitive_info
    MODEL_AVAILABLE = True
except Exception as e:
    logger.warning(f"NER模型加载失败: {str(e)}，将仅使用正则表达式和LLM")
    MODEL_AVAILABLE = False

def merge_adjacent_entities(entities, full_text, max_gap=1):
    """
    Merge adjacent or near-adjacent entities based on their positions.

    Entities are grouped whenever the next entity starts within ``max_gap``
    characters of (or inside) the span already covered by the current group;
    each group is emitted as a single entity spanning the whole group.

    Args:
        entities: entity list, each item at least
            {'start': int, 'end': int, 'label': str, ...}.
        full_text: the full text that the offsets index into.
        max_gap: maximum allowed character gap for merging (default 1).

    Returns:
        List of merged entities:
        {'entity': str, 'label': str, 'start': int, 'end': int}.
        A merged group takes the label of its first entity.
    """
    if not entities:
        return []

    def _emit(group):
        # Collapse a group of one or more entities into a single entity dict.
        start = min(e['start'] for e in group)
        end = max(e['end'] for e in group)
        result = {
            'entity': full_text[start:end],
            'label': group[0]['label'],  # label of the first entity wins
            'start': start,
            'end': end
        }
        if len(group) > 1:
            logger.debug(f"合并实体: {result['entity']} (位置: {start}-{end})")
        return result

    # Sort by start position so adjacency can be checked in one pass.
    sorted_entities = sorted(entities, key=lambda x: x['start'])

    merged = []
    current_group = [sorted_entities[0]]
    # Track the furthest end seen in the current group. Comparing against
    # only the *previous* entity's end (as the old code did) wrongly split a
    # group when an earlier, longer entity already covered the gap.
    group_end = sorted_entities[0]['end']

    for curr in sorted_entities[1:]:
        curr_start = curr['start']
        # Merge when spans touch/overlap or the gap is within the threshold.
        if curr_start - group_end <= max_gap:
            if curr_start > group_end:
                logger.debug(f"合并非相邻实体: 间隔{curr_start - group_end}个字符")
            current_group.append(curr)
            group_end = max(group_end, curr['end'])
        else:
            # Gap too large: flush the current group and start a new one.
            merged.append(_emit(current_group))
            current_group = [curr]
            group_end = curr['end']

    # Flush the final group.
    merged.append(_emit(current_group))

    return merged

def combined_sensitive_recognize(text):
    """
    Recognize sensitive information by combining regex, NER and LLM methods.

    NOTE(review): the regex stage (step 1) is currently commented out, so in
    practice only the NER model (when MODEL_AVAILABLE) and the LLM run.

    Args:
        text: the text to scan for sensitive entities.

    Returns:
        Merged entity list, each item formatted as
        {'entity': str, 'label': str, 'start': int, 'end': int}.
    """
    all_entities = []
    
    # 1. Regex-rule recognition (disabled — kept for reference).
    # logger.info("使用正则规则识别敏感信息...")
    # regex_hits = simple_rule(text)
    # logger.info(f"正则识别到 {len(regex_hits)} 个敏感实体")
    
    # # 将正则结果转换为统一格式
    # for start, end, label in regex_hits:
    #     entity_text = text[start:end]
    #     # 应用白名单过滤 - 在simple_rule中已经应用过，这里是双重保险
    #     if is_in_whitelist(entity_text):
    #         continue
    #     all_entities.append({
    #         'start': start,
    #         'end': end,
    #         'label': label,
    #         'source': 'regex'
    #     })
    #     # 添加详细日志，显示识别到的实体内容和类型
    #     logger.info(f"正则识别: [{label}] '{entity_text}'")
    
    # 2. NER-model recognition (skipped when the model failed to load).
    if MODEL_AVAILABLE:
        logger.info("使用NER模型识别敏感信息...")
        try:
            model_results = extract_sensitive_info(text)
            ner_entities = []
            
            # Normalize NER results into the shared entity format.
            # Assumes model_results maps entity_type -> list of dicts with
            # 'start'/'end' keys — TODO confirm against utils.model.
            for entity_type, entities in model_results.items():
                for entity in entities:
                    ner_entities.append({
                        'start': entity['start'],
                        'end': entity['end'],
                        'label': entity_type,
                        'source': 'ner'
                    })
            
            # Drop whitelisted entities.
            filtered_ner_entities = []
            for entity in ner_entities:
                entity_text = text[entity['start']:entity['end']]
                # Check the entity text against the whitelist.
                if not is_in_whitelist(entity_text):
                    filtered_ner_entities.append(entity)
                    # Detailed log: recognized entity content and type.
                    logger.info(f"NER识别: [{entity['label']}] '{entity_text}'")
            
            logger.info(f"NER识别到 {len(ner_entities)} 个敏感实体，过滤后 {len(filtered_ner_entities)} 个")
            all_entities.extend(filtered_ner_entities)
        except Exception as e:
            logger.error(f"NER模型识别失败: {str(e)}")
    
    # 3. LLM-based recognition.
    logger.info("使用LLM大模型识别敏感信息...")
    try:
        llm_results = llm_sensitive_recognize(text)
        llm_entities = []
        
        # Normalize LLM results; the LLM returns only entity strings, so
        # every occurrence's position is recovered via text.find().
        for entity in llm_results:
            entity_text = entity.get('entity', '')
            if not entity_text:
                continue
                
            label = entity.get('label', '敏感信息')
            
            # Find each occurrence of the entity text in the input text.
            start_idx = 0
            while True:
                start = text.find(entity_text, start_idx)
                if start == -1:
                    break
                end = start + len(entity_text)
                
                # Whitelist filter — presumably already applied inside the
                # LLM helper; kept here as a second line of defense.
                if not is_in_whitelist(entity_text):
                    llm_entities.append({
                        'start': start,
                        'end': end,
                        'label': label,
                        'source': 'llm'
                    })
                    # Detailed log: recognized entity content and type.
                    logger.info(f"LLM识别: [{label}] '{entity_text}'")
                start_idx = end
        
        logger.info(f"LLM识别到 {len(llm_entities)} 个敏感实体")
        all_entities.extend(llm_entities)
    except Exception as e:
        logger.error(f"LLM识别失败: {str(e)}")
    
    # 4. De-duplicate by exact (start, end) position.
    seen_positions = set()
    unique_entities = []
    
    # Order by source priority (regex > ner > llm) so the higher-priority
    # source wins when several sources found the same span.
    sorted_entities = sorted(all_entities, key=lambda x: {
        'regex': 0, 
        'ner': 1, 
        'llm': 2
    }[x['source']])
    
    for entity in sorted_entities:
        position_key = (entity['start'], entity['end'])
        if position_key not in seen_positions:
            seen_positions.add(position_key)
            unique_entities.append(entity)
    
    logger.info(f"去重后共 {len(unique_entities)} 个敏感实体")
    
    # 5. Merge adjacent/near-adjacent entities into larger spans.
    merged_entities = merge_adjacent_entities(unique_entities, text)
    logger.info(f"合并相邻实体后共 {len(merged_entities)} 个敏感实体")
    
    # Log the final merged entity list.
    for entity in merged_entities:
        logger.info(f"最终敏感实体: [{entity['label']}] '{entity['entity']}'")
    
    return merged_entities

def classify_page_type(page):
    """Classify a page as text-based ('text') or image-based ('image').

    A page counts as text-based when direct extraction yields more than 10
    character entries; extraction failures are treated as image pages.
    """
    try:
        extracted = extract_text_words(page)
    except Exception as e:
        logger.error(f"页面类型检测失败: {e}")
        return 'image'
    return 'text' if extracted and len(extracted) > 10 else 'image'

def process_pdf(path, output_dir, method='mask', mask_color=None, mosaic_size=None, replace_char=None):
    """
    Main PDF-processing entry point — combines the three recognition methods.

    For each page: detect whether it is text- or image-based, obtain
    per-character boxes (direct extraction vs. RapidOCR), run combined
    sensitive-entity recognition on the page text, convert entities to
    per-character hit rectangles, and apply the chosen desensitization.
    The processed document is saved into output_dir under the same basename.

    Args:
        path: input PDF file path.
        output_dir: output directory; result keeps the input basename.
        method: desensitization method; 'replace' uses text-level
            replacement, anything else uses image-level masking.
        mask_color: mask color, forwarded to apply_image_desense.
        mosaic_size: mosaic block size, forwarded to apply_image_desense.
        replace_char: replacement character, forwarded to both paths.
    """
    logger.info(f"开始处理PDF文件: {os.path.basename(path)}")
    doc = extract_pages(path)
    ocr_extractor = RapidOCRExtractor()
    
    for idx in range(len(doc)):
        page = doc[idx]
        page_type = classify_page_type(page)
        logger.info(f"第 {idx+1} 页类型检测结果: {page_type}型")

        if page_type == 'text':
            # Born-digital page: extract characters directly from the PDF.
            logger.info(f"{os.path.basename(path)} page#{idx+1}: 数字型页面，直接提取文本")
            chars = extract_text_words(page)
            
            if chars and len(chars) >= 10:
                # Each item of `chars` is (x0, y0, x1, y1, char) — see the
                # tuple unpacking below; full_text is the page text in order.
                full_text = ''.join([c[-1] for c in chars])
                
                # Recognize sensitive info with the combined pipeline.
                merged_entities = combined_sensitive_recognize(full_text)
                logger.info(f"结合识别方法识别到 {len(merged_entities)} 个敏感实体")
                
                # Convert entities into per-character hit rectangles; entity
                # offsets index directly into `chars` because full_text was
                # built from it.
                hits = []
                for entity in merged_entities:
                    entity_text = entity.get('entity', '')
                    label = entity.get('label', '敏感信息')
                    start = entity.get('start', 0)
                    end = entity.get('end', 0)
                    
                    # One hit item per character of the entity span.
                    for pos in range(start, end):
                        if pos < len(chars):
                            x0, y0, x1, y1, text = chars[pos]
                            hit_item = {
                                'rect': (x0, y0, x1, y1), 
                                'label': label, 
                                'text': text,
                                'group_id': f"{start}_{end}",  # start/end pair identifies the group
                                'is_first': pos == start,      # first character of the group
                                'is_last': pos == end - 1      # last character of the group
                            }
                            hits.append(hit_item)
                
                # Final whitelist pass so nothing whitelisted gets masked.
                filtered_hits = []
                for hit in hits:
                    if not is_in_whitelist(hit.get('text', '')):
                        filtered_hits.append(hit)
                hits = filtered_hits
                
                logger.info(f"{os.path.basename(path)} page#{idx+1}: found {len(hits)} items (白名单过滤后)")
                
                # Log a before/after summary of the desensitization.
                if hits:
                    logger.info(f"\n=== 第 {idx+1} 页脱敏前后对比 ===")
                    # Build the masked-text preview.
                    # NOTE(review): masked_text_list is built but never
                    # logged or returned — only the counts below are shown.
                    masked_text_list = list(full_text)
                    # Mask the positions covered by the hit rectangles.
                    for hit in hits:
                        # Locate this hit's character position in full_text.
                        hit_text = hit.get('text', '')
                        hit_rect = hit.get('rect', (0, 0, 0, 0))
                        # Find the matching entry in chars (same rect + char).
                        for i, (x0, y0, x1, y1, text) in enumerate(chars):
                            if (x0, y0, x1, y1) == hit_rect and text == hit_text:
                                if i < len(masked_text_list):
                                    masked_text_list[i] = '■'
                                break
                    logger.info(f"识别到的敏感信息: {len(merged_entities)} 组，共 {len(hits)} 个字符")
                
                # Apply desensitization; track whether anything was changed.
                processed = False
                if method in ['replace']:
                    processed = apply_text_desense(page, hits, method=method, replace_char=replace_char)
                else:
                    processed = apply_image_desense(page, hits, method=method, mask_color=mask_color, mosaic_size=mosaic_size, replace_char=replace_char)
                
                if processed:
                    logger.info(f"第 {idx+1} 页脱敏处理完成")
                else:
                    logger.info(f"第 {idx+1} 页没有需要处理的内容（可能全部被过滤）")
            else:
                logger.warning(f"{os.path.basename(path)} page#{idx+1}: 文本提取失败，跳过处理")
        else:
            # Image-based page: run RapidOCR to recover per-character boxes.
            logger.info(f"{os.path.basename(path)} page#{idx+1}: 图片型页面，使用RapidOCR处理")
            # OCR each image page individually.
            page_result = ocr_extractor.extract_from_pdf(page)
            
            if page_result and page_result['chars']:
                # Reshape OCR chars into the same (x0, y0, x1, y1, char)
                # tuples used by the text branch.
                chars = [
                    (char['bbox'][0], char['bbox'][1], char['bbox'][2], char['bbox'][3], char['char'])
                    for char in page_result['chars']
                ]
                logger.info(f"RapidOCR识别成功，共识别 {len(chars)} 个字符")
                full_text = ''.join([c[-1] for c in chars])
                
                # Recognize sensitive info with the combined pipeline.
                merged_entities = combined_sensitive_recognize(full_text)
                logger.info(f"结合识别方法识别到 {len(merged_entities)} 个敏感实体")
                
                # Convert entities into per-character hit rectangles.
                hits = []
                if merged_entities:
                    ocr_chars = page_result['chars']
                    # Reshape entities into the format match_llm_entities_with_ocr expects.
                    formatted_entities = [{'entity': entity['entity'], 'label': entity['label']} for entity in merged_entities]
                    matched_hits = match_llm_entities_with_ocr(full_text, ocr_chars, formatted_entities)
                    
                    # Convert matches into the format the desense functions expect.
                    for hit in matched_hits:
                        entity_text = hit.get('entity', '')
                        label = hit.get('label', '敏感信息')
                        bbox_list = hit.get('bbox_list', [])
                        start_idx = hit.get('start', 0)
                        end_idx = hit.get('end', 0)
                        
                        # One hit item per character, carrying grouping info.
                        for i, bbox in enumerate(bbox_list):
                            if i < len(entity_text):  # guard: each bbox must have a matching character
                                x0, y0, x1, y1 = bbox
                                char_text = entity_text[i]
                                hit_item = {
                                    'rect': (x0, y0, x1, y1),
                                    'label': label,
                                    'text': char_text,
                                    'group_id': f"{start_idx}_{end_idx}",  # start/end pair identifies the group
                                    'is_first': i == 0,                    # first character of the group
                                    'is_last': i == len(entity_text) - 1   # last character of the group
                                }
                                hits.append(hit_item)
                
                # Final whitelist pass so nothing whitelisted gets masked.
                filtered_hits = []
                for hit in hits:
                    if not is_in_whitelist(hit.get('text', '')):
                        filtered_hits.append(hit)
                hits = filtered_hits
                
                logger.info(f"{os.path.basename(path)} page#{idx+1}: found {len(hits)} items (白名单过滤后)")
                
                # Log a before/after summary of the desensitization.
                if hits:
                    logger.info(f"\n=== 第 {idx+1} 页脱敏前后对比 ===")
                    # Build the masked-text preview.
                    # NOTE(review): `matched_hits` is only bound when
                    # merged_entities was non-empty; `hits` being non-empty
                    # implies that, so this reference is safe — but fragile.
                    masked_text_list = list(full_text)
                    # Mask exactly the recognized entity character ranges.
                    for hit in matched_hits:
                        start_idx = hit.get('start', 0)
                        end_idx = hit.get('end', 0)
                        for pos in range(start_idx, end_idx):
                            if pos < len(masked_text_list):
                                masked_text_list[pos] = '■'
                    logger.info(f"识别到的敏感信息: {len(merged_entities)} 组，共 {len(hits)} 个字符")
                
                # Apply desensitization; track whether anything was changed.
                processed = False
                if method in ['replace']:
                    processed = apply_text_desense(page, hits, method=method, replace_char=replace_char)
                else:
                    processed = apply_image_desense(page, hits, method=method, mask_color=mask_color, mosaic_size=mosaic_size, replace_char=replace_char)
                
                if processed:
                    logger.info(f"第 {idx+1} 页脱敏处理完成")
                else:
                    logger.info(f"第 {idx+1} 页没有需要处理的内容（可能全部被过滤）")
            else:
                logger.warning(f"RapidOCR识别失败，无法处理该页面")
                
    out = os.path.join(output_dir, os.path.basename(path))
    doc.save(out)
    logger.info(f"PDF处理完成，保存到: {out}")

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="PDF脱敏处理工具 - 结合正则+NER+LLM的识别方法")
    parser.add_argument("--input_glob", default="./data/公证书.pdf", help="待脱敏文件或目录通配符")
    parser.add_argument("--out", default="./out", help="输出目录")
    parser.add_argument("--method", default="mask", choices=DESENSE_METHODS, help="脱敏方法")
    parser.add_argument("--mask_color", default=None, help="遮罩颜色 (如 #000000 为黑色)")
    parser.add_argument("--mosaic_size", type=int, default=None, help="马赛克块大小 (像素)")
    parser.add_argument("--replace_char", default=None, help="替换字符 (如 X, *, #)")
    args = parser.parse_args()

    # Make sure the output directory exists before any file is written.
    os.makedirs(args.out, exist_ok=True)

    # Collect the input PDFs matching the glob pattern.
    pdf_files = glob.glob(args.input_glob)
    if not pdf_files:
        logger.warning(f"在 {args.input_glob} 中未找到PDF文件")
        sys.exit(1)

    logger.info(f"找到 {len(pdf_files)} 个PDF文件待处理")

    # Process each file independently; one failure must not stop the batch.
    success_count = 0
    separator = '=' * 50
    for pdf_path in pdf_files:
        try:
            logger.info(f"\n{separator}")
            logger.info(f"处理文件: {pdf_path}")
            logger.info(f"{separator}")

            # Run the desensitization pipeline for this file.
            process_pdf(
                pdf_path,
                args.out,
                method=args.method,
                mask_color=args.mask_color,
                mosaic_size=args.mosaic_size,
                replace_char=args.replace_char,
            )
            success_count += 1
        except Exception as exc:
            logger.error(f"处理 {pdf_path} 时发生错误: {str(exc)}")
            traceback.print_exc()

    logger.info(f"\n{separator}")
    logger.info(f"处理完成！成功处理 {success_count}/{len(pdf_files)} 个文件")
    logger.info(f"输出目录: {args.out}")
    logger.info(f"{separator}")