#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
批量Panel图像拼接脚本
支持处理多个PDF文件，优化SAM模型加载
"""

import os
import sys
import cv2
import numpy as np
import torch
import json
import argparse
import glob
import shutil
import re
from pathlib import Path
from typing import Tuple, List, Dict, Any
import gc
from itertools import combinations
import random
from tqdm import tqdm

# 添加SAM模型路径
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'base_scripts', 'models', 'segment-anything-main'))
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry


class BatchPanelSplicingProcessor:
    """批量Panel图像拼接处理器"""
    
    def __init__(self, device: str = "cuda", model_path: str = None, base_data_path: str = None, fg_suffix: str = "FG_001"):
        """
        Initialize the batch panel-splicing processor.

        Args:
            device: Compute device for the SAM model.
            model_path: Path to the SAM checkpoint; a default path is used when None.
            base_data_path: Root directory prefixed to relative data paths.
            fg_suffix: Forgery suffix (FG_001 for splicing, FG_002 for copy-move).
        """
        default_checkpoint = "/home/zhangbo/workspace/aigc/SAM/vit_models/sam_vit_h_4b8939.pth"
        self.device = device
        self.model_path = model_path if model_path else default_checkpoint
        self.base_data_path = base_data_path if base_data_path else ""
        self.fg_suffix = fg_suffix
        # Lazily-created SAM state; load_sam_model() fills these on first use.
        self.sam_model = None
        self.mask_generator = None
        # Configuration loaded later by load_json_files().
        self.forgery_data = None
        self.splicing_pages = None
        # PDFs whose original files were already copied to the results dir.
        self.processed_pdfs = set()
        
    def load_sam_model(self):
        """Load the SAM model and mask generator exactly once (idempotent)."""
        if self.sam_model is not None:
            # Already loaded on an earlier call; nothing to do.
            return

        print("=" * 80)
        print("🔄 阶段1: 正在加载SAM模型...")
        print(f"📁 模型路径: {self.model_path}")
        print(f"🖥️  计算设备: {self.device}")
        print("=" * 80)

        model_type = "vit_h"
        print(f"📦 模型类型: {model_type}")
        print("⏳ 正在从检查点加载模型...")

        # Instantiate from the checkpoint and move to the configured device.
        self.sam_model = sam_model_registry[model_type](checkpoint=self.model_path)
        self.sam_model.to(device=self.device)

        print("⚙️  正在配置掩码生成器...")
        # Build the automatic mask generator on top of the loaded model.
        self.mask_generator = SamAutomaticMaskGenerator(
            self.sam_model,
            output_mode="binary_mask",
            points_per_side=16,
            pred_iou_thresh=0.86,
            stability_score_thresh=0.92,
            min_mask_region_area=100,
        )
        print("✅ SAM模型加载完成!")
        print("=" * 80)
    
    def load_json_files(self, forgery_json_path: str, splicing_pages_path: str):
        """
        加载JSON配置文件
        
        Args:
            forgery_json_path: forgery.json文件路径
            splicing_pages_path: splicing_pages.jsonl文件路径
        """
        print("=" * 80)
        print("🔄 阶段2: 加载配置文件")
        print("=" * 80)
        
        # 加载forgery.json
        print(f"📖 正在加载forgery.json: {forgery_json_path}")
        if os.path.exists(forgery_json_path):
            with open(forgery_json_path, 'r', encoding='utf-8') as f:
                self.forgery_data = json.load(f)
            print(f"   ✅ 成功加载forgery.json，包含 {len(self.forgery_data.get('papers', []))} 个论文")
        else:
            raise FileNotFoundError(f"❌ 找不到forgery.json文件: {forgery_json_path}")
        
        # 加载splicing_pages.jsonl
        print(f"📖 正在加载splicing_pages.jsonl: {splicing_pages_path}")
        if os.path.exists(splicing_pages_path):
            self.splicing_pages = []
            with open(splicing_pages_path, 'r', encoding='utf-8') as f:
                for line in f:
                    if line.strip():
                        self.splicing_pages.append(json.loads(line.strip()))
            print(f"   ✅ 成功加载splicing_pages.jsonl，包含 {len(self.splicing_pages)} 个页面")
        else:
            raise FileNotFoundError(f"❌ 找不到splicing_pages.jsonl文件: {splicing_pages_path}")
        
        print("=" * 80)
    
    def find_matching_papers(self) -> List[Dict[str, Any]]:
        """
        Match forgery.json papers against the splicing_pages entries.

        Returns:
            One paper dict per (paper, page) pair that needs processing; each
            copy carries the matched page entry under the 'splicing_info' key.
        """
        print("=" * 80)
        print("🔄 阶段3: 匹配需要处理的论文")
        print("=" * 80)

        # Group splicing pages by PDF name (basename without the .pdf extension).
        splicing_map: Dict[str, List[Dict[str, Any]]] = {}
        for page in self.splicing_pages:
            pdf_name = page['basename'].replace('.pdf', '')
            splicing_map.setdefault(pdf_name, []).append(page)

        print(f"📊 找到 {len(splicing_map)} 个需要处理的PDF")
        for pdf_name, pages in splicing_map.items():
            print(f"   📄 {pdf_name}: {len(pages)} 个页面")

        matching_papers = []
        for paper in self.forgery_data.get('papers', []):
            paper_id = paper['paper_id']

            # paper_id may be an int (new format) or a string that may carry
            # an _FG_xxx suffix which must be stripped before matching.
            pdf_name = str(paper_id) if isinstance(paper_id, int) else paper_id.split('_FG_')[0]

            if pdf_name not in splicing_map:
                print(f"   ⏭️  跳过论文: {paper_id} (未在splicing_pages中找到)")
                continue

            # Emit one shallow copy of the paper per matched page.
            for page_info in splicing_map[pdf_name]:
                paper_copy = paper.copy()
                paper_copy['splicing_info'] = page_info
                matching_papers.append(paper_copy)
                print(f"   ✅ 匹配论文: {paper_id} -> {pdf_name} (页面: {page_info['page_index_1']})")

        print(f"📋 总共需要处理 {len(matching_papers)} 个论文页面")
        print("=" * 80)
        return matching_papers
    
    def calculate_panel_similarity(self, img1: np.ndarray, img2: np.ndarray) -> float:
        """
        Score how similar two panel images are.

        Combines histogram correlation (weight 0.4), a simplified SSIM
        (weight 0.4), and an original-size similarity term (weight 0.2),
        clamped into [0, 1].

        Args:
            img1: First BGR image.
            img2: Second BGR image.

        Returns:
            Similarity score in the range [0, 1].
        """
        # Compare appearance at a fixed resolution so histograms/SSIM line up.
        resized_a = cv2.resize(img1, (224, 224))
        resized_b = cv2.resize(img2, (224, 224))
        gray_a = cv2.cvtColor(resized_a, cv2.COLOR_BGR2GRAY)
        gray_b = cv2.cvtColor(resized_b, cv2.COLOR_BGR2GRAY)

        # Histogram correlation over 256 gray levels.
        hist_a = cv2.calcHist([gray_a], [0], None, [256], [0, 256])
        hist_b = cv2.calcHist([gray_b], [0], None, [256], [0, 256])
        correlation = cv2.compareHist(hist_a, hist_b, cv2.HISTCMP_CORREL)

        ssim_score = self.calculate_ssim(gray_a, gray_b)

        # Penalize differences in the ORIGINAL (pre-resize) dimensions.
        h_sim = 1.0 - abs(img1.shape[0] - img2.shape[0]) / max(img1.shape[0], img2.shape[0])
        w_sim = 1.0 - abs(img1.shape[1] - img2.shape[1]) / max(img1.shape[1], img2.shape[1])
        size_similarity = h_sim * w_sim

        overall = correlation * 0.4 + ssim_score * 0.4 + size_similarity * 0.2
        return max(0, min(1, overall))
    
    def calculate_ssim(self, img1: np.ndarray, img2: np.ndarray) -> float:
        """计算结构相似性指数"""
        # 简化的SSIM计算
        mu1 = np.mean(img1)
        mu2 = np.mean(img2)
        sigma1 = np.var(img1)
        sigma2 = np.var(img2)
        sigma12 = np.mean((img1 - mu1) * (img2 - mu2))
        
        c1 = 0.01 ** 2
        c2 = 0.03 ** 2
        
        ssim = ((2 * mu1 * mu2 + c1) * (2 * sigma12 + c2)) / \
               ((mu1 ** 2 + mu2 ** 2 + c1) * (sigma1 + sigma2 + c2))
        
        return ssim
    
    def select_similar_panels(self, panel_dir: str, method: str = "similarity") -> Tuple[str, str]:
        """
        Pick two panel images from a directory for splicing.

        Args:
            panel_dir: Directory containing *.png panel images.
            method: "similarity" (most similar readable pair) or "random".

        Returns:
            Paths of the two selected panel files.

        Raises:
            ValueError: If fewer than two panels exist, no readable pair is
                found, or the method name is unknown.
        """
        panel_files = glob.glob(os.path.join(panel_dir, "*.png"))

        if len(panel_files) < 2:
            raise ValueError(f"❌ panel目录中图像数量不足，需要至少2个，实际有{len(panel_files)}个")

        if method == "random":
            first, second = random.sample(panel_files, 2)
            return first, second

        if method == "similarity":
            # Exhaustively score every readable pair and keep the best one.
            best_pair = None
            best_score = 0
            for path_a, path_b in combinations(panel_files, 2):
                image_a = cv2.imread(path_a)
                image_b = cv2.imread(path_b)
                if image_a is None or image_b is None:
                    continue  # unreadable file: skip this pair

                score = self.calculate_panel_similarity(image_a, image_b)
                if score > best_score:
                    best_score = score
                    best_pair = (path_a, path_b)

            if best_pair is None:
                raise ValueError("❌ 无法找到有效的panel对")
            return best_pair[0], best_pair[1]

        raise ValueError(f"❌ 不支持的选择方法: {method}")
    
    def clean_mask(self, mask: np.ndarray) -> np.ndarray:
        """
        Denoise a mask: binarize, open/close morphology, smooth, re-binarize.

        Args:
            mask: Raw single-channel mask (values 0-255).

        Returns:
            Cleaned strictly-binary mask (values 0 or 255).
        """
        # Hard threshold to a strict binary image first.
        _, cleaned = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)

        kernel = np.ones((3, 3), np.uint8)
        # Opening removes small white speckles...
        cleaned = cv2.morphologyEx(cleaned, cv2.MORPH_OPEN, kernel, iterations=1)
        # ...closing fills small black holes.
        cleaned = cv2.morphologyEx(cleaned, cv2.MORPH_CLOSE, kernel, iterations=1)

        # Light Gaussian blur softens jagged edges; re-threshold so the
        # result is strictly 0/255 again.
        cleaned = cv2.GaussianBlur(cleaned, (3, 3), 0)
        _, cleaned = cv2.threshold(cleaned, 127, 255, cv2.THRESH_BINARY)

        return cleaned

    def add_fg_suffix_to_paper_id(self, paper_id) -> str:
        """
        Return paper_id with this processor's FG suffix appended.

        Args:
            paper_id: Original paper id (int or str). Any existing _FG_xxx
                suffix is stripped first so suffixes never stack.

        Returns:
            String of the form "<base_id>_<fg_suffix>".
        """
        # str() normalizes ints; split('_FG_')[0] is a no-op when no suffix
        # is present, so one expression covers every input shape.
        base_id = str(paper_id).split('_FG_')[0]
        return f"{base_id}_{self.fg_suffix}"
    
    def add_fg_suffix_to_path(self, path: str) -> str:
        """
        为路径添加FG后缀

        Args:
            path: 原始路径

        Returns:
            添加后缀后的路径
        """
        # 分离目录和文件名
        dir_path = os.path.dirname(path)
        filename = os.path.basename(path)
        name, ext = os.path.splitext(filename)

        # 在paper_id后添加FG后缀
        # 假设文件名格式为 "paper_id_xxx.png"，需要在paper_id后添加_FG_xxx
        if '_' in name:
            parts = name.split('_')
            if len(parts) >= 2:
                paper_id = parts[0]
                suffix_parts = parts[1:]
                new_name = f"{paper_id}_{self.fg_suffix}_{'_'.join(suffix_parts)}"
            else:
                new_name = f"{name}_{self.fg_suffix}"
        else:
            new_name = f"{name}_{self.fg_suffix}"
        
        new_filename = f"{new_name}{ext}"
        new_path = os.path.join(dir_path, new_filename)

        return new_path
    
    def add_fg_suffix_to_id(self, original_id: str) -> str:
        """
        Insert the FG suffix after the paper_id part of a composite id.

        Args:
            original_id: Original id such as "2010000000_1_1".

        Returns:
            Suffixed id such as "2010000000_FG_001_1_1"; an id with no
            underscore just gets the suffix appended.
        """
        # NOTE: split('_') on a string that contains '_' always gives >= 2
        # parts, so the previous inner "len(parts) >= 2" else-branch was
        # dead code; it has been removed (behavior unchanged).
        if '_' in original_id:
            paper_id, rest = original_id.split('_', 1)
            return f"{paper_id}_{self.fg_suffix}_{rest}"
        return f"{original_id}_{self.fg_suffix}"
    
    def copy_file_to_results(self, src_path: str, dst_path: str) -> bool:
        """
        复制文件到results目录
        
        Args:
            src_path: 源文件路径
            dst_path: 目标文件路径
            
        Returns:
            是否成功
        """
        try:
            # 构建完整路径
            if self.base_data_path:
                full_src_path = os.path.join(self.base_data_path, src_path)
            else:
                full_src_path = src_path
            
            # 创建目标目录
            os.makedirs(os.path.dirname(dst_path), exist_ok=True)
            
            # 复制文件
            if os.path.exists(full_src_path):
                shutil.copy2(full_src_path, dst_path)
                print(f"   ✅ 复制文件: {src_path} -> {dst_path}")
                return True
            else:
                print(f"   ⚠️  源文件不存在: {full_src_path}")
                return False
                
        except Exception as e:
            print(f"   ❌ 复制文件失败 {src_path}: {e}")
            return False

    def copy_original_files_to_results(self, paper: Dict[str, Any], base_output_dir: str, new_paper_id: str):
        """
        复制原始文件到results目录
        
        Args:
            paper: 论文信息
            base_output_dir: 基础输出目录
            new_paper_id: 新的paper_id
        """
        print(f"   📁 复制原始文件到results目录...")
        
        # 复制PDF文件
        original_pdf_path = paper['path']
        new_pdf_path = self.add_fg_suffix_to_path(original_pdf_path)
        dst_pdf_path = os.path.join(base_output_dir, new_paper_id, os.path.basename(new_pdf_path))
        self.copy_file_to_results(original_pdf_path, dst_pdf_path)
        
        # 复制所有figures和panels
        for page in paper.get('pages', []):
            for figure in page.get('figures', []):
                # 复制figure文件
                original_figure_path = figure['path']
                new_figure_path = self.add_fg_suffix_to_path(original_figure_path)
                dst_figure_path = os.path.join(base_output_dir, new_paper_id, 'figure', os.path.basename(new_figure_path))
                self.copy_file_to_results(original_figure_path, dst_figure_path)
                
                # 复制panel文件
                for panel in figure.get('panels', []):
                    original_panel_path = panel['path']
                    new_panel_path = self.add_fg_suffix_to_path(original_panel_path)

                    # 构建panel目录结构（使用带FG后缀的目录名）
                    # 从原始路径中提取page信息
                    original_dir = os.path.dirname(original_panel_path)
                    original_panel_dir_name = os.path.basename(original_dir)
                    # 假设原始目录名格式为 "paper_id_page_1"，需要添加FG后缀
                    if '_' in original_panel_dir_name:
                        parts = original_panel_dir_name.split('_')
                        if len(parts) >= 3:  # paper_id_page_1
                            paper_id_part = parts[0]
                            page_part = '_'.join(parts[1:])
                            new_panel_dir_name = f"{paper_id_part}_{self.fg_suffix}_{page_part}"
                        else:
                            new_panel_dir_name = f"{original_panel_dir_name}_{self.fg_suffix}"
                    else:
                        new_panel_dir_name = f"{original_panel_dir_name}_{self.fg_suffix}"
                    
                    dst_panel_dir = os.path.join(base_output_dir, new_paper_id, 'panel', new_panel_dir_name)
                    dst_panel_path = os.path.join(dst_panel_dir, os.path.basename(new_panel_path))

                    self.copy_file_to_results(original_panel_path, dst_panel_path)

    def extract_object_and_background(self, image: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """
        Segment *image* with SAM into object and background masks.

        The largest SAM mask (by area) is taken as the object; the background
        is its complement. On failure, or when nothing is detected, the whole
        image is treated as the object and the background is empty.

        Args:
            image: Input BGR image.

        Returns:
            Tuple (object_mask, background_mask) of uint8 0/255 images.
        """
        spatial_shape = image.shape[:2]
        # Free cached GPU memory before running the (large) SAM model.
        torch.cuda.empty_cache()

        try:
            masks = self.mask_generator.generate(image)

            if not masks:
                print("    ⚠️  警告: 未检测到任何对象，使用全图作为对象")
                return (np.full(spatial_shape, 255, dtype=np.uint8),
                        np.zeros(spatial_shape, dtype=np.uint8))

            # The biggest segment is assumed to be the main object.
            largest = max(masks, key=lambda m: m["area"])
            object_mask = self.clean_mask(largest["segmentation"].astype(np.uint8) * 255)

            # Background is the complement of the cleaned object mask.
            return object_mask, 255 - object_mask

        except Exception as e:
            print(f"    ❌ 提取对象和背景时出错: {e}")
            # Fallback: whole image as object, empty background.
            return (np.full(spatial_shape, 255, dtype=np.uint8),
                    np.zeros(spatial_shape, dtype=np.uint8))
        finally:
            torch.cuda.empty_cache()
            gc.collect()
    
    def resize_mask_to_image(self, mask: np.ndarray, target_image: np.ndarray) -> np.ndarray:
        """
        Resize *mask* to the spatial size of *target_image*.

        Nearest-neighbor interpolation keeps the mask strictly binary.

        Args:
            mask: Mask to resize.
            target_image: Image whose height/width define the target size.

        Returns:
            The resized mask.
        """
        height, width = target_image.shape[:2]
        # cv2.resize takes its size argument in (width, height) order.
        return cv2.resize(mask, (width, height), interpolation=cv2.INTER_NEAREST)
    
    def create_synthetic_image(self, object_img: np.ndarray, object_mask: np.ndarray, 
                             background_img: np.ndarray, background_mask: np.ndarray) -> np.ndarray:
        """
        Blend the object of one panel onto the background of another.

        Args:
            object_img: Image supplying the foreground object.
            object_mask: Mask of the object region (0-255).
            background_img: Image supplying the background.
            background_mask: Mask of the background region (0-255).

        Returns:
            The composited uint8 BGR image.
        """
        # Work at the larger of the two image sizes so nothing is cropped.
        target_height = max(object_img.shape[0], background_img.shape[0])
        target_width = max(object_img.shape[1], background_img.shape[1])

        if object_img.shape[:2] != (target_height, target_width):
            object_img = cv2.resize(object_img, (target_width, target_height))
        if background_img.shape[:2] != (target_height, target_width):
            background_img = cv2.resize(background_img, (target_width, target_height))

        # Masks use nearest-neighbor so they stay binary after resizing.
        if object_mask.shape[:2] != (target_height, target_width):
            object_mask = cv2.resize(object_mask, (target_width, target_height), interpolation=cv2.INTER_NEAREST)
        if background_mask.shape[:2] != (target_height, target_width):
            background_mask = cv2.resize(background_mask, (target_width, target_height), interpolation=cv2.INTER_NEAREST)

        object_mask = self.clean_mask(object_mask)
        background_mask = self.clean_mask(background_mask)

        # Fill color for areas covered by neither mask.
        background_color = self.detect_background_color(background_img)

        # Promote masks to 3 channels so they broadcast over BGR images.
        if len(object_mask.shape) == 2:
            object_mask = np.stack([object_mask] * 3, axis=-1)
        if len(background_mask.shape) == 2:
            background_mask = np.stack([background_mask] * 3, axis=-1)

        object_mask_norm = object_mask.astype(np.float32) / 255.0
        background_mask_norm = background_mask.astype(np.float32) / 255.0

        # Per-pixel weight normalization. BUGFIX: the previous version clipped
        # the combined mask to [0, 1] BEFORE dividing, so pixels covered by
        # BOTH masks kept a total weight of 2 and the uint8 cast below
        # wrapped around (dark artifacts). Dividing by the true sum keeps
        # the weights summing to exactly 1 wherever any mask covers a pixel.
        total_mask = object_mask_norm + background_mask_norm
        safe_total = np.where(total_mask > 0, total_mask, 1.0)
        object_mask_norm = object_mask_norm / safe_total
        background_mask_norm = background_mask_norm / safe_total

        # Weighted blend; clip defensively before casting back to uint8.
        synthetic_img = np.clip(
            object_img * object_mask_norm + background_img * background_mask_norm,
            0, 255,
        ).astype(np.uint8)

        # Fill regions that neither mask covers with the detected background
        # color. BUGFIX: total_mask is already (H, W, 3), so the previous
        # np.stack([...]*3) produced a 4-D condition array and np.where
        # raised a broadcast error whenever an empty region existed; the
        # per-channel boolean mask broadcasts correctly on its own. The
        # result is cast back to uint8 (np.where promotes the dtype).
        empty_region = total_mask < 0.1
        if np.any(empty_region):
            synthetic_img = np.where(empty_region, background_color, synthetic_img).astype(np.uint8)

        # Final pass to scrub any residual near-black artifacts.
        return self.remove_black_artifacts(synthetic_img)
    
    def detect_background_color(self, image: np.ndarray) -> np.ndarray:
        """
        Classify the image background as white or black from its border.

        The mean brightness of the outermost pixel rows/columns decides:
        bright borders imply a white background, dark borders a black one.

        Args:
            image: Input BGR image.

        Returns:
            Background color tuple in (B, G, R) order.
        """
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # Border pixels are a cheap proxy for the background region.
        border = np.concatenate([gray[0, :], gray[-1, :], gray[:, 0], gray[:, -1]])
        edge_mean = np.mean(border)

        # Binary decision: anything reasonably bright counts as white.
        background_color = (255, 255, 255) if edge_mean > 100 else (0, 0, 0)

        print(f"   🎨 检测到背景颜色: {background_color} (边缘平均亮度: {edge_mean:.1f})")
        return background_color
    
    def remove_black_artifacts(self, image: np.ndarray) -> np.ndarray:
        """
        Replace near-black pixels with median-filtered values (in place).

        Args:
            image: Input BGR image; modified in place where artifacts exist.

        Returns:
            The same image array with near-black pixels smoothed over.
        """
        # A pixel is an "artifact" when all three channels are near zero.
        artifact_mask = np.all(image < 30, axis=2)

        if np.any(artifact_mask):
            # Median-filter the whole image, then patch only artifact pixels.
            smoothed = cv2.medianBlur(image, 3)
            image[artifact_mask] = smoothed[artifact_mask]

        return image
    
    def process_single_pdf(self, paper: Dict[str, Any], base_output_dir: str, method: str = "similarity") -> Dict[str, Any]:
        """
        Run the panel-splicing forgery pipeline for one paper page.

        Picks two panels from the page's panel directory, swaps the
        SAM-segmented object of each onto the background of the other,
        writes the two synthetic panels (with FG-suffixed names) plus their
        object masks into the results tree, and returns a record of what
        was produced.

        Args:
            paper: Paper record carrying a 'splicing_info' page entry
                (added by find_matching_papers).
            base_output_dir: Root of the results directory.
            method: Panel-pair selection method ("similarity" or "random").

        Returns:
            A result dict with the ids and paths of the synthetic panels and
            masks, or None when the panel directory is missing, the panel
            images are unreadable, or processing raises.
        """
        original_paper_id = paper['paper_id']
        new_paper_id = self.add_fg_suffix_to_paper_id(original_paper_id)
        splicing_info = paper['splicing_info']
        page_index = splicing_info['page_index_1']
        
        print(f"📄 处理论文: {original_paper_id} -> {new_paper_id} (页面: {page_index})")
        
        # Build the panel directory path from the paper's PDF location.
        paper_path = paper['path']
        paper_dir = os.path.dirname(paper_path)
        
        # New data format: paper_id may be an int or a (possibly FG-suffixed)
        # string; normalize to the bare id string used in directory names.
        if isinstance(original_paper_id, int):
            paper_id_str = str(original_paper_id)
        else:
            paper_id_str = str(original_paper_id).split('_FG_')[0] if '_FG_' in str(original_paper_id) else str(original_paper_id)
        
        # Prefix the base data path unless paper_dir already carries it.
        if self.base_data_path:
            # Guard against double-prefixing when paper_dir is already absolute
            # within the data root.
            if paper_dir.startswith(self.base_data_path):
                panel_dir = os.path.join(paper_dir, 'panel', f"{paper_id_str}_{page_index}_1")
            else:
                panel_dir = os.path.join(self.base_data_path, paper_dir, 'panel', f"{paper_id_str}_{page_index}_1")
        else:
            panel_dir = os.path.join(paper_dir, 'panel', f"{paper_id_str}_{page_index}_1")
        
        if not os.path.exists(panel_dir):
            print(f"   ⚠️  Panel目录不存在: {panel_dir}")
            return None
        
        # One paper_id covers all its pages: copy the original files only the
        # first time this PDF is seen, tracked via self.processed_pdfs.
        if new_paper_id not in self.processed_pdfs:
            print(f"   📁 复制原始文件到results目录...")
            self.copy_original_files_to_results(paper, base_output_dir, new_paper_id)
            self.processed_pdfs.add(new_paper_id)
        else:
            print(f"   ⏭️  跳过文件复制（PDF已处理过）")
        
        # The mask directory is created on demand further below.
        
        try:
            # Choose the two panels to splice together.
            panel_a_path, panel_b_path = self.select_similar_panels(panel_dir, method)
            
            # Load both panel images.
            panel_a = cv2.imread(panel_a_path)
            panel_b = cv2.imread(panel_b_path)
            
            if panel_a is None or panel_b is None:
                print(f"   ❌ 无法读取panel图像")
                return None
            
            print(f"   📖 读取Panel A: {os.path.basename(panel_a_path)}")
            print(f"   📖 读取Panel B: {os.path.basename(panel_b_path)}")
            
            # SAM segmentation: object/background masks for panel A.
            print("   🔄 提取Panel A的对象和背景...")
            obj_a_mask, bg_a_mask = self.extract_object_and_background(panel_a)
            
            # SAM segmentation: object/background masks for panel B.
            print("   🔄 提取Panel B的对象和背景...")
            obj_b_mask, bg_b_mask = self.extract_object_and_background(panel_b)
            
            # Resize each mask to the size of the OPPOSITE panel, since each
            # object will be composited onto the other panel's background.
            obj_a_mask_resized = self.resize_mask_to_image(obj_a_mask, panel_b)
            bg_a_mask_resized = self.resize_mask_to_image(bg_a_mask, panel_b)
            obj_b_mask_resized = self.resize_mask_to_image(obj_b_mask, panel_a)
            bg_b_mask_resized = self.resize_mask_to_image(bg_b_mask, panel_a)
            
            # Composite: object of A over background of B, and vice versa.
            print("   🎨 创建合成图像...")
            synthetic_c = self.create_synthetic_image(panel_a, obj_a_mask_resized, panel_b, bg_b_mask_resized)
            synthetic_d = self.create_synthetic_image(panel_b, obj_b_mask_resized, panel_a, bg_a_mask_resized)
            
            # Output file names, derived from the original panel names.
            panel_a_name = os.path.splitext(os.path.basename(panel_a_path))[0]
            panel_b_name = os.path.splitext(os.path.basename(panel_b_path))[0]
            
            # Output panel directory, FG-suffixed and carrying the page index.
            panel_dir_name = f"{paper_id_str}_{self.fg_suffix}_{page_index}_1"
            new_panel_dir = os.path.join(base_output_dir, new_paper_id, 'panel', panel_dir_name)
            
            os.makedirs(new_panel_dir, exist_ok=True)
            
            # Insert the FG suffix after the paper_id part of each panel name
            # (same convention as add_fg_suffix_to_id).
            if '_' in panel_a_name:
                parts = panel_a_name.split('_')
                if len(parts) >= 2:
                    paper_id = parts[0]
                    suffix_parts = parts[1:]
                    panel_a_name_with_suffix = f"{paper_id}_{self.fg_suffix}_{'_'.join(suffix_parts)}"
                else:
                    panel_a_name_with_suffix = f"{panel_a_name}_{self.fg_suffix}"
            else:
                panel_a_name_with_suffix = f"{panel_a_name}_{self.fg_suffix}"
                
            if '_' in panel_b_name:
                parts = panel_b_name.split('_')
                if len(parts) >= 2:
                    paper_id = parts[0]
                    suffix_parts = parts[1:]
                    panel_b_name_with_suffix = f"{paper_id}_{self.fg_suffix}_{'_'.join(suffix_parts)}"
                else:
                    panel_b_name_with_suffix = f"{panel_b_name}_{self.fg_suffix}"
            else:
                panel_b_name_with_suffix = f"{panel_b_name}_{self.fg_suffix}"
            new_panel_a_path = os.path.join(new_panel_dir, f"{panel_a_name_with_suffix}.png")
            new_panel_b_path = os.path.join(new_panel_dir, f"{panel_b_name_with_suffix}.png")
            
            # Write the synthetic panels over the (copied) originals.
            cv2.imwrite(new_panel_a_path, synthetic_c)
            cv2.imwrite(new_panel_b_path, synthetic_d)
            
            # Save the object masks under mask/<panel_dir_name>/, using the
            # same file names as the panels (no extra _mask suffix).
            mask_dir = os.path.join(base_output_dir, new_paper_id, 'mask', panel_dir_name)
            os.makedirs(mask_dir, exist_ok=True)
            mask_a_path = os.path.join(mask_dir, f"{panel_a_name_with_suffix}.png")
            mask_b_path = os.path.join(mask_dir, f"{panel_b_name_with_suffix}.png")
            
            cv2.imwrite(mask_a_path, obj_a_mask_resized)
            cv2.imwrite(mask_b_path, obj_b_mask_resized)
            
            print(f"   ✅ 合成图像已覆盖: {new_panel_a_path}")
            print(f"   ✅ 合成图像已覆盖: {new_panel_b_path}")
            print(f"   ✅ Mask图像已保存")
            
            # Summary record consumed by update_forgery_json_with_results.
            result = {
                "paper_id": new_paper_id,
                "original_paper_id": original_paper_id,
                "page_index": page_index,
                "original_panels": {
                    "panel_a": panel_a_path,
                    "panel_b": panel_b_path
                },
                "synthetic_panels": {
                    "synthetic_c": new_panel_a_path,
                    "synthetic_d": new_panel_b_path
                },
                "masks": {
                    "mask_a": mask_a_path,
                    "mask_b": mask_b_path
                },
                "panel_info": {
                    "panel_a_name": panel_a_name_with_suffix,
                    "panel_b_name": panel_b_name_with_suffix
                }
            }
            
            return result
            
        except Exception as e:
            print(f"   ❌ 处理论文 {new_paper_id} 时出错: {e}")
            return None
    
    def update_forgery_json_with_results(self, results: List[Dict[str, Any]], base_output_dir: str):
        """
        更新forgery.json文件，添加拼接结果
        
        Args:
            results: 处理结果列表
            base_output_dir: 基础输出目录
        """
        print("=" * 80)
        print("🔄 阶段5: 更新forgery.json文件")
        print("=" * 80)
        
        # 创建结果映射，按paper_id和页面索引分组
        results_map = {}
        for result in results:
            if result:
                paper_id = result['paper_id']
                page_index = result['page_index']
                if paper_id not in results_map:
                    results_map[paper_id] = {}
                results_map[paper_id][page_index] = result
        
        print(f"📊 需要更新 {len(results_map)} 个论文的结果")
        for paper_id, pages in results_map.items():
            print(f"   📄 {paper_id}: {len(pages)} 个页面")
        
        # 创建新的forgery数据，包含所有原始论文和新的处理结果
        new_forgery_data = {"papers": []}
        
        # 处理所有论文
        for paper in self.forgery_data.get('papers', []):
            original_paper_id = paper['paper_id']
            new_paper_id = self.add_fg_suffix_to_paper_id(original_paper_id)
            
            # 创建新的论文数据
            new_paper = paper.copy()
            new_paper['paper_id'] = new_paper_id
            new_paper['path'] = self.add_fg_suffix_to_path(paper['path'])
            
            # 更新页面数据
            new_pages = []
            for page in paper.get('pages', []):
                new_page = page.copy()
                page_index = page['page_index']
                new_figures = []
                
                for figure in page.get('figures', []):
                    new_figure = figure.copy()
                    new_figure['figure_id'] = self.add_fg_suffix_to_id(figure['figure_id'])
                    new_figure['path'] = self.add_fg_suffix_to_path(figure['path'])
                    
                    # 更新panel数据
                    new_panels = []
                    for panel in figure.get('panels', []):
                        new_panel = panel.copy()
                        new_panel['panel_id'] = self.add_fg_suffix_to_id(panel['panel_id'])
                        new_panel['path'] = self.add_fg_suffix_to_path(panel['path'])
                        
                        # 如果这个论文的当前页面有处理结果，添加issue
                        if (new_paper_id in results_map and 
                            page_index in results_map[new_paper_id]):
                            result = results_map[new_paper_id][page_index]
                            panel_a_name = result['panel_info']['panel_a_name']
                            panel_b_name = result['panel_info']['panel_b_name']
                            
                            # 检查是否是我们要更新的panel（panel_name已经包含FG后缀）
                            if new_panel['panel_id'] == panel_a_name or new_panel['panel_id'] == panel_b_name:
                                # 创建或更新panel_level_issues
                                if 'panel_level_issues' not in new_panel:
                                    new_panel['panel_level_issues'] = {
                                        "has_issue": True,
                                        "issues": []
                                    }
                                else:
                                    # 如果已存在，确保结构正确
                                    if 'has_issue' not in new_panel['panel_level_issues']:
                                        new_panel['panel_level_issues']['has_issue'] = True
                                    if 'issues' not in new_panel['panel_level_issues']:
                                        new_panel['panel_level_issues']['issues'] = []
                                
                                # 确定mask路径
                                mask_path = result['masks']['mask_a'] if new_panel['panel_id'] == panel_a_name else result['masks']['mask_b']
                                
                                # 添加拼接issue
                                splicing_issue = {
                                    "issue_id": self.fg_suffix,
                                    "scope": "within",
                                    "issue_type": "forgery",
                                    "issue_subtype": "splicing",
                                    "evidence": {
                                        "mask_path": mask_path
                                    }
                                }
                                
                                new_panel['panel_level_issues']['issues'].append(splicing_issue)
                                print(f"      ✅ 更新panel: {new_panel['panel_id']} (页面: {page_index})")
                        
                        new_panels.append(new_panel)
                    
                    new_figure['panels'] = new_panels
                    new_figures.append(new_figure)
                
                new_page['figures'] = new_figures
                new_pages.append(new_page)
            
            new_paper['pages'] = new_pages
            new_forgery_data['papers'].append(new_paper)
        
        # 更新self.forgery_data
        self.forgery_data = new_forgery_data
        
        print(f"📝 总共更新了 {len(new_forgery_data['papers'])} 个论文")
        print("=" * 80)
    
    def save_updated_forgery_json(self, output_path: str):
        """
        保存更新后的forgery.json文件
        
        Args:
            output_path: 输出文件路径
        """
        print(f"💾 正在保存更新后的forgery.json: {output_path}")
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(self.forgery_data, f, indent=2, ensure_ascii=False)
        print("✅ forgery.json保存完成")
    
    def process_all_papers(self, base_output_dir: str, method: str = "similarity") -> List[Dict[str, Any]]:
        """
        Process every paper that matches the splicing configuration.

        Args:
            base_output_dir: Root directory for all generated outputs.
            method: Panel selection strategy ("similarity" or "random").

        Returns:
            One entry per matched paper, in processing order. A falsy entry
            (e.g. None) marks a paper that failed to process; callers count
            successes by truthiness.
        """
        print("=" * 80)
        print("🔄 阶段4: 批量处理所有论文")
        print("=" * 80)

        # Collect the papers that need processing.
        matching_papers = self.find_matching_papers()

        if not matching_papers:
            print("❌ 没有找到需要处理的论文")
            return []

        results = []
        total_papers = len(matching_papers)

        # total= is required here: tqdm cannot infer the length of an
        # enumerate() generator, so without it the bar shows no
        # percentage or ETA.
        for i, paper in tqdm(enumerate(matching_papers, 1), total=total_papers):
            page_index = paper['splicing_info']['page_index_1']
            print(f"📄 处理页面 {i}/{total_papers}: {paper['paper_id']} (页面: {page_index})")
            print("-" * 60)

            result = self.process_single_pdf(paper, base_output_dir, method)
            results.append(result)

            if result:
                print(f"✅ 页面 {paper['paper_id']} (页面: {page_index}) 处理完成")
            else:
                print(f"❌ 页面 {paper['paper_id']} (页面: {page_index}) 处理失败")

            print("-" * 60)

        # Fold the per-paper results back into the in-memory forgery data.
        self.update_forgery_json_with_results(results, base_output_dir)

        return results


def main():
    """CLI entry point: parse arguments, run the batch splicing pipeline,
    save the updated forgery.json, and release model resources on exit."""
    print("🎬 启动批量Panel图像拼接处理程序")
    print("=" * 80)
    
    parser = argparse.ArgumentParser(description='批量Panel图像拼接处理')
    parser.add_argument('--forgery_json', type=str,
                       default='/home/zhangbo/workspace/aigc/genome/dataset/real/real.json',
                       help='forgery.json文件路径')
    parser.add_argument('--splicing_pages', type=str,
                       default='/home/zhangbo/workspace/aigc/psy/splicing/splicing_pages_20250903_192853.jsonl',
                       help='splicing_pages.jsonl文件路径')
    parser.add_argument('--output_dir', type=str,
                       default='/home/zhangbo/workspace/aigc/psy/splicing/results',
                       help='输出目录')
    parser.add_argument('--method', type=str, choices=['similarity', 'random'],
                       default='similarity', help='panel选择方法')
    parser.add_argument('--device', type=str, default='cuda', help='计算设备')
    parser.add_argument('--model_path', type=str,
                       default='/home/zhangbo/workspace/aigc/SAM/vit_models/sam_vit_h_4b8939.pth',
                       help='SAM模型路径')
    parser.add_argument('--base_data_path', type=str,
                       default='/home/zhangbo/workspace/aigc/genome',
                       help='基础数据路径')
    parser.add_argument('--fg_suffix', type=str, default='FG_001',
                       help='FG后缀 (FG_001 for splicing, FG_002 for copy-move)')
    
    args = parser.parse_args()
    
    print("📋 程序参数:")
    print(f"   📄 Forgery JSON: {args.forgery_json}")
    print(f"   📄 Splicing Pages: {args.splicing_pages}")
    print(f"   📁 输出目录: {args.output_dir}")
    print(f"   🎯 选择方法: {args.method}")
    print(f"   🖥️  计算设备: {args.device}")
    print(f"   🤖 模型路径: {args.model_path}")
    print(f"   📁 基础数据路径: {args.base_data_path}")
    print(f"   🏷️  FG后缀: {args.fg_suffix}")
    print("=" * 80)
    
    # Ensure the output directory exists before any processing starts.
    os.makedirs(args.output_dir, exist_ok=True)
    
    # Build the processor (model is loaded lazily in load_sam_model).
    print("🔧 正在初始化处理器...")
    processor = BatchPanelSplicingProcessor(device=args.device, model_path=args.model_path, base_data_path=args.base_data_path, fg_suffix=args.fg_suffix)
    
    try:
        # Load the SAM model once; it is reused for every paper.
        processor.load_sam_model()
        
        # Load the forgery/splicing JSON configuration files.
        processor.load_json_files(args.forgery_json, args.splicing_pages)
        
        # Run the batch pipeline over all matching papers.
        results = processor.process_all_papers(args.output_dir, args.method)
        
        # Persist the updated forgery metadata next to the image outputs.
        output_forgery_json = os.path.join(args.output_dir, 'updated_forgery.json')
        processor.save_updated_forgery_json(output_forgery_json)
        
        print("=" * 80)
        print("🎉 所有处理完成!")
        print("=" * 80)
        print("📊 处理结果摘要:")
        successful_count = sum(1 for r in results if r is not None)
        print(f"   📄 总页面数: {len(results)}")
        print(f"   ✅ 成功处理: {successful_count}")
        print(f"   ❌ 处理失败: {len(results) - successful_count}")
        print(f"   📁 输出目录: {args.output_dir}")
        print(f"   📄 更新后的forgery.json: {output_forgery_json}")
        print("=" * 80)
        
    except Exception as e:
        # Top-level boundary: report the failure with a full traceback but
        # still fall through to resource cleanup below.
        print("=" * 80)
        print(f"❌ 处理过程中出错: {e}")
        print("=" * 80)
        import traceback
        traceback.print_exc()
    
    finally:
        # Release the model and any CUDA memory it holds. The mask
        # generator must be dropped too: it keeps a reference to the SAM
        # model, so deleting sam_model alone would not let the model be
        # garbage-collected before empty_cache().
        print("🧹 正在清理资源...")
        if processor.mask_generator is not None:
            del processor.mask_generator
        if processor.sam_model is not None:
            del processor.sam_model
        torch.cuda.empty_cache()
        gc.collect()
        print("✅ 资源清理完成")
        print("👋 程序结束")

# Script entry point: run main() only when executed directly, not on import.
if __name__ == "__main__":
    main()
