from segment_anything import SamAutomaticMaskGenerator, sam_model_registry
import cv2
import numpy as np
import os
import shutil
import glob
import torch
import json
import argparse
import gc

def process_image(image_path, real_dir, mask_dir, device, max_size=1000, model=None):
    """Generate an inverted SAM segmentation mask for a single image.

    Parameters
    ----------
    image_path : str
        Path to the input image.
    real_dir : str
        Directory expected to hold the original image copy; only used
        (together with ``mask_dir``) to decide whether the image was
        already processed.
    mask_dir : str
        Directory where the generated mask is written under the same
        filename.
    device : str
        Torch device string for the SAM model, e.g. ``"cuda:0"`` or ``"cpu"``.
    max_size : int, optional
        Longest allowed image side; larger images are downscaled first
        to bound GPU memory usage.
    model : optional
        Pre-loaded SAM model to reuse; when ``None`` a vit_h checkpoint
        is loaded from a hard-coded path.

    Returns
    -------
    bool
        ``True`` when a mask was written (or already exists), ``False``
        when the image was skipped or unreadable.
    """
    filename = os.path.basename(image_path)
    output_path = os.path.join(mask_dir, filename)
    real_output_path = os.path.join(real_dir, filename)

    # Skip images whose outputs already exist.
    if os.path.exists(output_path) and os.path.exists(real_output_path):
        print(f"跳过已处理的图像: {filename}")
        return True

    # Load the image from disk.
    image = cv2.imread(image_path)
    if image is None:
        print(f"无法读取图像: {image_path}")
        return False

    # Remember the original dimensions so the mask can be scaled back later.
    original_height, original_width = image.shape[:2]

    # Downscale oversized images so SAM uses less memory.
    if original_height > max_size or original_width > max_size:
        scale = max_size / max(original_height, original_width)
        new_height = int(original_height * scale)
        new_width = int(original_width * scale)
        resized_image = cv2.resize(image, (new_width, new_height))
        print(f"调整图像大小: {original_width}x{original_height} -> {new_width}x{new_height}")
    else:
        resized_image = image

    # Area of the (possibly resized) image, used for mask-size thresholds.
    source_area = resized_image.shape[0] * resized_image.shape[1]

    # Reuse the supplied model or lazily load a fresh one.
    if model is None:
        model_type = "vit_h"
        model_path = "/home/zhangbo/workspace/aigc/SAM/vit_models/sam_vit_h_4b8939.pth"
        sam = sam_model_registry[model_type](checkpoint=model_path)
        sam.to(device=device)
    else:
        sam = model

    # Conservative generator settings to reduce memory consumption.
    mask_generator = SamAutomaticMaskGenerator(
        sam,
        output_mode="binary_mask",
        points_per_side=16,  # fewer sample points -> less memory
        pred_iou_thresh=0.86,
        stability_score_thresh=0.92,
        min_mask_region_area=100  # drop tiny regions up front
    )

    # Release cached CUDA memory before the heavy generate() call.
    torch.cuda.empty_cache()

    try:
        # Generate candidate masks.
        masks = mask_generator.generate(resized_image)

        # FIX: the original indexed masks[1] unconditionally and raised
        # IndexError when SAM produced fewer than two masks.
        if len(masks) < 2:
            print(f"跳过mask数量不足的图像: {filename}")
            return False

        # Largest mask first; index 0 is treated as the background.
        masks.sort(key=lambda x: x["area"], reverse=True)

        # The second-largest mask must cover more than 10% of the image.
        if masks[1]["area"] <= source_area * 0.1:
            print(f"跳过面积小于源文件面积10%的图像: {filename}")
            return False

        # Pick the first of masks[1..3] covering less than 50% of the image.
        # FIX: the original "if i == 4" skip branch was unreachable (the loop
        # index never reaches 4), so oversized masks silently fell through;
        # use for/else so such images are actually skipped.
        for i in range(1, min(len(masks), 4)):
            if masks[i]["area"] < source_area * 0.5:
                mask = masks[i]["segmentation"]
                break
            print(f"选择面积更小的mask: {filename}")
        else:
            print(f"跳过面积大于源文件面积50%的图像: {filename}")
            return False

        # Invert the mask (selected region black, everything else white).
        mask = (1 - mask) * 255

        # If the input was downscaled, scale the mask back to original size.
        if original_height != resized_image.shape[0] or original_width != resized_image.shape[1]:
            if mask is not None and mask.size > 0:
                mask = mask.astype(np.uint8)
                mask = cv2.resize(mask, (original_width, original_height), interpolation=cv2.INTER_NEAREST)
            else:
                print(f"警告：mask无效，无法调整大小")
                # Fall back to a blank mask of the original size.
                mask = np.zeros((original_height, original_width), dtype=np.uint8)

        # FIX: cast unconditionally -- on the no-resize path the mask stayed
        # a wide integer array, which cv2.imwrite may reject.
        mask = mask.astype(np.uint8)

        # Replicate to three channels and save.
        mask_rgb = np.stack([mask] * 3, axis=-1)
        cv2.imwrite(output_path, mask_rgb)
        print(f"生成mask: {output_path}")
        return True

    except Exception as e:
        print(f"生成掩码时出错: {e}")
        # On CUDA OOM, retry once more at half the resolution cap.
        if "CUDA out of memory" in str(e) and (original_height > max_size / 2 or original_width > max_size / 2):
            print(f"减小图像分辨率重试...")
            torch.cuda.empty_cache()
            gc.collect()
            # FIX: keep max_size an int on retry (the original passed a float).
            return process_image(image_path, real_dir, mask_dir, device, max_size=int(max_size // 2), model=sam)
        else:
            # Re-raise anything that is not an OOM we can work around.
            raise
    finally:
        # Proactively release generator and cached GPU memory.
        if 'mask_generator' in locals():
            del mask_generator
        torch.cuda.empty_cache()
        gc.collect()

def save_progress(processed_files, progress_file):
    """Persist the processed-file list to *progress_file* as pretty JSON."""
    serialized = json.dumps(processed_files, indent=4, ensure_ascii=False)
    with open(progress_file, 'w') as handle:
        handle.write(serialized)

def load_progress(progress_file):
    """Return the previously saved progress list, or [] if none exists."""
    try:
        with open(progress_file, 'r') as handle:
            return json.load(handle)
    except FileNotFoundError:
        return []

def main():
    """CLI entry point: batch-generate SAM masks for a set of PNG images.

    Parses command-line options, prepares output directories, selects the
    compute device, loads the SAM model once, then processes images in
    batches while persisting progress to a JSON file for resumption.
    """
    parser = argparse.ArgumentParser(description='处理图像并生成掩码')
    parser.add_argument('--resume', action='store_true', help='从上次中断的地方继续')
    parser.add_argument('--max_size', type=int, default=1000, help='处理图像的最大尺寸')
    parser.add_argument('--batch_size', type=int, default=5, help='一次批处理的图像数量')
    parser.add_argument('--input_list', type=str, default=None, help='要处理的图片列表文件')
    parser.add_argument('--input_dir', type=str, default="/home/zhangbo/workspace/wanfang/data/aigc_select", help='要处理的图片目录')
    parser.add_argument('--real_dir', type=str, default="/home/zhangbo/workspace/aigc/Aimages/real", help='原图输出目录')
    parser.add_argument('--mask_dir', type=str, default="/home/zhangbo/workspace/aigc/Aimages/masks", help='掩码输出目录')
    parser.add_argument('--full_forgery_dir', type=str, default="/home/zhangbo/workspace/aigc/Aimages/full_synthetic", help='全局伪造输出目录')
    parser.add_argument('--progress_file', type=str, default="/home/zhangbo/workspace/aigc/Aimages/progress.json", help='进度文件路径')
    args = parser.parse_args()

    # Resolve directories from the CLI arguments.
    input_dir = args.input_dir
    real_dir = args.real_dir
    mask_dir = args.mask_dir
    full_forgery_dir = args.full_forgery_dir
    progress_file = args.progress_file

    # Ensure all output directories exist.
    os.makedirs(real_dir, exist_ok=True)
    os.makedirs(mask_dir, exist_ok=True)
    os.makedirs(full_forgery_dir, exist_ok=True)

    # Pick the GPU exposed through CUDA_VISIBLE_DEVICES, else fall back to CPU.
    if torch.cuda.is_available():
        cuda_devices = os.environ.get('CUDA_VISIBLE_DEVICES', '0')
        # First visible physical GPU (for logging only).
        device_id = cuda_devices.split(',')[0]
        # Under CUDA_VISIBLE_DEVICES the first visible device is always cuda:0.
        device = f"cuda:{0}"
        print(f"使用设备: {device} (物理GPU ID: {device_id})")
        print(f"可见GPU: {cuda_devices}")
        print(f"GPU数量: {torch.cuda.device_count()}")
        print(f"当前GPU名称: {torch.cuda.get_device_name(0)}")
        # Cap this process at 80% of the GPU memory when supported.
        if hasattr(torch.cuda, 'set_per_process_memory_fraction'):
            torch.cuda.set_per_process_memory_fraction(0.8)
        torch.cuda.set_device(0)
    else:
        device = "cpu"
        print(f"使用设备: {device}")

    # Build the work list, either from a file of paths or a directory glob.
    if args.input_list:
        with open(args.input_list) as f:
            png_files = [line.strip() for line in f if line.strip()]
        print(f"从列表文件 {args.input_list} 中读取了 {len(png_files)} 个文件路径")
    else:
        png_files = glob.glob(os.path.join(input_dir, "*.png"))
        print(f"从目录 {input_dir} 中找到 {len(png_files)} 个PNG文件")

    # Load the processed-file list for resume support.
    processed_files = load_progress(progress_file) if args.resume else []
    print(f"已处理 {len(processed_files)} 个文件")

    # Load the SAM model once so every image reuses it.
    model_type = "vit_h"
    model_path = "/home/zhangbo/workspace/aigc/SAM/vit_models/sam_vit_h_4b8939.pth"
    sam = sam_model_registry[model_type](checkpoint=model_path)
    sam.to(device=device)
    print("SAM模型已加载")

    try:
        # Total number of batches (ceiling division).
        total_batches = (len(png_files) + args.batch_size - 1) // args.batch_size

        # Process the images batch by batch.
        for batch_idx in range(0, len(png_files), args.batch_size):
            batch_files = png_files[batch_idx:batch_idx + args.batch_size]
            current_batch = batch_idx // args.batch_size + 1
            print(f"处理批次 {current_batch}/{total_batches}，包含 {len(batch_files)} 个图像")

            for image_path in batch_files:
                filename = os.path.basename(image_path)

                # Skip files already recorded in the progress list.
                if image_path in processed_files and args.resume:
                    # FIX: the original log line was corrupted and printed
                    # "(unknown)" instead of the filename.
                    print(f"跳过已处理的图像: {filename}")
                    continue

                print(f"开始处理图像: {image_path}")

                try:
                    success = process_image(image_path, real_dir, mask_dir, device, max_size=args.max_size, model=sam)

                    # Record progress only on success, saving after each file.
                    if success:
                        processed_files.append(image_path)
                        save_progress(processed_files, progress_file)
                        # FIX: corrupted "(unknown)" log line, as above.
                        print(f"图像 {filename} 处理完成并已记录进度")
                    else:
                        # FIX: corrupted "(unknown)" log line, as above.
                        print(f"图像 {filename} 处理未完成，不记录进度")

                except Exception as e:
                    print(f"处理图像 {image_path} 时出错: {e}")
                    # Persist what we have so a restart can resume here.
                    save_progress(processed_files, progress_file)

            # Release memory between batches.
            torch.cuda.empty_cache()
            gc.collect()
            print(f"已完成批次 {current_batch}/{total_batches}")
    finally:
        # Release the model and any cached GPU memory.
        del sam
        torch.cuda.empty_cache()
        gc.collect()
        print(f"共处理了 {len(processed_files)} 个文件")

# Run the CLI only when executed as a script, not when imported.
if __name__ == "__main__":
    main()