#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
两段式去水印网络训练脚本
包含三个损失函数：
Loss1: 引导网络生成的水印分布逐渐向真实载体载密间残差分布靠拢
Loss2: 减少去水印后图像与原始图像间的残差值
Loss3: 水印破坏损失，通过解码验证水印移除效果
"""

import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
import numpy as np
from PIL import Image
import time
from tqdm import tqdm
import yaml
from easydict import EasyDict
import matplotlib.pyplot as plt
import lpips
import json

# Import project networks
from network.TwoStageWatermarkRemoval import TwoStageWatermarkRemoval
from network.Dual_Mark import Network as SepMarkNetwork
from watermark_embding import WatermarkEmbedder


class WatermarkDecoder:
    """Wraps a pretrained SepMark network to verify watermark removal during training.

    Only the decoder branches are used: ``decoder_C`` (tracer) and
    ``decoder_RF`` (real/fake detector).  The network is put in eval mode and
    all extraction runs under ``torch.no_grad()``.
    """
    def __init__(self, device, config_path=r'D:\deepfake\SepMark-main\SepMark-main\cfg\test_DualMark.yaml'):
        """
        Args:
            device: torch device the decoder network runs on.
            config_path: path to the SepMark test configuration YAML.
        """
        self.device = device
        self.load_config(config_path)
        self.setup_network()
        
    def load_config(self, config_path):
        """Load the SepMark test/train YAML configs and cache decoder hyper-parameters."""
        # utf-8 explicitly (matching TwoStageTrainer.load_config) so configs
        # containing non-ASCII text parse identically on every platform.
        with open(config_path, 'r', encoding='utf-8') as f:
            test_args = EasyDict(yaml.load(f, Loader=yaml.SafeLoader))
            
        # The training config carries the message/attention settings the decoder needs.
        with open(r'D:\deepfake\SepMark-main\SepMark-main\cfg\train_DualMark.yaml', 'r', encoding='utf-8') as f:
            train_args = EasyDict(yaml.load(f, Loader=yaml.SafeLoader))
            
        self.message_length = train_args.message_length
        self.message_range = train_args.message_range
        self.attention_encoder = train_args.attention_encoder
        self.attention_decoder = train_args.attention_decoder
        self.weight = train_args.weight
        self.noise_layers_R = train_args.noise_layers.pool_R
        self.noise_layers_F = train_args.noise_layers.pool_F
        
    def setup_network(self):
        """Build the SepMark network and load pretrained encoder/decoder weights."""
        self.network = SepMarkNetwork(
            self.message_length,
            self.noise_layers_R,
            self.noise_layers_F,
            self.device,
            1,  # batch_size = 1 (unused at inference)
            0.0002,  # lr (unused at inference)
            0.5,  # beta1 (unused at inference)
            self.attention_encoder,
            self.attention_decoder,
            self.weight
        )
        
        # Load pretrained weights if present; otherwise warn and keep random init.
        model_path = r"D:\deepfake\SepMark-main\SepMark-main\results\Dual_watermark_256_128_0.1_0.0002_0.5_se_se_1_10_10_10_0.1_2023_04_18_16_29_54\models\EC_90.pth"
        if os.path.exists(model_path):
            self.network.load_model_ed(model_path)
        else:
            print("⚠️  警告: 水印解码器模型未找到，将使用随机初始化")
            
        self.network.encoder_decoder.eval()
        
    def generate_message(self, batch_size, seed=None):
        """Generate a (batch_size, message_length) message of ±message_range values.

        NOTE: seeding deliberately mutates NumPy's *global* RNG state — the same
        seed must reproduce the exact message sequence the embedder used, so a
        local Generator is intentionally NOT used here.
        """
        if seed is not None:
            np.random.seed(seed)
        message = torch.Tensor(
            np.random.choice([-self.message_range, self.message_range],
                           (batch_size, self.message_length))
        ).to(self.device)
        return message
        
    def extract_watermark(self, image):
        """Run both SepMark decoder branches on `image`; no gradients are tracked."""
        with torch.no_grad():
            decoded_message_C = self.network.encoder_decoder.module.decoder_C(image)
            decoded_message_RF = self.network.encoder_decoder.module.decoder_RF(image)
        return decoded_message_C, decoded_message_RF
        
    def calculate_error_rate(self, original_message, decoded_message):
        """Fraction of message bits whose decoded sign disagrees with the original.

        The embedded message takes values ±message_range while the decoder emits
        continuous values, so bits are compared by sign.  (The previous exact
        float `!=` comparison reported ~100% error for any decoder output,
        which made Loss3 constant.)

        Returns:
            float in [0, 1]: bit error rate averaged over the whole batch.
        """
        # Message length (number of bits per sample).
        length = original_message.size(1)

        # Move to CPU and detach so this bookkeeping never tracks gradients.
        original = original_message.detach().cpu()
        decoded = decoded_message.detach().cpu()

        # A decoded bit counts as wrong when its sign differs from the target's.
        error_count = torch.sum((torch.sign(original) != torch.sign(decoded)).float()).item()

        # Normalize by total number of bits in the batch.
        error_rate = error_count / (original.size(0) * length)

        return error_rate



class WatermarkDataset(Dataset):
    """Paired dataset of watermarked images and their original (cover) images.

    Pairs are matched by identical filename in ``watermarked_dir`` and
    ``original_dir``; files without a counterpart are skipped.
    """
    def __init__(self, watermarked_dir, original_dir, transform=None, max_samples=None):
        """
        Args:
            watermarked_dir: directory with watermarked images (.jpg/.jpeg/.png).
            original_dir: directory with the corresponding cover images.
            transform: optional transform applied to both images of a pair.
            max_samples: optional cap on the number of pairs used.
        """
        self.watermarked_dir = watermarked_dir
        self.original_dir = original_dir
        self.transform = transform
        
        # Collect filenames present in both directories.  Sorted so the
        # dataset order is deterministic across runs and platforms
        # (os.listdir order is arbitrary).
        self.image_files = []
        for file in sorted(os.listdir(watermarked_dir)):
            if file.lower().endswith(('.jpg', '.jpeg', '.png')):
                if os.path.exists(os.path.join(original_dir, file)):
                    self.image_files.append(file)
        
        # `is not None` so an explicit max_samples=0 is honored as well.
        if max_samples is not None:
            self.image_files = self.image_files[:max_samples]
        
        print(f"数据集大小: {len(self.image_files)}")
    
    def __len__(self):
        return len(self.image_files)
    
    def __getitem__(self, idx):
        """Return the (watermarked, original) pair at `idx`, both as RGB."""
        file = self.image_files[idx]
        
        # Load both images of the pair by shared filename.
        watermarked_path = os.path.join(self.watermarked_dir, file)
        original_path = os.path.join(self.original_dir, file)
        
        watermarked = Image.open(watermarked_path).convert('RGB')
        original = Image.open(original_path).convert('RGB')
        
        # The same transform is applied to both so they stay aligned.
        if self.transform:
            watermarked = self.transform(watermarked)
            original = self.transform(original)
        
        return watermarked, original


class ThreeStageLossFunction(nn.Module):
    """Three-part training loss for the two-stage watermark-removal network.

    total = λ1·Loss1 + λ2·Loss2 + λ3·Loss3 where:
      Loss1 — pull the predicted attack noise toward the true cover/stego residual;
      Loss2 — restored-image fidelity (MSE + LPIPS + SSIM + gradient);
      Loss3 — watermark-destruction score from the SepMark decoders.

    NOTE(review): Loss3 is computed from decoder outputs extracted under
    ``no_grad`` and returned as a plain Python float, so it changes the
    reported total loss value but contributes NO gradient to the network —
    confirm this is intended.
    """
    def __init__(self, device, watermark_decoder, lambda_loss1=1.0, lambda_loss2=1.0, lambda_loss3=1.0):
        """
        Args:
            device: torch device for the LPIPS network.
            watermark_decoder: WatermarkDecoder used by Loss3.
            lambda_loss1/2/3: weights of the three loss terms.
        """
        super(ThreeStageLossFunction, self).__init__()
        self.device = device
        self.lambda_loss1 = lambda_loss1
        self.lambda_loss2 = lambda_loss2
        self.lambda_loss3 = lambda_loss3
        self.watermark_decoder = watermark_decoder
        
        # Base pixel-level losses shared by Loss1/Loss2.
        self.mse_loss = nn.MSELoss()
        self.l1_loss = nn.L1Loss()
        
        # LPIPS perceptual loss (AlexNet backbone).
        self.lpips_loss = lpips.LPIPS(net='alex').to(device)
        
        print(f"损失函数权重: λ1={lambda_loss1}, λ2={lambda_loss2}, λ3={lambda_loss3}")
    
    def calculate_loss1(self, attack_noise, watermarked_image, cover_image):
        """Loss1: guide the generated attack noise toward the true cover/stego
        residual distribution.

        Returns:
            (loss tensor, dict of per-term float values).
        """
        # Ground-truth watermark residual between the stego and cover image.
        true_watermark_residual = watermarked_image - cover_image
        
        # 1. MSE: pixel-level similarity.
        mse_loss = self.mse_loss(attack_noise, true_watermark_residual)
        
        # 2. L1: sparsity constraint.
        l1_loss = self.l1_loss(attack_noise, true_watermark_residual)
        
        # 3. Statistics: match per-channel mean and variance over spatial dims.
        mean_loss = self.mse_loss(torch.mean(attack_noise, dim=[2, 3]), 
                                 torch.mean(true_watermark_residual, dim=[2, 3]))
        var_loss = self.mse_loss(torch.var(attack_noise, dim=[2, 3]), 
                                torch.var(true_watermark_residual, dim=[2, 3]))
        
        # 4. Gradient loss: edge/structure constraint.
        grad_loss = self.gradient_loss(attack_noise, true_watermark_residual)
        
        # Fixed blend of the sub-terms.
        loss1 = mse_loss + 0.5 * l1_loss + 0.3 * mean_loss + 0.3 * var_loss + 0.2 * grad_loss
        
        return loss1, {
            'mse_loss': mse_loss.item(),
            'l1_loss': l1_loss.item(),
            'mean_loss': mean_loss.item(),
            'var_loss': var_loss.item(),
            'grad_loss': grad_loss.item()
        }
    
    def calculate_loss2(self, restored_image, cover_image):
        """Loss2: reduce the residual between the de-watermarked image and the
        original cover image.

        Returns:
            (loss tensor, dict of per-term float values).
        """
        # 1. MSE: pixel-level reconstruction.
        mse_loss = self.mse_loss(restored_image, cover_image)
        
        # 2. LPIPS: perceptual quality.
        lpips_loss = torch.mean(self.lpips_loss(restored_image, cover_image))
        
        # 3. SSIM: structural similarity (simplified, see ssim_loss).
        ssim_loss = self.ssim_loss(restored_image, cover_image)
        
        # 4. Gradient loss: edge preservation.
        grad_loss = self.gradient_loss(restored_image, cover_image)
        
        # Fixed blend of the sub-terms.
        loss2 = mse_loss + 0.5 * lpips_loss + 0.3 * ssim_loss + 0.2 * grad_loss
        
        return loss2, {
            'mse_loss': mse_loss.item(),
            'lpips_loss': lpips_loss.item(),
            'ssim_loss': ssim_loss.item(),
            'grad_loss': grad_loss.item()
        }
    
    def calculate_loss3(self, restored_image, watermarked_image):
        """Loss3: watermark-destruction loss via decoding.

        Lower decoding accuracy on the restored image means the watermark was
        destroyed more thoroughly, hence a smaller loss.

        Returns:
            (float loss, dict of float metrics) — NOTE: a plain float, not a
            tensor, so no gradient flows through this term.
        """
        batch_size = restored_image.size(0)

        # Fixed seed so the reference message is identical every call.
        original_message = self.watermark_decoder.generate_message(batch_size, seed=42)

        # Decode the restored image (runs under no_grad inside the decoder).
        decoded_message_C, decoded_message_RF = self.watermark_decoder.extract_watermark(restored_image)

        # Bit error rates for both decoder branches.
        error_rate_C = self.watermark_decoder.calculate_error_rate(original_message, decoded_message_C)
        error_rate_RF = self.watermark_decoder.calculate_error_rate(original_message, decoded_message_RF)

        # Accuracy = 1 - error rate.
        accuracy_C = 1.0 - error_rate_C
        accuracy_RF = 1.0 - error_rate_RF

        # Destruction loss = mean accuracy (lower accuracy → smaller loss).
        watermark_destroy_loss = (accuracy_C + accuracy_RF) / 2.0

        # Baseline: decoding accuracy on the still-watermarked input (diagnostic only).
        with torch.no_grad():
            orig_decoded_C, orig_decoded_RF = self.watermark_decoder.extract_watermark(watermarked_image)
            orig_accuracy_C = 1.0 - self.watermark_decoder.calculate_error_rate(original_message, orig_decoded_C)
            orig_accuracy_RF = 1.0 - self.watermark_decoder.calculate_error_rate(original_message, orig_decoded_RF)

        return watermark_destroy_loss, {
            'watermark_destroy_loss': float(watermark_destroy_loss),
            'accuracy_C': float(accuracy_C),
            'accuracy_RF': float(accuracy_RF),
            'error_rate_C': float(error_rate_C),
            'error_rate_RF': float(error_rate_RF),
            'orig_accuracy_C': float(orig_accuracy_C),
            'orig_accuracy_RF': float(orig_accuracy_RF),
            'watermark_destruction_rate': float(1.0 - watermark_destroy_loss)
        }
    
    def gradient_loss(self, pred, target):
        """L1 distance between horizontal/vertical finite-difference gradients."""
        # Forward differences along width (x) and height (y).
        pred_grad_x = pred[:, :, :, 1:] - pred[:, :, :, :-1]
        pred_grad_y = pred[:, :, 1:, :] - pred[:, :, :-1, :]
        
        target_grad_x = target[:, :, :, 1:] - target[:, :, :, :-1]
        target_grad_y = target[:, :, 1:, :] - target[:, :, :-1, :]
        
        # L1 on each gradient direction, summed.
        grad_loss_x = self.l1_loss(pred_grad_x, target_grad_x)
        grad_loss_y = self.l1_loss(pred_grad_y, target_grad_y)
        
        return grad_loss_x + grad_loss_y
    
    def ssim_loss(self, pred, target):
        """Simplified global SSIM loss (1 - mean SSIM).

        Computed from whole-image statistics per channel rather than the usual
        local sliding window, so it is a coarse approximation of true SSIM.
        """
        # Per-channel means over the spatial dimensions.
        mu1 = torch.mean(pred, dim=[2, 3], keepdim=True)
        mu2 = torch.mean(target, dim=[2, 3], keepdim=True)
        
        sigma1_sq = torch.var(pred, dim=[2, 3], keepdim=True)
        sigma2_sq = torch.var(target, dim=[2, 3], keepdim=True)
        
        # Cross-covariance between pred and target.
        sigma12 = torch.mean((pred - mu1) * (target - mu2), dim=[2, 3], keepdim=True)
        
        # Standard SSIM stabilization constants.
        c1 = 0.01 ** 2
        c2 = 0.03 ** 2
        
        ssim = ((2 * mu1 * mu2 + c1) * (2 * sigma12 + c2)) / \
               ((mu1 ** 2 + mu2 ** 2 + c1) * (sigma1_sq + sigma2_sq + c2))
        
        return 1 - torch.mean(ssim)
    
    def forward(self, results):
        """Compute the weighted total loss.

        Args:
            results: dict with keys 'watermarked_image', 'cover_image',
                'attack_noise', 'restored_image' (the model's forward output).

        Returns:
            (total loss tensor, nested dict of loss values/details).
        """
        watermarked_image = results['watermarked_image']
        cover_image = results['cover_image']
        attack_noise = results['attack_noise']
        restored_image = results['restored_image']
        
        # Loss1: attack-noise distribution matching.
        loss1, loss1_details = self.calculate_loss1(attack_noise, watermarked_image, cover_image)
        
        # Loss2: restoration fidelity.
        loss2, loss2_details = self.calculate_loss2(restored_image, cover_image)
        
        # Loss3: watermark destruction (float, gradient-free — see class NOTE).
        loss3, loss3_details = self.calculate_loss3(restored_image, watermarked_image)
        
        # Weighted total (loss3 only shifts the value, not the gradient).
        total_loss = self.lambda_loss1 * loss1 + self.lambda_loss2 * loss2 + self.lambda_loss3 * loss3
        
        # loss3 is already a Python float, hence no .item() here.
        loss_dict = {
            'total_loss': total_loss.item(),
            'loss1': loss1.item(),
            'loss2': loss2.item(),
            'loss3': loss3,
            'loss1_details': loss1_details,
            'loss2_details': loss2_details,
            'loss3_details': loss3_details
        }
        
        return total_loss, loss_dict


class TwoStageTrainer:
    """Trainer for the two-stage watermark-removal network.

    Owns the model, the three-part loss, the optimizer/scheduler, and the
    checkpointing/plotting utilities.  Training data are (watermarked, cover)
    image pairs matched by filename.
    """
    def __init__(self, config_path=r'D:\deepfake\SepMark-main\SepMark-main\config\two_stage_config.yaml', resume_path=None):
        """
        Args:
            config_path: YAML training config; a default one is written if missing.
            resume_path: optional checkpoint path to resume training from.
        """
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print(f"使用设备: {self.device}")
        
        # Load configuration (or create the default one on disk).
        self.load_config(config_path)
        
        # SepMark decoder used by Loss3 to score watermark destruction.
        self.watermark_decoder = WatermarkDecoder(self.device)
        
        # Two-stage removal network.
        self.setup_networks()
        
        # Three-part loss function.
        self.setup_loss_functions()
        
        # Optimizer + LR scheduler.
        self.setup_optimizers()
        
        # Per-epoch metrics, appended each epoch and saved into checkpoints.
        self.train_history = {
            'total_loss': [],
            'loss1': [],
            'loss2': [],
            'loss3': [],
            'lpips_loss': [],
            'watermark_destruction_rate': [],
            'accuracy_C': [],
            'accuracy_RF': [],
            'epoch': []
        }
        
        # Optionally resume from a checkpoint (sets start_epoch, model, optim, history).
        self.start_epoch = 0
        if resume_path:
            self.resume_training(resume_path)
    
    def load_config(self, config_path):
        """Load the YAML config; if missing, write and use a default config."""
        default_config = {
            'model': {
                'input_channels': 3,
                'attack_base_channels': 64,
                'restore_base_channels': 64
            },
            'training': {
                'batch_size': 4,
                'learning_rate': 1e-4,
                'num_epochs': 100,
                'max_samples': 3000
            },
            'loss': {
                'lambda_loss1': 1.0,
                'lambda_loss2': 1.0,
                'lambda_loss3': 0.5
            },
            'data': {
                'watermarked_dir': 'D:/deepfake/CelebAMask-HQ/CelebAMask-HQ/mark/watermarked',
                'original_dir': 'D:/deepfake/CelebAMask-HQ/CelebAMask-HQ/CelebA-HQ-img'
            },
            'save': {
                'checkpoint_dir': 'checkpoints/two_stage',
                'save_interval': 10
            }
        }
        
        if os.path.exists(config_path):
            with open(config_path, 'r', encoding='utf-8') as f:
                config = EasyDict(yaml.load(f, Loader=yaml.SafeLoader))
        else:
            # First run: persist the defaults so later runs are reproducible.
            config = EasyDict(default_config)
            os.makedirs(os.path.dirname(config_path), exist_ok=True)
            with open(config_path, 'w') as f:
                yaml.dump(default_config, f, default_flow_style=False)
            print(f"创建默认配置文件: {config_path}")
        
        self.config = config
        print("配置加载完成")
    
    def setup_networks(self):
        """Instantiate the two-stage removal model on the training device."""
        self.model = TwoStageWatermarkRemoval(
            input_channels=self.config.model.input_channels,
            attack_base_channels=self.config.model.attack_base_channels,
            restore_base_channels=self.config.model.restore_base_channels
        ).to(self.device)
        
        print(f"网络参数量: {sum(p.numel() for p in self.model.parameters()):,}")
    
    def setup_loss_functions(self):
        """Build the three-part loss with weights taken from the config."""
        self.loss_fn = ThreeStageLossFunction(
            device=self.device,
            watermark_decoder=self.watermark_decoder,
            lambda_loss1=self.config.loss.lambda_loss1,
            lambda_loss2=self.config.loss.lambda_loss2,
            lambda_loss3=self.config.loss.lambda_loss3
        )
    
    def setup_optimizers(self):
        """Create the Adam optimizer and cosine-annealing LR scheduler."""
        self.optimizer = optim.Adam(
            self.model.parameters(),
            lr=self.config.training.learning_rate,
            betas=(0.9, 0.999)
        )
        
        # Cosine decay over the full planned number of epochs.
        self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
            self.optimizer, T_max=self.config.training.num_epochs
        )
    
    def resume_training(self, resume_path):
        """Restore model/optimizer/scheduler/history from a checkpoint file."""
        if not os.path.exists(resume_path):
            print(f"⚠️  警告: 恢复路径不存在: {resume_path}")
            return
            
        print(f"从检查点恢复训练: {resume_path}")
        # NOTE(review): weights_only=False unpickles arbitrary objects — only
        # load checkpoints from trusted sources.
        checkpoint = torch.load(resume_path, map_location=self.device, weights_only=False)
        
        # Restore model weights.
        self.model.load_state_dict(checkpoint['model_state_dict'])
        
        # Restore optimizer state (momentum buffers etc.).
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        
        # Restore LR-scheduler state when present (older checkpoints may lack it).
        if 'scheduler_state_dict' in checkpoint:
            self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        
        # Restore accumulated training history when present.
        if 'train_history' in checkpoint:
            self.train_history = checkpoint['train_history']
        
        # Resume from the saved epoch (0 if absent).
        self.start_epoch = checkpoint.get('epoch', 0)
        
        print(f"恢复训练成功，从第 {self.start_epoch} epoch 开始")
    
    def train_epoch(self, dataloader):
        """Run one training epoch; returns per-metric averages over the epoch."""
        self.model.train()
        epoch_losses = {
            'total_loss': 0.0,
            'loss1': 0.0,
            'loss2': 0.0,
            'loss3': 0.0,
            'lpips_loss': 0.0,
            'watermark_destruction_rate': 0.0,
            'accuracy_C': 0.0,
            'accuracy_RF': 0.0
        }
        
        for batch_idx, (watermarked_images, cover_images) in enumerate(tqdm(dataloader, desc="训练")):
            watermarked_images = watermarked_images.to(self.device)
            cover_images = cover_images.to(self.device)
            
            # Forward pass through both stages.
            results = self.model(watermarked_images, cover_images)
            
            # Three-part loss (Loss3 part is gradient-free, see loss class).
            total_loss, loss_dict = self.loss_fn(results)
            
            # Standard backprop step.
            self.optimizer.zero_grad()
            total_loss.backward()
            self.optimizer.step()
            
            # Accumulate metrics for the epoch average.
            epoch_losses['total_loss'] += loss_dict['total_loss']
            epoch_losses['loss1'] += loss_dict['loss1']
            epoch_losses['loss2'] += loss_dict['loss2']
            epoch_losses['loss3'] += loss_dict['loss3']
            epoch_losses['lpips_loss'] += loss_dict['loss2_details']['lpips_loss']
            epoch_losses['watermark_destruction_rate'] += loss_dict['loss3_details']['watermark_destruction_rate']
            epoch_losses['accuracy_C'] += loss_dict['loss3_details']['accuracy_C']
            epoch_losses['accuracy_RF'] += loss_dict['loss3_details']['accuracy_RF']
            
            # Detailed log every 50 batches.
            if batch_idx % 50 == 0:
                print(f"Batch {batch_idx}: Total={loss_dict['total_loss']:.4f}, "
                      f"Loss1={loss_dict['loss1']:.4f}, Loss2={loss_dict['loss2']:.4f}, "
                      f"Loss3={loss_dict['loss3']:.4f}, LPIPS={loss_dict['loss2_details']['lpips_loss']:.4f}, "
                      f"水印破坏率={loss_dict['loss3_details']['watermark_destruction_rate']:.4f}")
        
        # Convert sums to per-batch averages.
        for key in epoch_losses:
            epoch_losses[key] /= len(dataloader)
        
        return epoch_losses
    
    def evaluate(self, dataloader):
        """Evaluate the model (no gradient updates); returns averaged metrics.

        NOTE(review): not called from train() — kept for external/validation use.
        """
        self.model.eval()
        eval_losses = {
            'total_loss': 0.0,
            'loss1': 0.0,
            'loss2': 0.0,
            'loss3': 0.0,
            'lpips_loss': 0.0,
            'watermark_destruction_rate': 0.0,
            'accuracy_C': 0.0,
            'accuracy_RF': 0.0
        }
        
        with torch.no_grad():
            for watermarked_images, cover_images in dataloader:
                watermarked_images = watermarked_images.to(self.device)
                cover_images = cover_images.to(self.device)
                
                results = self.model(watermarked_images, cover_images)
                total_loss, loss_dict = self.loss_fn(results)
                
                eval_losses['total_loss'] += loss_dict['total_loss']
                eval_losses['loss1'] += loss_dict['loss1']
                eval_losses['loss2'] += loss_dict['loss2']
                eval_losses['loss3'] += loss_dict['loss3']
                eval_losses['lpips_loss'] += loss_dict['loss2_details']['lpips_loss']
                eval_losses['watermark_destruction_rate'] += loss_dict['loss3_details']['watermark_destruction_rate']
                eval_losses['accuracy_C'] += loss_dict['loss3_details']['accuracy_C']
                eval_losses['accuracy_RF'] += loss_dict['loss3_details']['accuracy_RF']
        
        # Convert sums to per-batch averages.
        for key in eval_losses:
            eval_losses[key] /= len(dataloader)
        
        return eval_losses
    
    def save_checkpoint(self, epoch, is_best=False):
        """Save model/optimizer/scheduler/config/history to a checkpoint file.

        Args:
            epoch: 1-based epoch number stored in the checkpoint.
            is_best: additionally save a copy as 'best_model.pth'.
        """
        checkpoint_dir = self.config.save.checkpoint_dir
        os.makedirs(checkpoint_dir, exist_ok=True)
        
        checkpoint = {
            'epoch': epoch,
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict(),
            'config': self.config,
            'train_history': self.train_history
        }
        
        # Regular per-epoch checkpoint (legacy zipfile format for compatibility).
        checkpoint_path = os.path.join(checkpoint_dir, f'checkpoint_epoch_{epoch}.pth')
        torch.save(checkpoint, checkpoint_path, _use_new_zipfile_serialization=False)
        
        # Extra copy for the best model so far.
        if is_best:
            best_path = os.path.join(checkpoint_dir, 'best_model.pth')
            torch.save(checkpoint, best_path, _use_new_zipfile_serialization=False)
            print(f"保存最佳模型: {best_path}")
        
        print(f"保存检查点: {checkpoint_path}")
    
    def plot_training_history(self):
        """Plot the per-epoch training metrics into a 3x3 figure and save it."""
        plt.figure(figsize=(18, 12))
        
        # Row 1: loss curves.
        plt.subplot(3, 3, 1)
        plt.plot(self.train_history['epoch'], self.train_history['total_loss'], 'b-', label='Total Loss')
        plt.title('Total Loss')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend()
        plt.grid(True)
        
        plt.subplot(3, 3, 2)
        plt.plot(self.train_history['epoch'], self.train_history['loss1'], 'r-', label='Loss1')
        plt.plot(self.train_history['epoch'], self.train_history['loss2'], 'g-', label='Loss2')
        plt.plot(self.train_history['epoch'], self.train_history['loss3'], 'orange', label='Loss3')
        plt.title('Individual Losses')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend()
        plt.grid(True)
        
        plt.subplot(3, 3, 3)
        plt.plot(self.train_history['epoch'], self.train_history['lpips_loss'], 'purple', label='LPIPS Loss')
        plt.title('LPIPS Perceptual Loss')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend()
        plt.grid(True)
        
        # Row 2: watermark-destruction metrics.
        plt.subplot(3, 3, 4)
        plt.plot(self.train_history['epoch'], self.train_history['watermark_destruction_rate'], 'red', label='Watermark Destruction Rate')
        plt.title('Watermark Destruction Rate')
        plt.xlabel('Epoch')
        plt.ylabel('Rate')
        plt.legend()
        plt.grid(True)
        
        plt.subplot(3, 3, 5)
        plt.plot(self.train_history['epoch'], self.train_history['accuracy_C'], 'blue', label='Decoder C Accuracy')
        plt.plot(self.train_history['epoch'], self.train_history['accuracy_RF'], 'green', label='Decoder RF Accuracy')
        plt.title('Watermark Decoding Accuracy')
        plt.xlabel('Epoch')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.grid(True)
        
        # Row 3: individual loss terms.
        plt.subplot(3, 3, 6)
        plt.plot(self.train_history['epoch'], self.train_history['loss1'], 'r-', label='Distribution Loss')
        plt.title('Loss1 (Distribution Matching)')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend()
        plt.grid(True)
        
        plt.subplot(3, 3, 7)
        plt.plot(self.train_history['epoch'], self.train_history['loss2'], 'g-', label='Restoration Loss')
        plt.title('Loss2 (Image Restoration)')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend()
        plt.grid(True)
        
        plt.subplot(3, 3, 8)
        plt.plot(self.train_history['epoch'], self.train_history['loss3'], 'orange', label='Watermark Destruction Loss')
        plt.title('Loss3 (Watermark Destruction)')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend()
        plt.grid(True)
        
        # Combined quality-vs-destruction view.
        plt.subplot(3, 3, 9)
        # Normalize LPIPS for comparability.  NOTE(review): divides by the max —
        # would raise/produce NaN if all recorded LPIPS values were 0.
        normalized_lpips = np.array(self.train_history['lpips_loss']) / np.max(self.train_history['lpips_loss'])
        normalized_destruction = np.array(self.train_history['watermark_destruction_rate'])
        
        plt.plot(self.train_history['epoch'], normalized_lpips, 'purple', label='LPIPS (Normalized)')
        plt.plot(self.train_history['epoch'], normalized_destruction, 'red', label='Watermark Destruction')
        plt.title('Quality vs Destruction Trade-off')
        plt.xlabel('Epoch')
        plt.ylabel('Normalized Value')
        plt.legend()
        plt.grid(True)
        
        plt.tight_layout()
        plt.savefig('training_history_extended.png', dpi=150, bbox_inches='tight')
        plt.show()
    
    def train(self):
        """Main training loop: build data pipeline, train, checkpoint, plot."""
        print("开始训练三段式去水印网络...")
        
        # Resize + [-1, 1] normalization applied to both images of each pair.
        transform = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])
        
        # Paired (watermarked, cover) dataset.
        train_dataset = WatermarkDataset(
            self.config.data.watermarked_dir,
            self.config.data.original_dir,
            transform=transform,
            max_samples=self.config.training.max_samples
        )
        
        train_dataloader = DataLoader(
            train_dataset,
            batch_size=self.config.training.batch_size,
            shuffle=True,
            num_workers=4,
            pin_memory=True
        )
        
        # Track the best (lowest) average training loss for best-model saving.
        best_loss = float('inf')
        
        for epoch in range(self.start_epoch, self.config.training.num_epochs):
            print(f"\nEpoch {epoch + 1}/{self.config.training.num_epochs}")
            
            # One pass over the training data.
            train_losses = self.train_epoch(train_dataloader)
            
            # Step the cosine LR schedule once per epoch.
            self.scheduler.step()
            
            # Append this epoch's averages to the history.
            self.train_history['epoch'].append(epoch + 1)
            self.train_history['total_loss'].append(train_losses['total_loss'])
            self.train_history['loss1'].append(train_losses['loss1'])
            self.train_history['loss2'].append(train_losses['loss2'])
            self.train_history['loss3'].append(train_losses['loss3'])
            self.train_history['lpips_loss'].append(train_losses['lpips_loss'])
            self.train_history['watermark_destruction_rate'].append(train_losses['watermark_destruction_rate'])
            self.train_history['accuracy_C'].append(train_losses['accuracy_C'])
            self.train_history['accuracy_RF'].append(train_losses['accuracy_RF'])
            
            # Epoch summary.
            print(f"训练损失:")
            print(f"  Total Loss: {train_losses['total_loss']:.6f}")
            print(f"  Loss1 (Distribution): {train_losses['loss1']:.6f}")
            print(f"  Loss2 (Restoration): {train_losses['loss2']:.6f}")
            print(f"  Loss3 (Watermark Destruction): {train_losses['loss3']:.6f}")
            print(f"  LPIPS Loss: {train_losses['lpips_loss']:.6f}")
            print(f"  水印破坏率: {train_losses['watermark_destruction_rate']:.4f}")
            print(f"  解码器C准确率: {train_losses['accuracy_C']:.4f}")
            print(f"  解码器RF准确率: {train_losses['accuracy_RF']:.4f}")
            
            # Save whenever the average training loss improves.
            if train_losses['total_loss'] < best_loss:
                best_loss = train_losses['total_loss']
                self.save_checkpoint(epoch + 1, is_best=True)
            
            # Periodic checkpoint regardless of loss.
            if (epoch + 1) % self.config.save.save_interval == 0:
                self.save_checkpoint(epoch + 1)
        
        print("训练完成！")
        
        # Persist the training-history figure.
        self.plot_training_history()


def main():
    """Entry point: parse CLI arguments, then build and run the trainer."""
    import argparse

    arg_parser = argparse.ArgumentParser(description='两段式去水印网络训练')
    arg_parser.add_argument('--config', type=str,
                            default=r'D:\deepfake\SepMark-main\SepMark-main\config\two_stage_config.yaml',
                            help='配置文件路径')
    arg_parser.add_argument('--resume', type=str, default=None,
                            help='恢复训练的检查点路径')
    cli_args = arg_parser.parse_args()

    # Ensure the (relative) config directory exists before the trainer
    # potentially writes a default config into it.
    os.makedirs('config', exist_ok=True)

    # Build the trainer and start the training loop.
    trainer = TwoStageTrainer(cli_args.config, cli_args.resume)
    trainer.train()


# Guarded entry point so importing this module does not start training.
if __name__ == '__main__':
    main() 