#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
两段式去水印网络训练脚本
包含两个损失函数：
Loss1: 引导网络生成的水印分布逐渐向真实载体载密间残差分布靠拢
Loss2: 减少去水印后图像与原始图像间的残差值
"""

import os
from tkinter import NO
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
import numpy as np
from PIL import Image
import time
from tqdm import tqdm
import yaml
from easydict import EasyDict
import matplotlib.pyplot as plt
import lpips

# Import the two-stage watermark-removal network.
from network.TwoStageWatermarkRemoval import TwoStageWatermarkRemoval

class WatermarkDataset(Dataset):
    """Paired watermarked/original image dataset.

    Pairs are matched by filename: only files with a .jpg/.jpeg/.png
    extension that exist in BOTH directories are included.
    """
    def __init__(self, watermarked_dir, original_dir, transform=None, max_samples=None):
        """
        Args:
            watermarked_dir: directory containing watermarked images.
            original_dir: directory containing the corresponding originals.
            transform: optional transform applied to both images of a pair.
            max_samples: optional cap on the number of pairs (first N after sorting).
        """
        self.watermarked_dir = watermarked_dir
        self.original_dir = original_dir
        self.transform = transform

        # Collect files present in both directories. Sorting makes the
        # ordering deterministic (os.listdir order is arbitrary), so
        # max_samples always selects the same subset across runs.
        self.image_files = sorted(
            file for file in os.listdir(watermarked_dir)
            if file.lower().endswith(('.jpg', '.jpeg', '.png'))
            and os.path.exists(os.path.join(original_dir, file))
        )

        if max_samples:
            self.image_files = self.image_files[:max_samples]

        print(f"数据集大小: {len(self.image_files)}")

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, idx):
        """Return the (watermarked, original) image pair at index idx."""
        file = self.image_files[idx]

        watermarked_path = os.path.join(self.watermarked_dir, file)
        original_path = os.path.join(self.original_dir, file)

        # Force RGB so grayscale/RGBA inputs yield a consistent channel count.
        watermarked = Image.open(watermarked_path).convert('RGB')
        original = Image.open(original_path).convert('RGB')

        if self.transform:
            watermarked = self.transform(watermarked)
            original = self.transform(original)

        return watermarked, original


class TwoStageLossFunction(nn.Module):
    """Combined objective for the two-stage watermark-removal network.

    Loss1 pulls the generated attack noise toward the true cover/stego
    residual distribution so the network learns where the secret
    information is embedded; Loss2 shrinks the residual between the
    restored image and the original cover image.
    """
    def __init__(self, device, lambda_loss1=1.0, lambda_loss2=1.0):
        super(TwoStageLossFunction, self).__init__()
        self.device = device
        self.lambda_loss1 = lambda_loss1
        self.lambda_loss2 = lambda_loss2

        # Pixel-level criteria shared by both loss terms.
        self.mse_loss = nn.MSELoss()
        self.l1_loss = nn.L1Loss()

        # LPIPS perceptual criterion (AlexNet backbone).
        self.lpips_loss = lpips.LPIPS(net='alex').to(device)

        print(f"损失函数权重: λ1={lambda_loss1}, λ2={lambda_loss2}")

    def calculate_loss1(self, attack_noise, watermarked_image, cover_image):
        """Distribution-matching loss (Loss1).

        Pulls the attack noise toward the true watermark residual
        (watermarked - cover) in pixel values, spatial statistics and
        gradients.

        Args:
            attack_noise: noise produced by the attack stage.
            watermarked_image: watermarked image (s).
            cover_image: original cover image (c).
        Returns:
            (loss1 tensor, dict of per-term scalar values)
        """
        residual = watermarked_image - cover_image

        # Pixel-level agreement plus a sparsity-flavoured L1 term.
        pixel_mse = self.mse_loss(attack_noise, residual)
        pixel_l1 = self.l1_loss(attack_noise, residual)

        # First/second-moment statistics over the spatial dimensions.
        stat_mean = self.mse_loss(torch.mean(attack_noise, dim=[2, 3]),
                                  torch.mean(residual, dim=[2, 3]))
        stat_var = self.mse_loss(torch.var(attack_noise, dim=[2, 3]),
                                 torch.var(residual, dim=[2, 3]))

        # Edge-structure agreement.
        edge = self.gradient_loss(attack_noise, residual)

        loss1 = pixel_mse + 0.5 * pixel_l1 + 0.3 * stat_mean + 0.3 * stat_var + 0.2 * edge

        return loss1, {
            'mse_loss': pixel_mse.item(),
            'l1_loss': pixel_l1.item(),
            'mean_loss': stat_mean.item(),
            'var_loss': stat_var.item(),
            'grad_loss': edge.item(),
        }

    def calculate_loss2(self, restored_image, cover_image):
        """Restoration loss (Loss2).

        Drives the restored image back toward the original content using
        pixel, perceptual, structural and gradient criteria.

        Args:
            restored_image: restored image (r).
            cover_image: original cover image (c).
        Returns:
            (loss2 tensor, dict of per-term scalar values)
        """
        pixel_mse = self.mse_loss(restored_image, cover_image)
        perceptual = torch.mean(self.lpips_loss(restored_image, cover_image))
        structural = self.ssim_loss(restored_image, cover_image)
        edge = self.gradient_loss(restored_image, cover_image)

        loss2 = pixel_mse + 0.5 * perceptual + 0.3 * structural + 0.2 * edge

        return loss2, {
            'mse_loss': pixel_mse.item(),
            'lpips_loss': perceptual.item(),
            'ssim_loss': structural.item(),
            'grad_loss': edge.item(),
        }

    def gradient_loss(self, pred, target):
        """L1 distance between horizontal and vertical finite differences."""
        dx = self.l1_loss(torch.diff(pred, dim=3), torch.diff(target, dim=3))
        dy = self.l1_loss(torch.diff(pred, dim=2), torch.diff(target, dim=2))
        return dx + dy

    def ssim_loss(self, pred, target):
        """1 - simplified SSIM (global, per-channel statistics)."""
        c1 = 0.01 ** 2
        c2 = 0.03 ** 2

        mu_p = torch.mean(pred, dim=[2, 3], keepdim=True)
        mu_t = torch.mean(target, dim=[2, 3], keepdim=True)
        var_p = torch.var(pred, dim=[2, 3], keepdim=True)
        var_t = torch.var(target, dim=[2, 3], keepdim=True)
        cov = torch.mean((pred - mu_p) * (target - mu_t), dim=[2, 3], keepdim=True)

        numerator = (2 * mu_p * mu_t + c1) * (2 * cov + c2)
        denominator = (mu_p ** 2 + mu_t ** 2 + c1) * (var_p + var_t + c2)

        return 1 - torch.mean(numerator / denominator)

    def forward(self, results):
        """Compute the total weighted loss.

        Args:
            results: network output dict with 'watermarked_image',
                'cover_image', 'attack_noise' and 'restored_image'.
        Returns:
            (total loss tensor, nested dict of loss values)
        """
        loss1, loss1_details = self.calculate_loss1(
            results['attack_noise'],
            results['watermarked_image'],
            results['cover_image'],
        )
        loss2, loss2_details = self.calculate_loss2(
            results['restored_image'],
            results['cover_image'],
        )

        total_loss = self.lambda_loss1 * loss1 + self.lambda_loss2 * loss2

        return total_loss, {
            'total_loss': total_loss.item(),
            'loss1': loss1.item(),
            'loss2': loss2.item(),
            'loss1_details': loss1_details,
            'loss2_details': loss2_details,
        }


class Trainer:
    """Two-stage trainer: wires the dataset, model, loss functions and
    optimizer together and runs the main training loop."""
    def __init__(self, config_path=None):
        """Set up device, configuration, network, losses and optimizers.

        Args:
            config_path: path to a YAML config file. If the file is missing,
                a default one is written there; if None, built-in defaults
                are used without touching the filesystem.
        """
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print(f"使用设备: {self.device}")

        self.load_config(config_path)
        self.setup_networks()
        self.setup_loss_functions()
        self.setup_optimizers()

        # Per-epoch curves, consumed later by plot_training_history().
        self.train_history = {
            'total_loss': [],
            'loss1': [],
            'loss2': [],
            'epoch': []
        }

    def load_config(self, config_path):
        """Load the YAML config; create it from built-in defaults if absent.

        Args:
            config_path: YAML file path, or None to use the defaults in
                memory only.
        """
        default_config = {
            'model': {
                'input_channels': 3,
                'attack_base_channels': 64,
                'restore_base_channels': 64
            },
            'training': {
                'batch_size': 32,
                'learning_rate': 1e-4,
                'num_epochs': 100,
                'max_samples': 3000
            },
            'loss': {
                'lambda_loss1': 1.0,
                'lambda_loss2': 1.0
            },
            'data': {
                'watermarked_dir': '/data/home/sczc029/run/dataset/RegenMark/watermarked/watermarked',
                'original_dir': '/data/home/sczc029/run/dataset/RegenMark/CelebAMask-HQ/CelebAMask-HQ/CelebA-HQ-img'
            },
            'save': {
                'checkpoint_dir': 'checkpoints/two_stage',
                'save_interval': 10
            }
        }

        # Guard against config_path=None: os.path.exists(None) raises TypeError.
        if config_path and os.path.exists(config_path):
            with open(config_path, 'r', encoding='utf-8') as f:
                config = EasyDict(yaml.load(f, Loader=yaml.SafeLoader))
        else:
            config = EasyDict(default_config)
            if config_path:
                # Persist the defaults for future runs. dirname() is '' for a
                # bare filename and os.makedirs('') raises — fall back to '.'.
                os.makedirs(os.path.dirname(config_path) or '.', exist_ok=True)
                with open(config_path, 'w') as f:
                    yaml.dump(default_config, f, default_flow_style=False)
                print(f"创建默认配置文件: {config_path}")

        self.config = config
        print("配置加载完成")

    def setup_networks(self):
        """Instantiate the two-stage model on the training device."""
        self.model = TwoStageWatermarkRemoval(
            input_channels=self.config.model.input_channels,
            attack_base_channels=self.config.model.attack_base_channels,
            restore_base_channels=self.config.model.restore_base_channels
        ).to(self.device)

        print(f"网络参数量: {sum(p.numel() for p in self.model.parameters()):,}")

    def setup_loss_functions(self):
        """Create the combined two-stage loss with configured weights."""
        self.loss_fn = TwoStageLossFunction(
            device=self.device,
            lambda_loss1=self.config.loss.lambda_loss1,
            lambda_loss2=self.config.loss.lambda_loss2
        )

    def setup_optimizers(self):
        """Create the Adam optimizer and a cosine-annealing LR schedule."""
        self.optimizer = optim.Adam(
            self.model.parameters(),
            lr=self.config.training.learning_rate,
            betas=(0.9, 0.999)
        )

        # One cosine cycle over the full training run.
        self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
            self.optimizer, T_max=self.config.training.num_epochs
        )

    def train_epoch(self, dataloader):
        """Run one training epoch; return the mean losses over all batches."""
        self.model.train()
        epoch_losses = {
            'total_loss': 0.0,
            'loss1': 0.0,
            'loss2': 0.0
        }

        for batch_idx, (watermarked_images, cover_images) in enumerate(tqdm(dataloader, desc="训练")):
            watermarked_images = watermarked_images.to(self.device)
            cover_images = cover_images.to(self.device)

            # Forward pass through both stages.
            results = self.model(watermarked_images, cover_images)

            # Combined two-stage loss.
            total_loss, loss_dict = self.loss_fn(results)

            # Backward pass and parameter update.
            self.optimizer.zero_grad()
            total_loss.backward()
            self.optimizer.step()

            # Accumulate scalar losses for the epoch average.
            epoch_losses['total_loss'] += loss_dict['total_loss']
            epoch_losses['loss1'] += loss_dict['loss1']
            epoch_losses['loss2'] += loss_dict['loss2']

            # Print detailed progress every 50 batches.
            if batch_idx % 50 == 0:
                print(f"Batch {batch_idx}: Total={loss_dict['total_loss']:.4f}, "
                      f"Loss1={loss_dict['loss1']:.4f}, Loss2={loss_dict['loss2']:.4f}")

        # Convert sums into per-batch averages.
        for key in epoch_losses:
            epoch_losses[key] /= len(dataloader)

        return epoch_losses

    def evaluate(self, dataloader):
        """Evaluate the model without gradients; return mean losses."""
        self.model.eval()
        eval_losses = {
            'total_loss': 0.0,
            'loss1': 0.0,
            'loss2': 0.0
        }

        with torch.no_grad():
            for watermarked_images, cover_images in dataloader:
                watermarked_images = watermarked_images.to(self.device)
                cover_images = cover_images.to(self.device)

                results = self.model(watermarked_images, cover_images)
                total_loss, loss_dict = self.loss_fn(results)

                eval_losses['total_loss'] += loss_dict['total_loss']
                eval_losses['loss1'] += loss_dict['loss1']
                eval_losses['loss2'] += loss_dict['loss2']

        # Convert sums into per-batch averages.
        for key in eval_losses:
            eval_losses[key] /= len(dataloader)

        return eval_losses

    def save_checkpoint(self, epoch, is_best=False):
        """Save model/optimizer/scheduler state plus config and history.

        Args:
            epoch: 1-based epoch number used in the checkpoint filename.
            is_best: also write a copy to best_model.pth when True.
        """
        checkpoint_dir = self.config.save.checkpoint_dir
        os.makedirs(checkpoint_dir, exist_ok=True)

        checkpoint = {
            'epoch': epoch,
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict(),
            'config': self.config,
            'train_history': self.train_history
        }

        # Regular per-epoch checkpoint (legacy serialization for
        # compatibility with older torch.load callers).
        checkpoint_path = os.path.join(checkpoint_dir, f'checkpoint_epoch_{epoch}.pth')
        torch.save(checkpoint, checkpoint_path, _use_new_zipfile_serialization=False)

        # Best-so-far model copy.
        if is_best:
            best_path = os.path.join(checkpoint_dir, 'best_model.pth')
            torch.save(checkpoint, best_path, _use_new_zipfile_serialization=False)
            print(f"保存最佳模型: {best_path}")

        print(f"保存检查点: {checkpoint_path}")

    def plot_training_history(self):
        """Plot total/loss1/loss2 curves and save them to training_history.png."""
        plt.figure(figsize=(12, 4))

        plt.subplot(1, 3, 1)
        plt.plot(self.train_history['epoch'], self.train_history['total_loss'])
        plt.title('Total Loss')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.grid(True)

        plt.subplot(1, 3, 2)
        plt.plot(self.train_history['epoch'], self.train_history['loss1'])
        plt.title('Loss1 (Distribution Matching)')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.grid(True)

        plt.subplot(1, 3, 3)
        plt.plot(self.train_history['epoch'], self.train_history['loss2'])
        plt.title('Loss2 (Image Restoration)')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.grid(True)

        plt.tight_layout()
        plt.savefig('training_history.png', dpi=150, bbox_inches='tight')
        plt.show()

    def train(self):
        """Main training loop: build data, train, checkpoint, plot history."""
        print("开始训练两段式去水印网络...")

        # Resize to the network input size and normalize to [-1, 1].
        transform = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])

        train_dataset = WatermarkDataset(
            self.config.data.watermarked_dir,
            self.config.data.original_dir,
            transform=transform,
            max_samples=self.config.training.max_samples
        )

        train_dataloader = DataLoader(
            train_dataset,
            batch_size=self.config.training.batch_size,
            shuffle=True,
            num_workers=4,
            pin_memory=True
        )

        best_loss = float('inf')

        for epoch in range(self.config.training.num_epochs):
            print(f"\nEpoch {epoch + 1}/{self.config.training.num_epochs}")

            train_losses = self.train_epoch(train_dataloader)

            # Advance the cosine LR schedule once per epoch.
            self.scheduler.step()

            # Record this epoch's averages.
            self.train_history['epoch'].append(epoch + 1)
            self.train_history['total_loss'].append(train_losses['total_loss'])
            self.train_history['loss1'].append(train_losses['loss1'])
            self.train_history['loss2'].append(train_losses['loss2'])

            print(f"训练损失:")
            print(f"  Total Loss: {train_losses['total_loss']:.6f}")
            print(f"  Loss1 (Distribution): {train_losses['loss1']:.6f}")
            print(f"  Loss2 (Restoration): {train_losses['loss2']:.6f}")

            # Track the best model by training loss (no validation split here).
            if train_losses['total_loss'] < best_loss:
                best_loss = train_losses['total_loss']
                self.save_checkpoint(epoch + 1, is_best=True)

            # Periodic checkpoint regardless of loss.
            if (epoch + 1) % self.config.save.save_interval == 0:
                self.save_checkpoint(epoch + 1)

        print("训练完成！")

        self.plot_training_history()


def main():
    """Entry point: build a Trainer from the YAML config and run training."""
    # Make sure the config directory exists before the Trainer writes to it.
    os.makedirs('config', exist_ok=True)

    trainer = Trainer('config/two_stage_config.yaml')
    trainer.train()


if __name__ == '__main__':
    main()