# -*- coding: utf-8 -*-
"""
GAN-Based Digital Watermarking System (PyTorch 2.3+)
Created on 2025-04-15 21:45
"""

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, datasets
import numpy as np
import matplotlib.pyplot as plt
from torch.fft import rfft2, irfft2
import torch.nn.functional as F

# 1. Hardware configuration and random seed
device = torch.device("cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu")
torch.manual_seed(20250415)
torch.set_float32_matmul_precision('high')  # allow TF32 matmul on supported hardware for speed


# 2. Data loading
class WatermarkDataset(Dataset):
    """ImageFolder-backed dataset that pairs every image with one fixed binary watermark."""

    def __init__(self, img_dir, wm_size=32):
        # 256x256 center crop, normalized to [-1, 1] per RGB channel.
        self.transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(256),
            transforms.ToTensor(),
            transforms.Normalize([0.5] * 3, [0.5] * 3),
        ])
        self.dataset = datasets.ImageFolder(img_dir, transform=self.transform)
        # One shared binary watermark (values in {0, 1}) reused for every sample.
        self.wm = torch.randint(0, 2, (1, wm_size, wm_size), dtype=torch.float32)

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        image, _ = self.dataset[idx]
        return image, self.wm

    # 3. Core model architecture


class DCTAttention(nn.Module):
    """Frequency-domain attention: reweights rFFT coefficients by a magnitude-derived soft mask.

    NOTE(review): despite the name, this applies the real FFT (rfft2), not a DCT — confirm intent.
    """

    def forward(self, x):
        _, _, height, width = x.shape
        spectrum = rfft2(x, norm='ortho')
        # Channel-averaged magnitude squashed to (0, 1) acts as a per-frequency gate.
        gate = torch.sigmoid(torch.abs(spectrum).mean(dim=1, keepdim=True))
        return irfft2(spectrum * gate, s=(height, width), norm='ortho')


class ResidualBlock(nn.Module):
    """Two 3x3 conv + InstanceNorm layers wrapped in an identity skip connection."""

    def __init__(self, channels):
        super().__init__()
        # Layer order must stay fixed so state_dict keys remain stable.
        self.conv = nn.Sequential(
            nn.Conv2d(channels, channels, 3, padding=1),
            nn.InstanceNorm2d(channels),
            nn.ReLU(),
            nn.Conv2d(channels, channels, 3, padding=1),
            nn.InstanceNorm2d(channels),
        )

    def forward(self, x):
        residual = self.conv(x)
        return x + residual


# Dataset variant for MNIST (single channel, 28x28)
class MNISTWatermarkDataset(Dataset):
    """MNIST training split where every digit image is paired with one fixed binary watermark."""

    def __init__(self, wm_size=28):  # MNIST images are 28x28
        # Single-channel normalization to [-1, 1].
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5]),
        ])
        self.dataset = datasets.MNIST(
            root='./data',
            train=True,
            download=True,
            transform=self.transform,
        )
        # Shared binary watermark, sized to match the 28x28 images.
        self.wm = torch.randint(0, 2, (1, wm_size, wm_size), dtype=torch.float32)

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        image, _ = self.dataset[idx]
        return image, self.wm

# Generator with input/output channel counts adapted to 1-channel MNIST
class Generator(nn.Module):
    """Encoder / residual / decoder generator that hides a watermark in a grayscale image.

    forward(img, wm): `img` is (B, 1, H, W) in [-1, 1]; `wm` is a (B, 1, h, w)
    watermark plane. Returns a watermarked image with the same shape as `img`,
    bounded to [-1, 1] by the final Tanh.

    Fix: removed the stray `import torch.nn.functional as F` that sat inside the
    class body (it created an accidental `Generator.F` class attribute); `F` is
    already imported at module level.
    """

    def __init__(self):
        super().__init__()
        # Two stride-2 convs: (B, 1, H, W) -> (B, 128, H/4, W/4),
        # with frequency attention applied after the first downsampling.
        self.encoder = nn.Sequential(
            nn.Conv2d(1, 64, 4, stride=2, padding=1),
            nn.LeakyReLU(0.2),
            DCTAttention(),
            nn.Conv2d(64, 128, 4, stride=2, padding=1),
            nn.InstanceNorm2d(128),
            nn.LeakyReLU(0.2)
        )

        self.res_blocks = nn.Sequential(
            *[ResidualBlock(128) for _ in range(6)]
        )

        # 128 feature channels + 1 concatenated watermark plane.
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(128 + 1, 64, 4, stride=2, padding=1),
            nn.InstanceNorm2d(64),
            nn.ReLU(),
            nn.ConvTranspose2d(64, 1, 4, stride=2, padding=1),
            nn.Tanh()
        )

    def forward(self, img, wm):
        # Encode the cover image into a feature map.
        x = self.encoder(img)
        x = self.res_blocks(x)

        # Resize the watermark to the feature-map resolution before fusing it.
        wm = F.interpolate(wm, size=(x.size(2), x.size(3)), mode='bilinear', align_corners=False)

        # Concatenate the watermark as an extra channel and decode back to image space.
        x = torch.cat([x, wm], dim=1)
        return self.decoder(x)

# Discriminator with input channel count adapted to 1 (grayscale)
class Discriminator(nn.Module):
    """PatchGAN-style critic over single-channel images; emits per-patch real/fake probabilities."""

    def __init__(self):
        super().__init__()
        layers = [
            nn.Conv2d(1, 64, 4, stride=2, padding=1),  # single input channel (grayscale)
            nn.LeakyReLU(0.2),
            nn.Conv2d(64, 128, 4, stride=2, padding=1),
            nn.InstanceNorm2d(128),
            nn.LeakyReLU(0.2),
            nn.Conv2d(128, 256, 4, stride=2, padding=1),
            nn.InstanceNorm2d(256),
            nn.LeakyReLU(0.2),
            nn.Conv2d(256, 1, 3, padding=1),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, img):
        logits = self.model(img)
        return torch.sigmoid(logits)


class Extractor(nn.Module):
    """U-Net结构水印提取器"""

    def __init__(self):
        super().__init__()
        self.down = nn.Sequential(
            nn.Conv2d(1, 64, 4, stride=2, padding=1),  # 输入通道改为1
            nn.LeakyReLU(0.2),
            nn.Conv2d(64, 128, 4, stride=2, padding=1),
            nn.InstanceNorm2d(128),
            nn.LeakyReLU(0.2)
        )

        self.up = nn.Sequential(
            nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1),
            nn.InstanceNorm2d(64),
            nn.LeakyReLU(0.2),
            nn.ConvTranspose2d(64, 1, 4, stride=2, padding=1),
            nn.Sigmoid()
        )

    def forward(self, img):
        x = self.down(img)
        return self.up(x)

    # 4. Differentiable attack layer


class DiffJPEG(nn.Module):
    """Differentiable stand-in for JPEG compression.

    NOTE(review): this is a placeholder — it only clamps to [-1, 1] and
    round-trips through [0, 1]; `quality` is stored but unused, and no
    DCT/quantization is actually applied.
    """

    def __init__(self, quality=75):
        super().__init__()
        self.quality = quality

    def forward(self, x):
        clamped = torch.clamp(x, -1, 1)
        unit = (clamped + 1) / 2   # map to [0, 1], where a real codec would quantize
        return unit * 2 - 1        # map back to [-1, 1]


# 5. Training system
# (LPIPS perceptual loss was removed; a plain L1 loss is used instead)

class WatermarkGAN:
    """Bundles generator, discriminator, and extractor with their optimizers and losses."""

    def __init__(self):
        self.G = Generator().to(device)
        self.D = Discriminator().to(device)
        self.E = Extractor().to(device)

        # Discriminator learns at half the generator's rate (a common GAN balance).
        self.opt_G = optim.AdamW(self.G.parameters(), lr=2e-4, betas=(0.5, 0.999))
        self.opt_D = optim.AdamW(self.D.parameters(), lr=1e-4, betas=(0.5, 0.999))
        self.opt_E = optim.AdamW(self.E.parameters(), lr=1e-4)

        self.criterion = {
            'bce': nn.BCELoss(),
            'mse': nn.MSELoss(),
        }

        self.attack = DiffJPEG(quality=70)

    def train_step(self, imgs, wms):
        """Run one combined D / (G+E) optimization step.

        Returns a dict with 'd_loss', 'g_loss', and 'wm_acc' (fraction of
        watermark bits recovered after rounding the extractor output).
        """
        # Generate watermarked images.
        wm_imgs = self.G(imgs, wms)

        # --- Discriminator update ---
        # Fix: run D exactly once per input. The original recomputed D(imgs)
        # for its ones-target and ran D on the non-detached fake just to build
        # a zeros-target, wasting forward passes (loss values were unchanged).
        self.opt_D.zero_grad()
        real_pred = self.D(imgs)
        fake_pred = self.D(wm_imgs.detach())
        real_loss = self.criterion['bce'](real_pred, torch.ones_like(real_pred))
        fake_loss = self.criterion['bce'](fake_pred, torch.zeros_like(fake_pred))
        d_loss = (real_loss + fake_loss) * 0.5
        d_loss.backward()
        self.opt_D.step()

        # --- Joint generator + extractor update ---
        self.opt_G.zero_grad()
        self.opt_E.zero_grad()

        # Adversarial loss: generator tries to make D output "real".
        gen_pred = self.D(wm_imgs)
        g_adv_loss = self.criterion['bce'](gen_pred, torch.ones_like(gen_pred))

        # Watermark reconstruction loss after the simulated attack.
        attacked = self.attack(wm_imgs)
        extracted = self.E(attacked)
        wm_loss = self.criterion['mse'](extracted, wms)

        # Perceptual (L1) fidelity between cover and watermarked images.
        perc_loss = F.l1_loss(wm_imgs, imgs)

        # Weighted total: watermark recovery dominates; L1 keeps images close.
        total_loss = g_adv_loss + 10 * wm_loss + 1.0 * perc_loss
        total_loss.backward()
        self.opt_G.step()
        self.opt_E.step()

        return {
            'd_loss': d_loss.item(),
            'g_loss': total_loss.item(),
            'wm_acc': (extracted.round() == wms).float().mean().item()
        }


# 6. Main training loop
def main():
    """Train the watermark GAN on MNIST, save the models, and plot training curves."""
    # MNIST dataset (downloads to ./data on first run).
    dataset = MNISTWatermarkDataset()
    loader = DataLoader(dataset, batch_size=64, shuffle=True)

    system = WatermarkGAN()

    # Per-batch training metrics.
    history = {'d_loss': [], 'g_loss': [], 'wm_acc': []}

    for epoch in range(1):
        for i, (imgs, wms) in enumerate(loader):
            imgs, wms = imgs.to(device), wms.to(device)
            metrics = system.train_step(imgs, wms)

            for k in metrics:
                history[k].append(metrics[k])

            if i % 50 == 0:
                print(f"Epoch {epoch} Batch {i}: "
                      f"D_loss={metrics['d_loss']:.3f} "
                      f"G_loss={metrics['g_loss']:.3f} "
                      f"WM_Acc={metrics['wm_acc']:.2%}")

    # Persist the deploy-time models (the discriminator is only needed for training).
    torch.save({
        'generator': system.G.state_dict(),
        'extractor': system.E.state_dict()
    }, 'watermark_gan.pth')

    # Training curves. Fix: labels were passed to plt.plot but plt.legend()
    # was never called, so they did not appear in the saved figure.
    plt.figure(figsize=(15, 5))
    plt.subplot(131)
    plt.plot(history['d_loss'], label='Discriminator Loss')
    plt.legend()
    plt.subplot(132)
    plt.plot(history['g_loss'], label='Generator Loss')
    plt.legend()
    plt.subplot(133)
    plt.plot(history['wm_acc'], label='Watermark Accuracy')
    plt.legend()
    plt.savefig('training_curve.png')


# Script entry point: only run training when executed directly, not on import.
if __name__ == '__main__':
    main()