import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import torchvision.utils as vutils
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from torch.cuda.amp import autocast, GradScaler

# ----------------------------
# Generator 定义
# ----------------------------
class Generator(nn.Module):
    """DCGAN generator: maps an (nz x 1 x 1) latent vector to a 3 x 64 x 64 image in [-1, 1]."""

    def __init__(self, nz=100, ngf=64):
        super().__init__()

        def up_stage(in_ch, out_ch, stride, padding):
            # One upsampling stage: ConvTranspose -> BatchNorm -> ReLU.
            return [
                nn.ConvTranspose2d(in_ch, out_ch, 4, stride, padding, bias=False),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(True),
            ]

        layers = []
        layers += up_stage(nz, ngf * 8, 1, 0)       # (nz) x 1 x 1   -> (ngf*8) x 4 x 4
        layers += up_stage(ngf * 8, ngf * 4, 2, 1)  # -> (ngf*4) x 8 x 8
        layers += up_stage(ngf * 4, ngf * 2, 2, 1)  # -> (ngf*2) x 16 x 16
        layers += up_stage(ngf * 2, ngf, 2, 1)      # -> (ngf) x 32 x 32
        layers += [
            nn.ConvTranspose2d(ngf, 3, 4, 2, 1, bias=False),
            nn.Tanh(),                              # -> 3 x 64 x 64, values in [-1, 1]
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, x):
        return self.main(x)

# ----------------------------
# Discriminator 定义（输出 logits，无 Sigmoid）
# ----------------------------
class Discriminator(nn.Module):
    """DCGAN discriminator: scores a 3 x 64 x 64 image, returning one raw logit per sample
    (no Sigmoid — pair with BCEWithLogitsLoss)."""

    def __init__(self, ndf=64):
        super().__init__()

        def down_stage(in_ch, out_ch):
            # One downsampling stage: Conv -> BatchNorm -> LeakyReLU (halves H and W).
            return [
                nn.Conv2d(in_ch, out_ch, 4, 2, 1, bias=False),
                nn.BatchNorm2d(out_ch),
                nn.LeakyReLU(0.2, inplace=True),
            ]

        stages = [
            # 3 x 64 x 64 -> (ndf) x 32 x 32; first stage has no BatchNorm.
            nn.Conv2d(3, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        stages += down_stage(ndf, ndf * 2)      # -> (ndf*2) x 16 x 16
        stages += down_stage(ndf * 2, ndf * 4)  # -> (ndf*4) x 8 x 8
        stages += down_stage(ndf * 4, ndf * 8)  # -> (ndf*8) x 4 x 4
        self.features = nn.Sequential(*stages)
        # Final 4x4 conv collapses the spatial dims into a single logit channel.
        self.classifier = nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False)

    def forward(self, x):
        feats = self.features(x)
        logits = self.classifier(feats)  # [B, 1, 1, 1]
        return logits.view(-1)           # flatten to [B]

def weights_init(m):
    """DCGAN weight initialization, applied via ``net.apply(weights_init)``.

    Conv / ConvTranspose weights ~ N(0, 0.02); BatchNorm weights ~ N(1, 0.02)
    with biases zeroed. Other module types are left untouched.
    """
    name = type(m).__name__
    if name.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif name.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)

def main():
    """Train a DCGAN on 64x64 images from ``data/anime_faces`` and display the final grid.

    Mixed precision (autocast + GradScaler) and pinned-memory transfers are
    enabled only when CUDA is available, so CPU-only runs neither warn nor
    silently change behavior.
    """
    # ----------------------------
    # 1. Config & hyperparameters
    # ----------------------------
    torch.manual_seed(42)
    torch.backends.cudnn.benchmark = True  # autotune conv kernels; input size is fixed
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    batch_size = 128
    image_size = 64
    nz = 100        # latent (noise) vector dimension
    ngf = 64        # generator feature-map base width
    ndf = 64        # discriminator feature-map base width
    num_epochs = 20
    lr = 0.0002
    beta1 = 0.5     # Adam beta1 = 0.5, the usual DCGAN setting

    # ----------------------------
    # 2. Dataset & DataLoader
    # ----------------------------
    transform = transforms.Compose([
        transforms.Resize(image_size),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
        # Scale pixels to [-1, 1] to match the generator's Tanh output range.
        transforms.Normalize([0.5]*3, [0.5]*3),
    ])
    dataset = torchvision.datasets.ImageFolder(root="data/anime_faces", transform=transform)
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=True,
        # pin_memory only helps (and avoids a warning) when copying to CUDA.
        num_workers=8, pin_memory=use_cuda, persistent_workers=True
    )

    # ----------------------------
    # 3. Networks & weight init
    # ----------------------------
    netG = Generator(nz=nz, ngf=ngf).to(device)
    netD = Discriminator(ndf=ndf).to(device)
    netG.apply(weights_init)
    netD.apply(weights_init)

    # ----------------------------
    # 4. Loss & optimizers
    # ----------------------------
    criterion = nn.BCEWithLogitsLoss()  # logits + BCE fused: numerically safe under AMP
    optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))

    # ----------------------------
    # 5. Mixed precision & fixed noise
    # ----------------------------
    # AMP is a CUDA feature; disable it explicitly on CPU instead of letting
    # GradScaler warn and silently no-op.
    scalerD = GradScaler(enabled=use_cuda)
    scalerG = GradScaler(enabled=use_cuda)
    fixed_noise = torch.randn(64, nz, 1, 1, device=device)  # fixed latents to track progress
    img_list = []

    # ----------------------------
    # 6. Training loop
    # ----------------------------
    print("开始训练（Eager + AMP 模式）...")
    for epoch in range(num_epochs):
        last_lossD = last_lossG = None  # guard against an empty dataloader
        for real, _ in tqdm(dataloader, desc=f"Epoch {epoch+1}/{num_epochs}"):
            real = real.to(device, non_blocking=True)
            b_size = real.size(0)  # the last batch may be smaller than batch_size
            label_real = torch.ones(b_size, device=device)
            label_fake = torch.zeros(b_size, device=device)

            # -- Update discriminator D: maximize log D(x) + log(1 - D(G(z))) -- #
            netD.zero_grad(set_to_none=True)
            with autocast(enabled=use_cuda):
                logits_real = netD(real)
                lossD_real = criterion(logits_real, label_real)

                noise = torch.randn(b_size, nz, 1, 1, device=device)
                fake = netG(noise)
                # detach() keeps the D step from backpropagating into G.
                logits_fake = netD(fake.detach())
                lossD_fake = criterion(logits_fake, label_fake)

                lossD = lossD_real + lossD_fake

            scalerD.scale(lossD).backward()
            scalerD.step(optimizerD)
            scalerD.update()

            # -- Update generator G: non-saturating loss, real labels on fakes -- #
            netG.zero_grad(set_to_none=True)
            with autocast(enabled=use_cuda):
                # Reuse `fake`: its graph back to G is still alive (only the
                # detached copy went through the D update).
                logits_gen = netD(fake)
                lossG = criterion(logits_gen, label_real)

            scalerG.scale(lossG).backward()
            scalerG.step(optimizerG)
            scalerG.update()

            last_lossD, last_lossG = lossD, lossG

        # .item() only once per epoch (each call syncs with the GPU).
        if last_lossD is not None:
            print(f"Epoch {epoch+1}/{num_epochs}  Loss_D: {last_lossD.item():.4f}  Loss_G: {last_lossG.item():.4f}")

        # Snapshot generator output on the fixed latents.
        with torch.no_grad():
            fake_imgs = netG(fixed_noise).detach().cpu()
            img_list.append(vutils.make_grid(fake_imgs, padding=2, normalize=True))

    # ----------------------------
    # 7. Visualize the final result
    # ----------------------------
    if img_list:
        plt.figure(figsize=(8, 8))
        plt.axis('off')
        plt.title("Final Generated Images")
        # make_grid returns a CHW tensor; convert to an HWC NumPy array for imshow.
        plt.imshow(np.transpose(img_list[-1].numpy(), (1, 2, 0)))
        plt.show()

# Entry-point guard: required because the DataLoader uses worker processes,
# which re-import this module on some start methods (e.g. spawn on Windows).
if __name__ == '__main__':
    main()
