import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import torchvision.utils as vutils
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm

# ----------------------------
# 1. Hyperparameters & environment
# ----------------------------
seed = 42
torch.manual_seed(seed)

batch_size = 128
image_size = 64
nz = 100       # dimensionality of the generator's latent input vector
ngf = 64       # base number of generator feature maps
ndf = 64       # base number of discriminator feature maps
num_epochs = 20
lr = 0.0002
beta1 = 0.5    # Adam beta1; 0.5 is the DCGAN-paper recommendation
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# ----------------------------
# 2. Data preprocessing & loading
# ----------------------------
transform = transforms.Compose([
    transforms.Resize(image_size),
    transforms.CenterCrop(image_size),
    transforms.ToTensor(),
    # Map pixels from [0, 1] to [-1, 1] so real images match the
    # generator's Tanh output range.
    transforms.Normalize([0.5]*3, [0.5]*3),
])
# ImageFolder requires at least one class subdirectory,
# e.g. data/anime_faces/images/ containing the actual image files.
dataset = torchvision.datasets.ImageFolder(root="data/anime_faces", transform=transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)

# ----------------------------
# 3. Generator definition
# ----------------------------
class Generator(nn.Module):
    """DCGAN generator: maps a latent vector to an image in [-1, 1].

    Upsamples with a stack of strided transposed convolutions:
    (nz, 1, 1) -> 4x4 -> 8x8 -> 16x16 -> 32x32 -> 64x64.
    """

    def __init__(self, nz: int = 100, ngf: int = 64, nc: int = 3):
        """
        Args:
            nz:  dimensionality of the input latent vector.
            ngf: base number of generator feature maps.
            nc:  number of output image channels.

        Defaults match the module-level hyperparameters, so ``Generator()``
        behaves exactly as before; parameterizing removes the hidden
        dependency on the globals ``nz``/``ngf``.
        """
        super().__init__()
        self.main = nn.Sequential(
            # (nz, 1, 1) -> (ngf*8, 4, 4)
            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8), nn.ReLU(True),
            # -> (ngf*4, 8, 8)
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4), nn.ReLU(True),
            # -> (ngf*2, 16, 16)
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2), nn.ReLU(True),
            # -> (ngf, 32, 32)
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf), nn.ReLU(True),
            # -> (nc, 64, 64); Tanh matches the [-1, 1] data normalization
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh()
        )

    def forward(self, x):
        """x: [B, nz, 1, 1] latent batch -> [B, nc, 64, 64] images in [-1, 1]."""
        return self.main(x)

# ----------------------------
# 4. Discriminator definition (4x4 conv classifier head — not global average pooling)
# ----------------------------
class Discriminator(nn.Module):
    """DCGAN discriminator: scores an image as real (→1) or fake (→0).

    Downsamples with strided convolutions to a 4x4 feature map, then a
    final 4x4 convolution collapses it to a single value, squashed to a
    probability by Sigmoid.
    """

    def __init__(self, ndf: int = 64, nc: int = 3):
        """
        Args:
            ndf: base number of discriminator feature maps.
            nc:  number of input image channels.

        Defaults match the module-level hyperparameters, so
        ``Discriminator()`` behaves exactly as before; parameterizing
        removes the hidden dependency on the global ``ndf``.
        """
        super().__init__()
        self.features = nn.Sequential(
            # (nc, 64, 64) -> (ndf, 32, 32); per DCGAN, no BatchNorm on the first layer
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # -> (ndf*2, 16, 16)
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2), nn.LeakyReLU(0.2, inplace=True),
            # -> (ndf*4, 8, 8)
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4), nn.LeakyReLU(0.2, inplace=True),
            # -> (ndf*8, 4, 4)
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8), nn.LeakyReLU(0.2, inplace=True),
        )
        self.classifier = nn.Sequential(
            # (ndf*8, 4, 4) -> (1, 1, 1)
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        """x: [B, nc, 64, 64] images -> [B] real-probabilities in (0, 1)."""
        x = self.features(x)           # [B, ndf*8, 4, 4]
        x = self.classifier(x)         # [B, 1, 1, 1]
        return x.view(-1)              # flatten to [B]

# ----------------------------
# 5. Instantiate models & initialize weights
# ----------------------------
netG = Generator().to(device)
netD = Discriminator().to(device)

def weights_init(m):
    """DCGAN weight initialization, intended for use with ``Module.apply``.

    Conv layers get N(0, 0.02) weights; BatchNorm layers get N(1, 0.02)
    weights and zero bias. All other module types are left untouched.
    """
    cls = m.__class__.__name__
    # Substring match deliberately covers Conv2d AND ConvTranspose2d.
    if 'Conv' in cls:
        # nn.init.* already operate in-place under no_grad; the old
        # ``.data`` access is unnecessary and discouraged.
        nn.init.normal_(m.weight, 0.0, 0.02)
    elif 'BatchNorm' in cls:
        nn.init.normal_(m.weight, 1.0, 0.02)
        nn.init.constant_(m.bias, 0)

netG.apply(weights_init)
netD.apply(weights_init)

# ----------------------------
# 6. Loss & optimizers
# ----------------------------
criterion = nn.BCELoss()
# Adam with beta1=0.5, as recommended by the DCGAN paper.
optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))

# Fixed latent batch reused every epoch to visualize generator progress.
fixed_noise = torch.randn(64, nz, 1, 1, device=device)
img_list = []

# ----------------------------
# 7. Training loop
# ----------------------------
print("开始训练...")
for epoch in range(num_epochs):
    for data, _ in tqdm(dataloader):
        real = data.to(device)
        b_size = real.size(0)

        # -- Update discriminator: push D(real) toward 1 and D(fake) toward 0 -- #
        netD.zero_grad()
        # Real samples are labeled 1.
        label_real = torch.full((b_size,), 1., device=device)
        output_real = netD(real)                   # [B]
        errD_real = criterion(output_real, label_real)

        # Generate fakes; detach so this backward pass does not touch G.
        noise = torch.randn(b_size, nz, 1, 1, device=device)
        fake = netG(noise)
        label_fake = torch.full((b_size,), 0., device=device)
        output_fake = netD(fake.detach())         # [B]
        errD_fake = criterion(output_fake, label_fake)

        errD = errD_real + errD_fake
        errD.backward()
        optimizerD.step()

        # -- Update generator -- #
        netG.zero_grad()
        # Goal: make the (just-updated) discriminator classify fakes as real.
        label_gen = torch.full((b_size,), 1., device=device)
        output_gen = netD(fake)                   # [B]
        errG = criterion(output_gen, label_gen)
        errG.backward()
        optimizerG.step()

    # NOTE(review): these are the losses of the LAST batch only, not epoch means.
    print(f"Epoch [{epoch+1}/{num_epochs}]  Loss_D: {errD.item():.4f}  Loss_G: {errG.item():.4f}")

    # Snapshot G's output on the fixed noise for post-training visualization.
    with torch.no_grad():
        fake = netG(fixed_noise).detach().cpu()
        img_list.append(vutils.make_grid(fake, padding=2, normalize=True))

# ----------------------------
# 8. Visualize generation quality: one figure per saved epoch grid
# ----------------------------
for grid in img_list:
    plt.figure(figsize=(6, 6))
    plt.axis('off')
    # make_grid produces CHW; imshow expects HWC, so move channels last.
    hwc = np.transpose(grid, (1, 2, 0))
    plt.imshow(hwc)
    plt.show()
