# coding:utf-8
# Author : hiicy redldw
# Date : 2019/06/27

"""
判别器由卷积层、批标准化层以及LeakyReLU激活层组成。输入是3x64x64的图像，
输出是输入图像来自实际数据的概率。
生成器由转置卷积层、批标准化层以及ReLU激活层组成。
输入是一个本征向量，它是从标准正态分布中采样得到的，
输出是一个3x64x64的RGB图像。
转置卷积层能够把本征向量转换成和图像具有相同大小的张量。
"""
import torch
import argparse
import os
import random
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML

# -----------------------------------------------------------------------------
# Run configuration / hyperparameters.
# -----------------------------------------------------------------------------

# Fix the random seed so results are reproducible.
manualSeed = 999
# NOTE(fix): the original code re-randomized the seed on the next line, which
# defeated the reproducibility stated above.  Uncomment to get a different,
# non-reproducible run each time:
# manualSeed = random.randint(1, 10000)
print("Random Seed: ", manualSeed)
random.seed(manualSeed)
torch.manual_seed(manualSeed)

# Root directory of the dataset (ImageFolder layout expected).
dataroot = r"F:\Resources\DataSets\celeba"
# Number of worker processes for the DataLoader.
workers = 1
# Batch size during training.
batch_size = 128
# Spatial size of the training images; every image is resized/cropped to this.
image_size = 64
# Number of channels in the training images (3 for RGB).
nc = 3
# Size of the latent vector z (generator input, sampled from N(0, 1)).
nz = 100
# Base number of feature maps in the generator.
ngf = 64
# Base number of feature maps in the discriminator.
ndf = 64
# Number of training epochs.
num_epochs = 5
# Learning rate for both optimizers.
lr = 0.0002
# Beta1 hyperparameter for the Adam optimizers.
beta1 = 0.5
# Number of GPUs available; 0 runs in CPU mode.
ngpu = 1

device = torch.device("cuda:0" if (torch.cuda.is_available() and ngpu > 0) else "cpu")

# -----------------------------------------------------------------------------
# Data preparation: resize, center-crop, and normalize each image to [-1, 1]
# (matching the generator's tanh output range).
# -----------------------------------------------------------------------------
dataset = dset.ImageFolder(dataroot, transform=transforms.Compose([
                                    transforms.Resize(image_size),
                                    transforms.CenterCrop(image_size),
                                    transforms.ToTensor(),
                                    # mean=std=0.5 per channel maps [0, 1] -> [-1, 1]
                                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                            ]))
# FIX: honor the configured `workers` setting (the original hard-coded
# num_workers=0, silently ignoring the `workers` variable defined above).
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                         shuffle=True, num_workers=workers)

# Show a grid of sample training images.
real_batch = next(iter(dataloader))
plt.figure(figsize=(8, 8))
plt.axis("off")
plt.title("Training Images")
plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64],
                                         padding=2, normalize=True).cpu(), (1, 2, 0)))

# 权重初始化

def weights_init(m):
    """Initialize one module following the DCGAN paper.

    Conv / ConvTranspose weights are drawn from N(0, 0.02); BatchNorm weights
    from N(1, 0.02) with zero bias.  Meant to be used via
    ``net.apply(weights_init)``, which calls it on every submodule.
    """
    layer_type = m.__class__.__name__
    # Substring match deliberately covers both Conv2d and ConvTranspose2d.
    if "Conv" in layer_type:
        nn.init.normal_(m.weight.data, mean=0.0, std=0.02)
    elif "BatchNorm" in layer_type:
        nn.init.normal_(m.weight.data, mean=1.0, std=0.02)
        nn.init.constant_(m.bias.data, 0)


class Generator(nn.Module):
    """DCGAN generator.

    Maps a latent vector z of shape (N, nz, 1, 1) to an RGB image of shape
    (N, nc, 64, 64).  Each transposed-convolution stage (paired with batch
    norm and ReLU) doubles the spatial size; the final tanh squashes the
    output into [-1, 1], the same range as the normalized training images.
    """

    def __init__(self, ngpu):
        super(Generator, self).__init__()
        self.ngpu = ngpu
        # Channel widths of the successive upsampling stages.
        c8, c4, c2 = ngf * 8, ngf * 4, ngf * 2
        self.main = nn.Sequential(
            # z: (nz, 1, 1) -> (c8, 4, 4)
            nn.ConvTranspose2d(nz, c8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(c8),
            nn.ReLU(True),
            # -> (c4, 8, 8)
            nn.ConvTranspose2d(c8, c4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(c4),
            nn.ReLU(True),
            # -> (c2, 16, 16)
            nn.ConvTranspose2d(c4, c2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(c2),
            nn.ReLU(True),
            # -> (ngf, 32, 32)
            nn.ConvTranspose2d(c2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # -> (nc, 64, 64), values in [-1, 1]
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh(),
        )

    def forward(self, input):
        return self.main(input)

# Instantiate the generator on the selected device.
netG = Generator(ngpu).to(device)

# Wrap in DataParallel when multiple GPUs are requested.
if device.type == 'cuda' and ngpu > 1:
    netG = nn.DataParallel(netG, list(range(ngpu)))

# Randomly initialize all weights via weights_init (mean=0, stdev=0.02).
netG.apply(weights_init)

# Uncomment to inspect the model architecture:
# print(netG)

# The discriminator D is a binary classifier: it takes an image as input and
# outputs the probability that the image is real rather than generated.
# Strided convolutions (instead of pooling), BatchNorm, and LeakyReLU follow
# the DCGAN guidelines.
class Discriminator(nn.Module):
    """DCGAN discriminator: (N, nc, 64, 64) image -> (N, 1, 1, 1) probability."""

    def __init__(self, ngpu):
        super(Discriminator, self).__init__()
        self.ngpu = ngpu
        # Channel widths of the successive downsampling stages.
        d2, d4, d8 = ndf * 2, ndf * 4, ndf * 8
        self.main = nn.Sequential(
            # (nc, 64, 64) -> (ndf, 32, 32); no BatchNorm on the input layer
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # -> (d2, 16, 16)
            nn.Conv2d(ndf, d2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(d2),
            nn.LeakyReLU(0.2, inplace=True),
            # -> (d4, 8, 8)
            nn.Conv2d(d2, d4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(d4),
            nn.LeakyReLU(0.2, inplace=True),
            # -> (d8, 4, 4)
            nn.Conv2d(d4, d8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(d8),
            nn.LeakyReLU(0.2, inplace=True),
            # -> (1, 1, 1); sigmoid yields a probability
            nn.Conv2d(d8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, input):
        return self.main(input)

# Instantiate the discriminator on the selected device.
netD = Discriminator(ngpu).to(device)

# Wrap in DataParallel when multiple GPUs are requested.
if device.type == 'cuda' and ngpu > 1:
    netD = nn.DataParallel(netD, list(range(ngpu)))

# Randomly initialize all weights via weights_init (mean=0, stdev=0.02).
netD.apply(weights_init)

# Loss function and optimizers.
# BCELoss pairs with the Sigmoid output of D; both networks are trained by
# minimizing a binary-cross-entropy objective.
criterion = nn.BCELoss()

# Create batch of latent vectors that we will use to visualize
#  the progression of the generator
fixed_noise = torch.randn(64,nz,1,1,device=device)
# One-sided label smoothing: real targets are 0.8 instead of 1.0, which
# keeps the discriminator from becoming over-confident.
real_label = 0.8
fake_label = 0

# Setup Adam optimizers for both G and D
optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))

# Training loop.
# Lists that track progress for the plots afterwards.
img_list = []
G_losses = []
D_losses = []
iters = 0

print("Starting Training Loop...")
# For each epoch
for epoch in range(num_epochs):
    # For each batch in the dataloader
    for i,data in enumerate(dataloader,0):
        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        ## Train with an all-real batch
        netD.zero_grad()
        real_cpu = data[0].to(device)
        b_size = real_cpu.size(0)
        # Smoothed "real" targets (0.8, see real_label above).
        label = torch.full((b_size,),real_label,device=device)
        # Forward pass the real batch through D
        output = netD(real_cpu).view(-1)
        errD_real = criterion(output,label)
        # Backward pass: accumulate D's gradients on real data
        errD_real.backward()
        D_x = output.mean().item()  # mean score D assigns to real images

        # Train with an all-fake batch
        noise = torch.randn(b_size,nz,1,1,device=device)
        # Generate a batch of fake images with G
        fake = netG(noise)
        label.fill_(fake_label)  # reuse the label tensor, now with fake targets
        # Classify the all-fake batch with D
        output = netD(fake.detach()).view(-1) # detach is essential: it cuts the graph back to G, so this backward pass computes no generator gradients
        # The binary-cross-entropy loss is computed separately for the real and fake halves
        errD_fake = criterion(output,label)
        # Accumulate the gradients for this all-fake batch
        errD_fake.backward()
        D_G_z1 = output.mean().item()
        # Total discriminator loss = real-batch loss + fake-batch loss
        errD = errD_real + errD_fake
        # Update the discriminator D
        optimizerD.step()
        # FIXME: D could be trained several times per generator update

        ############################
        # (2) Update G network: maximize log(D(G(z)))
        ###########################
        netG.zero_grad()
        # Minimizing -log(D(G(z))) via BCE is the non-saturating generator loss.
        label.fill_(real_label)  # fake images are labeled "real" for the generator's cost
        # D was just updated, so run the fake batch through D again
        # (the previous D(fake) outputs are stale).
        output = netD(fake).view(-1)
        # Compute G's loss
        errG = criterion(output,label)
        errG.backward()
        D_G_z2 = output.mean().item()
        optimizerG.step()

        # Print training statistics
        if i % 50 == 0:
            print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
                  % (epoch, num_epochs, i, len(dataloader),
                     errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
        G_losses.append(errG.item())
        D_losses.append(errD.item())

        # Check how the generator is doing by saving G's output on fixed_noise
        if (iters % 500 == 0) or ((epoch == num_epochs - 1) and (i == len(dataloader) - 1)):
            with torch.no_grad():
                fake = netG(fixed_noise).detach().cpu()
            img_list.append(vutils.make_grid(fake, padding=2, normalize=True))
        iters += 1

# Plot the generator and discriminator losses recorded during training.
plt.figure(figsize=(10, 5))
plt.title("Generator and Discriminator Loss During Training")
plt.plot(G_losses, label="G")
plt.plot(D_losses, label="D")
plt.xlabel("iterations")
plt.ylabel("Loss")
plt.legend()

# Animate the generator's outputs on fixed_noise across training.
fig = plt.figure(figsize=(8, 8))
plt.axis("off")
ims = [[plt.imshow(np.transpose(grid, (1, 2, 0)), animated=True)] for grid in img_list]
ani = animation.ArtistAnimation(fig, ims, interval=1000, repeat_delay=1000, blit=True)

HTML(ani.to_jshtml())


# Real Images vs. Fake Images
# Finally, show a grid of real training images side by side with a grid of
# images generated at the end of training.


real_batch = next(iter(dataloader))

# Plot the real images
plt.figure(figsize=(15, 15))
plt.subplot(1, 2, 1)
plt.axis("off")
plt.title("Real Images")
plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=5, normalize=True).cpu(), (1, 2, 0)))

# Plot the fake images from the last epoch
plt.subplot(1, 2, 2)
plt.axis("off")
plt.title("Fake Images")
plt.imshow(np.transpose(img_list[-1], (1, 2, 0)))
plt.show()


