#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : 12.使用PyTorch搭建对抗生成网络.py
# @Author: Richard Chiming Xu
# @Date  : 2021/11/18
# @Desc  :

import random
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as dset
import torchvision.utils as vutils
import numpy as np
import matplotlib.pyplot as plt
from GanNetwork import *
import os

from PIL import Image
import pandas as pd
from torch.utils.data.dataset import Dataset

# Allow duplicate OpenMP runtimes (works around a common matplotlib/torch conflict on Windows).
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# Hyper-parameters for the DCGAN training run.
config = {
    'BATCH_SIZE': 64,  # mini-batch size
    'LEARNING_RATE': 0.0003,  # Adam learning rate
    'EPOCHS': 100,  # number of training epochs
    'IMAGE_SIZE': 64,  # images are resized/center-cropped to this size
    'NC': 3,  # channels of the discriminator input (RGB)
    'NZ': 100,  # length of the latent z vector (generator input)
    'NGF': 64,  # base feature-map count of the generator
    'NDF': 64,  # base feature-map count of the discriminator
    'BETA': 0.5,  # Adam beta1 hyper-parameter
    'RANDOM_SEED': 2021
}

# Fix random seeds for reproducibility.
# NOTE(review): numpy's RNG is not seeded here — add np.random.seed(...) if numpy randomness matters.
print("Random Seed: ", config['RANDOM_SEED'])
random.seed(config['RANDOM_SEED'])
torch.manual_seed(config['RANDOM_SEED'])
# Use the first GPU if available, else fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

'''
    读取数据
'''
# # Alternative loader for a custom image folder (the classic cartoon-avatar dataset).
# dataset = dset.ImageFolder(root='data/kt_face/',
#                            transform=transforms.Compose([
#                                transforms.Resize(config['IMAGE_SIZE']),
#                                transforms.CenterCrop(config['IMAGE_SIZE']),
#                                transforms.ToTensor(),
#                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
#                            ]))
# # Create the dataloader
# dataloader = torch.utils.data.DataLoader(dataset, batch_size=config['BATCH_SIZE'], shuffle=True)


# Load the images required by the assignment.
# Assumes train.npy holds an (H, W, N) stack with images along the last axis — TODO confirm.
train_img = np.load('data/face/train.npy')
train_df = pd.read_csv('data/face/train.csv')
torch.backends.cudnn.benchmark = False
# Select GPU if available (NOTE(review): duplicates the `device` assignment made earlier in this file).
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


# 自定义dataset
class XunFeiDataset(Dataset):
    """Dataset over an image stack indexed along its last axis.

    Each item is one frame converted to RGB (optionally transformed),
    paired with a dummy all-zero label tensor of length ``batch_size`` —
    the GAN training loop never reads the labels.
    """

    def __init__(self, img, batch_size, transform=None):
        # Store the raw (H, W, N) array and the per-item transform pipeline.
        self.img = img
        self.batch_size = batch_size
        self.transform = transform

    def __getitem__(self, index):
        # Slice out one frame and promote it to a 3-channel RGB image.
        sample = Image.fromarray(self.img[:, :, index]).convert('RGB')
        dummy_label = torch.zeros(self.batch_size)
        if self.transform is None:
            return sample, dummy_label
        return self.transform(sample), dummy_label

    def __len__(self):
        # One item per slice along the last axis.
        return self.img.shape[-1]


# Build the training DataLoader; the last 500 frames are held out.
dataloader = torch.utils.data.DataLoader(
    XunFeiDataset(train_img[:, :, :-500], config['BATCH_SIZE'], transforms.Compose([
        transforms.Resize(config['IMAGE_SIZE']),
        transforms.CenterCrop(config['IMAGE_SIZE']),
        transforms.ToTensor(),
        # Map pixels to [-1, 1], matching the generator's tanh output range.
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
                  ), batch_size=config['BATCH_SIZE'], shuffle=True
)

'''
    创建模型，定义优化器和损失函数
'''
# Build the generator (defined in GanNetwork) and apply its weight initialisation.
netG = Generator(config['NC'], config['NZ'], config['NGF']).to(device)
netG.apply(weights_init)
print(netG)

# Build the discriminator and apply the same weight initialisation.
netD = Discriminator(config['NC'], config['NDF']).to(device)
netD.apply(weights_init)
print(netD)

# Binary cross-entropy loss for the real/fake classification.
criterion = nn.BCELoss()

# Fixed noise batch used to visualise generator progress across training.
fixed_noise = torch.randn(64, config['NZ'], 1, 1, device=device)

# Target label values fed to the discriminator.
real_label = 1
fake_label = 0

# Adam optimizers for both networks (beta1 from config, beta2 = 0.999).
optimizerD = optim.Adam(netD.parameters(), lr=config['LEARNING_RATE'], betas=(config['BETA'], 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=config['LEARNING_RATE'], betas=(config['BETA'], 0.999))

# Training history buffers.
img_list = []  # sample grids generated from fixed_noise
G_losses = []  # generator loss per iteration
D_losses = []  # discriminator loss per iteration
iters = 0  # global iteration counter

'''
    训练模型
'''
# Hoisted out of the epoch loop: the original re-ran `import time` every epoch.
import time

print("初始化完毕看是训练...")
for epoch in range(config['EPOCHS']):
    start = time.time()
    # Iterate over mini-batches from the dataloader.
    for i, data in enumerate(dataloader, 0):

        ############################
        # (1) Update the discriminator: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        ## Train on a batch of real images.
        netD.zero_grad()
        real_cpu = data[0].to(device)
        b_size = real_cpu.size(0)
        # All-real batch -> all-real targets.
        label = torch.full((b_size,), real_label, device=device, dtype=torch.float32)
        # Forward the real batch through D.
        output = netD(real_cpu).view(-1)
        # BCE loss on the real half.
        errD_real = criterion(output, label)
        # Accumulate gradients for the real half.
        errD_real.backward()
        D_x = output.mean().item()

        ## Train on a batch of fakes generated from fresh per-batch noise.
        noise = torch.randn(b_size, config['NZ'], 1, 1, device=device, dtype=torch.float32)
        # Generate fake images.
        fake = netG(noise)
        # All-fake batch -> all-fake targets.
        label.fill_(fake_label)
        # detach() so this backward pass does not reach the generator.
        output = netD(fake.detach()).view(-1)
        errD_fake = criterion(output, label)
        # Accumulate gradients for the fake half (adds to the real-half gradients).
        errD_fake.backward()
        D_G_z1 = output.mean().item()
        # Total discriminator loss, for reporting only — gradients are already accumulated.
        errD = errD_real + errD_fake
        # Apply the discriminator update.
        optimizerD.step()

        ############################
        # (2) Update the generator: maximize log(D(G(z)))
        ###########################
        netG.zero_grad()
        # The generator wants the discriminator to call its fakes real.
        label.fill_(real_label)
        # Re-score the (non-detached) fakes so gradients flow into G.
        output = netD(fake).view(-1)
        errG = criterion(output, label)
        # Compute generator gradients.
        errG.backward()
        D_G_z2 = output.mean().item()
        # Apply the generator update.
        optimizerG.step()

        # Periodic progress report.
        if i % 50 == 0:
            print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
                  % (epoch, config['EPOCHS'], i, len(dataloader),
                     errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))

        # Record loss history.
        G_losses.append(errG.item())
        D_losses.append(errD.item())

        # Periodically dump a grid of samples drawn from the fixed noise.
        if (iters % 20 == 0) or ((epoch == config['EPOCHS'] - 1) and (i == len(dataloader) - 1)):
            with torch.no_grad():
                fake = netG(fixed_noise).detach().cpu()

            # BUGFIX: the grid was previously assigned to `i`, clobbering the batch
            # index for the rest of the iteration (and make_grid was computed twice);
            # use a dedicated name and build the grid once.
            grid = vutils.make_grid(fake, padding=2, normalize=True)
            img_list.append(grid)
            fig = plt.figure(figsize=(8, 8))
            plt.imshow(np.transpose(grid, (1, 2, 0)))
            plt.axis('off')  # hide the axes
            plt.savefig("./out/%d_%d.png" % (epoch, iters))
            plt.close(fig)
        iters += 1
    print('time:', time.time() - start)
