# CGAN = conditional GAN
# DCGAN = deep convolution generative adversarial nets
# THIS_WORK = CGAN + DCGAN
# AUTHOR: ZHOUYUXUAN
# ngf64, ndf64
# 0.99, 0.01
# condepth=3
# creative and suitable Discriminator
# three classes but can be globalized to other datasets with more classes
# be careful to choose the correct path to load images
# loading model checkpoints is optional


import os
import imageio
import math
import random
import numpy as np
import torch
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
from matplotlib import pyplot as plt
from torchvision import utils as vutils
from torch import optim
from PIL import Image

# Soft one-hot condition vectors, one per bird-color class.
# 0.99/0.01 is used instead of hard 1/0 (label smoothing, see header notes).
condition_dict = {"red":    [0.99, 0.01, 0.01],
                  "yellow": [0.01, 0.99, 0.01],
                  "blue":   [0.01, 0.01, 0.99],
                  }


class BirdsDataset(Dataset):
    """Image dataset laid out as ``data_dir/<color>/<image files>``.

    Each sample is ``(image, condition)`` where ``condition`` is the soft
    one-hot vector from ``condition_dict`` for the color sub-directory.
    """

    def __init__(self, data_dir, transform=None):
        """
        :param data_dir: str, root directory of the dataset
        :param transform: torchvision transform applied to each PIL image
        """
        # data_info stores (image_path, condition) pairs; the DataLoader
        # fetches samples by index through __getitem__.
        self.data_info = self.get_img_info(data_dir)
        self.transform = transform

    def __getitem__(self, index):
        path_img, condition = self.data_info[index]
        img = Image.open(path_img).convert('RGB')  # pixel values 0~255

        if self.transform is not None:
            img = self.transform(img)  # e.g. resize / ToTensor / normalize

        return img, condition

    def __len__(self):
        return len(self.data_info)

    @staticmethod
    def get_img_info(data_dir):
        """Collect (path, condition) pairs for every image file under data_dir.

        Directories whose name is not a key of ``condition_dict`` (hidden
        folders, strays) are skipped instead of raising KeyError, and
        non-file entries inside a class directory are ignored.
        """
        data_info = list()
        for root, dirs, files in os.walk(data_dir):
            for dir_i in dirs:
                if dir_i not in condition_dict:
                    continue  # not a known class directory
                file_label = condition_dict[dir_i]
                for file_name_i in os.listdir(os.path.join(root, dir_i)):
                    file_path = os.path.join(root, dir_i, file_name_i)
                    if not os.path.isfile(file_path):
                        continue  # skip nested directories / non-files
                    data_info.append((file_path, file_label))

        print('number of images:', len(data_info))  # total training images
        return data_info


# =============== device =======================
ngpu = 1
# Fall back to CPU when CUDA is unavailable or GPUs are disabled via ngpu.
use_gpu = torch.cuda.is_available() and ngpu > 0
device = 'cuda' if use_gpu else 'cpu'
print(device)


# =============== Hyper parameters =============
def set_seed(my_seed=1):
    """Seed all RNGs used in this script for reproducible runs.

    :param my_seed: int, seed for python `random`, numpy and torch
    """
    random.seed(my_seed)
    np.random.seed(my_seed)
    torch.manual_seed(my_seed)
    # seed every visible GPU, not just the current one (no-op without CUDA)
    torch.cuda.manual_seed_all(my_seed)


set_seed(1)
MAX_EPOCH = 400  # total number of training epochs
Gload_epoch = 0  # G checkpoint epoch to resume from (0 = train from scratch)
Dload_epoch = 0  # D checkpoint epoch to resume from (0 = train from scratch)
VALID_INTERVAL = 1  # validate (render sample grids) every N epochs
CHECKPOINT_INTERVAL = 10  # save checkpoints every N epochs
BATCH_SIZE = 64
IMAGE_CHANNELS = 3
IMAGE_SIZE = 64
COND_LENGTH = len(condition_dict['red'])  # length of condition vector
NOIZ_LENGTH = 100 - len(condition_dict['red'])  # length of noise vector Z
G_FEATURE_MAPS = 64  # base number of generator feature maps (ngf)
D_FEATURE_MAPS = 64  # base number of discriminator feature maps (ndf)
LEARNING_RATE = 0.0002
BETA1 = 0.5  # Adam beta1 (DCGAN recommendation)
CONDEPTH = 3  # depth of the condition feature-map tensor fed into D
COND_WEIGHT = 1  # hyper-parameter weight on the condition feature map
G_range = 3  # number of fixed class conditions traversed per G update


# ================== Data ======================
# Data directory (expects sub-folders named after condition_dict keys)
data_dir = os.path.join('..', '..', 'data', 'bird_images')

# normalize to [-1, 1], matching the generator's Tanh output range
norm_mean = [0.5, 0.5, 0.5]
norm_std = [0.5, 0.5, 0.5]

# Pre-transforms
transform = transforms.Compose([
    transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])

# Dataset and DataLoader
dataset = BirdsDataset(data_dir=data_dir, transform=transform)
dataloader = DataLoader(dataset=dataset, batch_size=BATCH_SIZE, shuffle=True)


# Inputs Visualization (set flag = 0 to skip the preview of one real batch)
# flag = 0
flag = 1
if flag:
    plt.figure('Input images')
    plt.title('real images')
    plt.axis('off')
    input_images, input_labels = next(iter(dataloader))
    # make_grid returns CxHxW; transpose to HxWxC for imshow
    img_grid = np.transpose(vutils.make_grid(input_images, nrow=16, padding=5, normalize=True, scale_each=True),
                            (1, 2, 0))
    plt.imshow(img_grid)
    # plt.pause(3)


# Concatenate the condition with a fresh noise vector Z:
def concat2noise(condition):
    """Build the generator input from a batch of condition vectors.

    :param condition: tensor of shape [batch, COND_LENGTH]
    :return: tensor of shape [batch, NOIZ_LENGTH + COND_LENGTH, 1, 1]
    """
    batch_size = condition.shape[0]
    noise = torch.randn(batch_size, NOIZ_LENGTH, 1, 1, device=device)
    # lift [batch, COND_LENGTH] to 4D so it can be stacked on the noise
    cond4d = condition.unsqueeze(-1).unsqueeze(-1).to(device)
    return torch.cat([noise, cond4d], dim=1)


# ================== Net model ==================
# DCGAN + CGAN == conditional deep convolution generative adversarial nets


# initialize
def weights_init(m):
    """DCGAN weight init, applied via ``net.apply``: conv weights ~ N(0, 0.02),
    BatchNorm scale ~ N(1, 0.02) with zero bias."""
    layer_name = m.__class__.__name__
    if 'Conv' in layer_name:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in layer_name:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)


# Generator
class Generator(nn.Module):
    """DCGAN-style generator: maps a [B, nx, 1, 1] noise+condition tensor
    to a [B, nc, 64, 64] image in [-1, 1] (Tanh output)."""

    def __init__(self):
        super(Generator, self).__init__()

        self.nc = IMAGE_CHANNELS
        self.nx = NOIZ_LENGTH + COND_LENGTH
        self.ngf = G_FEATURE_MAPS

        layers = []
        # 1x1 -> 4x4 projection from the latent vector
        layers += [
            nn.ConvTranspose2d(in_channels=self.nx, out_channels=self.ngf * 8,
                               kernel_size=4, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(self.ngf * 8),
            nn.ReLU(True),
        ]
        # upsampling stages 4->8->16->32, halving channels each time
        # (ngf*8 -> ngf*4 -> ngf*2 -> ngf)
        channels = self.ngf * 8
        while channels > self.ngf:
            layers += [
                nn.ConvTranspose2d(in_channels=channels, out_channels=channels // 2,
                                   kernel_size=4, stride=2, padding=1, bias=False),
                nn.BatchNorm2d(channels // 2),
                nn.ReLU(True),
            ]
            channels //= 2
        # final stage 32->64: down to image channels, squashed by Tanh
        layers += [
            nn.ConvTranspose2d(in_channels=self.ngf, out_channels=self.nc,
                               kernel_size=4, stride=2, padding=1, bias=False),
            nn.Tanh(),
        ]
        # keep the attribute name "core" so existing checkpoints still load
        self.core = nn.Sequential(*layers)

    def forward(self, inputs):
        return self.core(inputs)


netG = Generator().to(device)
netG.apply(weights_init)  # DCGAN weight initialization on every submodule


# print(netG)


# Discriminator
class Discriminator(nn.Module):
    """Conditional DCGAN discriminator.

    The image is encoded by a conv stack (core1); the condition vector is
    expanded to a 4x4 feature map (core2); both are concatenated and
    reduced to a single real-and-match score in (0, 1) (core3).
    Attribute names core1/core2/core3 are kept for checkpoint compatibility.
    """

    def __init__(self):
        super(Discriminator, self).__init__()

        self.nc = IMAGE_CHANNELS
        self.ndf = D_FEATURE_MAPS
        self.condlen = COND_LENGTH
        self.condepth = CONDEPTH
        self.cond_w = COND_WEIGHT

        # Image path: 64x64 -> 4x4, doubling channels at each downsampling.
        self.core1 = nn.Sequential(
            # no BatchNorm on D's input layer (DCGAN guideline)
            nn.Conv2d(self.nc, self.ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(self.ndf, self.ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # output tensor: (ndf*8) x 4 x 4
        )

        # Condition path: [B, condlen, 1, 1] -> (condepth) x 4 x 4.
        self.core2 = nn.Sequential(
            # 4x4 transposed conv from a 1x1 input acts like a fully-connected layer
            nn.ConvTranspose2d(self.condlen, self.condepth, 4, 1, 0, bias=False),
            nn.ReLU(),  # rectification
        )

        # Joint path: fuse image and condition features into one score.
        self.core3 = nn.Sequential(
            nn.Conv2d(self.ndf * 8 + self.condepth, self.ndf * 8, 1, 1, 0, bias=False),  # 1x1 kernel
            nn.BatchNorm2d(self.ndf * 8),  # BN is performed on all convolution layers
            nn.ReLU(),  # rectification
            nn.Conv2d(self.ndf * 8, 1, 4, 1, 0, bias=False),  # 4x4 kernel collapses to 1x1
            nn.Sigmoid(),  # real and match score
        )

    def forward(self, x):
        """
        :param x: tuple of (image_tensor [B, C, H, W],
                            condition_tensor [batch, COND_LENGTH])
        :return: score tensor in (0, 1)
        """
        image, condition = x
        img_feat = self.core1(image)
        cond_feat = self.core2(condition.unsqueeze(-1).unsqueeze(-1))
        # concatenation; cond_w is an optional hyper-weight on the condition
        fused = torch.cat([img_feat, cond_feat * self.cond_w], dim=1)
        return self.core3(fused)


netD = Discriminator().to(device)
netD.apply(weights_init)  # DCGAN weight initialization on every submodule


# print(netD)


# ================== Optimizer ==================
# Adam with beta1 = 0.5, the standard DCGAN setting
optimizerD = optim.Adam(netD.parameters(), lr=LEARNING_RATE, betas=(BETA1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=LEARNING_RATE, betas=(BETA1, 0.999))


# ================== Objective Function =========
# smoothed BCE targets: 0.99 for "real/match", 0.01 for "fake/mismatch"
HIGH_SCORE, LOW_SCORE = 0.99, 0.01
criterion = nn.BCELoss()


# ================== Train ======================
def lets_valid(num=4, cond=None):
    """Generate ``num`` images with G for one fixed condition, score them with D.

    :param num: int, number of images to generate
    :param cond: str, a key of ``condition_dict`` ("red" / "yellow" / "blue");
                 a class is drawn at random when None
    :return: tuple (images, cond) where images is an HxWxC array for imshow
    """
    valid_condition = torch.zeros(num, COND_LENGTH, dtype=torch.float, device=device)
    if cond is None:
        # pick a class directly from condition_dict so additional classes
        # are handled automatically (no hard-coded class count)
        cond = random.choice(list(condition_dict))
    valid_condition[:, ...] = torch.tensor(condition_dict[cond], dtype=torch.float, device=device)
    x = concat2noise(valid_condition)  # already on device
    generated_img = netG(x)
    # detach: validation must not accumulate gradients into G
    score = netD((generated_img.detach(), valid_condition)).view(-1)
    sc = score.mean().item()
    print('test condition: {}, score: {}'.format(cond, sc))
    img_grid = vutils.make_grid(generated_img.detach().cpu(), nrow=math.ceil(math.sqrt(num)), padding=2, normalize=True,
                                scale_each=True)
    # explicit Tensor -> ndarray conversion before the HxWxC transpose
    images = np.transpose(img_grid.numpy(), (1, 2, 0))
    return images, cond


# Load state dicts (optional resume) ####################################
last_epoch = 0

# flag = 0
# Resume only when a G checkpoint epoch is configured.
# NOTE(review): Dload_epoch is not checked here — presumably it is kept in
# sync with Gload_epoch; confirm before resuming with mismatched epochs.
flag = 1 if Gload_epoch != 0 else 0
if flag:
    # load D
    path_checkpointD = "./checkpointD_{}_epoch_cgan_bird.pkl".format(Dload_epoch)
    checkpointD = torch.load(path_checkpointD)
    netD.load_state_dict(checkpointD['model_state_dict'])
    optimizerD.load_state_dict(checkpointD['optimizer_state_dict'])

    # load G
    path_checkpointG = "./checkpointG_{}_epoch_cgan_bird.pkl".format(Gload_epoch)
    checkpointG = torch.load(path_checkpointG)
    netG.load_state_dict(checkpointG['model_state_dict'])
    optimizerG.load_state_dict(checkpointG['optimizer_state_dict'])

    last_epoch = checkpointG['epoch']
    print('state dicts loaded, last_epoch={}'.format(last_epoch))

print('start training loop...')
img_list = list()      # NOTE(review): appears unused in the code below
DLoss_curve = list()   # D loss per iteration
GLoss_curve = list()   # G loss per iteration
D_rm = list()          # D score: real image + matching condition
D_fm1 = list()         # D score: fake image + matching condition (D step)
D_rd = list()          # D score: real image + mismatched condition
D_fm2 = list()         # D score: generated image during the G step

# Training loop: per batch, update D with three loss terms
# (real+match, fake+match, real+mismatch), then update G once per class.
for epoch in range(last_epoch + 1, MAX_EPOCH + 1):
    for iteration, data in enumerate(dataloader):  # for each batch
        real_img, match_cond = data
        this_batch_size = len(real_img)
        # The default collate turns the list labels into COND_LENGTH tensors
        # of length batch; stack + transpose rebuilds [batch, COND_LENGTH].
        match_cond = torch.transpose(torch.stack(match_cond, dim=0), dim0=0, dim1=1)
        real_img = real_img.to(device)
        match_cond = match_cond.to(torch.float).to(device)

        # smoothed BCE targets (0.99 / 0.01 instead of 1 / 0)
        High_scores = torch.full((this_batch_size,), HIGH_SCORE, device=device)
        Low_scores = torch.full((this_batch_size,), LOW_SCORE, device=device)

        #################################### train D ######################################
        optimizerD.zero_grad()

        # term 1: real image with matching condition -> should score high
        score = netD((real_img, match_cond)).view(-1)
        DLoss_1 = criterion(score, High_scores)
        DLoss_1.backward(retain_graph=True)
        score_real_match = score.mean().item()

        # term 2: generated image with matching condition -> should score low
        x = concat2noise(match_cond)  # already to device
        fake_img = netG(x)
        score = netD((fake_img.detach(), match_cond)).view(-1)  # don't need G's gradient
        DLoss_2 = criterion(score, Low_scores)
        DLoss_2.backward(retain_graph=True)
        score_fake_match = score.mean().item()

        # term 3: real image with a mismatched condition -> should score low
        # confuse the condition by flipping 0s and 1s (1 - condition)
        dismatch_cond = torch.ones_like(match_cond, dtype=torch.float, device=device) - match_cond
        score = netD((real_img, dismatch_cond)).view(-1)
        DLoss_3 = criterion(score, Low_scores)
        DLoss_3.backward(retain_graph=True)
        score_real_dismatch = score.mean().item()

        DLoss = DLoss_1 + DLoss_2 + DLoss_3  # for logging; gradients already accumulated

        # update D
        optimizerD.step()

        ##################################### train G #####################################
        optimizerG.zero_grad()
        for gi in range(G_range):
            # train G once for every class condition
            train_condition = torch.zeros(this_batch_size, COND_LENGTH, dtype=torch.float, device=device)
            cond_dict = {0: "red", 1: "yellow", 2: "blue"}
            cond = cond_dict[gi]
            train_condition[:, ...] = torch.tensor(condition_dict[cond], dtype=torch.float, device=device)
            x = concat2noise(train_condition)  # already to device
            fake_img = netG(x)
            score = netD((fake_img, train_condition)).view(-1)
            High_scores = torch.full((this_batch_size,), HIGH_SCORE, device=device)
            GLoss = criterion(score, High_scores)
            GLoss.backward(retain_graph=True)
            if gi == G_range - 1:
                score_generated = score.mean().item()  # log the last class's score

        # update G
        optimizerG.step()

        # print and record per-iteration info
        print("epoch[%d/%d]\titer[%d/%d]\tDLoss:%.4f\tGLoss:%.4f\tDrm:%.4f\tDfm:%.4f\tDrd:%.4f\tD_G:%.4f\tG_range:%d"
              % (epoch, MAX_EPOCH, iteration + 1, len(dataloader),
                 DLoss.item(), GLoss.item(),
                 score_real_match, score_fake_match, score_real_dismatch, score_generated, G_range))
        DLoss_curve.append(DLoss.item())
        GLoss_curve.append(GLoss.item())
        D_rm.append(score_real_match)
        D_fm1.append(score_fake_match)
        D_rd.append(score_real_dismatch)
        D_fm2.append(score_generated)

    # if (epoch % 3 == 0) and (G_range > 1):
    #     G_range = G_range - 1

    # Validation: render one 5x5 grid per class and save to <epoch>_epoch.png
    if (epoch % VALID_INTERVAL == 0) or (epoch == MAX_EPOCH):
        with torch.no_grad():
            images1, cond1 = lets_valid(num=25, cond='red')
            images2, cond2 = lets_valid(num=25, cond='yellow')
            images3, cond3 = lets_valid(num=25, cond='blue')

            plt.figure(figsize=(20, 10))

            plt.subplot(1, 3, 1)
            plt.imshow(images1)
            plt.axis('off')
            plt.title('{}_epoch{}'.format(cond1, epoch))

            plt.subplot(1, 3, 2)
            plt.imshow(images2)
            plt.axis('off')
            plt.title('{}_epoch{}'.format(cond2, epoch))

            plt.subplot(1, 3, 3)
            plt.imshow(images3)
            plt.axis('off')
            plt.title('{}_epoch{}'.format(cond3, epoch))

            plt.savefig('{}_epoch.png'.format(epoch))

    # Save state dict #################################################
    if (epoch % CHECKPOINT_INTERVAL == 0) or (epoch == MAX_EPOCH):
        # save D
        checkpointD = {"model_state_dict": netD.state_dict(),
                       "optimizer_state_dict": optimizerD.state_dict(),
                       "epoch": epoch}
        path_checkpointD = "./checkpointD_{}_epoch_cgan_bird.pkl".format(epoch)
        torch.save(checkpointD, path_checkpointD)

        # save G
        checkpointG = {"model_state_dict": netG.state_dict(),
                       "optimizer_state_dict": optimizerG.state_dict(),
                       "epoch": epoch}
        path_checkpointG = "./checkpointG_{}_epoch_cgan_bird.pkl".format(epoch)
        torch.save(checkpointG, path_checkpointG)
        print('state dict epoch={} saved'.format(epoch))


# ================== Test =======================
# Draw loss and score curves. NOTE: one point is recorded per training
# iteration (not per epoch), offset by the resume epoch.
plt.figure(figsize=(20, 10))
plt.subplot(2, 1, 1)
plt.title('Loss curve')
xx = range(last_epoch + 1, last_epoch + len(DLoss_curve) + 1)
# plot against xx so both subplots share the same x-axis when resuming
plt.plot(xx, DLoss_curve, label='DLoss')
plt.plot(xx, GLoss_curve, label='GLoss')
plt.legend()
plt.xlabel('epoch')
plt.ylabel('Loss value')

plt.subplot(2, 1, 2)
plt.title('score curve')
plt.plot(xx, D_rm, label='real-match')
plt.plot(xx, D_fm1, label='fake-match')
plt.plot(xx, D_rd, label='real-dismatch')
plt.plot(xx, D_fm2, label='generated')
plt.legend()
plt.xlabel('epoch')
plt.ylabel('score')

plt.savefig('loss_and_score_curves.png')


# Assemble the per-epoch validation grids into an animated gif,
# ordered by epoch number parsed from the "<epoch>_epoch.png" filenames.
print('saving gif...')
imgs_epoch = [int(name.split("_")[0]) for name in os.listdir('.') if name.endswith("epoch.png")]
imgs = [imageio.imread("{}_epoch.png".format(e)) for e in sorted(imgs_epoch)]
imageio.mimsave("generation_animation.gif", imgs, fps=2)

print("done")
