import os
import os.path as osp
import shutil
import datetime
import random
import imageio
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.utils as vutils
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from matplotlib import pyplot as plt
import matplotlib
matplotlib.use('Qt5Agg')


# ============= PATH CONFIG ==============
# All outputs of this run (checkpoints, generated images, logs) live under
# runs/<timestamp>/ next to this script.
BASEDIR = osp.dirname(osp.abspath(__file__))
dataroot = osp.join(BASEDIR, 'GH_selected', 'G')
print("dataroot:", dataroot)

# Timestamp like 2021-08-13_16_58_06_444667 — ':' and '.' replaced so the
# string is a valid directory name on every platform.
runtime = datetime.datetime.now()
runtime_stamp = str(runtime).replace(' ', '_').replace(':', '_').replace('.', '_')

RUNS_DIR = osp.join(BASEDIR, 'runs', runtime_stamp)

CKPT_DIR = osp.join(RUNS_DIR, 'checkpoints')
GEN_IMG_DIR = osp.join(RUNS_DIR, 'generated_imgs')
LOG_DIR = osp.join(RUNS_DIR, 'logs')
for DIR in [CKPT_DIR, GEN_IMG_DIR, LOG_DIR]:
    # exist_ok avoids the check-then-create race of `if not exists: makedirs`
    # and is a no-op when the directory is already there.
    os.makedirs(DIR, exist_ok=True)
LOGFILENAME = 'training_log.txt'
PATH_LOGFILE = osp.join(LOG_DIR, LOGFILENAME)

# ============= Hyper Parameters =================
MAX_EPOCH = 20
# Checkpoints to resume from; set both to None to train from scratch.
# NOTE(review): these point at one specific past run and are relative to the
# working directory — confirm they exist before launching.
LOAD_PATH_G = 'runs/2021-08-13_16_58_06_444667/checkpoints/ckpt_G_ep2.pkl'
LOAD_PATH_D = 'runs/2021-08-13_16_58_06_444667/checkpoints/ckpt_D_ep2.pkl'
VALID_INTERVAL = 1         # epochs between saved sample-image grids
CHECKPOINT_INTERVAL = 100  # epochs between checkpoints (also saved at MAX_EPOCH)
BATCH_SIZE = 64
IMG_SIZE = 64
nc = 3           # Number of channels
nz = 100         # Size of z latent vector (i.e. size of generator input)
ngf = 64         # Size of feature maps in generator
ndf = 64         # Size of feature maps in discriminator
G_LR = 0.0002    # Learning rate for G
D_LR = 0.0001    # Learning rate for D
G_beta1 = 0.5    # Beta1 for Adam on G
D_beta1 = 0.5    # Beta1 for Adam on D
G_range = 2      # diligence of G: number of G updates per batch
D_range = 1      # diligence of D: number of D updates per batch
G_range_DecaySTEP = 3  # every 3 epochs G_range decays by 1 (floor of 1)
D_range_DecaySTEP = 3  # likewise for D_range
real_label = 0.999  # soft BCE targets instead of hard 1/0 (label smoothing)
fake_label = 0.001

# Record the full hyper-parameter configuration at the top of this run's log
# file ('w' truncates; the training loop later appends per-iteration lines).
with open(PATH_LOGFILE, 'w') as fp:
    fp.write(f"MAX_EPOCH={MAX_EPOCH}\n"
             f"LOAD_PATH_G={LOAD_PATH_G}\n"
             f"LOAD_PATH_D={LOAD_PATH_D}\n"
             f"VALID_INTERVAL={VALID_INTERVAL}\n"
             f"CHECKPOINT_INTERVAL={CHECKPOINT_INTERVAL}\n"
             f"BATCH_SIZE={BATCH_SIZE}\n"
             f"IMG_SIZE={IMG_SIZE}\n"
             f"nc={nc}  # Number of channels\n"
             f"nz={nz}  # Size of z latent vector (i.e. size of generator input)\n"
             f"ngf={ngf}  # Size of feature maps in generator\n"
             f"ndf={ndf}  # Size of feature maps in discriminator\n"
             f"G_LR={G_LR}\n"
             f"D_LR={D_LR}\n"
             f"G_beta1={G_beta1}  # Beta1 G hyper_param for Adam optimizers\n"
             f"D_beta1={D_beta1}  # Beta1 D hyper_param for Adam optimizers\n"
             f"G_range={G_range}  # diligence of G\n"
             f"D_range={D_range}  # diligence of D\n"
             f"G_range_DecaySTEP={G_range_DecaySTEP}\n"
             f"D_range_DecaySTEP={D_range_DecaySTEP}\n"
             f"real_label={real_label}\n"
             f"fake_label={fake_label}\n"
             f"dataroot={dataroot}\n"
             )

# ============= seed ==============
def set_seed(my_seed=1):
    """Seed every RNG in use (random, NumPy, torch CPU, all CUDA devices).

    Using ``manual_seed_all`` instead of ``manual_seed`` covers every GPU in
    a multi-GPU machine, not just the current one; it is a harmless no-op on
    CPU-only machines.
    """
    random.seed(my_seed)
    np.random.seed(my_seed)
    torch.manual_seed(my_seed)
    torch.cuda.manual_seed_all(my_seed)


# Fixed seed for reproducible runs; switch to the random line for variety.
manualSeed = 0
# manualSeed = random.randint(1, 10000)  # use this if want new results
print("Random Seed:", manualSeed)
set_seed(manualSeed)


# ============= device ============
# Prefer GPU when available; every model/tensor below is moved to `device`.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class BirdDataset(Dataset):
    """Image dataset built by recursively scanning a directory for PNGs.

    Every ``*.png`` file found anywhere under ``data_dir`` becomes one
    sample.  All samples carry the dummy label 1 — the GAN training loop
    only consumes the images.
    """

    def __init__(self, data_dir, transform=None):
        # data_info holds (image_path, label) pairs; DataLoader reads
        # samples from it by index via __getitem__.
        self.data_info = self.get_img_info(data_dir)
        self.transform = transform

    def __getitem__(self, index):
        path_img, label = self.data_info[index]
        image = Image.open(path_img).convert('RGB')  # pixel values 0-255
        if self.transform is not None:
            # e.g. resize + ToTensor + Normalize (see module-level transform)
            image = self.transform(image)
        return image, label

    def __len__(self):
        return len(self.data_info)

    @staticmethod
    def get_img_info(data_dir):
        """Walk ``data_dir`` and return ``[(path, 1), ...]`` for each .png file."""
        data_info = [
            (osp.join(dirpath, fname), 1)
            for dirpath, _, filenames in os.walk(data_dir)
            for fname in filenames
            if fname.endswith('.png')
        ]
        print("train img num:", len(data_info))
        return data_info


# =========== data ============
transform = transforms.Compose([
            transforms.Resize((IMG_SIZE, IMG_SIZE)),
            # transforms.CenterCrop(IMG_SIZE),
            transforms.ToTensor(),
            # maps each channel from [0, 1] to [-1, 1], matching the
            # generator's Tanh output range
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            ])

# Fixed latent batch reused for every validation grid, so images across
# epochs are directly comparable.
fixed_noise = torch.randn(BATCH_SIZE, nz, 1, 1, device=device)
dataset = BirdDataset(data_dir=dataroot, transform=transform)
dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)

# === === === Plot some training images (disabled by default; set flag = 1)
flag = 0
# flag = 1
if flag:
    real_batch = next(iter(dataloader))
    plt.figure('Real Images')
    plt.axis("off")
    plt.title("Real Images")
    plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=2, nrow=8, normalize=True).cpu(), (1, 2, 0)))


# ============= model ==============
def weights_init(m):
    """Initialize Conv / BatchNorm layers per the DCGAN paper.

    Conv weights ~ N(0, 0.02); BatchNorm weights ~ N(1, 0.02) with zero
    bias.  Intended for use with ``net.apply(weights_init)``.
    """
    layer_type = type(m).__name__
    if 'Conv' in layer_type:
        nn.init.normal_(m.weight.data, mean=0.0, std=0.02)
    elif 'BatchNorm' in layer_type:
        nn.init.normal_(m.weight.data, mean=1.0, std=0.02)
        nn.init.constant_(m.bias.data, 0)


# Generator
# Generator
class Generator(nn.Module):
    """DCGAN generator: latent vector -> 64x64 image.

    Maps input of shape (N, nz, 1, 1) to an image of shape (N, nc, 64, 64)
    with values in [-1, 1] (Tanh output, matching the [-1, 1] data
    normalization).

    Args:
        nz:  size of the latent vector (input channels).
        ngf: base number of generator feature maps.
        nc:  number of output image channels.

    Defaults equal the module-level hyper-parameters (100 / 64 / 3), so the
    existing no-argument ``Generator()`` call is unchanged; the sizes are
    now configurable instead of hard-coded to the globals.
    """

    def __init__(self, nz=100, ngf=64, nc=3):
        super(Generator, self).__init__()
        self.main = nn.Sequential(
            # input is Z of shape (N, nz, 1, 1), going into a convolution
            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )

    def forward(self, inputs):
        """Run the latent batch through the transposed-conv stack."""
        return self.main(inputs)


# Create the generator and move it to the chosen device
netG = Generator().to(device)
netG.apply(weights_init)  # recursively re-initialize every Conv/BatchNorm layer
# print(netG)


# Discriminator
class Discriminator(nn.Module):
    def __init__(self):
        super(Discriminator, self).__init__()
        self.main = nn.Sequential(
            # input is (nc) x 64 x 64
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid()
        )

    def forward(self, inputs):
        return self.main(inputs)


# Create the Discriminator and move it to the chosen device
netD = Discriminator().to(device)
netD.apply(weights_init)  # recursively re-initialize every Conv/BatchNorm layer
# print(netD)


# =================== Loss ========================
# Binary cross-entropy over the discriminator's sigmoid outputs
# (vanilla GAN objective).
criterion = nn.BCELoss()


# ================= optimizer =====================
# Separate Adam optimizers; D uses a smaller learning rate than G
# (see the hyper-parameter section).
optimizerG = optim.Adam(netG.parameters(), lr=G_LR, betas=(G_beta1, 0.999))
optimizerD = optim.Adam(netD.parameters(), lr=D_LR, betas=(D_beta1, 0.999))


# ================= load state dict ===============
# Optionally resume G and D (weights + optimizer state + last epoch) from a
# previous run.  map_location=device makes a checkpoint that was saved on a
# GPU loadable on a CPU-only machine (and vice versa), which a bare
# torch.load would fail on.
G_last_epoch = 0
if LOAD_PATH_G is not None:
    path_checkpointG = LOAD_PATH_G
    checkpointG = torch.load(path_checkpointG, map_location=device)
    netG.load_state_dict(checkpointG['model_state_dict'])
    optimizerG.load_state_dict(checkpointG['optimizer_state_dict'])
    print(f"{LOAD_PATH_G} loaded.")
    G_last_epoch = checkpointG['epoch']
print(f"G_last_epoch: {G_last_epoch}")

D_last_epoch = 0
if LOAD_PATH_D is not None:
    path_checkpointD = LOAD_PATH_D
    checkpointD = torch.load(path_checkpointD, map_location=device)
    netD.load_state_dict(checkpointD['model_state_dict'])
    optimizerD.load_state_dict(checkpointD['optimizer_state_dict'])
    print(f"{LOAD_PATH_D} loaded.")
    D_last_epoch = checkpointD['epoch']
print(f"D_last_epoch: {D_last_epoch}")

# =================== train =======================
# Lists to keep track of progress; one entry per batch, recorded on the
# last D/G sub-iteration of that batch.
img_list = []   # NOTE(review): never appended to anywhere in this script
G_losses = []   # generator BCE loss
D_losses = []   # discriminator BCE loss (real + fake terms)
D_real = []     # mean D(x) on the real batch
D_fake = []     # mean D(G(z)) after the G update

print("Starting Training Loop...")
# Epoch numbering continues after the loaded checkpoint
# (G_last_epoch is 0 when training from scratch).
for epoch in range(G_last_epoch+1, MAX_EPOCH+1):
    for i, data in enumerate(dataloader, 0):  # For each batch
        real_pic = data[0].to(device)
        this_batch_size = real_pic.size(0)  # last batch may be smaller than BATCH_SIZE

        ############################
        # (1) Update D network: minimize: - log(D(x)) - log(1 - D(G(z)))
        ###########################
        # D_range = number of discriminator updates per batch ("diligence").
        for d_i in range(D_range):
            netD.zero_grad()

            # ===== Train with all-real batch
            output = netD(real_pic).view(-1)
            # soft target (real_label = 0.999) instead of a hard 1.0
            label = torch.full((this_batch_size,), real_label, device=device)
            errD_real = criterion(output, label)
            # NOTE(review): retain_graph=True looks unnecessary — the forward
            # pass is recomputed on every d_i iteration; it keeps extra graph
            # memory alive.  Confirm before removing.
            errD_real.backward(retain_graph=True)
            if d_i == D_range - 1:
                D_x = output.mean().item()

            # ===== Train with all-fake batch
            noise = torch.randn(this_batch_size, nz, 1, 1, device=device)
            fake = netG(noise)
            output = netD(fake.detach()).view(-1)  # G is not trained here, so detach cuts gradients to G
            label = torch.full((this_batch_size,), fake_label, device=device)
            errD_fake = criterion(output, label)
            errD_fake.backward(retain_graph=True)
            if d_i == D_range - 1:
                D_G_z1 = output.mean().item()

            # Update D (gradients from the real and fake terms accumulate)
            optimizerD.step()

            # do statistics on the final D sub-iteration only
            if d_i == D_range - 1:
                errD = errD_real + errD_fake
                D_real.append(D_x)
                D_losses.append(errD.item())  # Save Losses for plotting later

        ############################
        # (2) Update G network: minimize log(1 - D(G(z)))
        ###########################
        # G_range = number of generator updates per batch ("diligence").
        for g_i in range(G_range):
            netG.zero_grad()

            noise = torch.randn(this_batch_size, nz, 1, 1, device=device)
            fake = netG(noise)
            label = torch.full((this_batch_size,), real_label, device=device)  # as for generator, we want high score
            output = netD(fake).view(-1)
            errG = criterion(output, label)
            # NOTE(review): as above, retain_graph=True appears unnecessary
            # since fake/output are rebuilt every g_i iteration.
            errG.backward(retain_graph=True)

            # Update G
            optimizerG.step()

            # do statistics on the final G sub-iteration only
            if g_i == G_range - 1:
                D_G_z2 = output.mean().item()
                D_fake.append(D_G_z2)
                G_losses.append(errG.item())  # Save Losses for plotting later

        # print iteration info.  errD/errG/D_x/D_G_z1/D_G_z2 are assigned on
        # the last sub-iteration above, so they are always defined here.
        str_info = 'epoch[%d/%d]\titer[%d/%d]\tDLoss:%.4f\tGLoss:%.4f\tD(x):%.4f\tD(G(z)):%.4f/%.4f\tD_range:%d G_range:%d D_LR:%e G_LR:%e' % (epoch, MAX_EPOCH, i+1, len(dataloader), errD.item(), errG.item(), D_x, D_G_z1, D_G_z2, D_range, G_range, optimizerD.state_dict()['param_groups'][0]['lr'], optimizerG.state_dict()['param_groups'][0]['lr'])
        print(str_info)
        with open(PATH_LOGFILE, 'a') as fp:
            fp.write(str_info)
            fp.write('\n')

    # decay G_range: every G_range_DecaySTEP epochs G does one fewer step
    if (epoch % G_range_DecaySTEP == 0) and (G_range > 1):
        G_range = G_range - 1

    # decay D_range likewise
    if (epoch % D_range_DecaySTEP == 0) and (D_range > 1):
        D_range = D_range - 1

    # Check how the generator is doing by saving G's output on fixed_noise
    if (epoch % VALID_INTERVAL == 0) or (epoch == MAX_EPOCH):
        with torch.no_grad():
            fake = netG(fixed_noise).detach().cpu()
            img_grid = np.transpose(vutils.make_grid(fake, padding=1, nrow=8, normalize=True), (1, 2, 0))
            # NOTE(review): a new figure is opened every epoch and never
            # closed — consider plt.close() after savefig to bound memory.
            plt.figure(figsize=(10, 10))
            plt.imshow(img_grid)
            plt.title("Epoch:{}".format(epoch))
            plt.axis('off')
            plt.savefig(osp.join(GEN_IMG_DIR, f"{epoch}_epoch.png"))

    # save state dicts (both nets + both optimizers + epoch); this format is
    # what the "load state dict" section expects on resume
    if (epoch % CHECKPOINT_INTERVAL == 0) or (epoch == MAX_EPOCH):
        checkpointG = {"model_state_dict": netG.state_dict(),
                       "optimizer_state_dict": optimizerG.state_dict(),
                       "epoch": epoch}
        path_checkpointG = osp.join(CKPT_DIR, f"ckpt_G_ep{epoch}.pkl")
        torch.save(checkpointG, path_checkpointG)

        checkpointD = {"model_state_dict": netD.state_dict(),
                       "optimizer_state_dict": optimizerD.state_dict(),
                       "epoch": epoch}
        path_checkpointD = osp.join(CKPT_DIR, f"ckpt_D_ep{epoch}.pkl")
        torch.save(checkpointD, path_checkpointD)


# ===================== result visualization =====================
# loss curve
plt.figure('D_Loss and G_Loss', figsize=(20, 10))
plt.subplot(2, 1, 1)
plt.title("Generator and Discriminator Loss During Training")
for curve, curve_label in ((G_losses, "G_loss"), (D_losses, "D_loss")):
    plt.plot(curve, label=curve_label)
plt.xlabel("iterations")
plt.ylabel("Loss")
plt.legend()

# output probability
plt.subplot(2, 1, 2)
plt.title("output probability for real and fake images")
for curve, curve_label in ((D_real, "D_real"), (D_fake, "D_fake")):
    plt.plot(curve, label=curve_label)
plt.xlabel("iterations")
plt.ylabel("Probability")
plt.legend()
plt.savefig(osp.join(LOG_DIR, 'loss and score curves.tif'))

# save gif: stitch every "<epoch>_epoch.png" together in ascending epoch order
print('saving gif...')
epoch_numbers = sorted(
    int(fname.split("_")[0])
    for fname in os.listdir(GEN_IMG_DIR)
    if fname.endswith("epoch.png")
)
frames = [imageio.imread(osp.join(GEN_IMG_DIR, "{}_epoch.png".format(num)))
          for num in epoch_numbers]
imageio.mimsave(osp.join(GEN_IMG_DIR, "generation_animation.gif"), frames, fps=2)

print("done")
