import glob

import torch
import torch.nn as nn
import time
import os

from torch.backends import cudnn
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
from torchvision.models import vgg19
from tqdm import tqdm

from data.DataHelper import get_train_loader, get_evaluation_loader, toNormalPic
from main import get_config
from models.model import Generator, Discriminator
import utils.visual_utils as vutils
from utils.warp import genPerturbations, vec2mtrx, transformImage

# Cache of intermediate VGG feature maps, keyed by layer name.
# Populated by the forward hooks registered in BootHelper.build_model.
vgg_activation = {}


def get_activation(name):
    """Return a forward hook that stores a layer's detached output under *name*."""

    def _store(module, inputs, output):
        # Detach so the cached activation does not keep the autograd graph alive.
        vgg_activation[name] = output.detach()

    return _store


class BootHelper:
    """Training harness for the reference-guided sketch colorization GAN.

    Owns the generator/discriminator pair, a frozen VGG-19 used for the
    perceptual and style losses, the optimizers and LR schedulers,
    checkpointing, and (optional) TensorBoard logging.
    """

    def __init__(self, config, train_loader, test_loader):
        """Read hyper-parameters from ``config`` and build all models.

        Args:
            config: nested dict with 'TRAINING_CONFIG' and 'MODEL_CONFIG'
                sections (see ``main.get_config``).
            train_loader: yields ``(GT, sketch, reference)`` training batches.
            test_loader: yields ``(reference, sketch)`` evaluation batches.
        """
        self.train_loader = train_loader
        self.test_loader = test_loader
        # init params
        self.best_fid = 999  # "worst possible" sentinel; restored from checkpoints
        self.config = config
        self.epoch = config['TRAINING_CONFIG']['EPOCH']
        self.img_size = config['MODEL_CONFIG']['IMAGE_SIZE']
        self.g_lr = float(config['TRAINING_CONFIG']['G_LR'])
        self.d_lr = float(config['TRAINING_CONFIG']['D_LR'])
        self.mode = config['MODEL_CONFIG']['MODE']
        self.train = self.mode == 'train'
        self.batch_size = config['TRAINING_CONFIG']['BATCH_SIZE']
        self.lambda_g_fake = config['TRAINING_CONFIG']['LAMBDA_G_FAKE']
        self.lambda_g_recon = config['TRAINING_CONFIG']['LAMBDA_G_RECON']
        # NOTE: 'SYTLE' typo is part of the existing config schema; kept for compatibility.
        self.lambda_g_style = config['TRAINING_CONFIG']['LAMBDA_G_SYTLE']
        self.lambda_g_percep = config['TRAINING_CONFIG']['LAMBDA_G_PERCEP']
        self.lambda_d_fake = config['TRAINING_CONFIG']['LAMBDA_D_FAKE']
        self.lambda_d_real = config['TRAINING_CONFIG']['LAMBDA_D_REAL']
        self.lambda_d_gp = config['TRAINING_CONFIG']['LAMBDA_GP']
        self.d_critic = config['TRAINING_CONFIG']['D_CRITIC']  # update D every N-th batch
        self.g_critic = config['TRAINING_CONFIG']['G_CRITIC']  # update G every N-th batch
        self.mse_loss = nn.MSELoss()
        self.triplet = config['TRAINING_CONFIG']['TRIPLE_LOSS'] == 'True'
        self.gan_loss = config['TRAINING_CONFIG']['GAN_LOSS']
        assert self.gan_loss in ['lsgan', 'wgan', 'vanilla', 'hinge']

        if self.triplet:
            self.triplet_loss = nn.TripletMarginLoss(margin=config['TRAINING_CONFIG']['LAMBDA_TR_MARGIN'])
            self.lambda_triplet = config['TRAINING_CONFIG']['LAMBDA_TR']
        else:
            self.lambda_triplet = 0

        self.optim = config['TRAINING_CONFIG']['OPTIM']
        self.beta1 = config['TRAINING_CONFIG']['BETA1']
        self.beta2 = config['TRAINING_CONFIG']['BETA2']

        # NOTE: 'SHCEDULER' typo is part of the existing config schema; kept as-is.
        self.schedule = config['TRAINING_CONFIG']['SHCEDULER']
        self.step_sie = config['TRAINING_CONFIG']['STEP_SIZE']
        self.gamma = config['TRAINING_CONFIG']['GAMMA']

        # wgan / hinge compute their adversarial losses inline below, so
        # self.adversarial_loss only exists for lsgan / vanilla.
        if self.gan_loss == 'lsgan':
            self.adversarial_loss = torch.nn.MSELoss()
        elif self.gan_loss == 'vanilla':
            self.adversarial_loss = torch.nn.BCELoss()
        self.l1_loss = torch.nn.L1Loss()

        self.cpu_seed = config['TRAINING_CONFIG']['CPU_SEED']
        self.gpu_seed = config['TRAINING_CONFIG']['GPU_SEED']

        self.g_spec = config['TRAINING_CONFIG']['G_SPEC'] == 'True'
        self.d_spec = config['TRAINING_CONFIG']['D_SPEC'] == 'True'

        self.device = config['MODEL_CONFIG']['DEVICE']
        self.use_tensorboard = config['TRAINING_CONFIG']['USE_TENSORBOARD']

        # VGG layers hooked for perceptual/style losses.
        # self.target_layer = ['relu_3', 'relu_8', 'relu_13', 'relu_17']
        self.target_layer = ['relu_3', 'relu_8']

        # Directory
        self.log_dir = config['TRAINING_CONFIG']['LOG_DIR']
        self.model_path = config['TRAINING_CONFIG']['MODEL_PATH']

        # Steps
        self.save_step = config['TRAINING_CONFIG']['SAVE_STEP']
        self.evaluation_step = config['TRAINING_CONFIG']['EVALUATION_STEP']

        # BUGFIX: always define self.logger. The training loop used to test
        # the truthiness of the *string* USE_TENSORBOARD (so 'False' was
        # truthy) and then crashed with AttributeError because no writer had
        # been created. Now: logger exists iff tensorboard is enabled, and
        # every logging site checks `self.logger is not None`.
        self.logger = None
        if self.use_tensorboard == 'True':
            self.logger = self.build_tensorboard()

        self.build_model()

    def build_tensorboard(self):
        """Create a SummaryWriter under ``LOG_DIR/<timestamp>/<mode>``."""
        timestamp = time.strftime("%Y_%m_%d_%H_%M", time.localtime())
        p = os.path.join(self.log_dir, f'{timestamp}/' + self.mode)
        writer = SummaryWriter(log_dir=p)
        return writer

    def build_model(self):
        """Instantiate G, D, the frozen VGG-19 feature extractor and the optimizers."""
        self.G = Generator(spec_norm=self.g_spec).to(self.device)
        self.D = Discriminator(spec_norm=self.d_spec, LR=0.2).to(self.device)
        self.vgg = vgg19(pretrained=True)
        # VGG is only a fixed feature extractor: freeze its parameters so the
        # generator's backward pass does not waste time/memory computing
        # gradients for them. The hooked `features` activations are unchanged.
        for p in self.vgg.parameters():
            p.requires_grad_(False)
        self.handles = []
        for layer in self.target_layer:
            # 'relu_N' -> hook self.vgg.features[N]
            self.handles.append(
                self.vgg.features[int(layer.split('_')[-1])].register_forward_hook(get_activation(layer)))
        self.vgg.to(self.device)
        if self.optim == 'ADAMW':
            self.g_optimizer = torch.optim.AdamW(self.G.parameters(), self.g_lr, (self.beta1, self.beta2))
            self.d_optimizer = torch.optim.AdamW(self.D.parameters(), self.d_lr, (self.beta1, self.beta2))
        else:
            self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, (self.beta1, self.beta2))
            self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr, (self.beta1, self.beta2))

        self.print_network(self.G, 'G')
        self.print_network(self.D, 'D')

    def print_network(self, model, name):
        """Print the parameter count and append the architecture to model_arch.txt."""
        num_params = 0
        for p in model.parameters():
            num_params += p.numel()
        print(name)
        print("The number of parameters: {}".format(num_params))

        with open(os.path.join(self.log_dir, 'model_arch.txt'), 'a') as fp:
            print(model, file=fp)
            print(name, file=fp)
            print("The number of parameters: {}".format(num_params), file=fp)

    def gradient_penalty(self, y, x):
        """Compute the WGAN-GP gradient penalty: (L2_norm(dy/dx) - 1)**2, averaged over the batch."""
        weight = torch.ones(y.size()).to(self.device)
        dydx = torch.autograd.grad(outputs=y,
                                   inputs=x,
                                   grad_outputs=weight,
                                   retain_graph=True,
                                   create_graph=True,  # penalty itself must be differentiable
                                   only_inputs=True)[0]

        # Per-sample L2 norm over all non-batch dimensions.
        dydx = dydx.view(dydx.size(0), -1)
        dydx_l2norm = torch.sqrt(torch.sum(dydx ** 2, dim=1))
        return torch.mean((dydx_l2norm - 1) ** 2)

    def reset_grad(self):
        """Reset the gradient buffers of both optimizers."""
        self.g_optimizer.zero_grad()
        self.d_optimizer.zero_grad()

    def gram_matrix(self, input):
        """Return the normalized Gram matrix of a (a, b, c, d) feature map batch."""
        a, b, c, d = input.size()  # a=batch, b=channels, (c, d)=spatial dims

        features = input.view(a * b, c * d)  # flatten each feature map

        G = torch.mm(features, features.t())  # channel-by-channel inner products

        # Normalize by the total number of elements so the loss scale is
        # independent of layer size.
        return G.div(a * b * c * d)

    def train_epochs(self):
        """Outer training loop: epochs, LR scheduling, logging, evaluation, checkpoints."""
        init_epoch = self.resume_model()  # -1 when starting from scratch

        if init_epoch >= 0:
            # BUGFIX: StepLR with last_epoch != -1 requires 'initial_lr' in
            # every param group; without this, resuming raised KeyError.
            for group in self.g_optimizer.param_groups:
                group.setdefault('initial_lr', self.g_lr)
            for group in self.d_optimizer.param_groups:
                group.setdefault('initial_lr', self.d_lr)

        # Only StepLR is implemented; the old if/else had two identical
        # branches, so the SHCEDULER config value currently has no effect.
        params = {"step_size": self.step_sie, "gamma": self.gamma}
        schedulerG = StepLR(self.g_optimizer, **params, last_epoch=init_epoch)
        schedulerD = StepLR(self.d_optimizer, **params, last_epoch=init_epoch)

        # BUGFIX: start at init_epoch + 1. range(init_epoch, ...) began at
        # epoch -1 on a fresh run (one extra epoch and an immediate
        # evaluation) and re-trained the already-saved epoch when resuming.
        for i in tqdm(range(init_epoch + 1, self.epoch)):
            self.G.train()
            self.D.train()
            epoch_log = self.train_one_epoch()  # renamed: no longer shadows builtin `dict`
            # adjust lr
            schedulerG.step()
            schedulerD.step()

            # Scalar logging (guarded: logger exists only when tensorboard is on).
            if self.logger is not None:
                self.logger.add_scalar("Loss/D/fake", epoch_log['D/loss_fake'], i + 1)
                self.logger.add_scalar("Loss/D/real", epoch_log['D/loss_real'], i + 1)
                self.logger.add_scalar("Loss/D/Total", epoch_log['D/Loss'], i + 1)

                self.logger.add_scalar("Loss/G/adv", epoch_log['G/loss_fake'], i + 1)
                self.logger.add_scalar("Loss/G/recon", epoch_log['G/loss_recon'], i + 1)
                self.logger.add_scalar("Loss/G/percep", epoch_log['G/percep'], i + 1)
                self.logger.add_scalar("Loss/G/style", epoch_log['G/style'], i + 1)
                self.logger.add_scalar("Loss/G/Total", epoch_log['G/Loss'], i + 1)

            # Qualitative evaluation: run G over the test set and log image
            # grids. Skipped entirely without a logger — the images were only
            # ever used for logging (and the old code crashed there anyway).
            if (i + 1) % self.evaluation_step == 0 and self.logger is not None:
                self.G.eval()
                fake_batches, sketch_batches, ref_batches = [], [], []
                with torch.no_grad():  # inference only; no autograd graph needed
                    for ref, sketch in self.test_loader:
                        ref = ref.to(self.device)
                        sketch = sketch.to(self.device)
                        fake_batches.append(self.G(ref, sketch))
                        sketch_batches.append(sketch)
                        ref_batches.append(ref)

                # Single cat per tensor (the old incremental torch.cat was
                # quadratic), then map [-1, 1] -> [0, 1] for display.
                sketch_img = torch.cat(sketch_batches, dim=0).mul(0.5).add(0.5)
                ref_img = torch.cat(ref_batches, dim=0).mul(0.5).add(0.5)
                total_img = torch.cat(fake_batches, dim=0).mul(0.5).add(0.5)
                self.logger.add_image('result/sketch', vutils.make_grid(sketch_img, nrow=4), global_step=i)
                self.logger.add_image('result/reference', vutils.make_grid(ref_img, nrow=4), global_step=i)
                self.logger.add_image('result/images', vutils.make_grid(total_img, nrow=4), global_step=i)

            # save model
            if (i + 1) % self.save_step == 0:
                self.save_model_full(epoch=i)

        print('train finish')

    def resume_model(self):
        """Load the latest checkpoint if one exists.

        Returns:
            The epoch index stored in the checkpoint, or -1 when starting fresh.
        """
        if not os.path.exists(self.model_path):
            return -1
        print('-----------------Resume from Last----------------')
        checkpoint = torch.load(self.model_path, map_location=self.device)
        self.G.load_state_dict(checkpoint['G'])
        self.D.load_state_dict(checkpoint['D'])
        self.g_optimizer.load_state_dict(checkpoint['optimG'])
        self.d_optimizer.load_state_dict(checkpoint['optimD'])
        initEpoch = checkpoint['epoch']
        self.best_fid = checkpoint['best_fid']
        return initEpoch

    def save_model_full(self, epoch):
        """Save models, optimizers and bookkeeping state to self.model_path (overwrites)."""
        ckp = {
            'epoch': epoch,
            'G': self.G.state_dict(),
            'D': self.D.state_dict(),
            'optimG': self.g_optimizer.state_dict(),
            'optimD': self.d_optimizer.state_dict(),
            'best_fid': self.best_fid
        }
        torch.save(ckp, self.model_path)

    def train_one_epoch(self):
        """Run one epoch of alternating D/G updates; return batch-averaged losses."""
        loader = self.train_loader
        leng = len(loader)
        pbar = tqdm(loader, desc='batch')

        # Epoch accumulators; each batch contributes value / leng (mean).
        LOSS_D_REAL = 0
        LOSS_D_FAKE = 0
        LOSS_PG = 0
        LOSS_D = 0
        LOSS_G_PERCEP = 0
        LOSS_G_RECON = 0
        LOSS_G_STYLE = 0
        LOSS_G_ADV = 0
        LOSS_G = 0

        # BUGFIX: pre-define every logged loss. With D_CRITIC or G_CRITIC > 1
        # some iterations skip one of the updates, and the logging section
        # below used to raise NameError (d_loss_gp likewise for wgan).
        # Skipped iterations now log the most recent (or zero) value.
        zero = torch.zeros((), device=self.device)
        d_loss_real = d_loss_fake = d_loss_gp = d_loss = zero
        g_loss_fake = g_recon_loss = g_loss_percep = g_loss_style = g_loss = zero

        i = 0
        for GT, sketch, reference in pbar:
            i = i + 1
            # Randomly warp the reference so G cannot rely on a pixel-aligned
            # reference/GT pair.
            pInit = genPerturbations(self.config, self.batch_size)
            pInitMatrix = vec2mtrx(self.config, pInit)
            reference = transformImage(self.config, reference, pInitMatrix)
            # move the batch to the training device
            GT = GT.to(self.device)
            sketch = sketch.to(self.device)
            reference = reference.to(self.device)
            log_dict = dict()
            log_dict['batch'] = i
            # ---- discriminator update (every d_critic-th batch) ----
            if i % self.d_critic == 0:
                fake_images = self.G(reference, sketch)
                if self.gan_loss in ['lsgan', 'vanilla']:
                    real_score = self.D(torch.cat([GT, sketch], dim=1))
                    fake_score = self.D(torch.cat([fake_images.detach(), sketch], dim=1))
                    d_loss_real = self.adversarial_loss(real_score, torch.ones_like(real_score)).mean()
                    d_loss_fake = self.adversarial_loss(fake_score, torch.zeros_like(fake_score)).mean()
                    d_loss = self.lambda_d_real * d_loss_real + self.lambda_d_fake * d_loss_fake
                elif self.gan_loss in ['wgan']:
                    real_score = self.D(torch.cat([GT, sketch], dim=1))
                    fake_score = self.D(torch.cat([fake_images.detach(), sketch], dim=1))
                    d_loss_real = -torch.mean(real_score)
                    d_loss_fake = torch.mean(fake_score)
                    # Gradient penalty on a random interpolation of real/fake.
                    alpha = torch.rand(GT.size(0), 1, 1, 1).to(self.device)
                    x_hat = (alpha * GT.data + (1 - alpha) * fake_images.data).requires_grad_(True)
                    # NOTE(review): unlike every other D call, x_hat is fed to
                    # D *without* the sketch channels — confirm D accepts this
                    # input shape in wgan mode.
                    out_src = self.D(x_hat)
                    d_loss_gp = self.gradient_penalty(out_src, x_hat)
                    d_loss = self.lambda_d_real * d_loss_real + self.lambda_d_fake * d_loss_fake + self.lambda_d_gp * d_loss_gp
                elif self.gan_loss in ['hinge']:
                    real_score = self.D(torch.cat([GT, sketch], dim=1))
                    fake_score = self.D(torch.cat([fake_images.detach(), sketch], dim=1))
                    d_loss_real = torch.relu(torch.ones_like(real_score) - real_score).mean()
                    d_loss_fake = torch.relu(torch.ones_like(fake_score) + fake_score).mean()
                    d_loss = self.lambda_d_real * d_loss_real + self.lambda_d_fake * d_loss_fake
                else:
                    # Unreachable given the __init__ assert; kept as a guard.
                    print('Not found corresponding loss type, please check')
                    d_loss_real = torch.zeros(size=(1,)).to(self.device)
                    d_loss_fake = torch.zeros(size=(1,)).to(self.device)
                    d_loss = torch.zeros(size=(1,)).to(self.device)
                    if self.gan_loss == 'wgan':
                        d_loss_gp = torch.zeros(size=(1,)).to(self.device)

                # Backward and optimize
                self.reset_grad()
                d_loss.backward()
                self.d_optimizer.step()

            # ---- generator update (every g_critic-th batch) ----
            if i % self.g_critic == 0:
                fake_images = self.G(reference, sketch)
                fake_score = self.D(torch.cat([fake_images, sketch], dim=1))
                # adversarial term
                if self.gan_loss in ['lsgan', 'vanilla']:
                    g_loss_fake = self.adversarial_loss(fake_score, torch.ones_like(fake_score))
                elif self.gan_loss in ['wgan']:
                    g_loss_fake = - torch.mean(fake_score)
                elif self.gan_loss in ['hinge']:
                    g_loss_fake = torch.relu(torch.ones_like(fake_score) - fake_score).mean()
                else:
                    print('Not found corresponding loss type, please check')
                    g_loss_fake = torch.zeros(size=(1,)).to(self.device)

                g_recon_loss = self.l1_loss(fake_images, GT)

                # Collect the hooked VGG feature maps for fake and real images.
                # NOTE(review): the hooks store *detached* activations, so the
                # perceptual/style terms below carry no gradient back to G —
                # confirm whether that is intended.
                fake_activation = dict()
                real_activation = dict()

                self.vgg(fake_images)
                for layer in self.target_layer:
                    fake_activation[layer] = vgg_activation[layer]
                vgg_activation.clear()

                self.vgg(GT)
                for layer in self.target_layer:
                    real_activation[layer] = vgg_activation[layer]
                vgg_activation.clear()

                g_loss_style = 0
                g_loss_percep = 0

                for layer in self.target_layer:
                    g_loss_percep += self.l1_loss(fake_activation[layer], real_activation[layer])
                    g_loss_style += self.l1_loss(self.gram_matrix(fake_activation[layer]),
                                                 self.gram_matrix(real_activation[layer]))

                g_loss = self.lambda_g_fake * g_loss_fake + self.lambda_g_recon * g_recon_loss + \
                         self.lambda_g_percep * g_loss_percep + self.lambda_g_style * g_loss_style

                # backward
                self.reset_grad()
                g_loss.backward()
                self.g_optimizer.step()

            # ---- per-batch logging (uses latest value when a step was skipped) ----
            log_dict['D/loss_real'] = self.lambda_d_real * d_loss_real.item()
            log_dict['D/loss_fake'] = self.lambda_d_fake * d_loss_fake.item()
            if self.gan_loss == 'wgan':
                log_dict['D/loss_pg'] = self.lambda_d_gp * d_loss_gp.item()
            log_dict['D/Loss'] = d_loss.item()
            log_dict['G/loss_fake'] = self.lambda_g_fake * g_loss_fake.item()
            log_dict['G/loss_recon'] = self.lambda_g_recon * g_recon_loss.item()
            log_dict['G/percep'] = self.lambda_g_percep * g_loss_percep.item()
            log_dict['G/style'] = self.lambda_g_style * g_loss_style.item()
            log_dict['G/Loss'] = g_loss.item()
            pbar.set_postfix(log_dict)

            LOSS_D_REAL += log_dict['D/loss_real'] / leng
            LOSS_D_FAKE += log_dict['D/loss_fake'] / leng
            if self.gan_loss == 'wgan':
                LOSS_PG += log_dict['D/loss_pg'] / leng
            LOSS_D += log_dict['D/Loss'] / leng
            LOSS_G_ADV += log_dict['G/loss_fake'] / leng
            LOSS_G_RECON += log_dict['G/loss_recon'] / leng
            LOSS_G_PERCEP += log_dict['G/percep'] / leng
            LOSS_G_STYLE += log_dict['G/style'] / leng
            LOSS_G += log_dict['G/Loss'] / leng

        EPOCH_LOG = {
            'D/loss_real': LOSS_D_REAL,
            'D/loss_fake': LOSS_D_FAKE,
            'D/loss_pg': LOSS_PG,
            'D/Loss': LOSS_D,
            'G/loss_fake': LOSS_G_ADV,
            'G/loss_recon': LOSS_G_RECON,
            'G/percep': LOSS_G_PERCEP,
            'G/style': LOSS_G_STYLE,
            'G/Loss': LOSS_G
        }
        return EPOCH_LOG


if __name__ == '__main__':
    # Let cuDNN auto-tune conv algorithms (inputs have a fixed size).
    cudnn.benchmark = True
    cfg = get_config()
    boot = BootHelper(cfg, get_train_loader(cfg), get_evaluation_loader(cfg))
    boot.train_epochs()
