# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import ExponentialLR
from torch.autograd import Variable

import datetime
import numpy as np
import json
from tqdm import tqdm, trange
# TODO
# from evaluate_code import evaluate_tvsum

from layers import Summarizer, Discriminator  # , apply_weight_norm
from utils import TensorboardWriter
# from feature_extraction import ResNetFeature
import warnings
warnings.filterwarnings("ignore")

class DppLoss(nn.Module):
    """Determinantal point process (DPP) diversity loss.

    Encourages the frames selected by the score vector (score >= 0.5) to be
    diverse in feature space.  The DPP kernel is

        L = (s s^T) * (E E^T)

    i.e. the frame-feature Gram matrix modulated by the importance scores,
    and the loss is the negative log-likelihood of the selected subset:

        -log det(L_z) + log det(L + I)

    The loss is computed on whatever device the inputs live on (the original
    implementation was hard-coded to CUDA), so behavior on CUDA tensors is
    unchanged while CPU tensors now work too.
    """

    def __init__(self):
        super().__init__()

    def forward(self, scores, e_output):
        """Compute the DPP loss for one sequence.

        Args:
            scores: frame importance scores, shape (seq_len, 1).
            e_output: encoder outputs, shape (seq_len, batch=1, hidden_size).

        Returns:
            A 1-element tensor holding the loss; eps=1e-8 is always added so
            the value is strictly positive and gradients stay finite.
        """
        seq_len, _batch, _hidden = e_output.shape
        device = e_output.device
        eps = scores.new_full((1,), 1e-8)

        s = scores.squeeze()            # (seq_len,)
        feats = e_output.squeeze(1)     # (seq_len, hidden) -- assumes batch == 1
        # DPP kernel: outer product of scores times the feature Gram matrix.
        kernel = torch.outer(s, s) * torch.mm(feats, feats.t())

        selected = s.ge(0.5)            # boolean mask of selected frames
        if not selected.any():
            # No frame passes the threshold: return eps so training continues.
            return eps

        l_z = kernel[selected][:, selected]
        det_lz = torch.det(l_z)
        norm = torch.log(torch.det(kernel + torch.eye(seq_len, device=device)))
        dpploss = -torch.log(det_lz) + norm
        if torch.isnan(dpploss) or det_lz < 1e-9:
            # det(L_z) underflowed or went non-positive (log would be NaN/inf):
            # fall back to a finite surrogate so backprop never sees NaN.
            return torch.det(l_z + torch.eye(l_z.shape[0], device=device)) + eps
        return dpploss + eps


class Solver(object):
    # Orchestrates the SUM-GAN model: sLSTM frame selector + VAE
    # (eLSTM encoder / dLSTM decoder) generator + cLSTM discriminator.
    def __init__(self, config=None, train_loader=None, test_loader=None):
        """Class that Builds, Trains and Evaluates SUM-GAN model"""
        self.config = config
        self.train_loader = train_loader
        self.test_loader = test_loader

    def build(self):
        """Instantiate modules and (in train mode) optimizers/schedulers.

        NOTE(review): all modules are moved to CUDA unconditionally; running
        on a CPU-only machine will fail here.
        """
        input_size = self.config.input_size
        hidden_size = self.config.hidden_size
        num_layers = self.config.num_layers
        # Build Modules
        # linear_compress projects raw CNN features (input_size) down to the
        # LSTMs' hidden_size; it is shared by all three optimizers below.
        self.linear_compress = nn.Linear(input_size, hidden_size).cuda()
        self.summarizer = Summarizer(input_size=hidden_size,
                                     hidden_size=hidden_size,
                                     num_layers=num_layers).cuda()
        self.discriminator = Discriminator(input_size=hidden_size,
                                           hidden_size=hidden_size,
                                           num_layers=num_layers).cuda()
        self.model = nn.ModuleList([
            self.linear_compress, self.summarizer, self.discriminator])
        self.dpploss = DppLoss()

        if self.config.mode == 'train':
            # Build Optimizers
            # Three optimizers over three (overlapping) parameter groups, as
            # in the SUM-GAN paper: selector+encoder, decoder, discriminator.
            # NOTE(review): linear_compress appears in all three groups, so
            # its weights receive updates from every phase — presumably
            # intentional, but confirm against the reference implementation.
            self.s_e_optimizer = optim.Adam(
                list(self.summarizer.s_lstm.parameters())
                + list(self.summarizer.vae.e_lstm.parameters())
                + list(self.linear_compress.parameters()),
                lr=self.config.lr)
            self.d_optimizer = optim.Adam(
                list(self.summarizer.vae.d_lstm.parameters())
                + list(self.linear_compress.parameters()),
                lr=self.config.lr)
            self.c_optimizer = optim.Adam(
                list(self.discriminator.parameters())
                + list(self.linear_compress.parameters()),
                lr=self.config.discriminator_lr)

            # Per-epoch exponential learning-rate decay (lr *= 0.995).
            self.s_e_scheduler = ExponentialLR(self.s_e_optimizer, gamma=0.995)
            self.d_scheduler = ExponentialLR(self.d_optimizer, gamma=0.995)
            self.c_scheduler = ExponentialLR(self.c_optimizer, gamma=0.995)

            # TODO why error
            # self.s_e_optimizer = ExponentialLR(self.s_e_optimizer, 0.995)

            self.model.train()
            # self.model.apply(apply_weight_norm)

            # Overview Parameters
            # print('Model Parameters')
            # for name, param in self.model.named_parameters():
            #     print('\t' + name + '\t', list(param.size()))

            # pip install tensorflow; tensorboard --logdir=../data/
            self.writer = TensorboardWriter(self.config.log_dir)

    @staticmethod
    def freeze_model(module):
        # Disable gradients for every parameter of `module`.
        for p in module.parameters():
            p.requires_grad = False

    def reconstruction_loss(self, h_origin, h_fake):
        """L2 loss between original-regenerated features at cLSTM's last hidden layer"""

        return torch.norm(h_origin - h_fake, p=2)

    def prior_loss(self, mu, log_variance):
        """KL( q(e|x) || N(0,1) )"""
        # Closed-form KL divergence between a diagonal Gaussian and N(0, I).
        return 0.5 * torch.sum(-1 + log_variance.exp() + mu.pow(2) - log_variance)

    def sparsity_loss(self, scores):
        """Summary-Length Regularization.

        Penalizes the mean frame score deviating from the target summary
        rate (original SUM-GAN formulation).
        """

        return torch.abs(torch.mean(scores) - self.config.summary_rate)

    def sparsity_loss_sup(self, s, s_hat):
        """Supervised sparsity loss (cross-entropy against ground truth).

        Args:
            s: predicted frame scores.
            s_hat: ground-truth keyframe labels.

        ref to https://blog.csdn.net/xg123321123/article/details/80781611
        """
        return -torch.mean(s_hat.squeeze().cuda()*torch.log(s))

    def sparsity_loss_rep(self, e_output):
        """Repelling (diversity) regularizer over encoder outputs.

        e_output shape: (seq_len, batch, hidden_size), e.g. (300, 1, 2048);
        assumes batch == 1.  Computes the mean pairwise cosine similarity of
        the off-diagonal frame pairs.

        Note: the matrix computation stores redundant intermediate tensors,
        which can cause OOM on long sequences.
        """
        seq_len, batch, hidden_size = e_output.shape
        e_n = e_output.squeeze()
        E = torch.norm(e_n, dim=1)
        EE = torch.ger(E, E)
        ee = torch.mm(e_n, e_n.t())
        ans = ee/EE
        # Zero out the diagonal so self-similarity does not contribute.
        fact = torch.ones((seq_len, seq_len)) - torch.eye(seq_len)
        ans *= fact.cuda()
        ret = torch.sum(ans, (0, 1))/(seq_len*(seq_len-1))
        return ret

    def gan_loss(self, original_prob, fake_prob, uniform_prob):
        """Typical GAN loss + Classify uniformly scored features"""

        gan_loss = torch.mean(torch.log(original_prob) + torch.log(1 - fake_prob)
                              + torch.log(1 - uniform_prob))  # Discriminate uniform score

        return gan_loss

    def train(self):
        """Run the three-phase adversarial training loop for n_epochs.

        Each batch performs three sequential updates: (1) sLSTM+eLSTM on
        reconstruction + prior + sparsity loss, (2) dLSTM on reconstruction
        + GAN loss, (3) cLSTM (discriminator) on the negated GAN loss (after
        a slow-start delay).  Checkpoints and an evaluation run follow every
        epoch.
        """
        start_tim = datetime.datetime.now()
        print('Start time:', start_tim.strftime('%Y-%m-%d %H:%M:%S'))

        step = 0
        # for epoch_i in trange(self.config.n_epochs, desc='Epoch', ncols=80):
        for epoch_i in range(self.config.n_epochs):
            s_e_loss_history = []
            d_loss_history = []
            c_loss_history = []
            # NOTE(review): schedulers are stepped before the optimizers each
            # epoch — this matches pre-1.1 PyTorch semantics; on PyTorch >= 1.1
            # scheduler.step() should come after optimizer.step().  Confirm
            # the targeted torch version.
            self.s_e_scheduler.step()
            self.d_scheduler.step()
            self.c_scheduler.step()
            s_e_lr = self.s_e_optimizer.param_groups[0]['lr']
            d_lr = self.d_optimizer.param_groups[0]['lr']
            c_lr = self.c_optimizer.param_groups[0]['lr']
            print(f'epk{epoch_i+1}: s_e_lr={s_e_lr}, d_lr={d_lr}, c_lr={c_lr}')
            # for batch_i, (img_features, label) in enumerate(tqdm(
            #         self.train_loader, desc='Batch', ncols=80, leave=False)):
            for batch_i, (img_features, label) in enumerate(self.train_loader):
                print(f'\nepoch{epoch_i+1}.batch{batch_i+1}')
                # if img_features.size(2) > 600:
                #     continue
                if img_features.size(1) > 10000:    # if seq len too long
                    continue

                # [batch_size=1, seq_len, 2048] -> [seq_len, 2048]
                img_features = img_features.view(-1, self.config.input_size)

                img_features_ = Variable(img_features).cuda()

                #---- Train sLSTM, eLSTM ----#
                if self.config.verbose:
                    print('Training sLSTM and eLSTM...')
                    # tqdm.write('\nTraining sLSTM and eLSTM...')

                # shape (seq_len, batch=1, input_size)
                # fig.3: x=original_features;
                original_features = self.linear_compress(img_features_.detach()).unsqueeze(1)

                # fig.3: sLSTM scores, VAE mean, VAE log var, \hat{x}
                scores, h_mu, h_log_variance, generated_features, e_output = self.summarizer(
                    original_features)
                # fig.3: _, _, _, \hat{x}_p
                _, _, _, uniform_features, _ = self.summarizer(
                    original_features, uniform=True)

                # fig.3: the three paths through cLSTM; "fake" = the
                # summary-reconstructed features posing as the original.
                h_origin, original_prob = self.discriminator(original_features)
                h_fake, fake_prob = self.discriminator(generated_features)
                h_uniform, uniform_prob = self.discriminator(uniform_features)

                # tqdm.write(f'original_p: {original_prob.data.item():.3f}, '
                #            f'fake_p: {fake_prob.data.item():.3f}, '
                #            f'uniform_p: {uniform_prob.data.item():.3f}')
                print(f'original_p: {original_prob.data.item():.3f}, '
                      f'fake_p: {fake_prob.data.item():.3f}, '
                      f'uniform_p: {uniform_prob.data.item():.3f}')

                reconstruction_loss = self.reconstruction_loss(h_origin, h_fake)
                prior_loss = self.prior_loss(h_mu, h_log_variance)
                # Variants of original paper
                # NOTE(review): if config.shift is none of the four options
                # below, sparsity_loss is unbound and the print below raises
                # NameError — consider a final `else` that fails fast.
                if self.config.shift == 'original':
                    sparsity_loss = self.sparsity_loss(scores)
                elif self.config.shift == 'sup':
                    sparsity_loss = self.sparsity_loss_sup(scores, label)
                elif self.config.shift == 'dpp':
                    # TODO: still has some issues
                    sparsity_loss = self.dpploss(scores, e_output)
                elif self.config.shift == 'rep':
                    sparsity_loss = self.sparsity_loss_rep(e_output)

                # tqdm.write(f'recon loss {reconstruction_loss.data.item():.3f}, '
                #            f'prior loss: {prior_loss.data.item():.3f}, '
                #            f'sparsity loss: {sparsity_loss.data.item():.3f}')
                print(f'recon loss {reconstruction_loss.data.item():.3f}, '
                      f'prior loss: {prior_loss.data.item():.3f}, '
                      f'sparsity loss: {sparsity_loss.data.item():.3f}')

                # ref to paper, for learning s, e
                s_e_loss = reconstruction_loss + prior_loss + sparsity_loss

                self.s_e_optimizer.zero_grad()
                s_e_loss.backward(retain_graph=False)  # retain_graph=True
                torch.cuda.empty_cache()

                # Gradient cliping
                # NOTE(review): clip_grad_norm is deprecated in modern PyTorch
                # in favor of clip_grad_norm_ (in-place) — confirm torch version.
                torch.nn.utils.clip_grad_norm(self.model.parameters(), self.config.clip)
                self.s_e_optimizer.step()

                s_e_loss_history.append(s_e_loss.data)

                #---- Train dLSTM ----#
                if self.config.verbose:
                    # tqdm.write('Training dLSTM...')
                    print('Training dLSTM...')

                # [seq_len, 1, hidden_size]
                # Re-run the forward pass: the previous graph was freed by
                # backward(retain_graph=False) above.
                original_features = self.linear_compress(img_features_.detach()).unsqueeze(1)

                scores, h_mu, h_log_variance, generated_features, _ = self.summarizer(
                    original_features)
                _, _, _, uniform_features, _ = self.summarizer(original_features,
                                                            uniform=True)

                h_origin, original_prob = self.discriminator(original_features)
                h_fake, fake_prob = self.discriminator(generated_features)
                h_uniform, uniform_prob = self.discriminator(uniform_features)

                # tqdm.write(f'original_p: {original_prob.data.item():.3f}, '
                #            f'fake_p: {fake_prob.data.item():.3f}, '
                #            f'uniform_p: {uniform_prob.data.item():.3f}')
                print(f'original_p: {original_prob.data.item():.3f}, '
                      f'fake_p: {fake_prob.data.item():.3f}, '
                      f'uniform_p: {uniform_prob.data.item():.3f}')

                reconstruction_loss = self.reconstruction_loss(h_origin, h_fake)
                # L_{GAN} from fig.3; Eq. (4) in the paper
                gan_loss = self.gan_loss(original_prob, fake_prob, uniform_prob)

                print(f'recon loss {reconstruction_loss.data.item():.3f}, '
                      f'gan loss: {gan_loss.data.item():.3f}')
                # tqdm.write(f'recon loss {reconstruction_loss.data.item():.3f}, '
                #            f'gan loss: {gan_loss.data.item():.3f}')

                d_loss = reconstruction_loss + gan_loss

                self.d_optimizer.zero_grad()
                d_loss.backward()  # retain_graph=True)
                # Gradient cliping
                torch.nn.utils.clip_grad_norm(self.model.parameters(), self.config.clip)
                self.d_optimizer.step()

                d_loss_history.append(d_loss.data)

                #---- Train cLSTM ----#
                # Discriminator updates start only after `discriminator_slow_start`
                # batches, giving the generator a head start.
                if batch_i > self.config.discriminator_slow_start:
                    if self.config.verbose:
                        # tqdm.write('Training cLSTM...')
                        print('Training cLSTM...')
                    # [seq_len, 1, hidden_size]
                    original_features = self.linear_compress(img_features_.detach()).unsqueeze(1)

                    scores, h_mu, h_log_variance, generated_features, _ = self.summarizer(
                        original_features)
                    _, _, _, uniform_features, _ = self.summarizer(
                        original_features, uniform=True)

                    h_origin, original_prob = self.discriminator(original_features)
                    h_fake, fake_prob = self.discriminator(generated_features)
                    h_uniform, uniform_prob = self.discriminator(uniform_features)
                    # tqdm.write(f'original_p: {original_prob.data[0]:.3f}, '
                    #            f'fake_p: {fake_prob.data[0]:.3f}, '
                    #            f'uniform_p: {uniform_prob.data[0]:.3f}')
                    # NOTE(review): `.data[0]` on a 0-dim tensor raises on
                    # PyTorch >= 0.5; `.item()` is used everywhere else in
                    # this file — confirm these prints still work.
                    print(f'original_p: {original_prob.data[0]:.3f}, '
                          f'fake_p: {fake_prob.data[0]:.3f}, '
                          f'uniform_p: {uniform_prob.data[0]:.3f}')

                    # Maximization
                    c_loss = -1 * self.gan_loss(original_prob, fake_prob, uniform_prob)

                    # tqdm.write(f'gan loss: {gan_loss.data[0]:.3f}')
                    print(f'gan loss: {gan_loss.data[0]:.3f}')

                    self.c_optimizer.zero_grad()
                    c_loss.backward()
                    # Gradient cliping
                    torch.nn.utils.clip_grad_norm(self.model.parameters(), self.config.clip)
                    self.c_optimizer.step()

                    c_loss_history.append(c_loss.data)

                if self.config.verbose:
                    # tqdm.write('Plotting...')
                    print('Plotting...')

                self.writer.update_loss(reconstruction_loss.data, step, 'recon_loss')
                self.writer.update_loss(prior_loss.data, step, 'prior_loss')
                self.writer.update_loss(sparsity_loss.data, step, 'sparsity_loss')
                self.writer.update_loss(gan_loss.data, step, 'gan_loss')

                # self.writer.update_loss(s_e_loss.data, step, 's_e_loss')
                # self.writer.update_loss(d_loss.data, step, 'd_loss')
                # self.writer.update_loss(c_loss.data, step, 'c_loss')

                self.writer.update_loss(original_prob.data, step, 'original_prob')
                self.writer.update_loss(fake_prob.data, step, 'fake_prob')
                self.writer.update_loss(uniform_prob.data, step, 'uniform_prob')

                step += 1    # global step counter across all epochs

            # Epoch-level averages for TensorBoard.
            # NOTE(review): if the slow-start threshold was never passed this
            # epoch, c_loss_history is empty and torch.stack raises — confirm
            # discriminator_slow_start < number of batches.
            s_e_loss = torch.stack(s_e_loss_history).mean()
            d_loss = torch.stack(d_loss_history).mean()
            c_loss = torch.stack(c_loss_history).mean()
            if self.config.verbose:
                print('Plotting...')  # tqdm.write('Plotting...')
            self.writer.update_loss(s_e_loss, epoch_i, 's_e_loss_epoch')
            self.writer.update_loss(d_loss, epoch_i, 'd_loss_epoch')
            self.writer.update_loss(c_loss, epoch_i, 'c_loss_epoch')

            # Save parameters at checkpoint
            ckpt_path = str(self.config.save_dir) + f'_epoch-{epoch_i}.pkl'
            # tqdm.write(f'Save parameters at {ckpt_path}')
            print(f'Save parameters at {ckpt_path}')
            torch.save(self.model.state_dict(), ckpt_path)

            self.evaluate(epoch_i)

            # evaluate() switches to eval mode; switch back for the next epoch.
            self.model.train()

        end_tim = datetime.datetime.now()
        print('End time:', end_tim.strftime('%Y-%m-%d %H:%M:%S'))
        total_tim = end_tim - start_tim  # timedelta
        print('Total time:', str(total_tim))

    def evaluate(self, epoch_i):
        """Score every test video with the sLSTM and dump scores to JSON.

        Writes {video_name: (scores, label)} to
        `score_dir/epoch_{epoch_i}.json`.
        """
        # checkpoint = self.config.ckpt_path
        # print(f'Load parameters from {checkpoint}')
        # self.model.load_state_dict(torch.load(checkpoint))

        self.model.eval()

        out_dict = {}

        # The loader yields the video name wrapped in a one-element batch
        # list, hence the [vdo_name] unpacking.
        # for batch_i, (vdo_tensor, [vdo_name], label) in enumerate(tqdm(
        #         self.test_loader, desc='Evaluate', ncols=80, leave=False)):
        for batch_i, (vdo_tensor, [vdo_name], label) in enumerate(self.test_loader):
        # for vdo_tensor, vdo_name in tqdm(
        #         self.test_loader, desc='Evaluate', ncols=80, leave=False):

            # [seq_len, batch=1, 2048]
            vdo_tensor = vdo_tensor.view(-1, self.config.input_size)
            with torch.no_grad():
                vdo_feature = Variable(vdo_tensor).cuda()    # volatile=True

            # [seq_len, 1, hidden_size]
            vdo_feature = self.linear_compress(vdo_feature.detach()).unsqueeze(1)

            # [seq_len]
            scores = self.summarizer.s_lstm(vdo_feature).squeeze(1)

            scores = np.array(scores.data).tolist()
            label = np.array(label.squeeze()).tolist()

            # Value is a (scores, label) tuple; json serializes it as a list.
            out_dict[vdo_name] = scores, label

            # NOTE(review): the JSON file is rewritten on every batch with the
            # accumulated dict — correct but wasteful; writing once after the
            # loop would suffice.
            self.config.score_dir.mkdir(parents=True, exist_ok=True)
            # TODO: {self.config.video_type} — video_type is currently unused
            score_save_path = self.config.score_dir.joinpath(
                f'epoch_{epoch_i}.json')
            # score_save_path.touch(exist_ok=True)
            with open(score_save_path, 'w') as f:
                # tqdm.write(f'Saving score at {str(score_save_path)}.')
                print(f'Saving score at {str(score_save_path)}.')
                json.dump(out_dict, f)
            score_save_path.chmod(0o777)

    def pretrain(self):
        # Placeholder: pre-training is not implemented.
        pass


if __name__ == '__main__':
    # Smoke test for DppLoss on a tiny 3-frame sequence.
    # (sparsity_loss_rep could be exercised the same way if made static.)
    features = torch.tensor(
        [[[0.5, 0.3, 0.4]], [[0.2, 0.1, 0.1]], [[0.1, 0.2, 0.2]]]).cuda()
    frame_scores = torch.tensor([0.1, 0.2, 0.3]).cuda()
    # Solver.sparsity_loss_rep(features)
    loss_fn = DppLoss()
    loss_fn(frame_scores, features)
