import os
import math
import time


import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pandas as pd


from lib.data import batch_generator, next_batch, get_delta_pre
from utils import extract_time, random_generator, get_logger
from lib.gan_noed_model import Generator, Discriminator, Imputation
from lib.metrics.visualization_metrics import visualization
from lib.metrics.dis_metrics import discriminative_score_metrics
from lib.metrics.pred_metrics import predictive_score_metrics
from lib.metrics.mae_metrics import mae_score_metrics


class GAN_NOALL:
    """TimeGAN-style GAN without the encoder/decoder (embedding/recovery) pair.

    A Generator/Discriminator pair is trained directly on the (scaled)
    time-series training data; the trained generator is then used for
    synthetic data generation and for GAN-based imputation of masked series.
    """

    @property
    def name(self):
        # Public model identifier used in logs/paths.
        return 'TimeGAN'

    def __init__(self, opt, dataset):
        # TODO: load the data only when training starts (turn it into a
        # setter instead of a constructor argument).
        # 1. Data preparation
        self.seed(opt.manualseed)

        # Initialize variables.
        self.opt = opt
        self.dataset = dataset
        self.train_data, self.scalar = dataset.load_train_data(), dataset.scalar
        self.opt.z_dim = self.train_data[0].shape[-1]
        self.ori_time, self.max_seq_len = extract_time(self.train_data)
        self.data_num, _, _ = np.asarray(self.train_data).shape  # e.g. 3661; 24; 6
        self.delta_type = opt.delta_type

        self.device = torch.device("cuda:"+self.opt.gpu_id if self.opt.device != 'cpu' else "cpu")

        # -- Misc attributes
        self.epoch = 0
        self.times = []
        self.total_steps = 0

        # Logger; the log directory encodes the run configuration.
        self.log_dir = self.get_log_dir(opt.run_id)
        self.logger = self.get_logger()

        # 2. Networks: generator and discriminator only (no embedder/recovery).
        self.netg = Generator(self.opt).to(self.device)
        self.netd = Discriminator(self.opt).to(self.device)

        if self.opt.resume != '':  # resume training; also usable to load a trained model without calling train()
            self.logger.info("\nLoading pre-trained networks.")
            if self.load():
                self.logger.info('load success')
            else:
                self.logger.info('load failed')

        # 3. Losses
        self.l_mse = nn.MSELoss()
        self.l_r = nn.L1Loss()
        self.l_bce = nn.BCEWithLogitsLoss()

        # 4. Optimizers (training mode only)
        if self.opt.mode == 'train':
            self.optimizer_g = optim.Adam(self.netg.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, 0.999))
            self.optimizer_d = optim.Adam(self.netd.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, 0.999))

        self.logger.info(opt)
        self.logger.info(self.scalar.statistics)

    def get_log_dir(self, run_id=''):
        """Build (and create) the log directory encoding model/train/data params."""
        # Model parameters
        num_layers = self.opt.num_layer
        hidden_size = self.opt.hidden_dim
        layer_size = self.opt.layer_size

        # Training parameters
        epoch = self.opt.iteration
        lr = self.opt.lr
        beta = self.opt.beta1

        # Data parameters
        seq_len = self.opt.seq_len
        batch_size = self.opt.batch_size
        if run_id == '':
            # No timestamp: runs with identical settings share a directory.
            run_id = '{}_{}_{}_{}_{}_{}_{}'.format(num_layers, hidden_size, epoch, lr, beta, seq_len, batch_size)
        # Renamed from `dir` to avoid shadowing the builtin.
        base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        if self.opt.data_name == 'water':
            masked_indicator = self.opt.masked_indicator if self.opt.masked_indicator != '' else 'all'
            log_dir = os.path.join(base_dir, self.opt.output_dir, self.opt.data_name, masked_indicator, self.opt.gru_type,
                                   'fc_'+self.opt.is_fc+'_ln_'+self.opt.is_ln+'_res_'+self.opt.is_res, layer_size, run_id)
        else:
            log_dir = os.path.join(base_dir, self.opt.output_dir, self.opt.data_name, self.opt.module, run_id)
        os.makedirs(log_dir, exist_ok=True)  # race-safe vs. exists()+makedirs()
        return log_dir

    def get_logger(self, log_type='train'):
        """Return a logger writing to `<log_dir>/<log_type>.log`."""
        log_dir = os.path.join(self.log_dir)
        os.makedirs(log_dir, exist_ok=True)
        return get_logger(log_dir, log_type, log_filename=log_type+'.log')

    def seed(self, seed_value):
        """Seed python/torch/numpy RNGs; -1 means "do not seed"."""
        # Default value -1 disables seeding entirely.
        if seed_value == -1:
            return

        # Otherwise seed every RNG source and force deterministic cuDNN.
        import random
        random.seed(seed_value)
        torch.manual_seed(seed_value)
        torch.cuda.manual_seed_all(seed_value)
        np.random.seed(seed_value)
        torch.backends.cudnn.deterministic = True

    def set_train_mode(self, train):
        """Toggle train/eval mode on both networks at once.

        No phase needs one network's output without updating the other, so
        a single switch for both is sufficient.
        """
        self.netg.train(train)
        self.netd.train(train)

    def train(self):
        '''
        Load a saved model first if requested; if none is available, train,
        then generate synthetic data and evaluate it. Evaluation uses the
        complete original dataset rather than the (possibly incomplete)
        training set, since zero-filled gaps would trivially bias the
        discriminative/predictive metrics.
        Returns:

        '''
        if self.opt.is_load == 'True':
            if self.load():
                self.logger.info('load success')
                return
            else:
                self.logger.info('load failed, start train')

        self.set_train_mode(True)

        self.logger.info('Finish Training with Supervised Loss Only')
        self.logger.info('============================================================================')

        # Joint (adversarial) training
        self.logger.info('Start Joint Training')
        self.logger.info('============================================================================')
        for itt in range(self.opt.iteration):
            # One iteration: update G twice, then D once.
            for kk in range(2):
                g_loss_u, g_loss_s, g_loss_v = self.joint_train_one_iter_gs()
            d_loss = self.joint_train_one_iter_d()
            if itt % self.opt.log_interval == 0:
                msg = 'Joint training step: {}/{}    d_loss: {:.4f}     g_loss_u: {:.4f}    g_loss_s:{:.4f}    ' \
                      'g_loss_v:{:.4f} ' \
                    .format(itt, self.opt.iteration, d_loss, g_loss_u, math.sqrt(g_loss_s), g_loss_v, )
                self.logger.info(msg)
                self.save(itt)
        self.save()  # final checkpoint (epoch id -1)

        self.logger.info('Finish Joint Training')
        self.logger.info('============================================================================')

        self.set_train_mode(False)  # switch to eval mode

        self.logger.info('Start Synthetic Data Generation')
        self.logger.info('============================================================================')
        generated_data = self.generation()
        self.logger.info('Finish Synthetic Data Generation')
        self.logger.info('============================================================================')

        self.logger.info('Mertic:')
        self.logger.info('============================================================================')
        # Use the complete original dataset here, not the training set: the
        # training set may have missing values, which would make the
        # classification/prediction metrics meaningless (shuffling is irrelevant).
        ori_data = self.dataset.load_ori_data()
        metric_results = self.evaluation(ori_data, generated_data)
        self.logger.info('discriminative:{:.4f},predictive:{:.4f}'
                         .format(metric_results['discriminative'], metric_results['predictive']))

    def evaluation(self, ori_data, generated_data):
        """Compute discriminative/predictive scores and save PCA/t-SNE plots."""
        # Output initialization
        metric_results = dict()

        # 1. Discriminative score (averaged over metric_iteration runs)
        discriminative_score = list()
        for _ in range(self.opt.metric_iteration):
            temp_disc = discriminative_score_metrics(ori_data, generated_data, self.device)
            discriminative_score.append(temp_disc)

        metric_results['discriminative'] = np.mean(discriminative_score)

        # 2. Predictive score (plot only on the first run)
        predictive_score = list()
        for tt in range(self.opt.metric_iteration):
            plot_pred = tt == 0
            temp_pred = predictive_score_metrics(ori_data, generated_data, self.device, self.log_dir, plot_pred)
            predictive_score.append(temp_pred)

        metric_results['predictive'] = np.mean(predictive_score)

        # 3. Visualization (PCA and tSNE)
        visualization(ori_data, generated_data, 'pca', self.log_dir)
        visualization(ori_data, generated_data, 'tsne', self.log_dir)

        return metric_results

    def generation(self):
        """Generate synthetic data for the whole training set.

        Returns a list of [T_i, feature_dim] ndarrays, one per sample,
        each truncated to the sample's original length.
        """
        X, T = batch_generator(self.train_data, self.ori_time, self.data_num)
        Z = torch.tensor(X, dtype=torch.float32).to(self.device)
        z_delta = torch.tensor(get_delta_pre(X, self.delta_type), dtype=torch.float32).to(self.device) if self.opt.gru_type == 'grui' else None

        E_hat = self.netg(Z, z_delta)
        generated_data_curr = E_hat

        generated_data = list()

        for i in range(self.data_num):
            temp = generated_data_curr[i, :self.ori_time[i], :]
            generated_data.append(temp.detach().cpu().numpy())

        return generated_data  # list of ndarrays

    def joint_train_d_forward(self, X, Z, x_delta, z_delta):
        """Forward pass + loss for the discriminator.

        D_loss = BCE(D(X), 1) + w_gamma * BCE(D(G(Z)), 0).

        NOTE(review): the caller passes Z=None/z_delta=None while the
        generator pass in `joint_train_g_forward` is conditioned on X --
        confirm that Generator handles a None input, or whether X was
        intended here as well.
        """
        # 1. Forward
        Y_real = self.netd(X, x_delta)

        E_hat = self.netg(Z, z_delta)
        Y_fake_e = self.netd(E_hat, z_delta)

        # 2. Loss: push real samples toward 1, generated samples toward 0.
        D_loss_real = self.l_bce(Y_real, torch.ones_like(Y_real))
        D_loss_fake_e = self.l_bce(Y_fake_e, torch.zeros_like(Y_fake_e))
        D_loss = D_loss_real + D_loss_fake_e * self.opt.w_gamma

        return D_loss

    def joint_train_g_forward(self, X, Z, x_delta, z_delta):
        """Forward pass + loss for the generator.

        G_loss = w_gamma * adversarial loss + 100 * two-moment loss.
        Returns (G_loss, 0, 0, G_loss_V); the zeros keep the interface of the
        original TimeGAN (supervised/reconstruction terms are absent here).
        """
        M = (X != 0).int()  # observation mask: zeros mark missing entries

        # 1. Adversarial loss: make generated data classify as 1 (real).
        # Note the generator is conditioned on X, not on a random Z.
        E_hat = self.netg(X, x_delta)
        Y_fake_e = self.netd(E_hat, x_delta)
        G_loss_U_e = self.l_bce(Y_fake_e, torch.ones_like(Y_fake_e))

        # 2. Two-moment loss: let real data guide G so generated and real data
        # share the same mean and std. This is not a reconstruction error --
        # generated samples need not match real samples pointwise, only in
        # distribution. Missing entries are masked out on both sides so they
        # do not skew the statistics.
        X_hat = E_hat  # no decoder: the generator output is already data-space
        X_hat = torch.multiply(X_hat, M)
        X = torch.multiply(X, M)
        # TODO: the data went through the scaler, so generation should ideally
        # go through it as well.
        # |std(a)-std(b)| per (time step, feature); batch is the sample axis.
        G_loss_V1 = torch.mean(
            torch.abs(torch.sqrt(torch.var(X_hat, [0]) + 1e-6) - torch.sqrt(torch.var(X, [0]) + 1e-6)))
        # |mean(a) - mean(b)|
        G_loss_V2 = torch.mean(torch.abs(torch.mean(X_hat, [0]) - torch.mean(X, [0])))
        G_loss_V = G_loss_V1 + G_loss_V2

        # 3. Summation
        G_loss = self.opt.w_gamma * G_loss_U_e + 100 * G_loss_V

        return G_loss, 0, 0, G_loss_V

    def joint_train_one_iter_gs(self):
        """One generator update step. Returns (g_loss_u, 0, 0) as floats."""
        # 1. Inputs
        X0, T = batch_generator(self.train_data, self.ori_time, self.opt.batch_size)
        X = torch.tensor(X0, dtype=torch.float32).to(self.device)
        x_delta = torch.tensor(get_delta_pre(X0, self.delta_type), dtype=torch.float32).to(self.device) if self.opt.gru_type == 'grui' else None
        Z, z_delta = None, None  # the random-vector input is no longer used

        # 2./3. Forward + loss
        G_loss, _, _, G_loss_V = self.joint_train_g_forward(X, Z, x_delta, z_delta)

        # 4. Backward
        self.optimizer_g.zero_grad()
        G_loss.backward(retain_graph=True)

        # 5. Update parameters
        self.optimizer_g.step()

        return float(G_loss.detach().cpu().numpy()), 0, 0

    def joint_train_one_iter_d(self):
        """One discriminator step; the update is skipped when D is already
        strong (loss <= 0.15) so D does not overpower G."""
        # 1. Inputs
        X0, T = batch_generator(self.train_data, self.ori_time, self.opt.batch_size)
        X = torch.tensor(X0, dtype=torch.float32).to(self.device)
        x_delta = torch.tensor(get_delta_pre(X0, self.delta_type), dtype=torch.float32).to(self.device) if self.opt.gru_type == 'grui' else None

        Z, z_delta = None, None

        # 2./3. Forward + loss
        D_loss = self.joint_train_d_forward(X, Z, x_delta, z_delta)

        d_val = float(D_loss.detach().cpu().numpy())  # hoisted: computed once
        if d_val > 0.15:  # do not let D train too well
            # 4. Backward
            self.optimizer_d.zero_grad()
            D_loss.backward(retain_graph=True)

            # 5. Update parameters
            self.optimizer_d.step()

        return d_val

    def save(self, epoch=-1):
        """Save G/D weights to `<log_dir>/saved_model/<epoch>.pth`.

        epoch=-1 denotes the final model (loaded by default in `load`).
        """
        model_dir = os.path.join(self.log_dir, 'saved_model')
        os.makedirs(model_dir, exist_ok=True)
        model_path = os.path.join(model_dir, '{}.pth'.format(epoch))
        torch.save({
            'netg': self.netg.state_dict(),
            'netd': self.netd.state_dict(),
        }, model_path)

    def load(self, model_path=None):
        """Load G/D weights; defaults to the final checkpoint (-1.pth).

        Returns True on success, False (after logging the error) on failure.
        """
        if model_path is None:
            model_path = os.path.join(self.log_dir, 'saved_model', '-1.pth')
        try:
            # map_location keeps CUDA-trained checkpoints loadable on CPU-only hosts.
            checkpoint = torch.load(model_path, map_location=self.device)
            self.netg.load_state_dict(checkpoint['netg'])
            self.netd.load_state_dict(checkpoint['netd'])
            return True
        except Exception as e:
            self.logger.error(e)
            return False

    def imputation(self, masked_data):
        """Impute missing values by optimizing a trainable latent input z.

        For each batch a fresh Imputation module produces z; z is tuned for
        `opt.impute_iter` steps against L_impute = L_rec + lambda * L_d, then
        G(z) fills only the masked positions. Returns the renormalized
        imputed data ([n_sample, feature_dim], first time step of each
        window) and also writes it to CSV.
        """
        masked_indicator = self.opt.masked_indicator if self.opt.masked_indicator != '' else 'all'
        impute_id = '{}_{}_{}'.format(masked_indicator, self.opt.impute_iter, self.opt.w_lambda)
        self.log_dir = os.path.join(self.log_dir, 'masked_ratio-'+str(self.opt.masked_ratio), impute_id)
        ori_data = self.dataset.load_ori_data()
        impute_logger = self.get_logger(log_type='impute')
        imputed_data = []

        batch_id = 1
        for x in next_batch(masked_data, self.opt.batch_size):  # impute per batch; masked_data is not shuffled
            # 1. Inputs: a fresh trainable z per batch
            neti = Imputation(self.opt, x)
            z_delta = torch.tensor(get_delta_pre(x, self.delta_type), dtype=torch.float32).to(
                self.device) if self.opt.gru_type == 'grui' else None

            x = torch.tensor(x, dtype=torch.float32).to(self.device)  # [bsz, seq_len, feature_dim]
            m = (x != 0).int()  # observation mask (zeros are missing)

            optimizer_i = optim.Adam(neti.parameters(), lr=1*self.opt.lr, betas=(self.opt.beta1, 0.999))

            self.set_train_mode(True)  # backward() requires train mode
            # z_delta stays fixed across the inner loop, so compute it once here.
            z = neti().to(self.device)  # current z
            z_delta = torch.tensor(get_delta_pre(z.detach().cpu().numpy(), self.delta_type),
                                   dtype=torch.float32).to(self.device) if self.opt.gru_type == 'grui' else None
            for i in range(self.opt.impute_iter):  # tune z
                # 2. Forward: impute_out = G(z)
                X_hat, Y_fake_e, _ = self.impute_forward(neti, z_delta)

                # 3. Loss: L_impute = L_rec + lambda * L_d
                # Reconstruction on observed entries only; x is already zero
                # at missing positions, so it needs no extra masking.
                loss_rec = self.l_mse(torch.multiply(X_hat, m), x)
                # Discriminator term: tune z so the output looks real.
                D_loss_fake_e = self.l_bce(Y_fake_e, torch.ones_like(Y_fake_e))
                dis_loss = D_loss_fake_e
                impute_loss = loss_rec + self.opt.w_lambda*dis_loss

                # 4. Backward
                optimizer_i.zero_grad()
                impute_loss.backward()

                # 5. Update z
                optimizer_i.step()

                if (i+1) % self.opt.log_interval == 0:
                    impute_logger.info('imputing: batch_id:{} {}/{} impute_loss:{:.4f}    reconstruction_loss:{:.4f}   discrimination_loss:{:.4f}'
                                       .format(batch_id, i+1, self.opt.impute_iter, impute_loss.item(), loss_rec.item(), dis_loss.item()))
            self.set_train_mode(False)  # TODO(review): verify whether leaving train mode on matters here
            X_hat, _, _ = self.impute_forward(neti, z_delta)

            # Keep observed values; fill only the missing positions with G(z).
            imputed_out = torch.multiply((1-m), X_hat) + x
            imputed_data.append(imputed_out)

            batch_id += 1
        imputed_data = torch.cat(imputed_data, 0).detach().cpu().numpy()

        impute_logger.info('Mertic:')
        impute_logger.info('============================================================================')
        ori_data = ori_data[:len(imputed_data)]  # ori_data is assumed complete (no missing values)
        metric_results = self.evaluation(ori_data, imputed_data)
        mae_score = mae_score_metrics(ori_data, imputed_data)
        metric_results['mae'] = mae_score
        impute_logger.info('discriminative:{:.4f},predictive:{:.4f},mae:{:.4f}'
                           .format(metric_results['discriminative'],metric_results['predictive'],metric_results['mae']))

        imputed_data = imputed_data[:, 0, :]  # [n_sample, feature_dim]
        imputed_data = self.scalar.renormalize(imputed_data)
        self.save_csv(imputed_data, 'imputed_data')

        return imputed_data

    def impute_forward(self, neti, z_delta):
        """Run the current z through G and D; returns (X_hat, D(X_hat), 0)."""
        z = neti().to(self.device)  # latest z after the optimizer updates
        # Generate data from z
        E_hat = self.netg(z, z_delta)
        Y_fake_e = self.netd(E_hat, z_delta)
        return E_hat, Y_fake_e, 0

    def save_csv(self, data, file_name):
        """Write `data` to `<log_dir>/<file_name>.csv` without an index column."""
        data_path = os.path.join(self.log_dir, file_name+'.csv')
        df = pd.DataFrame(data)
        df.to_csv(data_path, index=False)