import os
import math
import time


import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pandas as pd


from lib.data import batch_generator, next_batch, get_delta_pre
from utils import extract_time, random_generator, get_logger, wgn
from lib.model import Encoder, Recovery, Generator, Discriminator, Supervisor, Imputation
from lib.metrics.visualization_metrics import visualization
from lib.metrics.dis_metrics import discriminative_score_metrics
from lib.metrics.pred_metrics import predictive_score_metrics
from lib.metrics.mae_metrics import mae_score_metrics


class TimeGAN:
    """TimeGAN Class
    """

    @property
    def name(self):
        """Model identifier used in log messages and output paths."""
        return 'TimeGAN'

    def __init__(self, opt, dataset):
        """Build all TimeGAN components, optimizers and loss functions.

        Args:
            opt: parsed option namespace (hyper-parameters, paths, device, mode).
            dataset: dataset object providing ``load_train_data()``, ``load_ori_data()``
                and a ``scalar`` normalizer.
        """
        # TODO: load data only when training (i.e. via a setter), not as an init parameter.
        # 1. Data handling
        # super(TimeGAN, self).__init__(opt, ori_data)
        self.seed(opt.manualseed)

        # Initialize variables. TODO: there is no held-out test set here.
        self.opt = opt
        self.dataset = dataset
        self.train_data, self.scalar = dataset.load_train_data(), dataset.scalar
        self.opt.z_dim = self.train_data[0].shape[-1]
        self.ori_time, self.max_seq_len = extract_time(self.train_data)
        self.data_num, _, _ = np.asarray(self.train_data).shape  # 3661; 24; 6
        self.delta_type = opt.delta_type
        self.gan_type = opt.GAN_type
        self.device = torch.device("cuda:"+self.opt.gpu_id if self.opt.device != 'cpu' else "cpu")

        # -- Misc attributes
        self.epoch = 0
        self.times = []
        self.total_steps = 0

        # Logger; the log directory name encodes the run configuration.
        self.log_dir = self.get_log_dir(opt.run_id)
        self.logger = self.get_logger()

        # Create and initialize networks: encoder (embedding), recovery (decoder),
        # generator, discriminator, plus the supervisor.
        # 2. Model components
        self.nete = Encoder(self.opt).to(self.device)
        self.netr = Recovery(self.opt).to(self.device)
        self.netg = Generator(self.opt).to(self.device)
        self.netd = Discriminator(self.opt).to(self.device)
        self.nets = Supervisor(self.opt).to(self.device)

        if self.opt.resume != '':  # resume training; without calling train() this also serves to load a trained model
            self.logger.info("\nLoading pre-trained networks.")
            if self.load():
                self.logger.info('load success')
            else:
                self.logger.info('load failed')

        # 3. Forward computations (see the methods below):
        #       pre_train_er_forward
        #       pre_train_s_forward
        #       joint_train_gs_forward
        #       joint_train_er_forward
        #       joint_train_d_forward

        # 4. Losses
        self.l_mse = nn.MSELoss()
        self.l_r = nn.L1Loss()
        self.l_bce = nn.BCEWithLogitsLoss()  # expects pre-sigmoid logits; unusable if D already applies a sigmoid

        # Setup optimizer
        if self.opt.mode == 'train':
            if self.gan_type != "WGAN":
                self.optimizer_e = optim.Adam(self.nete.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, 0.999))
                self.optimizer_r = optim.Adam(self.netr.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, 0.999))
                self.optimizer_g = optim.Adam(self.netg.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, 0.999))
                self.optimizer_d = optim.Adam(self.netd.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, 0.999))
                self.optimizer_s = optim.Adam(self.nets.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, 0.999))
            else:
                self.optimizer_e = optim.Adam(self.nete.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, 0.999))
                self.optimizer_r = optim.Adam(self.netr.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, 0.999))
                self.optimizer_g = optim.RMSprop(self.netg.parameters(), lr=self.opt.w_lr)
                self.optimizer_d = optim.RMSprop(self.netd.parameters(), lr=self.opt.w_lr)
                self.optimizer_s = optim.Adam(self.nets.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, 0.999))

        self.logger.info(opt)
        self.logger.info(self.scalar.statistics)

    def get_log_dir(self, run_id=''):
        # 模型参数
        num_layers = self.opt.num_layer
        hidden_size = self.opt.hidden_dim
        # w_lambda = self.opt.w_lambda
        layer_size = self.opt.layer_size

        # 训练参数
        epoch = self.opt.iteration
        lr = self.opt.lr
        beta = self.opt.beta1

        # 数据参数
        seq_len = self.opt.seq_len
        batch_size = self.opt.batch_size

        # 超参数 gamma 在impute目录下
        w_eat = self.opt.w_eta
        w_lambda = self.opt.w_lambda2
        w_zeta = self.opt.w_zeta

        if run_id == '':
            # run_id = '{}_{}_{}_{}_{}_{}_{}_{}'.format(num_layers, hidden_size, epoch, lr, beta, seq_len, batch_size,
            #                                         time.strftime('%y%m%d%H%M%S'))
            # 不要时间戳，没用
            run_id = '{}_{}_{}_{}_{}_{}_{}_{}_{}_{}'.\
                format(num_layers, hidden_size, epoch, lr, beta, seq_len, batch_size,
                       w_eat, w_lambda, w_zeta)
        dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        if self.opt.data_name == 'water':
            masked_indicator = self.opt.masked_indicator if self.opt.masked_indicator != '' else 'all'
            log_dir = os.path.join(dir, self.opt.output_dir, self.opt.GAN_type, self.opt.gru_type,
                                   run_id)
        else:
            log_dir = os.path.join(dir, self.opt.output_dir, self.opt.data_name, self.opt.module, run_id)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        return log_dir

    def get_logger(self, log_type='train'):
        """Create a logger writing ``<log_type>.log`` inside the log directory."""
        os.makedirs(self.log_dir, exist_ok=True)
        return get_logger(self.log_dir, log_type, log_filename=log_type + '.log')

    def seed(self, seed_value):
        # Check if seed is default value
        if seed_value == -1:
            return

        # Otherwise seed all functionality
        import random
        random.seed(seed_value)
        torch.manual_seed(seed_value)
        torch.cuda.manual_seed_all(seed_value)
        np.random.seed(seed_value)
        torch.backends.cudnn.deterministic = True

    def set_train_mode(self, train):
        # 没有说，用到某一部分，需要其他部分输出但是不更新的。所以可以同时设置状态
        #   联合训练这几个组件都需要更新
        #   预训练，用不到其他部分
        if train:
            self.nete.train()
            self.netr.train()
            self.nets.train()
            self.netg.train()
            self.netd.train()
        else:
            self.nete.train(False)
            self.netr.train(False)
            self.nets.train(False)
            self.netg.train(False)
            self.netd.train(False)

    def train(self):
        '''
        Try to load a trained model first; if none is available, run the three
        training phases (embedding pre-training, supervised pre-training,
        joint adversarial training), then generate synthetic data and evaluate
        it. A separate entry point could be exposed to evaluate new datasets.
        '''
        if self.opt.is_load == 'True':
            if self.load():
                self.logger.info('load success')
                return
            else:
                self.logger.info('load failed, start train')

        self.set_train_mode(True)
        # 1. Train the encoder/decoder (embedding) network via reconstruction error.
        self.logger.info('Start Embedding Network Training')
        self.logger.info('============================================================================')
        for itt in range(self.opt.iteration):  # TODO: 'iteration' counts mini-batches, not full passes over the dataset
            # Train for one iter
            er_loss = self.pre_train_one_iter_er()
            if (itt % self.opt.log_interval == 0):
                msg = 'Encoder training step: {}/{}    er_loss: {:.4f}'.format(itt, self.opt.iteration, math.sqrt(er_loss))
                self.logger.info(msg)
        self.logger.info('Finish Embedding Network Training')
        self.logger.info('============================================================================')

        # 2. Training only with supervised loss
        #    Only S is trained here, not G, because L_S does not involve G
        #    (it is the supervisor evaluated on embeddings of real data).
        self.logger.info('Start Training with Supervised Loss Only')
        self.logger.info('============================================================================')
        for itt in range(self.opt.iteration):
            # Train for one iter
            s_loss = self.pre_train_one_iter_s()  # this is G_loss_S
            if (itt % self.opt.log_interval == 0):
                msg = 'Superviser training step: {}/{}    s_loss: {:.4f}'.format(itt, self.opt.iteration, math.sqrt(s_loss))
                self.logger.info(msg)

        self.logger.info('Finish Training with Supervised Loss Only')
        self.logger.info('============================================================================')

        # 3. Joint (adversarial) training
        self.logger.info('Start Joint Training')
        self.logger.info('============================================================================')
        for itt in range(self.opt.iteration):
            self.epoch = itt
            # Train for one iteration.
            if self.gan_type != "WGAN":
                # Standard GAN: per iteration update G (and ER) twice, D once.
                for kk in range(2):
                    # Train G; updates both G and S. TODO: why update S as well?
                    # TODO: the goal is that S fed with G's output behaves like S fed
                    # with real embeddings E(x); two options: 1. keep S(E) fixed and
                    # move S(G) towards it, 2. move both towards each other.
                    g_loss_fake, jonit_g_ar, g_loss_distance = self.joint_train_one_iter_gs()
                    # Train the embedder; updates encoder, decoder and supervisor.
                    # G and the autoregressive part (AR/S) together form the generator,
                    # so adjusting the generator adjusts both; afterwards AR may have
                    # lost its guiding value, so it is re-fitted on real data. Compare
                    # professor forcing: there the teacher-forcing behaviour is a fixed
                    # guide, whereas AR here is itself a moving target.
                    joint_er_loss, joint_er_s = self.joint_train_one_iter_er()
                d_loss = self.joint_train_one_iter_d()  # this is D_loss
            else:
                # WGAN: train the critic several times per generator step.
                for kk in range(self.opt.w_d_iter):
                    d_loss = self.joint_train_one_iter_d()  # this is D_loss
                # Train G; updates both G and S. TODO: why update S as well?
                g_loss_fake, jonit_g_ar, g_loss_distance = self.joint_train_one_iter_gs()
                # Train the embedder; updates encoder and decoder.
                # TODO: same S(G)/S(E) alignment question as in the branch above.
                joint_er_loss, joint_er_s = self.joint_train_one_iter_er()

            if itt % self.opt.log_interval == 0:
                msg = 'Joint training step: {}/{}    d_loss: {:.4f}     joint_g_fake: {:.4f}    joint_g_ar:{:.4f}    ' \
                      'joint_g_distance:{:.4f}      joint_er_res_loss:{:.4f}  joint_er_ar:{:.4f}' \
                    .format(itt, self.opt.iteration, d_loss,
                            g_loss_fake, math.sqrt(jonit_g_ar), g_loss_distance,
                            math.sqrt(joint_er_loss), joint_er_s)
                self.logger.info(msg)
                self.save(itt)
        self.save()  # final checkpoint, saved under the default tag -1

        self.logger.info('Finish Joint Training')
        self.logger.info('============================================================================')

        self.set_train_mode(False)  # switch all components to eval mode

        self.logger.info('Start Synthetic Data Generation')
        self.logger.info('============================================================================')
        generated_data = self.generation()  # list of ndarrays
        self.logger.info('Finish Synthetic Data Generation')
        self.logger.info('============================================================================')

        self.logger.info('Mertic:')
        self.logger.info('============================================================================')
        # Do not evaluate against the training set: it may contain missing values,
        # which corrupts the prediction/classification metrics (a classifier could
        # flag anything full of zeros as real). Use the complete dataset instead;
        # shuffling is irrelevant here.
        ori_data = self.dataset.load_ori_data()
        metric_results = self.evaluation(ori_data, generated_data)
        self.logger.info('discriminative:{:.4f},predictive:{:.4f}'
                         .format(metric_results['discriminative'], metric_results['predictive']))

    def evaluation(self, ori_data, generated_data):
        """Score generated data against the original data.

        Runs the discriminative and predictive metrics ``metric_iteration``
        times each, drops the two highest and two lowest runs, averages the
        rest, and saves PCA / t-SNE visualizations.

        Args:
            ori_data: complete (non-missing) real sequences.
            generated_data: synthetic sequences to evaluate.

        Returns:
            dict: {'discriminative': float, 'predictive': float}.
        """
        metric_results = dict()

        # 1. Discriminative score
        discriminative_score = list()
        for _ in range(self.opt.metric_iteration):
            temp_disc = discriminative_score_metrics(ori_data, generated_data, self.device, self.log_dir, self.opt.dis_iteration)
            discriminative_score.append(temp_disc)
        # Drop the two largest and two smallest runs, average the remainder.
        discriminative_score = np.sort(discriminative_score)
        metric_results['discriminative'] = np.mean(discriminative_score[2:-2])

        # 2. Predictive score
        predictive_score = list()
        for tt in range(self.opt.metric_iteration):
            plot_pred = tt == 0  # only plot predictions on the first run
            temp_pred = predictive_score_metrics(ori_data, generated_data, self.device, self.log_dir, plot_pred)
            predictive_score.append(temp_pred.reshape(1)[0])
        predictive_score = np.sort(predictive_score)
        # Bug fix: trim two runs at EACH end ([2:-2]), matching the trimming of
        # the discriminative score above; the previous [2:-1] dropped only one
        # of the largest runs.
        metric_results['predictive'] = np.mean(predictive_score[2:-2])
        self.logger.info({"dis": discriminative_score, "pred": predictive_score})

        # 3. Visualization (PCA and tSNE)
        visualization(ori_data, generated_data, 'pca', self.log_dir)
        visualization(ori_data, generated_data, 'tsne', self.log_dir)

        return metric_results

    def generation(self):
        """Generate one synthetic sequence per training sample.

        The real data itself is fed to the generator as its input (a random
        noise vector is no longer used).

        Returns:
            list: one ndarray per sample, truncated to that sample's
            original length. Data stays on the normalized scale -- the
            downstream evaluation trains on it.
        """
        real_batch, _ = batch_generator(self.train_data, self.ori_time, self.data_num)
        gen_input = torch.tensor(real_batch, dtype=torch.float32).to(self.device)
        gen_delta = None
        if self.opt.gru_type == 'grui':
            gen_delta = torch.tensor(get_delta_pre(real_batch, self.delta_type), dtype=torch.float32).to(self.device)

        with torch.no_grad():
            latent = self.netg(gen_input, gen_delta)
            supervised = self.nets(latent, gen_delta)
            synthetic = self.netr(supervised, gen_delta)

        # Cut every generated sequence back to its original length.
        return [synthetic[i, :self.ori_time[i], :].detach().cpu().numpy()
                for i in range(self.data_num)]

    # Functions ending in *_forward run the forward pass and compute the loss;
    # functions named *_one_iter_* perform one full iteration (parameter update).
    def pre_train_er_forward(self, X, x_delta):
        M = (X != 0).int()
        H = self.nete(X, x_delta)
        X_tilde = self.netr(H, x_delta)
        E_loss_T0 = self.l_mse(X, torch.multiply(M, X_tilde))  # 只计算有效值的重构误差,x不用乘M，因为缺失x是0，有效M是1，
        E_loss0 = 10 * torch.sqrt(E_loss_T0)
        return E_loss0, E_loss_T0

    def joint_train_er_forward(self, X, x_delta):
        '''
        注意和与预训练ED网络不同，他还有L_S,这里没有更新S网络，但是需要L_s来评价E是否捕获时间依赖，因此需要L_s指导E
        联合训练 min(λL_S+L_R) λ=0.1
        '''
        M = (X != 0).int()
        # 1. 前向传播
        H = self.nete(X, x_delta)
        X_tilde = self.netr(H, x_delta)
        H_supervise = self.nets(H, x_delta)

        # 2. 计算损失
        G_loss_S = self.l_mse(H[:, 1:, :], H_supervise[:, :-1, :])
        E_loss_T0 = self.l_mse(X, torch.multiply(X_tilde, M))
        # E_loss0 = 10 * torch.sqrt(E_loss_T0)
        # E_loss = E_loss0 + 0.1 * G_loss_S  #  作为一个超参数
        E_loss0 = torch.sqrt(E_loss_T0)
        E_loss = E_loss0 + self.opt.w_lambda2 * G_loss_S  # lambda默认100，g_loss太小？
        return E_loss, E_loss_T0, G_loss_S

    def joint_train_d_forward(self, X, Z, x_delta, z_delta):
        # 1. 前向传播
        H = self.nete(X, x_delta)
        Y_real = self.netd(H, x_delta)

        E_hat = self.netg(X, x_delta)
        Y_fake_e = self.netd(E_hat, x_delta)

        H_hat = self.nets(E_hat, x_delta)
        Y_fake = self.netd(H_hat, x_delta)

        # 2. 计算损失
        # Discriminator loss，三部分
        if self.gan_type != "WGAN":
            D_loss_real = self.l_bce(Y_real, torch.ones_like(Y_real))
            D_loss_fake = self.l_bce(Y_fake, torch.zeros_like(Y_fake))
            D_loss_fake_e = self.l_bce(Y_fake_e, torch.zeros_like(Y_fake_e))
            # D_loss = D_loss_real + D_loss_fake + D_loss_fake_e * self.opt.w_gamma  # 这样相当于根据根据G调整了两次，根据真实数据调整了1次
            D_loss = 0.5*D_loss_real + 0.5*D_loss_fake + D_loss_fake_e
        else:
            D_loss = 0.5 * torch.mean(Y_fake) + 0.5 * torch.mean(Y_fake_e) - torch.mean(Y_real)
        return D_loss

    def joint_train_g_forward(self, X, Z, x_delta, z_delta):
        '''
        min(ηL_S + max L_U)   更新G的时候也要更新S，G_loss里面有G_loss_S
        解释：这里应该是避免S被带坏吧，如果没有G_loss_S，那么更新G，G的输出作为S输入，更新G使得让S输出一些能够欺骗D的，而不是时间依赖
        G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=g_vars + s_vars)
        '''

        M = (X != 0).int()
        # 1. 前向传播
        # Generator loss 分为3部分，
        # 1. Adversarial loss，GAN部分的损失
        # 前向传播
        E_hat = self.netg(X, x_delta)
        Y_fake_e = self.netd(E_hat, x_delta)
        H_hat = self.nets(E_hat, x_delta)
        Y_fake = self.netd(H_hat, x_delta)
        # 计算损失,让生成的数据分类为1（真实数据）
        G_loss_U = self.l_bce(Y_fake, torch.ones_like(Y_fake))  # 交叉熵
        G_loss_U_e = self.l_bce(Y_fake_e, torch.ones_like(Y_fake_e))

        # 2. Supervised loss
        # 前向传播
        H = self.nete(X, x_delta)
        H_supervise = self.nets(H, x_delta)
        # 计算损失
        G_loss_S = self.l_mse(H[:, 1:, :], H_supervise[:, :-1, :])

        # 3. Two Momments，让真实数据指导一下G
        # 前向传播
        X_hat = self.netr(E_hat, x_delta)  # 不要经过s的，target是原数据不是经过s的.
        # X_hat = self.netr(H_hat, x_delta)
        # #  将x_hat加上噪音然后添加到训练集中, 只有G训练三分之一迭代后，才加入训练集
        # if self.opt.is_parallel == 'True' and self.epoch > self.opt.iteration/3:
        #     x_add_noise = wgn(X_hat.detach().cpu().numpy(), snr=self.opt.snr)
        #     x_add_noise = x_add_noise * M.detach().cpu().numpy()
        #     self.train_data = np.concatenate([self.train_data, x_add_noise], axis=0)
        # 计算损失,用真实数据指导G，让他们具有相同的均值和标准差，注意这里用的不是重构误差，而是均值和标准差，因为生成数据和真实数据不一定是接近的，而是分布接近
        X_hat = torch.multiply(X_hat, M)  #不能mask 如果mask那么G直接恒等映射岂不是距离为0？所以生成的数据不应该Mask
        X = torch.multiply(X, M)
        # # |std(a)-std(b)|  torch.var(X_hat, [0]) [seq_len, feature_dim]，每个时间点，每个维度的均值和方差，样本量的bsz
        # G_loss_V1 = torch.mean(torch.abs(torch.sqrt(torch.var(X_hat, [0]) + 1e-6) - torch.sqrt(torch.var(X, [0]) + 1e-6)))
        # # |mean(a) - mean(b)|
        # G_loss_V2 = torch.mean(torch.abs(torch.mean(X_hat, [0]) - torch.mean(X, [0])))
        # G_loss_V = G_loss_V1 + G_loss_V2
        G_loss_V = torch.sqrt(self.l_mse(X, X_hat))

        # 4. Summation lambda  w_gamma=1
        if self.opt.GAN_type != 'WGAN':
            # G_loss = G_loss_U + self.opt.w_gamma * G_loss_U_e + 100 * torch.sqrt(G_loss_S) + 100 * G_loss_V  #  作为超参数 100 会不会太高了？
            G_loss = 0.5*G_loss_U + 0.5*G_loss_U_e + self.opt.w_eta * torch.sqrt(G_loss_S) + self.opt.w_zeta * G_loss_V
        else:
            G_loss = -0.5 * (torch.mean(Y_fake_e) + torch.mean(Y_fake_e)) + self.opt.w_eta * torch.sqrt(
                G_loss_S) + self.opt.w_zeta * G_loss_V
        return G_loss, G_loss_U, G_loss_S, G_loss_V

    def pre_train_s_forward(self, X, x_delta):
        # 1. 前向传播
        M = (X!=0).int()
        H = self.nete(X, x_delta)
        H_supervise = self.nets(H, x_delta)

        # 2. 计算损失
        # pred = H_supervise[:, :-1, :]
        # todo 这里有问题，因为M是[bsz, seq_len, feature_size]，而pred是[bsz,seq_len,layer_size]，这个影响不大不要了
        # pred = torch.multiply(pred, M[:, 1:, -1:])
        # target = H[:, 1:, :]
        # target = torch.multiply(target, M[:, 1:, -1:])
        # G_loss_S = self.l_mse(pred, target)
        G_loss_S = self.l_mse(H[:, 1:, :], H_supervise[:, :-1, :])

        return G_loss_S

    def pre_train_one_iter_er(self):
        """Run one embedding pre-training iteration (encoder + recovery).

        Samples a mini-batch, computes the reconstruction loss and takes one
        optimizer step on both the encoder and the recovery network.

        Returns:
            float: the raw (unscaled) reconstruction MSE of this iteration.
        """
        # Sample one mini-batch of real sequences.
        batch, _ = batch_generator(self.train_data, self.ori_time, self.opt.batch_size)
        inputs = torch.tensor(batch, dtype=torch.float32).to(self.device)  # [bsz, seq_len, feature_dim]
        delta = None
        if self.opt.gru_type == 'grui':
            delta = torch.tensor(get_delta_pre(batch, self.delta_type), dtype=torch.float32).to(self.device)

        # Forward pass + loss.
        scaled_loss, raw_loss = self.pre_train_er_forward(inputs, delta)

        # Backward pass and parameter update for encoder and recovery.
        self.optimizer_e.zero_grad()
        self.optimizer_r.zero_grad()
        scaled_loss.backward(retain_graph=True)
        self.optimizer_e.step()
        self.optimizer_r.step()

        return float(raw_loss.detach().cpu().numpy())

    def pre_train_one_iter_s(self):
        """Run one supervisor pre-training iteration.

        Returns:
            float: the supervised loss of this iteration.
        """
        # Sample one mini-batch of real sequences.
        batch, _ = batch_generator(self.train_data, self.ori_time, self.opt.batch_size)
        inputs = torch.tensor(batch, dtype=torch.float32).to(self.device)
        delta = None
        if self.opt.gru_type == 'grui':
            delta = torch.tensor(get_delta_pre(batch, self.delta_type), dtype=torch.float32).to(self.device)

        # Forward pass + loss.
        supervised_loss = self.pre_train_s_forward(inputs, delta)

        # Backward pass and supervisor update.
        self.optimizer_s.zero_grad()
        supervised_loss.backward(retain_graph=True)
        self.optimizer_s.step()

        return float(supervised_loss.detach().cpu().numpy())

    def joint_train_one_iter_gs(self):
        """Run one joint iteration updating generator and supervisor.

        Returns:
            tuple: (adversarial loss, supervised loss, distance loss) of this
            iteration, all as floats.
        """
        # Sample one random mini-batch of real sequences.
        batch, _ = batch_generator(self.train_data, self.ori_time, self.opt.batch_size)
        inputs = torch.tensor(batch, dtype=torch.float32).to(self.device)
        delta = None
        if self.opt.gru_type == 'grui':
            delta = torch.tensor(get_delta_pre(batch, self.delta_type), dtype=torch.float32).to(self.device)

        # A random noise vector is no longer used as the generator input.
        noise, noise_delta = None, None

        # Forward pass + loss.
        total_loss, adv_loss, sup_loss, dist_loss = self.joint_train_g_forward(inputs, noise, delta, noise_delta)

        # Backward pass; update both the generator and the supervisor.
        self.optimizer_g.zero_grad()
        self.optimizer_s.zero_grad()
        total_loss.backward(retain_graph=True)
        self.optimizer_g.step()
        self.optimizer_s.step()

        return (float(adv_loss.detach().cpu().numpy()),
                float(sup_loss.detach().cpu().numpy()),
                float(dist_loss.detach().cpu().numpy()))

    def joint_train_one_iter_er(self):
        """Run one joint iteration updating encoder and recovery networks.

        Returns:
            tuple: (reconstruction MSE, supervised loss) of this iteration,
            both as floats.
        """
        # Sample one mini-batch of real sequences.
        batch, _ = batch_generator(self.train_data, self.ori_time, self.opt.batch_size)
        delta = None
        if self.opt.gru_type == 'grui':
            delta = torch.tensor(get_delta_pre(batch, self.delta_type), dtype=torch.float32).to(self.device)
        inputs = torch.tensor(batch, dtype=torch.float32).to(self.device)

        # Forward pass + loss.
        total_loss, rec_loss, sup_loss = self.joint_train_er_forward(inputs, delta)

        # Backward pass; update encoder and recovery only (the supervisor is
        # updated in joint_train_one_iter_gs, not here).
        self.optimizer_e.zero_grad()
        self.optimizer_r.zero_grad()
        total_loss.backward(retain_graph=True)
        self.optimizer_e.step()
        self.optimizer_r.step()

        return float(rec_loss.detach().cpu().numpy()), float(sup_loss.detach().cpu().numpy())

    def joint_train_one_iter_d(self):
        """Run one discriminator iteration.

        For a standard GAN the discriminator is only updated while its loss is
        above 0.15 (so it does not become too strong); for WGAN the critic is
        always updated and its weights are clipped afterwards.

        Returns:
            float: the discriminator loss of this iteration.
        """
        # Sample one mini-batch of real sequences.
        batch, _ = batch_generator(self.train_data, self.ori_time, self.opt.batch_size)
        inputs = torch.tensor(batch, dtype=torch.float32).to(self.device)
        delta = None
        if self.opt.gru_type == 'grui':
            delta = torch.tensor(get_delta_pre(batch, self.delta_type), dtype=torch.float32).to(self.device)

        # Random generator input is disabled.
        noise, noise_delta = None, None

        # Forward pass + loss.
        d_loss = self.joint_train_d_forward(inputs, noise, delta, noise_delta)
        if self.gan_type == "WGAN":
            self.optimizer_d.zero_grad()
            d_loss.backward(retain_graph=True)
            self.optimizer_d.step()
            # Clip critic weights (vanilla WGAN Lipschitz constraint).
            for param in self.netd.parameters():
                param.data.clamp_(-self.opt.w_clip_c, self.opt.w_clip_c)
        elif float(d_loss.detach().cpu().numpy()) > 0.15:
            self.optimizer_d.zero_grad()
            d_loss.backward(retain_graph=True)
            self.optimizer_d.step()
        return float(d_loss.detach().cpu().numpy())

    def save(self, epoch=-1):
        # -1表示最后一个epoch得
        model_dir = os.path.join(self.log_dir, 'saved_model')
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
        model_path = os.path.join(model_dir, '{}.pth'.format(epoch))
        torch.save({
            # 'min': self.min_val,
            # 'max': self.max_val,
            'nete': self.nete.state_dict(),
            'netr': self.netr.state_dict(),
            'netg': self.netg.state_dict(),
            'netd': self.netd.state_dict(),
            'nets': self.nets.state_dict()
        }, model_path)

    def load(self, model_path=None):
        if model_path is None:
            model_path = os.path.join(self.log_dir, 'saved_model', '-1.pth')
        try:
            checkpoint = torch.load(model_path)
            # 加载min，max，用于直接生成数据，todo 暂时没啥用，因为需要插值，所以一定会加载数据，后面可能实际应用需要对一个序列插值，而不是整个数据集
            # self.min_val = checkpoint['min']
            # self.max_val = checkpoint['max']
            # 加载模型
            self.nete.load_state_dict(checkpoint['nete'])
            self.netr.load_state_dict(checkpoint['netr'])
            self.netg.load_state_dict(checkpoint['netg'])
            self.netd.load_state_dict(checkpoint['netd'])
            self.nets.load_state_dict(checkpoint['nets'])
            return True
        except Exception as e:
            self.logger.error(e)
            return False

    def imputation(self, masked_data):
        """Impute missing values by optimizing the generator input z per batch
        (generator/discriminator weights stay fixed), then evaluate and save
        the imputed values as CSV.

        Args:
            masked_data: normalized sequences with missing entries set to 0.

        Returns:
            np.ndarray: renormalized imputed values of shape
            [n_samples, feature_dim] (first time step of each sequence).
        """
        masked_indicator = self.opt.masked_indicator if self.opt.masked_indicator != '' else 'all'
        impute_id = '{}_{}_{}'.format(masked_indicator, self.opt.impute_iter, self.opt.w_lambda)
        self.log_dir = os.path.join(self.log_dir, 'masked_ratio-'+str(self.opt.masked_ratio), impute_id)
        ori_data = self.dataset.load_ori_data()
        impute_logger = self.get_logger(log_type='impute')
        self.logger = impute_logger
        imputed_data = []

        batch_id = 1
        for x in next_batch(masked_data, self.opt.batch_size):  # impute batch by batch; masked_data is not shuffled
            # 1. Get inputs x and z; a fresh trainable z for every batch.
            neti = Imputation(self.opt, x)
            # TODO: should the generated data be treated as complete data?
            z_delta = torch.tensor(get_delta_pre(x, self.delta_type), dtype=torch.float32).to(
                self.device) if self.opt.gru_type == 'grui' else None

            x = torch.tensor(x, dtype=torch.float32).to(self.device)  # [bsz, seq_len, feature_dim]
            m = (x != 0).int()
            if self.gan_type != "WGAN":
                optimizer_i = optim.Adam(neti.parameters(), lr=1*self.opt.lr, betas=(self.opt.beta1, 0.999))
            else:
                optimizer_i = optim.RMSprop(neti.parameters(), lr=self.opt.w_lr)
            self.set_train_mode(True)  # train mode must be on, otherwise backward is not possible
            # z = neti().to(self.device)  # fetch the updated z
            # z_delta = torch.tensor(get_delta_pre(z.detach().cpu().numpy(), self.delta_type),
            #                        dtype=torch.float32).to(self.device) if self.opt.gru_type == 'grui' else None
            # Optimize z for impute_iter steps.
            for i in range(self.opt.impute_iter):
                # 2. Forward pass: impute_out = G(z); generate data.
                X_hat, Y_fake_e, Y_fake = self.impute_forward(neti, z_delta)

                # 3. Compute the loss: Limpute = Lr + lambda * Ld
                # Reconstruction error on observed entries only.
                loss_rec = self.l_mse(torch.multiply(X_hat, m), x)  # x need not be masked: its missing entries are already 0
                # Discriminator loss: adjust z so the output looks more real.
                if self.gan_type != "WGAN":
                    D_loss_fake = self.l_bce(Y_fake, torch.ones_like(Y_fake))
                    D_loss_fake_e = self.l_bce(Y_fake_e, torch.ones_like(Y_fake_e))
                    dis_loss = 0.5 * (D_loss_fake + D_loss_fake_e)
                else:
                    dis_loss = -0.5 * (torch.mean(Y_fake) + torch.mean(Y_fake_e))
                # TODO: unrelated to this issue, w_lambda=0 in current experiments
                impute_loss = loss_rec + self.opt.w_lambda*dis_loss

                # 4. Backward pass.
                optimizer_i.zero_grad()
                impute_loss.backward()

                # 5. Update parameters, i.e. update z.
                optimizer_i.step()

                if (i+1) % self.opt.log_interval == 0:
                    impute_logger.info('imputing: batch_id:{} {}/{} impute_loss:{:.4f}    reconstruction_loss:{:.4f}   discrimination_loss:{:.4f}'
                                       .format(batch_id, i+1, self.opt.impute_iter, impute_loss.item(), loss_rec.item(), dis_loss.item()))
            self.set_train_mode(False)  # TODO: verify whether eval mode needs restoring here
            X_hat, _, _ = self.impute_forward(neti, z_delta)

            # Keep observed values, fill the missing entries from X_hat.
            imputed_out = torch.multiply((1-m), X_hat) + x  # [bsz, seq_len, feature_dim]
            imputed_data.append(imputed_out)

            batch_id += 1
        imputed_data = torch.cat(imputed_data, 0).detach().cpu().numpy()  # [n_samples, seq_len, feature_dim]

        impute_logger.info('Mertic:')
        impute_logger.info('============================================================================')
        ori_data = ori_data[:len(imputed_data)]  # ori_data has no missing values; keep only n_samples entries
        metric_results = self.evaluation(ori_data, imputed_data)
        mae_score = mae_score_metrics(ori_data, imputed_data)
        metric_results['mae'] = mae_score
        impute_logger.info('discriminative:{:.4f},predictive:{:.4f},mae:{:.4f}'
                           .format(metric_results['discriminative'],metric_results['predictive'],metric_results['mae']))

        imputed_data = imputed_data[:, 0, :]  # [n_sample, feature_dim]
        imputed_data = self.scalar.renormalize(imputed_data)
        self.save_csv(imputed_data, 'imputed_data')

        return imputed_data

    def impute_forward(self, neti, z_delta):
        z = neti().to(self.device)  # 获取更新后的z
        # z_delta =
        # 生成数据 todo 这里经过了s
        E_hat = self.netg(z, z_delta)
        H_hat = self.nets(E_hat, z_delta)
        # X_hat = self.netr(H_hat, z_delta)
        X_hat = self.netr(E_hat, z_delta)
        Y_fake_e = self.netd(E_hat, z_delta)
        Y_fake = self.netd(H_hat, z_delta)
        return X_hat, Y_fake_e, Y_fake

    def save_csv(self, data, file_name):
        data_path = os.path.join(self.log_dir, file_name+'.csv')
        df = pd.DataFrame(data)
        df.to_csv(data_path, index=False)
