import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
# from kgdlg.Model import NMTModel,similar_PriorNet,similar_RecogNet,MultiGen,diverse_PriorNet,diverse_RecogNet
class diverse_PriorNet(nn.Module):
    """Prior network for the "diverse" latent variable.

    Encodes the source context x and predicts the mean and variance of the
    diverse latent Gaussian conditioned on x.
    """

    def __init__(self, encoder):
        super(diverse_PriorNet, self).__init__()
        self.encoder = encoder
        # Predicts the Gaussian mean from the encoder hidden state.
        # Input dim is 2x hidden_size (presumably a bidirectional encoder,
        # matching the *2 used throughout this file -- TODO confirm).
        self.mean_linear = nn.Linear(self.encoder.hidden_size * 2, self.encoder.hidden_size)
        # Predicts the Gaussian variance from the same hidden state.
        self.variance_linear = nn.Linear(self.encoder.hidden_size * 2, self.encoder.hidden_size)

    def encode(self, inputs, lengths):
        """Run the encoder on a length-sorted batch, then restore batch order."""
        order_inputs, sorted_length, sorted_idx = self.order_sequence(inputs, lengths)
        enc_outputs, enc_hidden = self.encoder(order_inputs, sorted_length, None)
        enc_hidden = self.reorder_enc_seqence(enc_hidden, sorted_idx)
        enc_outputs = self.reorder_enc_seqence(enc_outputs, sorted_idx)
        return enc_outputs, enc_hidden

    def order_sequence(self, inputs, lengths):
        """Sort the batch (dim 1 of ``inputs``) by descending length.

        Returns the reordered inputs, the sorted lengths, and the permutation
        (original index of each sorted position) needed to undo the sort.
        """
        # Python's sort is stable, so equal lengths keep their original order,
        # exactly as the previous pair-building implementation did.
        sorted_idx = sorted(range(len(lengths)), key=lambda i: lengths[i], reverse=True)
        sorted_length = [lengths[i] for i in sorted_idx]
        order_inputs = inputs[:, sorted_idx]
        return order_inputs, sorted_length, sorted_idx

    def reorder_enc_seqence(self, inputs, sorted_idx):
        """Invert the length-sort permutation on the batch dimension (dim 1).

        Fix: the old loop variable shadowed the ``sorted_idx`` parameter;
        computing the inverse permutation directly avoids that.
        """
        # raw_indices[i] is the position in the sorted batch holding original item i.
        raw_indices = sorted(range(len(sorted_idx)), key=lambda pos: sorted_idx[pos])
        return inputs[:, raw_indices, :]

    def save_checkpoint(self, epoch, opt, filename):
        """Persist model weights plus training metadata to ``filename``."""
        torch.save({'model_dict': self.state_dict(),
                    'opt': opt,
                    'epoch': epoch,
                    },
                   filename)

    def load_checkpoint(self, filename):
        """Load weights saved by :meth:`save_checkpoint`; returns the stored epoch."""
        # map_location lets GPU checkpoints load on CPU-only hosts.
        ckpt = torch.load(filename, map_location=lambda storage, loc: storage)
        self.load_state_dict(ckpt['model_dict'])
        return ckpt['epoch']

    def gaussian_encoder(self, src_inputs, lengths):
        """Encode ``src_inputs`` and return the predicted (mean, variance)."""
        enc_outputs, enc_hidden = self.encode(src_inputs, lengths)
        mean = self.mean_linear(enc_hidden)
        variance = self.variance_linear(enc_hidden)
        return mean, variance

class diverse_RecogNet(nn.Module):
    """Recognition (posterior) network for the "diverse" latent variable.

    Encodes each candidate response y_i with the target encoder and predicts
    a per-response (mean, variance) offset used on top of the prior.
    """

    def __init__(self, src_encoder, tgt_encoder):
        super(diverse_RecogNet, self).__init__()
        # src_encoder is only read for its hidden_size in this class; the
        # actual encoding is done by tgt_encoder.
        self.src_encoder = src_encoder
        self.tgt_encoder = tgt_encoder

        # Input dim is 2x hidden_size, matching the *2 used throughout this
        # file (presumably a bidirectional encoder -- TODO confirm).
        self.mean_linear = nn.Linear(self.src_encoder.hidden_size * 2,
                                     self.src_encoder.hidden_size)
        self.variance_linear = nn.Linear(self.src_encoder.hidden_size * 2,
                                         self.src_encoder.hidden_size)

    def tgt_encode(self, inputs, lengths):
        """Run the target encoder on a length-sorted batch, restoring order after."""
        order_inputs, sorted_length, sorted_idx = self.order_sequence(inputs, lengths)
        enc_outputs, enc_hidden = self.tgt_encoder(order_inputs, sorted_length, None)
        enc_hidden = self.reorder_enc_seqence(enc_hidden, sorted_idx)
        enc_outputs = self.reorder_enc_seqence(enc_outputs, sorted_idx)
        return enc_outputs, enc_hidden

    def order_sequence(self, inputs, lengths):
        """Sort the batch (dim 1 of ``inputs``) by descending length.

        Returns the reordered inputs, the sorted lengths, and the permutation
        (original index of each sorted position) needed to undo the sort.
        """
        # Stable sort keeps the original order for equal lengths.
        sorted_idx = sorted(range(len(lengths)), key=lambda i: lengths[i], reverse=True)
        sorted_length = [lengths[i] for i in sorted_idx]
        order_inputs = inputs[:, sorted_idx]
        return order_inputs, sorted_length, sorted_idx

    def reorder_enc_seqence(self, inputs, sorted_idx):
        """Invert the length-sort permutation on the batch dimension (dim 1).

        Fix: the old loop variable shadowed the ``sorted_idx`` parameter;
        computing the inverse permutation directly avoids that.
        """
        raw_indices = sorted(range(len(sorted_idx)), key=lambda pos: sorted_idx[pos])
        return inputs[:, raw_indices, :]

    def save_checkpoint(self, epoch, opt, filename):
        """Persist model weights plus training metadata to ``filename``."""
        torch.save({'model_dict': self.state_dict(),
                    'opt': opt,
                    'epoch': epoch,
                    },
                   filename)

    def load_checkpoint(self, filename):
        """Load weights saved by :meth:`save_checkpoint`; returns the stored epoch."""
        # map_location lets GPU checkpoints load on CPU-only hosts.
        ckpt = torch.load(filename, map_location=lambda storage, loc: storage)
        self.load_state_dict(ckpt['model_dict'])
        return ckpt['epoch']

    def gaussian_encoder(self, inputs_list, tgt_legnths):
        """For each response in ``inputs_list``, predict its (mean, variance) offsets.

        Returns two parallel lists, one entry per response.
        """
        y_mean_diverse_list = []
        y_variance_diverse_list = []
        for inputs, lengths in zip(inputs_list, tgt_legnths):
            # Hidden state of the response, then the mean/variance offsets.
            enc_outputs, enc_hidden = self.tgt_encode(inputs, lengths)
            y_mean_diverse_list.append(self.mean_linear(enc_hidden))
            y_variance_diverse_list.append(self.variance_linear(enc_hidden))
        return y_mean_diverse_list, y_variance_diverse_list


# The only difference from diverse_RecogNet is that this network's input is x
class similar_PriorNet(nn.Module):
    """Prior network for the "similar" latent variable.

    Encodes the source context x and predicts the mean and variance of the
    similar latent Gaussian conditioned on x.
    """

    def __init__(self, encoder):
        super(similar_PriorNet, self).__init__()
        self.encoder = encoder
        # Predicts the Gaussian mean from the encoder hidden state.
        # Input dim is 2x hidden_size, matching the *2 used throughout this
        # file (presumably a bidirectional encoder -- TODO confirm).
        self.mean_linear = nn.Linear(self.encoder.hidden_size * 2, self.encoder.hidden_size)
        # Predicts the Gaussian variance from the same hidden state.
        self.variance_linear = nn.Linear(self.encoder.hidden_size * 2, self.encoder.hidden_size)

    def encode(self, inputs, lengths):
        """Run the encoder on a length-sorted batch, then restore batch order."""
        order_inputs, sorted_length, sorted_idx = self.order_sequence(inputs, lengths)
        enc_outputs, enc_hidden = self.encoder(order_inputs, sorted_length, None)
        enc_hidden = self.reorder_enc_seqence(enc_hidden, sorted_idx)
        enc_outputs = self.reorder_enc_seqence(enc_outputs, sorted_idx)
        return enc_outputs, enc_hidden

    def order_sequence(self, inputs, lengths):
        """Sort the batch (dim 1 of ``inputs``) by descending length.

        Returns the reordered inputs, the sorted lengths, and the permutation
        (original index of each sorted position) needed to undo the sort.
        """
        # Stable sort keeps the original order for equal lengths.
        sorted_idx = sorted(range(len(lengths)), key=lambda i: lengths[i], reverse=True)
        sorted_length = [lengths[i] for i in sorted_idx]
        order_inputs = inputs[:, sorted_idx]
        return order_inputs, sorted_length, sorted_idx

    def reorder_enc_seqence(self, inputs, sorted_idx):
        """Invert the length-sort permutation on the batch dimension (dim 1).

        Fix: the old loop variable shadowed the ``sorted_idx`` parameter;
        computing the inverse permutation directly avoids that.
        """
        raw_indices = sorted(range(len(sorted_idx)), key=lambda pos: sorted_idx[pos])
        return inputs[:, raw_indices, :]

    def save_checkpoint(self, epoch, opt, filename):
        """Persist model weights plus training metadata to ``filename``."""
        torch.save({'model_dict': self.state_dict(),
                    'opt': opt,
                    'epoch': epoch,
                    },
                   filename)

    def load_checkpoint(self, filename):
        """Load weights saved by :meth:`save_checkpoint`; returns the stored epoch."""
        # map_location lets GPU checkpoints load on CPU-only hosts.
        ckpt = torch.load(filename, map_location=lambda storage, loc: storage)
        self.load_state_dict(ckpt['model_dict'])
        return ckpt['epoch']

    def gaussian_encoder(self, src_inputs, lengths):
        """Encode ``src_inputs`` and return the predicted (mean, variance)."""
        enc_outputs, enc_hidden = self.encode(src_inputs, lengths)
        mean = self.mean_linear(enc_hidden)
        variance = self.variance_linear(enc_hidden)
        return mean, variance

class similar_RecogNet(nn.Module):
    """Recognition (posterior) network for the "similar" latent variable.

    Encodes each candidate response y_i, averages their hidden states to
    predict one shared Gaussian (mean, variance), and expresses each response
    as a normalized offset from that Gaussian.
    """

    def __init__(self, src_encoder, tgt_encoder):
        super(similar_RecogNet, self).__init__()
        self.src_encoder = src_encoder
        self.tgt_encoder = tgt_encoder

        # Input dim is 2x hidden_size, matching the *2 used throughout this
        # file (presumably a bidirectional encoder -- TODO confirm).
        self.mean_linear = nn.Linear(self.src_encoder.hidden_size * 2,
                                     self.src_encoder.hidden_size)
        self.variance_linear = nn.Linear(self.src_encoder.hidden_size * 2,
                                         self.src_encoder.hidden_size)
        # Projects an entire response y into the latent space.
        self.y_value = nn.Linear(self.src_encoder.hidden_size * 2,
                                 self.src_encoder.hidden_size)

    def src_encode(self, inputs, lengths):
        """Run the source encoder on a length-sorted batch, restoring order after."""
        order_inputs, sorted_length, sorted_idx = self.order_sequence(inputs, lengths)
        enc_outputs, enc_hidden = self.src_encoder(order_inputs, sorted_length, None)
        enc_hidden = self.reorder_enc_seqence(enc_hidden, sorted_idx)
        enc_outputs = self.reorder_enc_seqence(enc_outputs, sorted_idx)
        return enc_outputs, enc_hidden

    def tgt_encode(self, inputs, lengths):
        """Run the target encoder on a length-sorted batch, restoring order after."""
        order_inputs, sorted_length, sorted_idx = self.order_sequence(inputs, lengths)
        enc_outputs, enc_hidden = self.tgt_encoder(order_inputs, sorted_length, None)
        enc_hidden = self.reorder_enc_seqence(enc_hidden, sorted_idx)
        enc_outputs = self.reorder_enc_seqence(enc_outputs, sorted_idx)
        return enc_outputs, enc_hidden

    def order_sequence(self, inputs, lengths):
        """Sort the batch (dim 1 of ``inputs``) by descending length.

        Returns the reordered inputs, the sorted lengths, and the permutation
        (original index of each sorted position) needed to undo the sort.
        """
        # Stable sort keeps the original order for equal lengths.
        sorted_idx = sorted(range(len(lengths)), key=lambda i: lengths[i], reverse=True)
        sorted_length = [lengths[i] for i in sorted_idx]
        order_inputs = inputs[:, sorted_idx]
        return order_inputs, sorted_length, sorted_idx

    def reorder_enc_seqence(self, inputs, sorted_idx):
        """Invert the length-sort permutation on the batch dimension (dim 1).

        Fix: the old loop variable shadowed the ``sorted_idx`` parameter;
        computing the inverse permutation directly avoids that.
        """
        raw_indices = sorted(range(len(sorted_idx)), key=lambda pos: sorted_idx[pos])
        return inputs[:, raw_indices, :]

    def save_checkpoint(self, epoch, opt, filename):
        """Persist model weights plus training metadata to ``filename``."""
        torch.save({'model_dict': self.state_dict(),
                    'opt': opt,
                    'epoch': epoch,
                    },
                   filename)

    def load_checkpoint(self, filename):
        """Load weights saved by :meth:`save_checkpoint`; returns the stored epoch."""
        # map_location lets GPU checkpoints load on CPU-only hosts.
        ckpt = torch.load(filename, map_location=lambda storage, loc: storage)
        self.load_state_dict(ckpt['model_dict'])
        return ckpt['epoch']

    def gaussian_encoder(self, inputs_list, tgt_legnths):
        """Encode a group of responses; return (mean, variance, per-response errors).

        The group's hidden states are averaged to predict one shared Gaussian;
        each response is then expressed as its normalized deviation from it.
        """
        tgt_hidden_all = []
        y_value_all = []
        for inputs, lengths in zip(inputs_list, tgt_legnths):
            enc_outputs, enc_hidden = self.tgt_encode(inputs, lengths)
            tgt_hidden_all.append(enc_hidden)
            y_value_all.append(self.y_value(enc_hidden))
        # Average over the response group drives the shared Gaussian.
        hidden_all_mean = torch.mean(torch.stack(tgt_hidden_all), 0)

        mean = self.mean_linear(hidden_all_mean)
        variance = self.variance_linear(hidden_all_mean)

        # NOTE(review): ``variance`` is a raw linear output, so it can be zero
        # or negative and this division may produce inf/nan -- confirm intended.
        y_error_list = [(y_value - mean) / variance for y_value in y_value_all]

        return mean, variance, y_error_list

class NMTModel(nn.Module):
    """Standard encoder-decoder seq2seq model.

    Wraps an embedding pair, an encoder, a decoder, and an output generator.
    """

    def __init__(self, enc_embedding, dec_embedding, encoder, decoder, generator):
        super(NMTModel, self).__init__()
        self.enc_embedding = enc_embedding
        self.dec_embedding = dec_embedding
        self.encoder = encoder
        self.decoder = decoder
        self.generator = generator

    def forward(self, src_inputs, tgt_inputs, src_lengths):
        """Encode the source, then decode the target; returns (dec_outputs, attn)."""
        # Run words through the encoder.
        enc_outputs, enc_hidden = self.encode(src_inputs, src_lengths, None)

        dec_init_hidden = self.init_decoder_state(enc_hidden, enc_outputs)

        dec_outputs, dec_hiddens, attn = self.decode(
            tgt_inputs, enc_outputs, dec_init_hidden
        )

        return dec_outputs, attn

    def encode(self, input, lengths=None, hidden=None):
        """Embed ``input`` and run the encoder; returns (enc_outputs, enc_hidden).

        NOTE(review): ``hidden`` is accepted but never forwarded -- the encoder
        always receives None. Confirm this is intended.
        """
        emb = self.enc_embedding(input)
        enc_outputs, enc_hidden = self.encoder(emb, lengths, None)
        return enc_outputs, enc_hidden

    def init_decoder_state(self, enc_hidden, context):
        """Initial decoder state is simply the final encoder hidden state."""
        return enc_hidden

    def decode(self, input, context, state):
        """Embed target tokens and run the decoder over the encoder context."""
        emb = self.dec_embedding(input)
        dec_outputs, dec_hiddens, attn = self.decoder(emb, context, state)
        return dec_outputs, dec_hiddens, attn

    def save_checkpoint(self, epoch, opt, filename):
        """Save each sub-module's weights plus training metadata to ``filename``."""
        torch.save({'encoder_dict': self.encoder.state_dict(),
                    'decoder_dict': self.decoder.state_dict(),
                    'enc_embedding_dict': self.enc_embedding.state_dict(),
                    'dec_embedding_dict': self.dec_embedding.state_dict(),
                    'generator_dict': self.generator.state_dict(),
                    'opt': opt,
                    'epoch': epoch,
                    },
                   filename)

    def load_checkpoint(self, filename):
        """Restore weights saved by :meth:`save_checkpoint`; returns the stored epoch.

        Fix: pass ``map_location`` so GPU checkpoints load on CPU-only hosts,
        consistent with the other network classes in this file.
        """
        ckpt = torch.load(filename, map_location=lambda storage, loc: storage)
        self.enc_embedding.load_state_dict(ckpt['enc_embedding_dict'])
        self.dec_embedding.load_state_dict(ckpt['dec_embedding_dict'])
        self.encoder.load_state_dict(ckpt['encoder_dict'])
        self.decoder.load_state_dict(ckpt['decoder_dict'])
        self.generator.load_state_dict(ckpt['generator_dict'])
        return ckpt['epoch']

class MultiGen(nn.Module):
    def __init__(self, similar_prior_net, similar_recog_net,diverse_prior_net, diverse_recog_net,
                        encoder, decoder, generator,
                         mask_set,opt,diverse_PRnet= None):
        super(MultiGen, self).__init__()
        self.similar_prior_net = similar_prior_net
        self.similar_recog_net = similar_recog_net
        self.diverse_prior_net = diverse_prior_net
        self.diverse_recog_net = diverse_recog_net
        self.diverse_PRnet = diverse_PRnet

        self.encoder = encoder
        self.decoder = decoder
        self.generator = generator
        self.mask_set = mask_set
        self.train_mode = opt.train_mode
        self.dropout_set = opt.drop_out_set
        self.enc2dec = nn.Linear(self.decoder.hidden_size*2,self.decoder.hidden_size)
        self.fusion = nn.Linear(self.encoder.hidden_size*2,self.decoder.hidden_size)
        self.fc_z = nn.Linear(self.encoder.embedding.embedding_size*2,self.decoder.hidden_size)
        self.bow_fc = nn.Linear(self.decoder.hidden_size, encoder.embedding.input_size)



    def forward(self,src_inputs, src_lengths,
                        tgt_inputs_list, tgt_legnths):
        print("&&&"*60)
        print("当前的mode为：",self.train_mode)
        if self.train_mode == 100:
            # 这个模式下只有基本的seq_to_seq
            x_mean, x_variance = self.similar_prior_net.gaussian_encoder(src_inputs, src_lengths)
            similar_sample = torch.zeros(x_mean.size()).cuda()
            diverse_sample = torch.zeros(similar_sample.size()).cuda()

            diverse_sample = diverse_sample.to(torch.float32)
            similar_sample = similar_sample.to(torch.float32)

            cat_fusion = torch.cat((diverse_sample, similar_sample), dim=-1)
            latent_z_fusion = self.fusion(cat_fusion)

            kld_loss = torch.zeros([1, self.decoder.hidden_size]).cuda()
            enc_outputs, enc_hidden = self.encode(src_inputs, src_lengths)
            dec_init_hidden = self.enc2dec(torch.cat((enc_hidden, diverse_sample, similar_sample), -1))

            dec_outputs_list = []
            bow_logits_list = []
            for i in range(len(tgt_inputs_list)):
                dec_outputs, dec_hiddens, attn = self.decoder(
                    tgt_inputs_list[i], enc_outputs, dec_init_hidden, latent_z_fusion
                )
                bow_logits = self.bow_fc(latent_z_fusion)
                dec_outputs_list.append(dec_outputs)
                bow_logits_list.append(bow_logits)

        elif self.train_mode == 101:
            # 这个模式下，预训练similarity的prior，只用这个部分给decoder
            x_mean, x_variance = self.similar_prior_net.gaussian_encoder(src_inputs, src_lengths)
            error_similarity = torch.from_numpy(np.random.normal(0, 1, 1))
            kld_loss = torch.zeros([1,self.decoder.hidden_size]).cuda()
            similar_sample = x_mean+x_variance*(error_similarity.cuda())
            diverse_sample = torch.zeros(similar_sample.size()).cuda()

            diverse_sample = diverse_sample.to(torch.float32)
            similar_sample = similar_sample.to(torch.float32)

            cat_fusion = torch.cat((diverse_sample, similar_sample), dim=-1)
            latent_z_fusion = self.fusion(cat_fusion)

            enc_outputs, enc_hidden = self.encode(src_inputs, src_lengths)
            dec_init_hidden = self.enc2dec(torch.cat((enc_hidden, diverse_sample, similar_sample), -1))

            dec_outputs_list = []
            bow_logits_list = []
            for i in range(len(tgt_inputs_list)):
                dec_outputs, dec_hiddens, attn = self.decoder(
                    tgt_inputs_list[i], enc_outputs, dec_init_hidden, latent_z_fusion
                )
                bow_logits = self.bow_fc(latent_z_fusion)
                dec_outputs_list.append(dec_outputs)
                bow_logits_list.append(bow_logits)

        elif self.train_mode == 102:
            # 这个模式下，预训练diverity的prior，只用这个部分给decoder
            x_diverse_mean, x_diverse_variance = self.diverse_prior_net.gaussian_encoder(src_inputs, src_lengths)
            error_diversity = torch.from_numpy(np.random.normal(0, 1, 1))
            kld_loss = torch.zeros([1,self.decoder.hidden_size]).cuda()
            diverse_sample = x_diverse_mean+x_diverse_variance*(error_diversity.cuda())
            similar_sample = torch.zeros(diverse_sample.size()).cuda()

            diverse_sample = diverse_sample.to(torch.float32)
            similar_sample = similar_sample.to(torch.float32)

            cat_fusion = torch.cat((diverse_sample, similar_sample), dim=-1)
            latent_z_fusion = self.fusion(cat_fusion)

            enc_outputs, enc_hidden = self.encode(src_inputs, src_lengths)
            dec_init_hidden = self.enc2dec(torch.cat((enc_hidden, diverse_sample, similar_sample), -1))

            dec_outputs_list = []
            bow_logits_list = []
            for i in range(len(tgt_inputs_list)):
                dec_outputs, dec_hiddens, attn = self.decoder(
                    tgt_inputs_list[i], enc_outputs, dec_init_hidden, latent_z_fusion
                )
                bow_logits = self.bow_fc(latent_z_fusion)
                dec_outputs_list.append(dec_outputs)
                bow_logits_list.append(bow_logits)

        elif self.train_mode == 103:
            # 对于diverse部分：
            x_similar_mean, x_similar_variance = self.similar_prior_net.gaussian_encoder(src_inputs, src_lengths)
            error_similarity = torch.from_numpy(np.random.normal(0, 1, 1))
            kld_loss = torch.zeros([1, self.decoder.hidden_size]).cuda()
            similar_sample = self.drop_out_gate(x_similar_mean + x_similar_variance * (error_similarity.cuda()))

            x_diverse_mean, x_diverse_variance = self.diverse_prior_net.gaussian_encoder(src_inputs, src_lengths)
            error_diversity = torch.from_numpy(np.random.normal(0, 1, 1))
            kld_loss = torch.zeros([1, self.decoder.hidden_size]).cuda()
            diverse_sample = self.drop_out_gate(x_diverse_mean + x_diverse_variance * (error_diversity.cuda()))

            diverse_sample = diverse_sample.to(torch.float32)
            similar_sample = similar_sample.to(torch.float32)

            cat_fusion = torch.cat((diverse_sample, similar_sample), dim=-1)
            latent_z_fusion = self.fusion(cat_fusion)

            enc_outputs, enc_hidden = self.encode(src_inputs, src_lengths)
            dec_init_hidden = self.enc2dec(torch.cat((enc_hidden, diverse_sample, similar_sample), -1))

            dec_outputs_list = []
            bow_logits_list = []
            for i in range(len(tgt_inputs_list)):
                dec_outputs, dec_hiddens, attn = self.decoder(
                    tgt_inputs_list[i], enc_outputs, dec_init_hidden, latent_z_fusion
                )
                bow_logits = self.bow_fc(latent_z_fusion)
                dec_outputs_list.append(dec_outputs)
                bow_logits_list.append(bow_logits)

        elif self.train_mode == 104:
            # 对于diverse部分：
            x_diverse_mean, x_diverse_variance = self.diverse_prior_net.gaussian_encoder(src_inputs, src_lengths)

            y_mean_diverse_list, y_variance_diverse_list = self.diverse_recog_net.gaussian_encoder(tgt_inputs_list,
                                                                                                   tgt_legnths)
            error = torch.from_numpy(np.random.normal(0, 1, 1))

            diverse_sample_list = []
            for y_i in range(len(tgt_inputs_list)):
                sample_diverse = x_diverse_mean + self.drop_out_gate(y_mean_diverse_list[y_i]) + (x_diverse_variance + self.drop_out_gate(y_variance_diverse_list[y_i])) * (
                    error.cuda())
                diverse_sample_list.append(sample_diverse)


            # 对于similarity部分：
            # print("进入到similar的部分，即将进行prior")
            x_similar_mean, x_similar_variance = self.similar_prior_net.gaussian_encoder(src_inputs, src_lengths)
            # print("similar部分的prior结束，即将进行recognet")
            y_mean, y_variance, y_error_list = self.similar_recog_net.gaussian_encoder(tgt_inputs_list, tgt_legnths)
            # print("similarrecognet的部分已经结束开始进行采样")
            similar_sample_list = []
            for y_error in y_error_list:
                sample_y = x_similar_mean + x_similar_variance * self.drop_out_gate(y_error)
                similar_sample_list.append(sample_y)
            # print("采样结束")

            # print("进行similar部分的KL散度计算")
            # 检查结束，是两个log计算后得到了nan
            kld_loss = self.gaussian_kld(y_mean, y_variance, x_similar_mean, x_similar_variance)
            # print("kl散度结束")



            # 开始seq2seq的部分
            # print("两个样本采样完毕，进行正常seq2seq中对上文的encoder")
            # 这儿按照正常seq2seq得到上文的enc的hidden
            enc_outputs, enc_hidden = self.encode(src_inputs, src_lengths)

            # print("上文src的表达已知，进行decoder")
            # 进行fusion，并送入色漆seq的decoder空间
            dec_outputs_list = []
            bow_logits_list = []

            for i in range(len(tgt_inputs_list)):
                diverse_sample = self.drop_out_gate(diverse_sample_list[i])
                similar_sample = self.drop_out_gate(similar_sample_list[i])
                diverse_sample = diverse_sample.to(torch.float32)
                similar_sample = similar_sample.to(torch.float32)

                cat_fusion = torch.cat((diverse_sample,similar_sample), dim= -1)
                # print("首先进行两个样本的fusion")

                latent_z_fusion = self.fusion(cat_fusion)
                # print("完成两个sample的fusion:")

                # 构建decoder的init——hidden，将src的hidden与diverse——
                dec_init_hidden = self.enc2dec(torch.cat((enc_hidden,diverse_sample,similar_sample), -1))
                # print("得到decoder的init状态值并输入")
                # 进入到decoder的过程
                # print("进入到decoder的过程中")
                dec_outputs, dec_hiddens, attn = self.decoder(
                    tgt_inputs_list[i], enc_outputs, dec_init_hidden, latent_z_fusion
                )
                bow_logits = self.bow_fc(latent_z_fusion)
                dec_outputs_list.append(dec_outputs)
                bow_logits_list.append(bow_logits)
            # print("完成了整个前向过程")

        elif self.train_mode == 999:
            # 首先进行调整diversity部分的部分的
            diverse_sample_list = self.diverse_forward(src_inputs, src_lengths,
                            tgt_inputs_list, tgt_legnths)

            # 接着调整similar部分部分的
            # print("diverse forward部分结束，进行similar forward：")
            similar_sample_list,kld_loss = self.similar_forward(src_inputs, src_lengths,
                            tgt_inputs_list, tgt_legnths)

            # print("两个样本采样完毕，进行正常seq2seq中对上文的encoder")
            # 这儿按照正常seq2seq得到上文的enc的hidden
            enc_outputs, enc_hidden = self.encode(src_inputs, src_lengths)

            # print("上文src的表达已知，进行decoder")
            # 进行fusion，并送入色漆seq的decoder空间
            dec_outputs_list = []
            bow_logits_list = []

            for i in range(len(tgt_inputs_list)):
                diverse_sample = diverse_sample_list[i]
                similar_sample = similar_sample_list[i]
                diverse_sample = diverse_sample.to(torch.float32)

                cat_fusion = torch.cat((diverse_sample,similar_sample), dim= -1)
                # print("首先进行两个样本的fusion")

                latent_z_fusion = self.fusion(cat_fusion)
                # print("完成两个sample的fusion:")

                # 构建decoder的init——hidden，将src的hidden与diverse——
                dec_init_hidden = self.enc2dec(torch.cat((enc_hidden,diverse_sample,similar_sample), -1))
                # print("得到decoder的init状态值并输入")
                # 进入到decoder的过程
                # print("进入到decoder的过程中")
                dec_outputs, dec_hiddens, attn = self.decoder(
                    tgt_inputs_list[i], enc_outputs, dec_init_hidden, latent_z_fusion
                )
                bow_logits = self.bow_fc(latent_z_fusion)
                dec_outputs_list.append(dec_outputs)
                bow_logits_list.append(bow_logits)
            # print("完成了整个前向过程")

        elif self.train_mode == 110:
            # 目前跟999没有区别
            # 首先进行调整diversity部分的部分的
            diverse_sample_list = self.diverse_forward(src_inputs, src_lengths,
                            tgt_inputs_list, tgt_legnths)

            # 接着调整similar部分部分的
            # print("diverse forward部分结束，进行similar forward：")
            similar_sample_list,kld_loss = self.similar_forward(src_inputs, src_lengths,
                            tgt_inputs_list, tgt_legnths)

            # print("两个样本采样完毕，进行正常seq2seq中对上文的encoder")
            # 这儿按照正常seq2seq得到上文的enc的hidden
            enc_outputs, enc_hidden = self.encode(src_inputs, src_lengths)

            # print("上文src的表达已知，进行decoder")
            # 进行fusion，并送入色漆seq的decoder空间
            dec_outputs_list = []
            bow_logits_list = []

            for i in range(len(tgt_inputs_list)):
                diverse_sample = diverse_sample_list[i]
                similar_sample = similar_sample_list[i]
                diverse_sample = diverse_sample.to(torch.float32)

                cat_fusion = torch.cat((diverse_sample,similar_sample), dim= -1)
                # print("首先进行两个样本的fusion")

                latent_z_fusion = self.fusion(cat_fusion)
                # print("完成两个sample的fusion:")

                # 构建decoder的init——hidden，将src的hidden与diverse——
                dec_init_hidden = self.enc2dec(torch.cat((enc_hidden,diverse_sample,similar_sample), -1))
                # print("得到decoder的init状态值并输入")
                # 进入到decoder的过程
                # print("进入到decoder的过程中")
                dec_outputs, dec_hiddens, attn = self.decoder(
                    tgt_inputs_list[i], enc_outputs, dec_init_hidden, latent_z_fusion
                )
                bow_logits = self.bow_fc(latent_z_fusion)
                dec_outputs_list.append(dec_outputs)
                bow_logits_list.append(bow_logits)
            # print("完成了整个前向过程")

        elif self.train_mode == 200:
            # 该莫模式：增加了一个类型的网络，使得在diversity之中用x能预测y带来的d_min和d_varinance
            # 首先进行调整diversity部分的部分的
            diverse_sample_list,kl_loss_pr_list = self.diverse_RP_forward(src_inputs, src_lengths,
                        tgt_inputs_list, tgt_legnths,)
            pr_loss = sum(kl_loss_pr_list)
            # 接着调整similar部分部分的
            # print("diverse forward部分结束，进行similar forward：")
            similar_sample_list,kld_loss = self.similar_forward(src_inputs, src_lengths,
                            tgt_inputs_list, tgt_legnths)

            # print("两个样本采样完毕，进行正常seq2seq中对上文的encoder")
            # 这儿按照正常seq2seq得到上文的enc的hidden
            enc_outputs, enc_hidden = self.encode(src_inputs, src_lengths)

            # print("上文src的表达已知，进行decoder")
            # 进行fusion，并送入色漆seq的decoder空间
            dec_outputs_list = []
            bow_logits_list = []

            for i in range(len(tgt_inputs_list)):
                diverse_sample = diverse_sample_list[i]
                similar_sample = similar_sample_list[i]
                diverse_sample = diverse_sample.to(torch.float32)

                cat_fusion = torch.cat((diverse_sample,similar_sample), dim= -1)
                # print("首先进行两个样本的fusion")

                latent_z_fusion = self.fusion(cat_fusion)
                # print("完成两个sample的fusion:")

                # 构建decoder的init——hidden，将src的hidden与diverse——
                dec_init_hidden = self.enc2dec(torch.cat((enc_hidden,diverse_sample,similar_sample), -1))
                # print("得到decoder的init状态值并输入")
                # 进入到decoder的过程
                # print("进入到decoder的过程中")
                dec_outputs, dec_hiddens, attn = self.decoder(
                    tgt_inputs_list[i], enc_outputs, dec_init_hidden, latent_z_fusion
                )
                bow_logits = self.bow_fc(latent_z_fusion)
                dec_outputs_list.append(dec_outputs)
                bow_logits_list.append(bow_logits)
            # print("完成了整个前向过程")
            kld_loss = kld_loss+pr_loss
        elif self.train_mode == 201:
            # 该模型没有新建一个网络去预测，但是让diversity的的d_mean和d_variance.

            # 首先进行调整diversity部分的部分的
            x_mean, x_variance = self.diverse_prior_net.gaussian_encoder(src_inputs, src_lengths)

            y_mean_diverse_list, y_variance_diverse_list = self.diverse_recog_net.gaussian_encoder(tgt_inputs_list,
                                                                                                   tgt_legnths)
            error = torch.from_numpy(np.random.normal(0, 1, 1))
            zero_mean = torch.zeros(x_mean.size()).cuda().to(torch.float32)

            diverse_sample_list = []
            zero_kld_loss = []
            for y_i in range(len(tgt_inputs_list)):
                sample_diverse = x_mean + y_mean_diverse_list[y_i] + (x_variance + y_variance_diverse_list[y_i]) * (
                    error.cuda())
                diverse_sample_list.append(sample_diverse)

                zero_kld_loss.append(self.gaussian_kld(zero_mean,zero_mean,y_mean_diverse_list[y_i],y_variance_diverse_list[y_i]))


            # 接着调整similar部分部分的
            # print("diverse forward部分结束，进行similar forward：")
            similar_sample_list,kld_loss = self.similar_forward(src_inputs, src_lengths,
                            tgt_inputs_list, tgt_legnths)
            kld_loss = kld_loss+sum(zero_kld_loss)/len(zero_kld_loss)
            # print("两个样本采样完毕，进行正常seq2seq中对上文的encoder")
            # 这儿按照正常seq2seq得到上文的enc的hidden
            enc_outputs, enc_hidden = self.encode(src_inputs, src_lengths)

            # print("上文src的表达已知，进行decoder")
            # 进行fusion，并送入色漆seq的decoder空间
            dec_outputs_list = []
            bow_logits_list = []

            for i in range(len(tgt_inputs_list)):
                diverse_sample = diverse_sample_list[i]
                similar_sample = similar_sample_list[i]
                diverse_sample = diverse_sample.to(torch.float32)

                cat_fusion = torch.cat((diverse_sample,similar_sample), dim= -1)
                # print("首先进行两个样本的fusion")

                latent_z_fusion = self.fusion(cat_fusion)
                # print("完成两个sample的fusion:")

                # 构建decoder的init——hidden，将src的hidden与diverse——
                dec_init_hidden = self.enc2dec(torch.cat((enc_hidden,diverse_sample,similar_sample), -1))
                # print("得到decoder的init状态值并输入")
                # 进入到decoder的过程
                # print("进入到decoder的过程中")
                dec_outputs, dec_hiddens, attn = self.decoder(
                    tgt_inputs_list[i], enc_outputs, dec_init_hidden, latent_z_fusion
                )
                bow_logits = self.bow_fc(latent_z_fusion)
                dec_outputs_list.append(dec_outputs)
                bow_logits_list.append(bow_logits)
            # print("完成了整个前向过程")
        elif self.train_mode == 202:
            # 这个模式是让diversity和similarity部分的误差都越小越好，调整量级
            # 首先进行调整diversity部分的部分的
            x_mean, x_variance = self.diverse_prior_net.gaussian_encoder(src_inputs, src_lengths)

            y_mean_diverse_list, y_variance_diverse_list = self.diverse_recog_net.gaussian_encoder(tgt_inputs_list,
                                                                                                   tgt_legnths)
            error = torch.from_numpy(np.random.normal(0, 1, 1))
            zero_mean = torch.zeros(x_mean.size()).cuda().to(torch.float32)

            diverse_sample_list = []
            zero_kld_loss = []
            for y_i in range(len(tgt_inputs_list)):
                sample_diverse = x_mean + y_mean_diverse_list[y_i] + (x_variance + y_variance_diverse_list[y_i]) * (
                    error.cuda())
                diverse_sample_list.append(sample_diverse)

                zero_kld_loss.append(self.gaussian_kld(zero_mean,zero_mean,y_mean_diverse_list[y_i],y_variance_diverse_list[y_i]))


            # 接着调整similar部分部分的
            # print("diverse forward部分结束，进行similar forward：")
            x_similar_mean, x_similar_variance = self.similar_prior_net.gaussian_encoder(src_inputs, src_lengths)
            # print("similar部分的prior结束，即将进行recognet")
            y_similar_mean, y_similar_variance, y_similar_error_list = self.similar_recog_net.gaussian_encoder(tgt_inputs_list, tgt_legnths)

            # print("similarrecognet的部分已经结束开始进行采样")
            similar_sample_list = []
            for y_error in y_similar_error_list:
                sample_y = x_mean + x_variance * y_error
                similar_sample_list.append(sample_y)


            # print("进行similar部分的KL散度计算")
            # 检查结束，是两个log计算后得到了nan
            kld_loss = self.gaussian_kld(y_similar_mean, y_similar_variance, x_similar_mean, x_similar_variance)
            # print("kl散度结束")




            # kld_loss = kld_loss+sum(zero_kld_loss)/len(zero_kld_loss)+sum(y_similar_error_list)/len(y_similar_error_list)
            kld_loss = (kld_loss,sum(zero_kld_loss)/len(zero_kld_loss),sum(y_similar_error_list)/len(y_similar_error_list))

            # print("两个样本采样完毕，进行正常seq2seq中对上文的encoder")
            # 这儿按照正常seq2seq得到上文的enc的hidden
            enc_outputs, enc_hidden = self.encode(src_inputs, src_lengths)

            # print("上文src的表达已知，进行decoder")
            # 进行fusion，并送入色漆seq的decoder空间
            dec_outputs_list = []
            bow_logits_list = []

            for i in range(len(tgt_inputs_list)):
                diverse_sample = diverse_sample_list[i]
                similar_sample = similar_sample_list[i]
                diverse_sample = diverse_sample.to(torch.float32)

                cat_fusion = torch.cat((diverse_sample,similar_sample), dim= -1)
                # print("首先进行两个样本的fusion")

                latent_z_fusion = self.fusion(cat_fusion)
                # print("完成两个sample的fusion:")

                # 构建decoder的init——hidden，将src的hidden与diverse——
                dec_init_hidden = self.enc2dec(torch.cat((enc_hidden,diverse_sample,similar_sample), -1))
                # print("得到decoder的init状态值并输入")
                # 进入到decoder的过程
                # print("进入到decoder的过程中")
                dec_outputs, dec_hiddens, attn = self.decoder(
                    tgt_inputs_list[i], enc_outputs, dec_init_hidden, latent_z_fusion
                )
                bow_logits = self.bow_fc(latent_z_fusion)
                dec_outputs_list.append(dec_outputs)
                bow_logits_list.append(bow_logits)
            # print("完成了整个前向过程")
        elif self.train_mode == 203:
            print("进入mode203")
            # 这个模式是让diversity和similarity部分的误差都越小越好，调整量级

            # 接着调整similar部分部分的
            # print("diverse forward部分结束，进行similar forward：")
            x_similar_mean, x_similar_variance = self.similar_prior_net.gaussian_encoder(src_inputs, src_lengths)
            # print("similar部分的prior结束，即将进行recognet")
            y_similar_mean, y_similar_variance, y_similar_error_list = self.similar_recog_net.gaussian_encoder(
                tgt_inputs_list, tgt_legnths)

            # print("similarrecognet的部分已经结束开始进行采样")
            similar_sample_list = []

            print("111" * 30)
            for y_similar_error in y_similar_error_list:
                y_similar_error = F.softmax(y_similar_error)
                c = y_similar_error.gt(1)

                simialr_sample_y = x_similar_mean + x_similar_variance * y_similar_error
                similar_sample_list.append(simialr_sample_y)
                print("222" * 30)
            print("333" * 30)

            # print("进行similar部分的KL散度计算")
            # 检查结束，是两个log计算后得到了nan
            similar_kld_loss = self.gaussian_kld(y_similar_mean, y_similar_variance, x_similar_mean, x_similar_variance)
            # print("kl散度结束")
            print("444" * 30)
            similar_kld_loss = torch.sum(similar_kld_loss)
            print("555" * 30)
            print(similar_kld_loss)
            # similar_kld_loss = sum(similar_kld_loss)
            print("666" * 30)
            # similar_kld_loss = similar_kld_loss.item()
            print("777" * 30)
            similar_kld_loss.backward()
            print("888" * 30)
            print("similarity_部分可以backwward")






            # 首先进行调整diversity部分的部分的
            diversity_x_mean, diversity_x_variance = self.diverse_prior_net.gaussian_encoder(src_inputs, src_lengths)

            y_mean_diverse_list, y_variance_diverse_list = self.diverse_recog_net.gaussian_encoder(tgt_inputs_list,
                                                                                                   tgt_legnths)
            diversity_error = torch.from_numpy(np.random.normal(0, 1, 1))
            zero_mean = torch.zeros(diversity_x_mean.size()).cuda().to(torch.float32)

            diverse_sample_list = []
            diverse_zero_kld_loss = []
            for y_i in range(len(tgt_inputs_list)):
                mean_diverse = F.softmax(y_mean_diverse_list[y_i])
                variance_diverse = F.softmax(y_variance_diverse_list[y_i])

                a = mean_diverse.gt(1)
                b = variance_diverse.gt(1)

                sample_diverse = diversity_x_mean + mean_diverse + (diversity_x_variance + variance_diverse) * (diversity_error.cuda())


                diverse_sample_list.append(sample_diverse)
                loss_kld_diversity = self.gaussian_kld(zero_mean,zero_mean,y_mean_diverse_list[y_i],y_variance_diverse_list[y_i])
                loss_kld_diversity.backward()
                diverse_zero_kld_loss.append(loss_kld_diversity)

            print("检查a,b,c,不应该存在比1大的数字", a, b, c)
            # kld_loss = kld_loss+sum(zero_kld_loss)/len(zero_kld_loss)+sum(y_similar_error_list)/len(y_similar_error_list)
            kld_loss = (kld_loss,sum(zero_kld_loss)/len(zero_kld_loss),sum(y_similar_error_list)/len(y_similar_error_list))

            # print("两个样本采样完毕，进行正常seq2seq中对上文的encoder")
            # 这儿按照正常seq2seq得到上文的enc的hidden
            enc_outputs, enc_hidden = self.encode(src_inputs, src_lengths)

            # print("上文src的表达已知，进行decoder")
            # 进行fusion，并送入色漆seq的decoder空间
            dec_outputs_list = []
            bow_logits_list = []

            for i in range(len(tgt_inputs_list)):
                diverse_sample = diverse_sample_list[i]
                similar_sample = similar_sample_list[i]
                diverse_sample = diverse_sample.to(torch.float32)

                cat_fusion = torch.cat((diverse_sample,similar_sample), dim= -1)
                # print("首先进行两个样本的fusion")

                latent_z_fusion = self.fusion(cat_fusion)
                # print("完成两个sample的fusion:")

                # 构建decoder的init——hidden，将src的hidden与diverse——
                dec_init_hidden = self.enc2dec(torch.cat((enc_hidden,diverse_sample,similar_sample), -1))
                # print("得到decoder的init状态值并输入")
                # 进入到decoder的过程
                # print("进入到decoder的过程中")
                dec_outputs, dec_hiddens, attn = self.decoder(
                    tgt_inputs_list[i], enc_outputs, dec_init_hidden, latent_z_fusion
                )
                bow_logits = self.bow_fc(latent_z_fusion)
                dec_outputs_list.append(dec_outputs)
                bow_logits_list.append(bow_logits)
            # print("完成了整个前向过程")


        return dec_outputs_list, kld_loss,bow_logits

    def diverse_forward(self, src_inputs, src_lengths,
                        tgt_inputs_list, tgt_legnths,):
        """Draw one diversity latent per target response.

        Each sample combines the prior statistics of the source with the
        recognition statistics of the corresponding target:
        (x_mean + y_mean) + (x_var + y_var) * eps, with a single shared
        N(0, 1) scalar eps reused for every response.
        """
        x_mean, x_variance = self.diverse_prior_net.gaussian_encoder(src_inputs, src_lengths)
        y_mean_list, y_variance_list = self.diverse_recog_net.gaussian_encoder(tgt_inputs_list,
                                                                               tgt_legnths)
        # One float64 standard-normal scalar shared by all responses.
        noise = torch.from_numpy(np.random.normal(0, 1, 1))

        sample_diverse_list = []
        for idx in range(len(tgt_inputs_list)):
            shifted_mean = x_mean + y_mean_list[idx]
            pooled_variance = x_variance + y_variance_list[idx]
            sample_diverse_list.append(shifted_mean + pooled_variance * noise.cuda())

        return sample_diverse_list

    def diverse_RP_forward(self, src_inputs, src_lengths,
                        tgt_inputs_list, tgt_legnths,):
        """Sample diversity latents using the prior, recognition and PR nets.

        Returns:
            (sample_diverse_list, kl_loss_pr_list): one latent sample and one
            KL(recognition || PR) term per target response.
        """
        # Prior statistics over the source context.
        x_mean, x_variance = self.diverse_prior_net.gaussian_encoder(src_inputs, src_lengths)

        # Recognition statistics, one pair of lists indexed by response.
        y_mean_diverse_list, y_variance_diverse_list = self.diverse_recog_net.gaussian_encoder(tgt_inputs_list,
                                                                                                       tgt_legnths)

        # Source-only "PR" statistics used in place of recognition at sampling time.
        pr_mean_divers,pr_mean_variance = self.diverse_PRnet.gaussian_encoder(src_inputs, src_lengths)

        kl_loss_pr_list = []

        sample_diverse_list = []
        for y_i in range(len(tgt_inputs_list)):
            # Fresh float64 N(0, 1) scalar per response (reparameterization noise).
            error = torch.from_numpy(np.random.normal(0, 1, 1)).cuda()

            y_mean_diverse = y_mean_diverse_list[y_i]
            y_variance_diverse = y_variance_diverse_list[y_i]
            # NOTE(review): these reassignments compound across iterations — by
            # iteration k the PR stats have been multiplied by k independent
            # noise draws, and the KL below is computed against the scaled
            # values. Looks unintentional; confirm whether a fresh per-iteration
            # copy (e.g. pr_mean_divers * error into a new local) was meant.
            pr_mean_divers = pr_mean_divers*error
            pr_mean_variance = pr_mean_variance*error

            # error is already on the GPU, so the extra .cuda() is a no-op.
            sample_diverse = x_mean + pr_mean_divers + (x_variance + pr_mean_variance) * (error.cuda())
            kl_loss_pr = self.gaussian_kld(y_mean_diverse, y_variance_diverse, pr_mean_divers, pr_mean_variance)
            kl_loss_pr_list.append(kl_loss_pr)
            sample_diverse_list.append(sample_diverse)

        return sample_diverse_list,kl_loss_pr_list


    def similar_forward(self, src_inputs, src_lengths,
                        tgt_inputs_list, tgt_legnths, ):
        """Draw a similarity latent for each target response.

        Samples are reparameterized from the source prior,
        x_mean + x_variance * eps, with one eps per response supplied by the
        recognition net.  Also returns KL(recognition || prior).
        """
        # Prior statistics from the source context.
        x_mean, x_variance = self.similar_prior_net.gaussian_encoder(src_inputs, src_lengths)
        # Posterior statistics plus one noise term per response.
        y_mean, y_variance, y_error_list = self.similar_recog_net.gaussian_encoder(tgt_inputs_list, tgt_legnths)

        # Reparameterized sample for every response.
        sample_similar_list = [x_mean + x_variance * eps for eps in y_error_list]

        # KL between recognition and prior Gaussians.
        # (Earlier debugging traced NaNs here to the two log terms.)
        kld_loss = self.gaussian_kld(y_mean, y_variance, x_mean, x_variance)

        return sample_similar_list, kld_loss



    def gaussian_kld(self, recog_mu, recog_logvar, prior_mu, prior_logvar):
        """KL divergence between two diagonal Gaussians, summed over dim 1.

        Computes KL( N(recog_mu, exp(recog_logvar)) ||
                     N(prior_mu, exp(prior_logvar)) ) per batch row.
        """
        prior_var = torch.exp(prior_logvar)
        var_ratio = torch.exp(recog_logvar) / prior_var
        mean_term = torch.pow(prior_mu - recog_mu, 2) / prior_var
        logvar_diff = recog_logvar - prior_logvar
        return 0.5 * torch.sum(var_ratio + mean_term - 1 - logvar_diff, 1)

    def order_sequence(self, inputs, lengths):
        """Sort a (time, batch) tensor by sequence length, descending.

        Returns:
            (order_inputs, sorted_length, sorted_idx): the batch-permuted
            tensor, the lengths in descending order, and the permutation so
            that order_inputs[:, k] == inputs[:, sorted_idx[k]].
        """
        # Python's sort is stable, so equal lengths keep their original order.
        sorted_idx = sorted(range(len(lengths)), key=lambda i: lengths[i], reverse=True)
        sorted_length = [lengths[i] for i in sorted_idx]
        order_inputs = inputs[:, sorted_idx]
        return order_inputs, sorted_length, sorted_idx

    def encode(self, inputs, lengths):
        """Encode a padded batch: sort by length for the encoder, run it,
        then restore the original batch order in outputs and hidden state."""
        ordered_inputs, ordered_lengths, permutation = self.order_sequence(inputs, lengths)
        outputs, hidden = self.encoder(ordered_inputs, ordered_lengths, None)
        restored_hidden = self.reorder_enc_seqence(hidden, permutation)
        restored_outputs = self.reorder_enc_seqence(outputs, permutation)
        return restored_outputs, restored_hidden

    def reorder_enc_seqence(self, inputs, sorted_idx):
        """Undo the batch permutation applied by order_sequence.

        Args:
            inputs: tensor of shape (time/layers, batch, hidden) whose batch
                dimension is in length-sorted order.
            sorted_idx: permutation from order_sequence, i.e. sorted batch
                position k holds original sample sorted_idx[k].

        Returns:
            inputs with its batch dimension restored to the original order.
        """
        # Build the inverse permutation with an O(n) scatter instead of the
        # old O(n log n) re-sort, and stop shadowing the sorted_idx parameter
        # with the loop variable as the previous implementation did.
        inverse_idx = [0] * len(sorted_idx)
        for sorted_pos, original_pos in enumerate(sorted_idx):
            inverse_idx[original_pos] = sorted_pos
        reorder_inputs = inputs[:, inverse_idx, :]
        return reorder_inputs

    def save_checkpoint(self, epoch, opt, filename):
        """Persist model weights plus training metadata to *filename*."""
        checkpoint = {
            'model_dict': self.state_dict(),
            'opt': opt,
            'epoch': epoch,
        }
        torch.save(checkpoint, filename)

    def load_checkpoint(self, filename):
        """Restore model weights from *filename* and return the saved epoch.

        Tensors are mapped to CPU so GPU checkpoints load on any machine.
        """
        checkpoint = torch.load(filename, map_location=lambda storage, loc: storage)
        self.load_state_dict(checkpoint['model_dict'])
        return checkpoint['epoch']

    def drop_out_gate(self, x):
        """Stochastically gate *x*: keep it with probability self.dropout_set,
        otherwise replace it with a zero tensor of the same shape (on GPU)."""
        if random.random() <= self.dropout_set:
            return x  # keep
        return torch.zeros(x.size()).cuda()  # drop


    # This code is only the first half of prediction (it produces the two
    # latent samples); the second half lives in infer.py.
    def predict_encoder(self,src_inputs,src_lengths,tgt_input,tgt_length,i_range,model_mode):
        """Produce (sample_diverse_list, sample_similar_list) for inference.

        `model_mode` selects the sampling strategy:
          2: target given — run both full diverse/similar forward passes.
          6: draw i_range samples directly from the recognition nets.
          4: full similar forward pass + one diverse recognition sample.
          7: one sample from each recognition net.
          0/1/3 (else branch): prior-only variants (noisy / mean-only / zeros).
        Any other mode falls into the else branch and returns two empty lists.
        """
        # Mode 2: the target response is supplied directly.
        if model_mode == 2:
            tgt_inputs_list = [tgt_input]
            tgt_legnths = [tgt_length]
            sample_diverse_list = self.diverse_forward(src_inputs, src_lengths,
                                                       tgt_inputs_list, tgt_legnths)

            # Then the similarity half (KL loss is ignored at predict time).
            sample_similar_list, kld_loss = self.similar_forward(src_inputs, src_lengths,
                                                                 tgt_inputs_list, tgt_legnths)

        # Mode 6: uses the diversity recognition net per draw.
        elif model_mode == 6:
            sample_diverse_list = []
            sample_similar_list = []
            tgt_inputs_list = [tgt_input]
            tgt_legnths = [tgt_length]
            for i in range(i_range):
                # NOTE(review): the single recognition *mean* is used directly
                # as the "sample"; the variance list is discarded.
                [sample_diverse], y_variance_diverse_list = self.diverse_recog_net.gaussian_encoder(tgt_inputs_list,
                                                                                                          tgt_legnths)
                # NOTE(review): x_mean/x_variance are computed but never used
                # in this mode — confirm whether that call can be removed.
                x_mean, x_variance = self.diverse_prior_net.gaussian_encoder(src_inputs, src_lengths)
                # NOTE(review): sample_similar is the posterior mean; variance
                # and error outputs are discarded.
                sample_similar, y_similar_variance, [y_error] = self.similar_recog_net.gaussian_encoder(tgt_inputs_list, tgt_legnths)
                sample_diverse_list.append(sample_diverse)
                sample_similar_list.append(sample_similar)

        # Mode 4: all of the similar path + the diversity recognition mean.
        elif model_mode == 4:
            sample_diverse_list = []
            tgt_inputs_list = [tgt_input]
            tgt_legnths = [tgt_length]
            [sample_diverse], y_variance_diverse_list = self.diverse_recog_net.gaussian_encoder(tgt_inputs_list,
                                                                                                    tgt_legnths)
            sample_similar_list, kld_loss = self.similar_forward(src_inputs, src_lengths,
                                                                     tgt_inputs_list, tgt_legnths)
            sample_diverse_list.append(sample_diverse)

        # Mode 7: one recognition-based sample of each kind.
        elif model_mode == 7:
            sample_diverse_list = []
            sample_similar_list = []
            tgt_inputs_list = [tgt_input]
            tgt_legnths = [tgt_length]
            [sample_diverse], y_variance_diverse_list = self.diverse_recog_net.gaussian_encoder(tgt_inputs_list,
                                                                                                tgt_legnths)
            y_simialr_mean, y_similar_variance, [y_error] = self.similar_recog_net.gaussian_encoder(tgt_inputs_list,
                                                                                                    tgt_legnths)

            # Reparameterized similar sample from the recognition statistics.
            sample_similar = y_simialr_mean + y_similar_variance*y_error
            sample_diverse_list.append(sample_diverse)
            sample_similar_list.append(sample_similar)

        else:
            # Prior-only modes: forward through the diversity prior...
            x_mean_diverse, x_variance_diverse = self.diverse_prior_net.gaussian_encoder(src_inputs, src_lengths)

            # ...and the similarity prior.
            x_mean_similar, x_variance_similar = self.similar_prior_net.gaussian_encoder(src_inputs, src_lengths)

            sample_diverse_list = []
            sample_similar_list = []

            # NOTE(review): an unrecognized model_mode (e.g. 5) skips all inner
            # branches and silently returns two empty lists.

            if model_mode == 0:
                # One shared N(0,1) scalar for all diverse draws; uniform
                # per-draw perturbations of the prior mean/variance.
                diversity_error = torch.from_numpy(np.random.normal(0, 1, 1)).cuda()
                for i in range(i_range):
                    # Diversity sample.
                    mean_error = torch.rand(x_mean_diverse.size()).cuda()
                    variance_error = torch.rand(x_variance_diverse.size()).cuda()
                    sample_diverse = x_mean_diverse + mean_error + (x_variance_diverse + variance_error) * (diversity_error)
                    sample_diverse_list.append(sample_diverse)

                    # Similarity sample: fresh N(0,1) scalar per draw.
                    similarity_error = torch.from_numpy(np.random.normal(0, 1, 1)).cuda()
                    sample_similar = x_mean_similar + x_variance_similar * similarity_error
                    sample_similar_list.append(sample_similar)
            # Mode 1: no noise — both latents are the prior means.
            elif model_mode == 1:
                for i in range(i_range):
                    sample_diverse = x_mean_diverse
                    sample_diverse_list.append(sample_diverse)
                    sample_similar = x_mean_similar
                    sample_similar_list.append(sample_similar)

            # Mode 3: plain seq2seq — both latents are zero vectors.
            elif model_mode == 3:
                for i in range(i_range):
                    sample_diverse = torch.zeros(x_mean_diverse.size()).cuda().to(torch.float32)
                    sample_similar = sample_diverse
                    sample_diverse_list.append(sample_diverse)
                    sample_similar_list.append(sample_similar)

        return sample_diverse_list,sample_similar_list



