"""
@author: 石沙
@date: 2020-10-13
@content：本模块用于定义模型和搜索方法
@attention:
（1）RNN系列模型，其输入可以一个时间步一个时间步的输入和输出，也可以利用
    pack_padded_sequence一次性输入整个完整序列（整个batch），并获得所有的outputs；
（2）值得注意的问题：pad_packed_sequence在整合输出时会出现如下情况：丢弃最后为padding部分的内容。
    比如inputs.shape: (64, 100, 300)，如果在dim=1的维度上，最后2列都是padding，则在pad_packed_sequence之后
    outputs.shape变为(64, 98, 300)，蜜汁操作
（3）一律使用batch=True，可以减少很多张量变换操作，简化运算逻辑
"""

import torch
from torch import nn
import torch.nn.functional as F
import configs.settings as conf
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from site_packages.utils.job import DataOp
from random import random
from features import sentences_from_indices
import pandas as pd
from site_packages.utils.job import DataOp
from datasets import load_dic, load_expanded_dic, load_embedding_matrix


# Base vocabulary shared by encoder and decoder.
dic = load_dic()
# The pointer-generator variant additionally needs the expanded vocabulary
# (base vocab plus per-article OOV tokens).
if conf.MODEL_CONF['pointer']:
    expanded_dic = load_expanded_dic()

if conf.MODEL_CONF['weight_tying']:
    # With weight tying the decoder projects hidden states onto the transposed
    # embedding matrix instead of a separate output layer.
    embedding_matrix = load_embedding_matrix()
    embedding_matrix_t = torch.from_numpy(embedding_matrix).transpose(1, 0).to(conf.DEVICE)


class BaseEncoderGRU(nn.Module):
    """
    GRU-based encoder.

    Args:
        input_size: number of features per time step of the input sequence
        hidden_size: hidden-state dimensionality (same as the embedding size)
        embedding: pretrained embedding matrix, indexed directly as a tensor
        num_layers: number of stacked GRU layers
        bidirectional: False for a uni-directional GRU, True for bi-directional
        dropout: dropout rate applied between stacked GRU layers
    """

    def __init__(self, input_size, hidden_size, embedding, num_layers=1, bidirectional=False, dropout=0):
        super(BaseEncoderGRU, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.embedding = embedding
        self.bidirectional = bidirectional
        self.gru = nn.GRU(
            input_size,
            hidden_size,
            num_layers=num_layers,
            bidirectional=bidirectional,
            dropout=dropout,
            batch_first=True,
        )

    def forward(self, inputs, input_lens, hidden=None):
        """Encode an entire padded batch in a single GRU call.

        Returns (outputs, hidden). Note that pad_packed_sequence pads only up
        to the longest sequence of *this* batch, which may be shorter than the
        global maximum input length.
        """
        if hidden is not None:
            hidden = hidden.to(conf.DEVICE)

        # Embedding lookup via tensor indexing.
        # looked_up: (batch_size, INPUT_MAX_LEN, hidden_size)
        looked_up = self.embedding[inputs]

        # The batch must already be sorted by descending length
        # (enforce_sorted=True).
        packed_batch = pack_padded_sequence(
            looked_up, input_lens, enforce_sorted=True, batch_first=True
        ).to(conf.DEVICE)

        # rnn_outputs: (batch_size, max len of this batch, hidden_size * num_directions)
        rnn_outputs, hidden = self.gru(packed_batch, hidden)
        rnn_outputs, _ = pad_packed_sequence(rnn_outputs, batch_first=True)
        return rnn_outputs, hidden


class Attention(nn.Module):
    """
    Luong-style attention (Luong et al., 2015).

    Supports the three score functions from the paper: 'dot', 'general' and
    'concat'. When ``pointer=True`` the method is forced to 'concat'.

    Args:
        hidden_size: hidden dimension of both the decoder output and the
            encoder outputs
        attn_method: one of 'dot', 'general', 'concat'
        pointer: force the 'concat' score (pointer-network style)
    """
    def __init__(self, hidden_size, attn_method='general', pointer=False):
        assert attn_method in ('dot', 'general', 'concat'), '方法不在允许的范围内！'
        super(Attention, self).__init__()
        self.hidden_size = hidden_size
        self.pointer = pointer
        self.method = 'concat' if pointer else attn_method
        if self.method == 'general':
            self.linear = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
        if self.method == 'concat':
            self.W = nn.Linear(self.hidden_size * 2, self.hidden_size, bias=False)
            # NOTE(review): torch.FloatTensor(hidden_size) is uninitialized
            # memory (kept for state-dict compatibility with AttentionPrt);
            # consider an explicit init for reproducibility.
            self.V = nn.Parameter(torch.FloatTensor(hidden_size))
        # Dispatch table from method name to score function.
        self.scores = {
            'general': self.general,
            'concat': self.concat,
            'dot': self.dot
        }

    def dot(self, decoder_rnn_output, encoder_outputs):
        """Dot score. Returns (batch_size, max_input_len)."""
        score = torch.bmm(decoder_rnn_output, encoder_outputs.transpose(2, 1))
        # Bug fix: squeeze the singleton query dimension so every score
        # function returns a 2-D tensor, as forward() expects for
        # softmax(dim=1) and the mask multiplication.
        return score.squeeze(1)

    def general(self, decoder_rnn_output, encoder_outputs):
        """General (bilinear) score. Returns (batch_size, max_input_len)."""
        # prefix: (batch_size, 1, hidden_size)
        prefix = self.linear(decoder_rnn_output)
        # batch_size is kept at dim=0 on both operands so bmm can batch them.
        # score: (batch_size, 1, max_input_len)
        score = torch.bmm(prefix, encoder_outputs.transpose(2, 1))
        # squeeze(1) instead of a bare squeeze(): squeeze() would also drop
        # the batch dimension when batch_size == 1.
        return score.squeeze(1)

    def concat(self, decoder_rnn_output, encoder_outputs):
        """Additive/concat score. Returns (batch_size, max_input_len)."""
        # Bug fix: broadcast the decoder output along the *sequence* dimension
        # so it can be concatenated with every encoder position. The original
        # only expanded the batch dimension, so torch.cat(dim=2) failed for
        # any source length > 1. (Matches AttentionPrt.concat.)
        score = self.W(
            torch.cat(
                (decoder_rnn_output.expand(encoder_outputs.shape), encoder_outputs), dim=2
            )
        ).tanh()
        return torch.sum(self.V * score, dim=2)

    def forward(self, decoder_rnn_output, encoder_outputs, mask):
        """Compute the attention context vector.

        Args:
            decoder_rnn_output: (batch_size, 1, hidden_size)
            encoder_outputs: (batch_size, max_input_len, hidden_size)
            mask: (batch_size, max_input_len), 1 for real tokens, 0 for pad

        Returns:
            context: (batch_size, hidden_size)
        """
        # scores: (batch_size, max_input_len)
        scores = self.scores[self.method](decoder_rnn_output, encoder_outputs)

        # weights: (batch_size, max_input_len)
        weights = F.softmax(scores, dim=1)

        # Zero out padded positions, then renormalize so weights sum to 1.
        weights = weights * mask
        normalization_factor = weights.sum(dim=1, keepdim=True)
        weights = weights / normalization_factor

        # context: (batch_size, 1, hidden_size)
        context = weights.unsqueeze(1).bmm(encoder_outputs)

        # Return with the conventional 2-D shape:
        # context: (batch_size, hidden_size)
        return context.squeeze(1)


class AttentionPrt(nn.Module):
    """Additive (concat) attention used by the pointer-generator decoder,
    with an optional coverage feature.

    Args:
        hidden_size: hidden dimension of decoder output and encoder outputs
        input_size: length of the coverage vector (presumably the padded
            source length — TODO confirm against the caller); only needed
            when coverage is enabled
    """

    def __init__(self, hidden_size, input_size=None):
        super(AttentionPrt, self).__init__()
        self.hidden_size = hidden_size
        self.W = nn.Linear(hidden_size * 2, hidden_size, bias=False)
        self.V = nn.Parameter(torch.FloatTensor(hidden_size))
        if conf.MODEL_CONF['is_coverage']:
            self.W_c = nn.Linear(input_size, hidden_size, bias=False)

    def concat(self, decoder_rnn_output, encoder_outputs, coverage_feature=None):
        """Additive score over all encoder positions.

        Returns: (batch_size, max_input_len)
        """
        broadcast_dec = decoder_rnn_output.expand(encoder_outputs.shape)
        features = self.W(torch.cat((broadcast_dec, encoder_outputs), dim=2))
        if coverage_feature is not None:
            features = features + coverage_feature
        return (self.V * features.tanh()).sum(dim=2)

    def forward(self, decoder_rnn_output, encoder_outputs, mask, coverage=None):
        """Compute attention weights and context; update coverage if given.

        Returns:
            (weights, context) when coverage is None, otherwise
            (weights, context, updated_coverage).
            context: (batch_size, 1, hidden_size)
        """
        coverage_feature = None
        if coverage is not None:
            coverage_feature = self.W_c(coverage).unsqueeze(1)

        scores = self.concat(decoder_rnn_output, encoder_outputs, coverage_feature=coverage_feature)

        # Mask out padded positions, then renormalize the weights.
        # weights: (batch_size, max_input_len)
        weights = F.softmax(scores, dim=1) * mask
        weights = weights / weights.sum(dim=1, keepdim=True)

        # context: (batch_size, 1, hidden_size)
        context = weights.unsqueeze(1).bmm(encoder_outputs)

        if coverage is None:
            return weights, context
        # Accumulate the attention distribution into the coverage vector.
        return weights, context, coverage + weights


class BaseDecoderGRU(nn.Module):
    """
    GRU decoder with attention; optionally a pointer-generator decoder.

    Attributes:
        hidden_size: hidden dimension, same as the embedding dimension
        embedding: pretrained embedding matrix (indexed as a tensor)
        vocab_size: size of the base vocabulary (output projection size)
        num_layers: number of GRU layers
        attn_method: one of 'dot', 'general', 'concat'; ignored when
            pointer=True (AttentionPrt is used instead)
        dropout: dropout rate applied to the embedded decoder input
        pointer: when True, use the pointer-generator mechanism
            (p_gen gate and a copy distribution over the expanded vocabulary)
        input_size: length of the coverage vector forwarded to AttentionPrt
    """
    def __init__(self, hidden_size,
                 embedding,
                 vocab_size,
                 num_layers=1,
                 attn_method='general',
                 dropout=0, pointer=False, input_size=None):
        super(BaseDecoderGRU, self).__init__()
        self.hidden_size = hidden_size
        self.pointer = pointer
        self.embedding = embedding
        self.num_layers = num_layers
        self.gru = nn.GRU(hidden_size, hidden_size, num_layers=num_layers, batch_first=True)
        self.dropout = nn.Dropout(dropout)
        self.attn = (
            Attention(hidden_size, attn_method=attn_method) if not pointer
            else AttentionPrt(hidden_size, input_size=input_size)
        )
        self.concat = nn.Linear(hidden_size * 2, hidden_size)
        self.out = nn.Linear(hidden_size, vocab_size)
        if self.pointer:
            # p_gen gate over [context; rnn_output; embedded input].
            self.W_gen = nn.Linear(self.hidden_size * 3, 1)
            self.acc_attn = None

    def forward(self, decoder_inputs, hidden, encoder_outputs, input_mask, expanded_inputs=None, coverage=None, weight_tying=False):
        """Run one decoding time step.

        Returns (output, hidden, coverage) where output is the (softmaxed)
        token distribution with shape (batch_size, 1, vocab_size) — or the
        expanded vocab size in pointer mode.
        """
        hidden = hidden.to(conf.DEVICE)

        # embeded: (batch_size, 1, hidden_size)
        embeded = self.embedding[decoder_inputs].unsqueeze(1)
        embeded = self.dropout(embeded)

        # Single-step GRU.
        # rnn_output: (batch_size, 1, hidden_size)
        # hidden: (num_layers, batch_size, hidden_size)
        rnn_output, hidden = self.gru(embeded, hidden)

        if not self.pointer:
            # Attention over the encoder outputs.
            # context: (batch_size, hidden_size)
            context = self.attn(rnn_output, encoder_outputs, mask=input_mask)

            # Concatenate GRU output and context.
            # concat_vector: (batch_size, hidden_size * 2)
            # NOTE(review): a bare .squeeze() also collapses the batch
            # dimension when batch_size == 1 — confirm batches are always > 1.
            concat_vector = torch.cat((rnn_output.squeeze(), context), dim=1).squeeze()

            # concat_output: (batch_size, hidden_size)
            concat_output = self.concat(concat_vector)

            # Z: (batch_size, vocab_size)
            if weight_tying:
                # Weight tying: project onto the transposed embedding matrix
                # instead of the separate output layer.
                Z = concat_output.mm(embedding_matrix_t)
            else:
                Z = self.out(concat_output)
            output = F.softmax(Z, dim=1)
            return output.unsqueeze(1), hidden, coverage
        else:
            # Attention (with optional coverage).
            if coverage is not None:
                weights, context, coverage = self.attn(rnn_output, encoder_outputs, mask=input_mask, coverage=coverage)
            else:
                weights, context = self.attn(rnn_output, encoder_outputs, mask=input_mask)

            # Zero probabilities for the OOV part of the expanded vocabulary.
            VOCAB_ZEROS = torch.zeros((rnn_output.shape[0], expanded_dic.num_words - dic.num_words)).to(
                conf.DEVICE)

            # P_vocab: generation distribution over the base vocabulary.
            concat_vector = torch.cat((rnn_output.squeeze(), context.squeeze()), dim=1).squeeze()
            # concat_output: (batch_size, hidden_size)
            concat_output = self.concat(concat_vector)
            # Z: (batch_size, vocab_size)
            if weight_tying:
                Z = concat_output.mm(embedding_matrix_t)
            else:
                Z = self.out(concat_output)
            p_vocab = F.softmax(Z, dim=1)

            # p_gen: probability of generating (vs copying).
            # x_gen: (batch_size, 1)
            x_gen = self.W_gen(
                torch.cat((context.squeeze(), rnn_output.squeeze(), embeded.squeeze()), dim=1)
            )
            # p_gen: (batch_size, 1)
            p_gen = torch.sigmoid(x_gen)

            vocab_dist = p_gen * p_vocab
            vocab_dist = torch.cat((vocab_dist, VOCAB_ZEROS), dim=1)
            attn_dist = (1 - p_gen) * weights
            # Final distribution: scatter the copy probabilities onto the
            # expanded-vocabulary ids of the source tokens.
            output = vocab_dist.scatter_add(1, expanded_inputs[:, :attn_dist.shape[1]], attn_dist)
            if coverage is not None:
                # NOTE(review): per See et al. (2017), min(attention, coverage)
                # is the per-step coverage *loss* term; assigning it back as
                # the running coverage vector looks suspicious — verify intent.
                coverage = torch.min(attn_dist, coverage)
            return output.unsqueeze(1), hidden, coverage


class Seq2SeqGRU(nn.Module):
    """
    Seq2seq model tying encoder and decoder together into a single
    input/output path.

    Args:
        teacher_forcing: teacher-forcing ratio; at each step teacher forcing
            is applied when random() <= teacher_forcing (enabled at all only
            when the ratio is > 0)
        dropout: dropout rate

    Attributes:
        encoder: the encoder module
        decoder: the decoder module
        training: training-mode flag; when True, forward() runs
            forward_by_train, otherwise greedy_search or beam_search
        target_pad: pad values used to extend each batch's outputs up to the
            global TARGET_MAX_LEN (see configs.settings), since a batch is
            only as long as its longest sentence
        greedy_search: evaluation-time greedy decoding (set by to_eval)
        beam_search: evaluation-time beam-search decoding (set by to_eval)
    """
    def __init__(self, teacher_forcing=0, dropout=0):
        super(Seq2SeqGRU, self).__init__()
        embedding = DataOp.load_data('gensim_embedding').to(conf.DEVICE)
        self.teacher_forcing = teacher_forcing
        self.use_teacher_forcing = True if self.teacher_forcing > 0 else False
        self.encoder = BaseEncoderGRU(
            conf.MODEL_CONF['hidden_size'],
            conf.MODEL_CONF['hidden_size'],
            embedding,
            conf.MODEL_CONF['encoder_num_layers'],
            bidirectional=conf.MODEL_CONF['bidirectional'],
            dropout=dropout
        )
        self.decoder = BaseDecoderGRU(
            conf.MODEL_CONF['hidden_size'],
            embedding,
            dic.num_words,
            num_layers=conf.MODEL_CONF['encoder_num_layers'],
            attn_method=conf.MODEL_CONF['attn_method'],
            dropout=dropout,
            pointer=conf.MODEL_CONF['pointer'],
            input_size=conf.INPUT_MAX_LEN + 2 if conf.MODEL_CONF['is_coverage'] else None # +2 for the SOS and EOS tokens
        )
        # NOTE(review): this shadows nn.Module.training (toggled by
        # .train()/.eval()); also the else-branch below is unreachable since
        # self.training is set to True on the previous line — mode switching
        # relies on to_eval()/to_train() instead.
        self.training = True
        if self.training:
            self.target_pad = [float(dic.PAD) for i in range(conf.MODEL_CONF['batch_size'])]
            self.input_pad = [[float(dic.PAD) for i in range(conf.MODEL_CONF['batch_size'])] for i in range(conf.MODEL_CONF['hidden_size'])]
        else:
            self.greedy_search = greedy_search
            self.beam_search = BeamSearch()

    def to_eval(self):
        # Switch to evaluation mode: disable teacher forcing and attach the
        # search strategies.
        self.training = False
        self.use_teacher_forcing = False
        self.greedy_search = greedy_search
        self.beam_search = BeamSearch()

    def to_train(self):
        # Switch back to training mode.
        self.training = True
        self.use_teacher_forcing = True if self.teacher_forcing > 0 else False

    def renew_decoder_inputs(self, decoder_output, targets=None):
        """Choose the next decoder input: the ground-truth token when teacher
        forcing fires this step, otherwise the decoder's argmax prediction."""
        output_as_inputs = True
        if self.use_teacher_forcing:
            if random() <= self.teacher_forcing:
                # decoder_inputs: (batch_size,)
                decoder_inputs = targets
                output_as_inputs = False

        if output_as_inputs:
            # max_ids: (batch_size, 1, 1)
            probs, max_ids = decoder_output.topk(1, dim=2)

            # decoder_inputs: (batch_size,)
            decoder_inputs = max_ids.squeeze()
        return decoder_inputs

    def locate_topk(self, decoder_output, top_k=2):
        # NOTE(review): duplicates BeamSearch.locate_topk — consider sharing.
        # probs: (batch_size, top_k, 1) after the transpose
        # max_ids: (batch_size, top_k, 1) after the transpose
        probs, max_ids = decoder_output.topk(top_k, dim=2)
        return probs.transpose(2, 1), max_ids.transpose(2, 1)

    def forward_by_train(self, encoder_outputs,
                         decoder_inputs, decoder_hidden,
                         max_target_len, targets, input_mask,
                         target_mask, expanded_inputs=None,
                         expanded_targets=None):
        """Teacher-forced training loop over the target sequence.

        Returns (seq_outputs, mask, coverage_losses), each flattened to
        (batch_size * max_len,) — coverage_losses is [] when coverage is off.
        """
        seq_outputs = []
        coverage_losses = []
        coverage = torch.zeros((conf.MODEL_CONF['batch_size'], encoder_outputs.shape[1]), dtype=torch.float).to(conf.DEVICE) if conf.MODEL_CONF['is_coverage'] else None
        for t in range(max_target_len):
            # decoder_output: (batch_size, 1, vocab_size)
            decoder_output, decoder_hidden, coverage = self.decoder(decoder_inputs, decoder_hidden,
                                                                    encoder_outputs, input_mask,
                                                                    expanded_inputs=expanded_inputs,
                                                                    coverage=coverage,
                                                                    weight_tying=conf.MODEL_CONF['weight_tying'])
            if conf.MODEL_CONF['pointer']:
                # Pointer mode always teacher-forces; gather the probability
                # assigned to the gold (expanded-vocab) token.
                decoder_inputs = targets[:, t]
                seq_output = torch.gather(decoder_output.squeeze(), 1, expanded_targets[:, [t]])
            else:
                decoder_inputs = self.renew_decoder_inputs(decoder_output, targets=targets[:, t])
                seq_output = torch.gather(decoder_output.squeeze(), 1, targets[:, t].view(-1, 1))

            # seq_output: (batch_size, 1)
            seq_outputs.append(seq_output)
            if conf.MODEL_CONF['is_coverage']:
                coverage_loss = torch.sum(coverage, dim=1, keepdim=True)
                coverage_losses.append(coverage_loss)

        len_diff = targets.shape[1] - max_target_len
        if len_diff > 0:
            # pads: (batch_size, len_diff)
            pads = torch.tensor([self.target_pad for i in range(len_diff)]).transpose(1, 0).to(conf.DEVICE)
        else:
            pads = None

        # seq_outputs: (batch_size, max_len of this targets)
        seq_outputs = torch.cat(seq_outputs, dim=1)
        if conf.MODEL_CONF['is_coverage']:
            coverage_losses = torch.cat(coverage_losses, dim=1)

        if pads is not None:
            # Pad the outputs up to the full target length.
            # seq_outputs: (batch_size, max_len)
            seq_outputs = torch.cat([seq_outputs, pads], dim=1)
            if conf.MODEL_CONF['is_coverage']:
                coverage_losses = torch.cat([coverage_losses, pads], dim=1)

        # seq_outputs: (batch_size * max_len,)
        seq_outputs = torch.flatten(seq_outputs)

        if conf.MODEL_CONF['is_coverage']:
            # coverage_losses: (batch_size * max_len,)
            coverage_losses = torch.flatten(coverage_losses)

        # mask: (batch_size * max_len,)
        mask = torch.flatten(target_mask)
        return seq_outputs, mask, coverage_losses

    def forward(self, inputs, input_lens, input_mask,
                targets=None, max_target_len=None, target_mask=None,
                expanded_inputs=None, expanded_targets=None, beam_search=False, top_k=2):
        """Encode the batch, then train-decode or search-decode depending on
        self.training."""
        # encoder_outputs: (batch_size, max_len of this inputs, hidden_size)
        # encoder_hidden: (num_layers * num_directions, batch_size, hidden_size)
        encoder_outputs, encoder_hidden = self.encoder(inputs, input_lens)
        if encoder_outputs.shape[1] < input_mask.shape[1]:
            # pad_packed_sequence only pads to this batch's longest sequence;
            # pad encoder_outputs back up to the mask's length with PAD values.
            len_diff = input_mask.shape[1] - encoder_outputs.shape[1]
            pads = torch.tensor([self.input_pad for i in range(len_diff)]).permute(2, 0, 1).to(conf.DEVICE)
            encoder_outputs = torch.cat((encoder_outputs, pads), dim=1)

        # NOTE(review): if the encoder is bidirectional its hidden state has
        # num_layers * 2 layers while the decoder GRU expects num_layers —
        # confirm the bidirectional configuration is actually used.
        decoder_hidden = encoder_hidden

        # Start decoding from the start-of-sentence token.
        # decoder_inputs: (batch_size,)
        decoder_inputs = torch.tensor([dic.SOS for i in range(inputs.shape[0])]).to(conf.DEVICE)

        max_target_len = conf.TARGET_MAX_LEN if max_target_len is None else max_target_len

        if self.training:
            return self.forward_by_train(encoder_outputs, decoder_inputs, decoder_hidden, max_target_len, targets, input_mask, target_mask, expanded_inputs=expanded_inputs, expanded_targets=expanded_targets)
        else:
            if beam_search:
                return self.beam_search(self.decoder, encoder_outputs, decoder_inputs, decoder_hidden, max_target_len, input_mask, top_k=top_k, expanded_inputs=expanded_inputs, weight_tying=conf.MODEL_CONF['weight_tying'])
            else:
                return self.greedy_search(self.decoder, encoder_outputs, decoder_inputs, decoder_hidden, max_target_len, input_mask, expanded_inputs=expanded_inputs, weight_tying=conf.MODEL_CONF['weight_tying'])


def greedy_search(decoder, encoder_outputs, decoder_inputs,
                  decoder_hidden, max_target_len, input_mask,
                  expanded_inputs=None, weight_tying=False):
    """Greedy decoding: at every step feed back the argmax token.

    Args:
        decoder: the decoder module (called one time step at a time)
        encoder_outputs: (batch_size, max_input_len, hidden_size)
        decoder_inputs: (batch_size,) initial inputs (SOS tokens)
        decoder_hidden: initial decoder hidden state
        max_target_len: number of decoding steps
        input_mask: (batch_size, max_input_len) source padding mask
        expanded_inputs: expanded-vocabulary source ids (pointer mode)
        weight_tying: forward the weight-tying flag to the decoder

    Returns:
        seq_outputs: (batch_size, max_target_len) decoded token ids
    """
    seq_outputs = []
    coverage = torch.zeros((conf.MODEL_CONF['batch_size'], encoder_outputs.shape[1]), dtype=torch.float).to(
        conf.DEVICE) if conf.MODEL_CONF['is_coverage'] else None

    for t in range(max_target_len):
        # decoder_output: (batch_size, 1, vocab_size)
        # Bug fix: weight_tying was accepted but never forwarded, so weight
        # tying silently switched off during evaluation.
        decoder_output, decoder_hidden, coverage = decoder(
            decoder_inputs, decoder_hidden,
            encoder_outputs, input_mask,
            expanded_inputs=expanded_inputs,
            coverage=coverage,
            weight_tying=weight_tying)
        # Pick the argmax token and feed it back as the next input.
        # decoder_inputs: (batch_size,)
        probs, max_ids = decoder_output.topk(1, dim=2)
        decoder_inputs = max_ids.squeeze()
        seq_outputs.append(decoder_inputs.unsqueeze(1))

    # seq_outputs: (batch_size, max_target_len)
    seq_outputs = torch.cat(seq_outputs, dim=1)
    return seq_outputs


class BeamSearch:
    """
    Beam-search decoding.

    Attributes:
        EOS / PAD: end-of-sentence / padding token ids; once a beam emits one
            of them, the rest of that beam is forced to EOS
        EOS_prob: probability 1.0 assigned to forced EOS steps so a finished
            beam's score stops changing
        best_seqs: (this_batch_size, top_k, max_target_len) current best beams
        seq_prob_records: (this_batch_size, top_k * top_k) running beam
            probabilities, each repeated top_k times so they align with the
            top_k * top_k candidate expansions of the next step
        probs_in_current_step: (this_batch_size, top_k * top_k) probabilities
            of the candidate tokens produced at the current step
        words_in_current_step: (this_batch_size, top_k * top_k) ids of the
            candidate tokens produced at the current step

    Scoring scheme (top_k = 2 example):
        at t=0 each sentence yields words W11, W12 with probs P11, P12,
        giving seq_prob_records = [P11, P11, P12, P12];
        at t=1 beam W11 yields W21, W22 and beam W12 yields W23, W24, so
        probs_in_current_step = [P21, P22, P23, P24] and the candidate scores
        are seq_prob_records * probs_in_current_step, from which the top_k
        survivors are kept; repeat until max_target_len.
    """
    def __init__(self):
        self.EOS = torch.tensor(dic.EOS, dtype=torch.long).to(conf.DEVICE)
        self.EOS_prob = torch.tensor(1.0).to(conf.DEVICE)
        self.PAD = torch.tensor(dic.PAD, dtype=torch.long).to(conf.DEVICE)

    def init_containers(self, this_batch_size, top_k, max_target_len):
        self.best_seqs = torch.zeros((this_batch_size, top_k, max_target_len)).long().to(conf.DEVICE)
        self.seq_prob_records = torch.ones((this_batch_size, top_k * top_k)).to(conf.DEVICE)
        self.probs_in_current_step = torch.zeros((this_batch_size, top_k * top_k), dtype=torch.float).to(conf.DEVICE)
        # Bug fix: token ids must be integral so they can be gathered into
        # best_seqs; the original container was float.
        self.words_in_current_step = torch.zeros((this_batch_size, top_k * top_k), dtype=torch.long).to(conf.DEVICE)

    def locate_topk(self, decoder_output, top_k=2):
        # probs / max_ids: (batch_size, top_k, 1) after the transpose
        probs, max_ids = decoder_output.topk(top_k, dim=2)
        return probs.transpose(2, 1), max_ids.transpose(2, 1)

    def __call__(self, decoder, encoder_outputs, decoder_inputs,
                 decoder_hidden, max_target_len, input_mask,
                 top_k=2, expanded_inputs=None, weight_tying=False):
        # Initialize the per-call containers.
        this_batch_size = encoder_outputs.shape[0]
        self.init_containers(this_batch_size, top_k, max_target_len)
        coverage = torch.zeros((conf.MODEL_CONF['batch_size'], encoder_outputs.shape[1]), dtype=torch.float).to(
            conf.DEVICE) if conf.MODEL_CONF['is_coverage'] else None

        for t in range(max_target_len):

            if t == 0:
                # decoder_output: (batch_size, 1, vocab_size)
                decoder_output, decoder_hidden, coverage = decoder(decoder_inputs, decoder_hidden, encoder_outputs,
                                                                   input_mask, expanded_inputs=expanded_inputs,
                                                                   coverage=coverage, weight_tying=weight_tying)
                # probs / max_ids: (this_batch_size, top_k, 1)
                probs, max_ids = self.locate_topk(decoder_output, top_k=top_k)
                self.seq_prob_records *= probs.expand((this_batch_size, top_k, top_k)).flatten(1, 2)
                self.best_seqs[:, :, [t]] = max_ids
            else:
                is_last_word_eos_or_pad = torch.logical_or(self.best_seqs[:, :, [t - 1]] == self.EOS,
                                                           self.best_seqs[:, :, [t - 1]] == self.PAD)
                # Bug fix: the original compared against this_batch_size * 2,
                # hard-coding top_k == 2.
                if is_last_word_eos_or_pad.sum() == this_batch_size * top_k:
                    # Every beam already ended in EOS/PAD — stop early.
                    break

                for k in range(top_k):
                    decoder_inputs = self.best_seqs[:, k, t - 1].squeeze()
                    # NOTE(review): decoder_hidden is shared/overwritten across
                    # the k beams rather than tracked per beam — verify intent.
                    decoder_output, decoder_hidden, coverage = decoder(decoder_inputs, decoder_hidden,
                                                                       encoder_outputs, input_mask,
                                                                       expanded_inputs=expanded_inputs,
                                                                       coverage=coverage, weight_tying=weight_tying)
                    probs, max_ids = self.locate_topk(decoder_output, top_k=top_k)

                    # Finished beams are forced to emit EOS with probability 1.
                    # (Bug fix: the original indexed [:, [k, k], :], again
                    # hard-coding top_k == 2.)
                    finished = is_last_word_eos_or_pad[:, [k], :].expand_as(max_ids)
                    max_ids = torch.where(finished, self.EOS, max_ids)
                    probs = torch.where(finished, self.EOS_prob, probs)

                    self.probs_in_current_step[:, k * top_k: (1 + k) * top_k] = probs.squeeze(2)
                    # Bug fix: the original overwrote probs_in_current_step a
                    # second time here, leaving words_in_current_step all zeros.
                    self.words_in_current_step[:, k * top_k: (1 + k) * top_k] = max_ids.squeeze(2)

                new_seq_probs = self.seq_prob_records * self.probs_in_current_step
                new_probs, new_max_ids = new_seq_probs.topk(top_k, dim=1)
                self.seq_prob_records = new_probs.unsqueeze(2).expand((this_batch_size, top_k, top_k)).flatten(1, 2)

                # Parent beam index of each survivor (integer division — the
                # original used true division, which yields floats).
                parents = (new_max_ids // top_k).unsqueeze(2)
                # Bug fix: prefixes must follow each survivor's parent beam,
                # and the new tokens must come from words_in_current_step (the
                # original gathered from the last inner-loop max_ids only).
                old_words = torch.gather(self.best_seqs[:, :, :t], 1, parents.expand(-1, -1, t))
                new_words = torch.gather(self.words_in_current_step, 1, new_max_ids).unsqueeze(2)
                self.best_seqs[:, :, :(t + 1)] = torch.cat((old_words, new_words), dim=2)

        # Pick the highest-probability beam per sentence.
        _, best_seq_idx = self.seq_prob_records.topk(1, dim=1)
        best_seq_idx = best_seq_idx // top_k
        best_seqs = torch.gather(self.best_seqs, 1, best_seq_idx.unsqueeze(1).expand(this_batch_size, 1, max_target_len))

        return best_seqs
