import torch
import torch.nn.functional as F
from config import Config


class SampleDecoderWrapper:
    """Autoregressive decoding loop (greedy or top-k sampling) around a decoder.

    The wrapped ``decoder`` is expected to provide (inferred from usage in this
    class — confirm against the decoder implementation):
      - ``vocab``: token -> id mapping
      - ``embedding``: token-id tensor -> embedding tensor
      - ``generate_init_cell_state`` / ``init_pre_output``: initial recurrent
        state and initial (BOS) decoder input
      - ``one_step``: run one decoding step, returning ``(state, dist)`` where
        ``state.alignments`` holds the step's attention weights
    """

    def __init__(self, decoder, trunc_output=True):
        """
        Args:
            decoder: project decoder module (see class docstring for the
                attributes/methods this wrapper relies on).
            trunc_output: if True, ``finalize`` returns per-sample tensors
                truncated to their valid lengths; otherwise it returns one
                [batch, steps] tensor padded with ``pad_id``.
        """
        self.decoder = decoder
        self.pad_id = decoder.vocab[Config.PAD_TOKEN]
        self.bos_id = decoder.vocab[Config.BOS_TOKEN]
        self.eos_id = decoder.vocab[Config.EOS_TOKEN]
        # 1 -> greedy argmax; k > 1 -> sample among the k best tokens;
        # <= 0 -> sample from the full softmax distribution.
        self.sampling_top_k = 1
        self.max_iter = Config.MAX_NL_LENGTH
        self.trunc_output = trunc_output

    def sample_from_dist(self, dist):
        """Choose the next token for every batch element from ``dist``.

        Args:
            dist: [batch_size, vocab_size] unnormalized token scores.
        Returns:
            (ids, scores), each of shape [batch_size, 1].
            NOTE(review): the greedy path returns raw scores taken from
            ``dist`` while the sampling path returns softmax probabilities —
            scores from the two modes are not directly comparable.
        """
        if self.sampling_top_k == 1:
            # Greedy decoding: plain argmax over the vocabulary.
            top_k_scores, top_k_ids = dist.topk(1, dim=-1)
            return top_k_ids, top_k_scores

        if self.sampling_top_k > 0:
            # Keep only the k highest-scoring tokens: everything strictly
            # below the k-th best score is pushed to -inf before softmax.
            # The [batch, 1] threshold broadcasts against [batch, vocab_size],
            # so no explicit repeat over the vocabulary is needed.
            top_values, _ = torch.topk(dist, self.sampling_top_k, dim=1)  # [batch, k]
            kth_best = top_values[:, -1].unsqueeze(1)  # [batch, 1]
            dist = dist.masked_fill(dist < kth_best, float('-inf'))

        dist = F.softmax(dist, dim=1)  # [batch, vocab_size]
        # A single Multinomial draw with total_count=1 is a one-hot sample;
        # argmax recovers the sampled token id.
        m = torch.distributions.Multinomial(probs=dist, total_count=1)
        top_k_ids = torch.argmax(m.sample(), dim=1, keepdim=True)  # [batch, 1]
        top_k_scores = dist.gather(dim=1, index=top_k_ids)  # [batch, 1]
        return top_k_ids, top_k_scores

    def initialize(self, encoder_final_hidden, encoder_final_cell):
        """Build the initial decoder input/state and per-sample bookkeeping.

        Returns:
            batch_bos: initial decoder input (BOS batch from the decoder).
            init_state: initial decoder recurrent state.
            finished: [batch] bool, True once a sample has emitted EOS.
            lengths: [batch] int, running valid length (counts the EOS token).
        """
        init_state = self.decoder.generate_init_cell_state(encoder_final_hidden, encoder_final_cell)
        batch_bos = self.decoder.init_pre_output()
        # TODO(review): Config.BATCH_SIZE must equal the actual batch size —
        # a smaller final batch would break the element-wise ops in decode().
        # These tensors are created on CPU; confirm the decoder also runs on
        # CPU, or move them to the decoder's device.
        finished = torch.zeros(Config.BATCH_SIZE, dtype=torch.bool)
        lengths = torch.ones(Config.BATCH_SIZE, dtype=torch.int)  # include EOS
        return batch_bos, init_state, finished, lengths

    def finalize(self, outputs, scores, attn_history, lengths):
        """Stack per-step results and truncate/pad them to each sample's length.

        Args:
            outputs, scores: list (one entry per step) of [batch] tensors.
            attn_history: list (one entry per step) of attention tensors.
            lengths: [batch] valid lengths (including EOS).
        Returns:
            (outputs, scores, attn_history). When ``self.trunc_output`` is
            True, ``outputs`` and ``attn_history`` are lists of variable-length
            per-sample tensors; otherwise ``outputs`` is one [batch, steps]
            tensor with positions past the valid length filled with ``pad_id``.
        """
        steps = len(outputs)
        outputs, scores = torch.stack(outputs, 1), torch.stack(scores, 1)  # [batch, steps]
        # BUGFIX: decode() starts lengths at 1 and increments once per step,
        # so samples that never emit EOS end up with lengths == steps + 1.
        # Clamp to the number of steps actually decoded; otherwise the pad
        # mask below would be wider than ``outputs`` and masked_fill would
        # fail with a shape mismatch.
        lengths = torch.clamp(lengths, max=steps)
        if self.trunc_output:
            # Cut every sample down to its valid length.
            batch_indices = range(outputs.shape[0])
            outputs = [outputs[batch_index, :lengths[batch_index]] for batch_index in batch_indices]
            attn_history = torch.stack(attn_history, 1)  # [batch, steps, ...]
            attn_history = [attn_history[batch_index, :lengths[batch_index], ...] for batch_index in batch_indices]
        else:
            # Replace everything past each sample's valid length with PAD.
            # Build the mask over the full step dimension so its shape always
            # matches ``outputs`` exactly.
            row = torch.arange(outputs.shape[1], dtype=lengths.dtype, device=lengths.device)
            col = lengths.unsqueeze(-1)
            mask = torch.lt(row, col)  # [batch, steps], True on valid positions
            outputs = outputs.masked_fill(~mask, self.pad_id)
            # NOTE(review): in this branch attn_history is returned as the raw
            # per-step list (it is only stacked when trunc_output is True).
        return outputs, scores, attn_history

    def decode(self, enc_final_hidden, enc_final_cell, encoder_outputs, src_batch, src_lengths):
        """Run the sampling loop until every sample emits EOS or max_iter hits.

        Inputs:
            enc_final_hidden, enc_final_cell: encoder final states.
            encoder_outputs: per-position encoder outputs (for attention).
            src_batch, src_lengths: source tokens and their valid lengths.
        Outputs:
            (outputs, scores, attn_history) — see ``finalize``.
        """
        prev_predicts, prev_state, finished, lengths = self.initialize(enc_final_hidden, enc_final_cell)
        # prev_predicts: previous step's token ids (the BOS batch at step 0)
        # finished: [batch] completion flags; lengths: [batch] valid lengths
        outputs, scores, attn_history = [], [], []

        for step in range(self.max_iter):
            embed = self.decoder.embedding(prev_predicts)
            # The BOS input presumably arrives as [batch, 1] — TODO confirm
            # against init_pre_output(); squeezing the step dimension makes
            # one_step always see [batch, embedding_size].
            embed = embed.squeeze(1)
            state, dist = self.decoder.one_step(embed,
                                                prev_state,
                                                encoder_outputs,
                                                src_batch,
                                                src_lengths)

            predicts, score = self.sample_from_dist(dist=dist)  # [batch, 1] each
            predicts, score = predicts.squeeze(1), score.squeeze(1)  # [batch] each

            finished |= predicts.eq(self.eos_id)  # a sample is done once it emits EOS
            # Reserve room for the next token for samples that are still
            # running (the initial 1 in ``lengths`` accounted for this token).
            lengths += (~finished).int()

            outputs.append(predicts)
            scores.append(score)
            attn_history.append(state.alignments)

            if finished.all():
                break

            prev_predicts, prev_state = predicts, state

        # outputs / scores / attn_history: lists with one per-step entry each
        return self.finalize(outputs, scores, attn_history, lengths)
