import torch
from torch.autograd import Variable
from utils import top_k_top_p_filtering
import torch.nn.functional as F
import numpy as np
class Predictor(object):

    def __init__(self, model, src_vocab, tgt_vocab, device):
        """
        Predictor class to evaluate a given model.

        Args:
            model (seq2seq.models): trained model. This can be loaded from a checkpoint
                using `seq2seq.util.checkpoint.load`
            src_vocab (seq2seq.dataset.vocabulary.Vocabulary): source sequence vocabulary
            tgt_vocab (seq2seq.dataset.vocabulary.Vocabulary): target sequence vocabulary
            device (torch.device): device that inputs are moved to before the
                model is called
        """
        self.model = model
        self.device = device
        # Inference only: switch off dropout / batch-norm updates.
        self.model.eval()
        self.src_vocab = src_vocab
        self.tgt_vocab = tgt_vocab

    def get_decoder_features(self, src_seq):
        """Run the model on `src_seq` and return its raw per-step outputs.

        Args:
            src_seq (list): list of tokens in the source language.

        Returns:
            Whatever the model returns for the (1, len(src_seq)) id tensor,
            computed under `torch.no_grad()`.
        """
        src_id_seq = torch.LongTensor(
            [self.src_vocab[tok] for tok in src_seq]
        ).view(1, -1).to(self.device)

        with torch.no_grad():
            softmax_list = self.model(src_id_seq)

        return softmax_list

    def predict(self, src_seq, unk_token=0, eos_token=3, max_length=100):
        """ Make prediction given `src_seq` as input.

        Samples one token per decoder step: the step distribution is softmaxed,
        already-generated ids are down-weighted by a repetition penalty, the
        unknown token is masked out, and the result is top-k=1 filtered before
        sampling (so the surviving highest-probability token is chosen).

        Args:
            src_seq (list): list of tokens in source language
            unk_token (int): id of the unknown token; masked so it is never
                generated.
            eos_token (int): id of the end-of-sequence token; generation stops
                once it is produced.
            max_length (int): maximum number of tokens to generate
                (default 100, matching the original fixed buffer size).

        Returns:
            tgt_seq (list): list of tokens in target language as predicted
            by the pre-trained model
        """
        other = self.get_decoder_features(src_seq)

        length = 0
        # Fixed-size id buffer; the loop below is capped at max_length steps
        # so indexing stays in bounds.  np.int was removed in NumPy 1.24, so
        # use the explicit np.int64 dtype.
        decoded_words = np.zeros((1, max_length), dtype=np.int64)
        repetition_penalty = 2.0
        temperature = 1.0
        generated = []
        pre = []
        for di, decoder_di in enumerate(other):
            if di >= max_length:
                # Buffer full — stop generating instead of overrunning it.
                break
            decoder_di = F.softmax(decoder_di, dim=-1)
            # Penalize every token already generated to discourage loops.
            for _id in generated:
                decoder_di[:, _id] /= repetition_penalty
            decoder_di = decoder_di / temperature
            # Never emit the unknown token.
            decoder_di[:, unk_token] = -float('Inf')
            filtered_logits = top_k_top_p_filtering(decoder_di[0], top_k=1, top_p=0)
            next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
            generated.append(next_token.item())
            ni = next_token.squeeze().data.cpu().numpy()
            decoded_words[:, di] = ni
            length += 1
            # Generation ends at the end-of-sequence token ([SEP]).
            # (Was hard-coded as 3, ignoring the eos_token argument.)
            if next_token.item() == eos_token:
                break
        sample_words = decoded_words[0]
        for idx in range(length):
            word = sample_words[idx]
            pre.append(self.tgt_vocab[word])
            if word == eos_token:
                break
        return pre

    def predict_n(self, src_seq, n=1):
        """ Make 'n' predictions given `src_seq` as input.

        NOTE(review): this expects `get_decoder_features` to return a dict
        with 'topk_length' / 'topk_sequence' entries (beam-search style),
        whereas `predict` iterates per-step tensors — confirm the model
        actually supports both output shapes.

        Args:
            src_seq (list): list of tokens in source language
            n (int): number of predicted seqs to return. If None,
                     it will return just one seq.

        Returns:
            tgt_seq (list): list of tokens in target language as predicted
                            by the pre-trained model
        """
        other = self.get_decoder_features(src_seq)

        result = []
        for x in range(0, n):
            length = other['topk_length'][0][x]
            tgt_id_seq = [other['topk_sequence'][di][0][x].data[0] for di in range(length)]
            tgt_seq = [self.tgt_vocab[tok.tolist()] for tok in tgt_id_seq]
            result.append(tgt_seq)

        return result
