import random

import torch
import torch.nn as nn
import torch.nn.functional as f
import seq2seq.config as config


class Decoder(nn.Module):
    """GRU decoder for the seq2seq model.

    Consumes the encoder's final hidden state and generates the target
    sequence one token at a time, using teacher forcing during training
    (``forward``) and greedy decoding at inference time (``evaluate``).
    """

    def __init__(self):
        super(Decoder, self).__init__()
        # Embedding over the target vocabulary; PAD rows are zeroed and
        # receive no gradient.
        self.embedding = nn.Embedding(num_embeddings=len(config.num_sequence), embedding_dim=config.embedding_dim,
                                      padding_idx=config.num_sequence.PAD)
        self.gru = nn.GRU(input_size=config.embedding_dim, batch_first=True, num_layers=config.num_layers,
                          hidden_size=config.hidden_size)
        # Projects a GRU hidden state to vocabulary-sized logits.
        self.fc = nn.Linear(config.hidden_size, len(config.num_sequence))

    def forward(self, target, encoder_hidden):
        """Run teacher-forced decoding over the whole target sequence.

        :param target: [batch_size, seq_len] ground-truth token ids
                       (seq_len is assumed >= config.max_len + 2 -- the
                       dataset pads/adds EOS; confirm against the dataset)
        :param encoder_hidden: encoder's final hidden state, used to
                               initialize the decoder's hidden state
        :return: (decoder_outputs [batch_size, max_len + 2, vocab_size]
                  of log-probabilities, final decoder_hidden)
        """
        # The encoder's output hidden state is the decoder's initial state.
        decoder_hidden = encoder_hidden
        batch_size = target.size(0)
        # First time-step input: a column of SOS tokens, [batch_size, 1].
        decoder_input = torch.full([batch_size, 1], config.num_sequence.SOS, dtype=torch.int64).to(config.device)

        # Pre-allocate the per-step log-probability outputs.
        decoder_outputs = torch.zeros([batch_size, config.max_len + 2, len(config.num_sequence)]).to(config.device)
        for t in range(config.max_len + 2):  # +2 because the dataset adds one step plus EOS
            decoder_output_t, decoder_hidden = self.forward_step(decoder_input, decoder_hidden)
            decoder_outputs[:, t, :] = decoder_output_t  # dim 1 of decoder_outputs is the time step t
            # NOTE(review): this condition is inverted relative to the usual
            # "random < ratio => teacher-force" convention; here the ratio is
            # effectively the probability of feeding back the model's own
            # prediction -- confirm that is intended.
            if random.random() > config.teacher_forcing_ratio:
                # Teacher forcing: feed the ground-truth token for step t.
                # BUG FIX: the original `target[t]` indexed the *batch*
                # dimension (the t-th sample's whole sequence); we must slice
                # the time dimension and keep shape [batch_size, 1].
                decoder_input = target[:, t].unsqueeze(-1)
            else:
                # Otherwise feed back the model's own most likely token.
                value, index = torch.topk(decoder_output_t, 1)  # greedy argmax over the vocab
                decoder_input = index  # [batch_size, 1]
        return decoder_outputs, decoder_hidden

    def forward_step(self, decoder_input, decoder_hidden):
        """Run a single decoding time step.

        :param decoder_input: [batch_size, 1] token ids for this step
        :param decoder_hidden: [num_layers, batch_size, hidden_size]
        :return: ([batch_size, vocab_size] log-probabilities, new hidden)
        """
        decoder_input_embedded = self.embedding(decoder_input)  # [batch_size,1] -> [batch_size,1,embedding_dim]
        # batch_first=True, so out is [batch_size, 1, hidden_size]
        out, decoder_hidden = self.gru(decoder_input_embedded, decoder_hidden)
        out = out.squeeze(1)  # drop the length-1 time dimension -> [batch_size, hidden_size]
        output = f.log_softmax(self.fc(out), dim=-1)  # [batch_size, vocab_size]
        return output, decoder_hidden

    def evaluate(self, encoder_hidden):
        """Greedy-decode a batch without teacher forcing.

        :param encoder_hidden: encoder's final hidden state,
                               [num_layers, batch_size, hidden_size]
        :return: list of per-step numpy arrays of predicted token ids
        """
        decoder_hidden = encoder_hidden
        batch_size = encoder_hidden.size(1)
        # Start every sequence with an SOS token, [batch_size, 1].
        decoder_input = torch.full([batch_size, 1], config.num_sequence.SOS, dtype=torch.int64).to(config.device)
        indices = []
        # No early break on EOS: the whole batch is decoded for a fixed
        # number of steps, so sequences are truncated/padded downstream.
        for i in range(config.max_len + 5):
            decoder_output_t, decoder_hidden = self.forward_step(decoder_input, decoder_hidden)
            value, index = torch.topk(decoder_output_t, 1)  # greedy choice per sample
            decoder_input = index
            indices.append(index.squeeze(-1).cpu().detach().numpy())
        return indices
