"""
Decoder implementation: GRU decoder with attention for the chatbot seq2seq model.
"""
import torch.nn as nn
import torch.nn.functional as F
import config
import torch
import random
from chatbot.attention import Attention


class Decoder(nn.Module):
    """GRU decoder with attention for the chatbot seq2seq model.

    Seeds its hidden state from the encoder's final hidden state and emits
    log-probabilities over the target vocabulary one token per time step,
    attending over the encoder outputs at each step.
    """

    def __init__(self):
        super(Decoder, self).__init__()
        # Target-side embedding; PAD embeddings stay zero and get no gradient.
        self.embedding = nn.Embedding(num_embeddings=len(config.chatbot_ws_target),
                                      embedding_dim=config.chatbot_embedding_dim,
                                      padding_idx=config.chatbot_ws_target.PAD)
        self.gru = nn.GRU(input_size=config.chatbot_embedding_dim,
                          hidden_size=config.chatbot_decoder_hidden_size,
                          num_layers=config.chatbot_decoder_num_layers,
                          batch_first=True)
        # Projects the attention-combined state to vocabulary logits.
        self.fc = nn.Linear(config.chatbot_decoder_hidden_size, len(config.chatbot_ws_target))
        self.attn = Attention()
        # Combines [decoder state ; context vector] back down to decoder hidden size.
        self.Wa = nn.Linear(config.chatbot_decoder_hidden_size + config.chatbot_encoder_hidden_size,
                            config.chatbot_decoder_hidden_size, bias=False)

    def forward(self, target, encoder_hidden, encoder_outputs):
        """Decode a full target sequence (training / validation).

        :param target: [batch_size, max_len + 1] ground-truth token ids
            (the +1 comes from the EOS appended by add_eos)
        :param encoder_hidden: encoder's final hidden state, used as the
            decoder's initial hidden state
        :param encoder_outputs: per-step encoder outputs attended over
        :return: (decoder_outputs [batch_size, max_len + 1, vocab_size],
                  decoder_hidden)
        """
        decoder_hidden = encoder_hidden
        batch_size = target.size(0)
        # First input is an SOS token for every sequence: [batch_size, 1].
        # (Replaces the original redundant LongTensor(ones(...) * SOS) construction.)
        decoder_input = torch.full([batch_size, 1], config.chatbot_ws_target.SOS,
                                   dtype=torch.int64, device=config.device)
        decoder_outputs = torch.zeros(
            [batch_size, config.chatbot_target_max_len + 1, len(config.chatbot_ws_target)],
            device=config.device)
        # NOTE(review): teacher forcing fires when random() > ratio, i.e. with
        # probability (1 - chatbot_teacher_forcing_radio). The usual convention
        # is `random() < ratio` -- confirm the config value means what you think.
        use_teacher_forcing = random.random() > config.chatbot_teacher_forcing_radio
        for t in range(config.chatbot_target_max_len + 1):  # +1 because add_eos appended a token
            decoder_output_t, decoder_hidden = self.forward_step(
                decoder_input, decoder_hidden, encoder_outputs)
            decoder_outputs[:, t, :] = decoder_output_t
            if use_teacher_forcing:
                # Feed the ground-truth token as the next input.
                decoder_input = target[:, t].unsqueeze(-1)
            else:
                # Feed the model's own best guess as the next input.
                value, index = torch.topk(decoder_output_t, 1)
                decoder_input = index
        return decoder_outputs, decoder_hidden

    def forward_step(self, decoder_input, decoder_hidden, encoder_outputs):
        """Run a single decoding time step.

        :param decoder_input: [batch_size, 1] token ids for this step
        :param decoder_hidden: current GRU hidden state
        :param encoder_outputs: encoder outputs attended over
        :return: (log-probabilities [batch_size, vocab_size], new decoder_hidden)
        """
        decoder_input_embeded = self.embedding(decoder_input)  # [batch_size, 1, embedding_dim]
        out, decoder_hidden = self.gru(decoder_input_embeded, decoder_hidden)

        # --- attention: blend encoder outputs into the current state ---
        attention_weight = self.attn(decoder_hidden, encoder_outputs).unsqueeze(1)
        context_vector = attention_weight.bmm(encoder_outputs)
        concated = torch.cat([out, context_vector], dim=-1).squeeze(1)
        out = torch.tanh(self.Wa(concated))
        # --- attention end ---
        output = F.log_softmax(self.fc(out), dim=-1)
        return output, decoder_hidden

    def evalute(self, encoder_hidden, encoder_outputs=None):
        """Greedy decoding for inference (no teacher forcing).

        BUG FIX: the original called forward_step without encoder_outputs,
        which unconditionally raised TypeError -- the attention step needs
        the encoder outputs. They are now accepted as a parameter (with a
        None default so the old one-argument call signature still parses;
        such calls must be updated to pass encoder_outputs).

        :param encoder_hidden: encoder's final hidden state
        :param encoder_outputs: per-step encoder outputs for attention
        :return: list of [batch_size] numpy index arrays, one per time step
            (shape overall: [max_len + 5, batch_size])
        """
        decoder_hidden = encoder_hidden
        batch_size = encoder_hidden.size(1)
        decoder_input = torch.full([batch_size, 1], config.chatbot_ws_target.SOS,
                                   dtype=torch.int64, device=config.device)
        indices = []

        # Decode a few steps past max_len to give EOS a chance to appear.
        for i in range(config.chatbot_target_max_len + 5):
            decoder_output_t, decoder_hidden = self.forward_step(
                decoder_input, decoder_hidden, encoder_outputs)
            value, index = torch.topk(decoder_output_t, 1)
            decoder_input = index

            indices.append(index.squeeze(-1).cpu().detach().numpy())
        return indices  # [max_len, batch_size]
