"""解码器"""
import torch.nn as nn
import torch

import config


class Decoder(nn.Module):
    """GRU decoder that greedily generates a fixed-length output sequence.

    Decoding always runs for ``config.max_len + 2`` steps (the dataset pads
    targets by one position and appends an EOS token, hence the +2) starting
    from the encoder's final hidden state. No teacher forcing is used: at
    every step the argmax of the previous step's output is fed back in.

    NOTE(review): assumes ``config.num_seq`` is a vocabulary object that
    supports ``len()`` and exposes integer ``PAD`` / ``SOS`` indices —
    confirm against the project's ``config`` module.
    """

    def __init__(self):
        super().__init__()
        self.embedding = nn.Embedding(num_embeddings=len(config.num_seq),
                                      embedding_dim=config.embedding_dim,
                                      padding_idx=config.num_seq.PAD)
        self.gru = nn.GRU(input_size=config.embedding_dim,
                          hidden_size=config.hidden_size,
                          num_layers=config.num_layers,
                          batch_first=True)
        # Projects each GRU output to vocabulary-sized logits.
        self.fc = nn.Linear(config.hidden_size, len(config.num_seq))

    def forward(self, encoder_hidden):
        """Greedy-decode ``config.max_len + 2`` steps from the encoder state.

        :param encoder_hidden: [num_layers, batch_size, hidden_size] final
            hidden state of the encoder, used as the decoder's initial state.
        :return: tuple ``(decoder_outputs, decoder_hidden)`` where
            ``decoder_outputs`` is [batch_size, max_len + 2, vocab_size]
            log-probabilities and ``decoder_hidden`` is
            [num_layers, batch_size, hidden_size].
        """
        # 1. The encoder's final hidden state seeds the decoder.
        decoder_hidden = encoder_hidden

        # 2. Batch size is the second dimension of the hidden state.
        batch_size = encoder_hidden.size(1)

        # 3. First input token for every sequence is SOS: [batch_size, 1].
        decoder_input = torch.full((batch_size, 1), config.num_seq.SOS,
                                   dtype=torch.long, device=config.device)

        # 4. Pre-allocate the per-step log-probability outputs.
        # Targets were padded by one position and given an EOS in the
        # dataset, so the decoder runs max_len + 2 steps.
        seq_len = config.max_len + 2
        decoder_outputs = torch.zeros(batch_size, seq_len, len(config.num_seq),
                                      device=config.device)
        for t in range(seq_len):
            # decoder_output_t: [batch_size, vocab_size] log-probabilities
            # decoder_hidden:   [num_layers, batch_size, hidden_size]
            decoder_output_t, decoder_hidden = self.forward_step(decoder_input, decoder_hidden)

            # Store this step's distribution.
            decoder_outputs[:, t, :] = decoder_output_t

            # Greedy decoding: the most likely token becomes the next input.
            _, decoder_input = torch.topk(decoder_output_t, 1)

        # decoder_outputs: [batch_size, max_len + 2, vocab_size]
        return decoder_outputs, decoder_hidden

    def forward_step(self, decoder_input, decoder_hidden):
        """Run a single decoding step.

        :param decoder_input: [batch_size, 1] token indices for this step.
        :param decoder_hidden: [num_layers, batch_size, hidden_size].
        :return: tuple ``(out, hidden)`` where ``out`` is
            [batch_size, vocab_size] log-probabilities and ``hidden`` is
            [num_layers, batch_size, hidden_size].
        """
        # embed: [batch_size, 1, embedding_dim] — one token per sequence.
        embed = self.embedding(decoder_input)

        # out:    [batch_size, 1, hidden_size]
        # hidden: [num_layers, batch_size, hidden_size]
        out, hidden = self.gru(embed, decoder_hidden)

        # Project to vocabulary logits: [batch_size, 1, vocab_size].
        out = self.fc(out)

        # Drop the singleton time dimension: [batch_size, vocab_size].
        out = out.squeeze(1)

        # Log-probabilities over the vocabulary (pairs with NLLLoss).
        out = nn.functional.log_softmax(out, dim=-1)

        return out, hidden

    def evaluate(self, encoder_hidden):
        """Greedy-decode for inference, returning token indices per step.

        :param encoder_hidden: [num_layers, batch_size, hidden_size] final
            hidden state of the encoder.
        :return: list of ``config.max_len + 2`` numpy arrays, each of shape
            [batch_size], holding the predicted token index at that step.
        """
        # 1. Seed the decoder with the encoder's final hidden state.
        decoder_hidden = encoder_hidden

        # 2. Number of sequences in the batch.
        batch_size = encoder_hidden.size(1)

        # 3. Start every sequence with SOS: [batch_size, 1].
        decoder_input = torch.full((batch_size, 1), config.num_seq.SOS,
                                   dtype=torch.long, device=config.device)

        # 4. Predict step by step.
        decoder_outputs = []
        for _ in range(config.max_len + 2):
            # decoder_output_t: [batch_size, vocab_size]
            decoder_output_t, decoder_hidden = self.forward_step(decoder_input, decoder_hidden)

            # Greedy argmax over the vocabulary: [batch_size, 1].
            index = torch.topk(decoder_output_t, 1)[1]

            # Feed the prediction back as the next input.
            decoder_input = index

            # Reuse the argmax already computed above instead of calling
            # torch.max a second time; squeeze to [batch_size].
            decoder_outputs.append(index.squeeze(-1).cpu().detach().numpy())

        # decoder_outputs: list of max_len + 2 arrays, each [batch_size]
        return decoder_outputs


