import torch
import torch.nn as nn
from pandas.io.formats.info import series_sub_kwargs

import config


class TranslationEncoder(nn.Module):
    """Bidirectional GRU encoder that compresses a source sentence into one
    fixed-size context vector.

    The context vector is the concatenation of the top layer's final forward
    and backward hidden states, so its width is 2 * config.ENCODER_HIDDEN_SIZE.
    NOTE(review): the decoder presumably expects DECODER_HIDDEN_SIZE ==
    2 * ENCODER_HIDDEN_SIZE — confirm against config.
    """

    def __init__(self, vocab_size, padding_index):
        """
        :param vocab_size: size of the source vocabulary.
        :param padding_index: token id whose embedding is kept at zero (padding).
        """
        super().__init__()
        self.embedding = nn.Embedding(
            num_embeddings=vocab_size,
            embedding_dim=config.EMBEDDING_DIM,
            padding_idx=padding_index,
        )
        self.gru = nn.GRU(
            input_size=config.EMBEDDING_DIM,
            hidden_size=config.ENCODER_HIDDEN_SIZE,
            num_layers=config.ENCODER_LAYERS,
            batch_first=True,
            bidirectional=True,
        )

    def forward(self, x):
        """Encode a batch of token-id sequences into context vectors.

        :param x: LongTensor of shape [batch_size, seq_len].
        :return: context vector of shape [batch_size, 2 * hidden_size].
        """
        embedded = self.embedding(x)  # [batch_size, seq_len, embedding_dim]
        _, hidden = self.gru(embedded)
        # hidden: [num_layers * 2, batch_size, hidden_size]; for a
        # bidirectional GRU the last two rows are the top layer's final
        # forward and backward states.
        forward_state = hidden[-2]
        backward_state = hidden[-1]
        return torch.cat((forward_state, backward_state), dim=1)


class TranslationDecoder(nn.Module):
    """Single-step GRU decoder: consumes one target token per call and emits
    vocabulary logits together with the updated hidden state."""

    def __init__(self, vocab_size, padding_index):
        """
        :param vocab_size: size of the target vocabulary.
        :param padding_index: token id whose embedding is kept at zero (padding).
        """
        super().__init__()
        self.embedding = nn.Embedding(
            num_embeddings=vocab_size,
            embedding_dim=config.EMBEDDING_DIM,
            padding_idx=padding_index,
        )
        self.gru = nn.GRU(
            input_size=config.EMBEDDING_DIM,
            hidden_size=config.DECODER_HIDDEN_SIZE,
            batch_first=True,
        )
        self.linear = nn.Linear(
            in_features=config.DECODER_HIDDEN_SIZE,
            out_features=vocab_size,
        )

    def forward(self, x, hidden_0):
        """Run one decoding step (x carries a single token, seq_len == 1).

        :param x: LongTensor of shape [batch_size, 1] — the current token ids.
        :param hidden_0: previous hidden state, [1, batch_size, hidden_size].
        :return: tuple (logits, hidden_n) where logits has shape
            [batch_size, 1, vocab_size] and hidden_n has shape
            [1, batch_size, hidden_size].
        """
        embedded = self.embedding(x)  # [batch_size, 1, embedding_dim]
        step_out, hidden_n = self.gru(embedded, hidden_0)
        # step_out: [batch_size, 1, hidden_size] -> project to vocab logits.
        logits = self.linear(step_out)
        return logits, hidden_n
