import torch
import torch.nn as nn
from MaskedSoftmaxCELoss import MaskedSoftmaxCELoss

class Encoder(nn.Module):
    """RNN encoder: embeds a token-id sequence and runs it through a GRU."""

    def __init__(self, vocab_size, embed_size, hidden_size, num_layers, dropout=0.0):
        super(Encoder, self).__init__()
        # Token-id -> dense vector lookup table.
        self.embedding = nn.Embedding(vocab_size, embed_size)
        # Multi-layer GRU; expects time-major input (num_steps, batch, embed).
        self.rnn = nn.GRU(embed_size, hidden_size, num_layers, dropout=dropout)

    def forward(self, X):
        """Encode X of shape (batch_size, num_steps) holding token ids.

        Returns:
            output: (num_steps, batch_size, hidden_size) -- top-layer output
                at every time step.
            state:  (num_layers, batch_size, hidden_size) -- final hidden
                state of each layer.
        """
        # (batch, steps) -> (batch, steps, embed), then move the time axis
        # first: the RNN consumes one (batch, embed) slice per step.
        embedded = self.embedding(X).permute(1, 0, 2)
        # With no initial state argument, the GRU starts from zeros.
        output, state = self.rnn(embedded)
        return output, state


class Decoder(nn.Module):
    """RNN decoder that conditions every step on the encoder's final hidden state."""

    def __init__(self, vocab_size, embed_size, hidden_size, num_layers, dropout=0.0):
        super(Decoder, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_size)
        # Each step's GRU input is the token embedding concatenated with a
        # fixed context vector, hence embed_size + hidden_size input features.
        self.rnn = nn.GRU(embed_size + hidden_size, hidden_size, num_layers, dropout=dropout)
        # Projects hidden states to per-token vocabulary logits.
        self.dense = nn.Linear(hidden_size, vocab_size)

    def init_state(self, enc_outputs):
        """Pick the hidden state out of the encoder's (output, state) pair."""
        _, enc_state = enc_outputs
        return enc_state

    def forward(self, X, state):
        """Decode X of shape (batch_size, num_steps) starting from `state`.

        Returns:
            output: (batch_size, num_steps, vocab_size) logits.
            state:  (num_layers, batch_size, hidden_size) updated hidden state.
        """
        # (batch, steps) -> (steps, batch, embed): time-major for the GRU.
        embedded = self.embedding(X).permute(1, 0, 2)
        num_steps = embedded.shape[0]
        # Context = top layer of the incoming state, tiled over all steps:
        # (num_steps, batch_size, hidden_size).
        context = state[-1].repeat(num_steps, 1, 1)
        # Feed embedding + context together at every time step.
        rnn_input = torch.cat((embedded, context), 2)
        output, state = self.rnn(rnn_input, state)
        # Map to vocab logits and restore batch-first layout.
        output = self.dense(output).permute(1, 0, 2)
        return output, state


class Seq2Seq(nn.Module):
    """GRU encoder-decoder sequence-to-sequence model.

    Args:
        encoder_vocab_size: source-side vocabulary size.
        decoder_vocab_size: target-side vocabulary size.
        embed_size: embedding dimension for both sides.
        hidden_size: GRU hidden dimension for both sides.
        num_layers: number of stacked GRU layers.
        dropout: inter-layer GRU dropout probability.
    """

    def __init__(self, encoder_vocab_size, decoder_vocab_size, embed_size, hidden_size, num_layers, dropout=0.0):
        super(Seq2Seq, self).__init__()
        self.encoder = Encoder(encoder_vocab_size, embed_size, hidden_size, num_layers, dropout)
        self.decoder = Decoder(decoder_vocab_size, embed_size, hidden_size, num_layers, dropout)
        # Build the criterion once here instead of re-instantiating it on
        # every loss() call (the original constructed it per invocation).
        self.criterion = MaskedSoftmaxCELoss()

    def forward(self, enc_X, dec_X):
        """Encode enc_X, seed the decoder with the encoder state, decode dec_X.

        Returns the decoder's (output, state) pair, where output has shape
        (batch_size, num_steps, decoder_vocab_size).
        """
        enc_outputs = self.encoder(enc_X)
        dec_state = self.decoder.init_state(enc_outputs)
        return self.decoder(dec_X, dec_state)

    def loss(self, pred, label, valid_len):
        """Masked softmax cross-entropy, summed over the batch.

        `valid_len` masks out padding positions beyond each sequence's
        true length before the per-sequence losses are summed.
        """
        return self.criterion(pred, label, valid_len).sum()




