import torch
from torch import nn
import gxl_attention_module
from tokenize_vocab_word import GxlCharTokenizer
from gxl_attention_module import GxlMultiHeadAttention


class Encoder(nn.Module):
    """Base encoder interface for the encoder-decoder architecture."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def forward(self, X, *args):
        # Concrete subclasses must implement the actual encoding step.
        raise NotImplementedError


class Decoder(nn.Module):
    """Base decoder interface for the encoder-decoder architecture."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def init_state(self, enc_outputs, *args):
        # Subclasses derive the decoder's initial state from encoder outputs.
        raise NotImplementedError

    def forward(self, X, state):
        # Subclasses implement the actual decoding step.
        raise NotImplementedError


class EncoderDecoder(nn.Module):
    """Base class wiring an encoder and a decoder into one module."""

    def __init__(self, encoder, decoder, **kwargs):
        super().__init__(**kwargs)
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, enc_X, dec_X, *args):
        # Encode the source, derive the decoder's initial state from the
        # encoder outputs, then decode the target inputs against it.
        encoded = self.encoder(enc_X, *args)
        initial_state = self.decoder.init_state(encoded, *args)
        return self.decoder(dec_X, initial_state)


class Seq2SeqEncoder(Encoder):
    """GRU-based encoder for sequence-to-sequence learning."""

    def __init__(self, vocab_size, embed_size, num_layers,
                 dropout, **kwargs):
        super(Seq2SeqEncoder, self).__init__(**kwargs)
        # Token-id -> dense vector lookup table.
        self.embedding = nn.Embedding(vocab_size, embed_size)
        # Hidden size is deliberately equal to the embedding size.
        self.rnn = nn.GRU(embed_size, embed_size, num_layers,
                          dropout=dropout)

    def forward(self, X, *args):
        # X: (batch_size, num_steps) token ids. Build the padding mask
        # before X is overwritten by its embeddings.
        pad_mask = gxl_attention_module.get_padding_mask(
            X, GxlCharTokenizer.PAD_ID, -1)
        embedded = self.embedding(X)  # (batch, num_steps, embed_size)
        # The GRU expects time-major input: (num_steps, batch, embed_size).
        time_major = embedded.permute(1, 0, 2)
        # With no explicit initial state the GRU starts from zeros.
        output, state = self.rnn(time_major)
        # output: (num_steps, batch, num_hiddens)
        # state:  (num_layers, batch, num_hiddens)
        return output, state, pad_mask


class Seq2SeqDecoder(Decoder):
    """GRU-based decoder with multi-head attention over encoder outputs."""

    def __init__(self, vocab_size, embed_size, num_layers,
                 dropout, **kwargs):
        super(Seq2SeqDecoder, self).__init__(**kwargs)
        self.embedding = nn.Embedding(vocab_size, embed_size)
        # Each GRU input is the token embedding concatenated with the
        # attention context, hence 2 * embed_size input features.
        self.rnn = nn.GRU(embed_size + embed_size, embed_size, num_layers,
                          dropout=dropout)
        self.attention = GxlMultiHeadAttention(head_num=4, embedding_dim=embed_size,
                                               representative_dim=embed_size,
                                               dropout=dropout)
        self.dense = nn.Linear(embed_size, vocab_size)

    def forward(self, X, encode_outputs):
        # Embed target ids and go time-major: (num_steps, batch, embed_size).
        X = self.embedding(X).permute(1, 0, 2)
        # enc_out: (seq_e, batch, num_hiddens); enc_state: (num_layers, batch, num_hiddens)
        enc_out, enc_state, pad_mask = encode_outputs
        # Broadcast the per-source padding mask over every decoder step;
        # presumably pad_mask is (batch, seq_e) -> (batch, seq_x, seq_e).
        mask = pad_mask.unsqueeze(1).expand(-1, X.size(0), -1)
        # The attention module is batch-major, so permute in and back out.
        queries = X.permute(1, 0, 2)
        keys_values = enc_out.permute(1, 0, 2)
        context = self.attention(queries, keys_values, keys_values,
                                 mask)  # (batch, seq_x, num_hiddens)
        context = context.permute(1, 0, 2)  # (seq_x, batch, num_hiddens)
        rnn_input = torch.cat((X, context), 2)
        output, state = self.rnn(rnn_input, enc_state)
        # Each step's logits depend only on states from preceding steps.
        output = self.dense(output).permute(1, 0, 2)
        # output: (batch, num_steps, vocab_size)
        # state:  (num_layers, batch, num_hiddens)
        return output, state


class GxlChatRnnModel(nn.Module):
    """Top-level RNN-based seq2seq chat model (GRU encoder + attention decoder)."""

    def __init__(self, vocab_size, embed_size=252, heads_num=12, layers=2, dropout=0.0897):
        super().__init__()
        # NOTE(review): `heads_num` is accepted for interface compatibility
        # but is currently unused — the decoder's attention hard-codes its
        # own head count. Confirm whether it should be threaded through.
        self.encoder = Seq2SeqEncoder(vocab_size, embed_size=embed_size, num_layers=layers, dropout=dropout)
        self.decoder = Seq2SeqDecoder(vocab_size, embed_size=embed_size, num_layers=layers, dropout=dropout)

    def forward(self, enc_inputs, dec_inputs):
        """
        Run the full encode-attend-decode pipeline.

        enc_inputs: (batch_size, seq_length1) source token ids
        dec_inputs: (batch_size, seq_length2) target token ids
        Returns logits of shape (batch_size, seq_length2, vocab_size).
        """
        enc_outputs = self.encoder(enc_inputs)
        # Invoke the module (not .forward directly) so nn.Module hooks run.
        dec_outputs = self.decoder(dec_inputs, enc_outputs)
        # Discard the decoder hidden state; callers only need the logits.
        return dec_outputs[0]


if __name__ == '__main__':
    # Smoke test: push one random batch through the model and print the
    # logits shape. Local names avoid shadowing the builtin `input`.
    model = GxlChatRnnModel(3000)
    src_batch = torch.randint(1, 100, (100, 20))
    tgt_batch = torch.randint(1, 100, (100, 35))
    print(model(src_batch, tgt_batch).shape)
