import torch
from torch import nn
import gxl_attention_module
from tokenize_vocab import GxlCharTokenizer


class GxlAttentionEncoder(nn.Module):
    """Attention-based encoder for sequence-to-sequence learning.

    Composed of:
      * a word-embedding layer plus a learned absolute position-embedding layer
      * ``layer_num`` self-attention blocks (each block applies a residual
        connection and a feed-forward sub-layer, per ``gxl_attention_module``)
      * a final LayerNorm over the embedding dimension

    Example:
        model = GxlAttentionEncoder(6000)
        input = torch.randint(0, 100, (100, 20))
        input[2] = torch.tensor([1])
        print(input.shape)
        print(model(input).shape)
        >torch.Size([100, 20])
        >torch.Size([100, 20, 252])
    """

    def __init__(self, vocab_size, head_num=12, embed_size=252, layer_num=2,
                 dropout=0.081024, **kwargs):
        super(GxlAttentionEncoder, self).__init__(**kwargs)
        # Token embedding and learned absolute position embedding. The
        # position table has 1024 rows, i.e. sequences are assumed <= 1024.
        self.word_embedding = nn.Embedding(vocab_size, embed_size)
        self.position_embedding = nn.Embedding(1024, embedding_dim=embed_size)
        self.dropout = nn.Dropout(p=dropout, inplace=False)
        layers = [
            gxl_attention_module.GxlMultiHeadAttention(head_num, embed_size, dropout=dropout)
            for _ in range(layer_num)
        ]
        self.self_attention_block = nn.ModuleList(layers)
        self.ln_f = nn.LayerNorm(embed_size, eps=1e-05, elementwise_affine=True)

    def forward(self, input_ids):
        """Encode ``input_ids`` (batch, seq) into (batch, seq, embed_size)."""
        # Build the padding mask once; every self-attention layer reuses it.
        pad_id = GxlCharTokenizer.PAD_ID
        pad_mask = gxl_attention_module.get_padding_mask(input_ids, pad_id).to(input_ids.device)
        # Token embedding: (batch, seq) -> (batch, seq, embed_size)
        token_emb = self.word_embedding(input_ids)
        batch, seq_len, dim = token_emb.size()
        # Position embedding is batch-independent: embed positions 0..seq-1
        # once, then broadcast the result over the batch dimension.
        positions = torch.arange(seq_len, device=token_emb.device)
        pos_emb = self.position_embedding(positions)[None, :, :].expand(batch, seq_len, dim)
        # Sum the two embeddings and apply dropout.
        hidden = self.dropout(token_emb + pos_emb)
        # Run the self-attention stack; shape stays (batch, seq, embed_size).
        for layer in self.self_attention_block:
            hidden = layer(hidden, hidden, hidden, pad_mask)
        # Final layer normalization over the last (embedding) dimension.
        return self.ln_f(hidden)


class GxlAttentionDecoder(nn.Module):
    """Attention-only decoder.

    Layers:
      * word embedding + learned absolute position embedding
      * ``layer_num`` masked self-attention blocks (causal mask & padding mask)
      * one cross-attention block over the encoder output
      * a feed-forward network projecting to vocabulary logits

    A residual connection wraps the whole self-attention stack and the
    cross-attention; each stage is followed by a LayerNorm.
    """

    def __init__(self, vocab_size, embed_size=256, head_num=4, layer_num=1, dropout=0.0897):
        super(GxlAttentionDecoder, self).__init__()
        self.word_embedding = nn.Embedding(vocab_size, embed_size)
        # Learned absolute positions; assumes sequences no longer than 1024.
        self.position_embedding = nn.Embedding(1024, embed_size)
        # Masked self-attention stack.
        self.self_attention_block = nn.ModuleList([
            gxl_attention_module.GxlMultiHeadAttention(head_num, embed_size, dropout=dropout)
            for _ in range(layer_num)
        ])
        # Encoder-decoder cross-attention.
        self.encoder_attention = gxl_attention_module.GxlMultiHeadAttention(head_num, embed_size, dropout=dropout)
        # Feed-forward network producing vocabulary logits.
        self.feedforward = gxl_attention_module.GxlOneConvMlp(input_dim=embed_size, dropout=dropout,
                                                              output_dim=vocab_size)
        self.dropout = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(embed_size)
        self.norm2 = nn.LayerNorm(embed_size)
        self.norm3 = nn.LayerNorm(vocab_size)

    def forward(self, x, encoder_output, encoder_padding_mask_without_broadcast):
        """Decode target ids ``x`` (batch, seq) against ``encoder_output``.

        ``encoder_padding_mask_without_broadcast`` is the encoder-side padding
        mask before broadcasting; it is expanded here over the decoder's
        time axis for cross-attention. Returns (batch, seq, vocab_size).
        """
        # Self-attention mask = causal (future-blind) mask AND padding mask.
        pad_mask = gxl_attention_module.get_padding_mask(x, GxlCharTokenizer.PAD_ID).to(x.device)
        causal_mask = gxl_attention_module.get_seq_mask(x.shape[1], x.shape[0]).to(x.device)
        self_mask = causal_mask & pad_mask
        # Broadcast the encoder padding mask across the decoder time axis.
        cross_mask = encoder_padding_mask_without_broadcast.unsqueeze(1).expand(-1, x.shape[1], -1)

        # Token + position embeddings (position part is batch-independent,
        # broadcast over the batch dimension).
        word_emb = self.word_embedding(x)
        batch, seq_len, dim = word_emb.shape
        pos_emb = self.position_embedding(
            torch.arange(seq_len, device=word_emb.device))[None, :, :].expand(batch, seq_len, dim)
        x = word_emb + pos_emb

        # Residual connection around the entire self-attention stack.
        residual = x
        for layer in self.self_attention_block:
            x = layer(x, x, x, self_mask)
        x = self.norm1(residual + self.dropout(x))

        # Cross-attention against the encoder output, with residual + norm.
        cross_out = self.encoder_attention(x, encoder_output, encoder_output,
                                           mask=cross_mask)
        x = self.norm2(x + self.dropout(cross_out))

        # Feed-forward projection to vocabulary logits, then final norm.
        x = self.norm3(self.dropout(self.feedforward(x)))
        return x


class GxlChatAttentionModel(nn.Module):
    """Full attention-based seq2seq model: encoder + decoder.

    The encoder turns source token ids into hidden states; the decoder
    attends over them (cross-attention) to produce vocabulary logits.
    """

    def __init__(self, vocab_size, embed_size=252, heads_num=4, layers=1, dropout=0.0897):
        super(GxlChatAttentionModel, self).__init__()
        self.encoder = GxlAttentionEncoder(vocab_size, embed_size=embed_size, head_num=heads_num, layer_num=layers,
                                           dropout=dropout)
        self.decoder = GxlAttentionDecoder(vocab_size, embed_size=embed_size, head_num=heads_num, layer_num=layers,
                                           dropout=dropout)

    def forward(self, enc_inputs, dec_inputs):
        """Run the encoder-decoder pass.

        Args:
            enc_inputs: (batch_size, seq_length1) source token ids.
            dec_inputs: (batch_size, seq_length2) target token ids.

        Returns:
            Decoder logits of shape (batch_size, seq_length2, vocab_size).
        """
        # Un-broadcast encoder padding mask; the decoder expands it over
        # its own time axis for cross-attention.
        encoder_mask_without_broadcast = gxl_attention_module.get_padding_mask(enc_inputs, GxlCharTokenizer.PAD_ID, -1)
        enc_outputs = self.encoder(enc_inputs)  # (batch_size, seq_length1, embed_size)
        # Call the module itself, not .forward(), so that nn.Module.__call__
        # runs and any registered forward hooks/pre-hooks are honored.
        dec_outputs = self.decoder(dec_inputs, enc_outputs,
                                   encoder_mask_without_broadcast)  # (batch_size, seq_length2, vocab_size)
        return dec_outputs


if __name__ == '__main__':
    # Smoke check: instantiate a model and display its module tree.
    demo_model = GxlChatAttentionModel(3000)
    print(demo_model)
