import math

import numpy as np
import torch
import torch.nn as nn
import configparser
# Load model hyper-parameters from config.ini (section [model]).
config = configparser.ConfigParser()
config.read('config.ini')

source_vocab_size = config.getint('model', 'source_vocab_size')
target_vocab_size = config.getint('model', 'target_vocab_size')
max_len_sentence = config.getint('model', 'max_len_sentence')
len_src = config.getint('model', 'len_src')
len_tgt = config.getint('model', 'len_tgt')
embedding_dim = config.getint('model', 'embedding_dim')
encode_layers = config.getint('model', 'encode_layers')
decode_layers = config.getint('model', 'decode_layers')
ff_hidden_dim = config.getint('model', 'ff_hidden_dim')
dim_q_n_k = config.getint('model', 'dim_q_n_k')
dim_v = config.getint('model', 'dim_v')
head_num = config.getint('model', 'head_num')
# Bug fix: a dropout rate is a probability in [0, 1); getint() would raise
# ValueError on a fractional value such as "0.1", so read it as a float.
dropout_rate = config.getfloat('model', 'dropout_rate')


class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017) plus dropout.

    Adds a fixed sin/cos position signal to the word embeddings so the model
    can make use of token order. The encoding table is precomputed once and
    registered as a buffer.
    """

    def __init__(self, dropout_rate=dropout_rate, max_len_sentence=max_len_sentence, embedding_dim=embedding_dim):
        # Bug fix: the default max_len_sentence was hard-coded to 20, silently
        # ignoring the value read from config.ini; use the config value.
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout_rate)

        positional_matrix = torch.zeros(max_len_sentence, embedding_dim)

        # position.shape = [max_len_sentence, 1]
        position = torch.arange(0, max_len_sentence).unsqueeze(1)

        # Equivalent to 1 / 10000**(2i / embedding_dim) for i in
        # range(embedding_dim // 2), computed in log space; range() only
        # supports ints, hence arange with step 2.
        # division_list.shape = [embedding_dim / 2]
        division_list = torch.exp(- math.log(10000) * torch.arange(0, embedding_dim, 2) / embedding_dim)

        # broadcast : position * division_list -> [max_len_sentence, embedding_dim/2]
        positional_matrix[:, 0::2] = torch.sin(position * division_list)
        positional_matrix[:, 1::2] = torch.cos(position * division_list)

        # Bug fix: the original called unsqueeze/transpose without assigning the
        # result (tensor ops are not in-place), so the buffer stayed 2-D and the
        # broadcast in forward() was wrong.
        # [max_len_sentence, embedding_dim] -> [max_len_sentence, 1, embedding_dim]
        positional_matrix = positional_matrix.unsqueeze(0).transpose(0, 1)

        # register as a buffer so it is saved with the model but not trained
        self.register_buffer('positional_matrix', positional_matrix)

    def forward(self, x):
        '''
        :param x: word embeddings, shape = [batch_size, len_src, embedding_dim]
        :return: word embeddings with position information added, same shape
        '''
        # Bug fix: the original sliced the buffer with x.size(0) (the batch
        # size, since the RHS is evaluated before x is rebound); the slice must
        # use the sequence length.
        seq_len = x.size(1)
        # [batch, len, dim] -> [len, batch, dim]; buffer slice is [len, 1, dim]
        x = x.transpose(0, 1) + self.positional_matrix[:seq_len, :]
        return self.dropout(x.transpose(0, 1))


class Add_n_Norm(nn.Module):
    """Residual connection followed by layer normalization (post-norm)."""

    def __init__(self, embedding_dim=embedding_dim):
        super(Add_n_Norm, self).__init__()
        self.norm_layer = nn.LayerNorm(embedding_dim)

    def forward(self, x, residual):
        # Post-norm variant: add the residual first, then normalize.
        # (The pre-norm alternative would normalize x before adding residual.)
        return self.norm_layer(x + residual)


class FeedForward(nn.Module):
    """Position-wise feed-forward sublayer: Linear -> ReLU -> Linear,
    wrapped with a residual add & layer-norm and followed by dropout."""

    def __init__(self, embedding_dim=embedding_dim, ff_hidden_dim=ff_hidden_dim, dropout_rate=dropout_rate):
        super(FeedForward, self).__init__()
        self.ff = nn.Sequential(
            nn.Linear(embedding_dim, ff_hidden_dim),
            nn.ReLU(),
            nn.Linear(ff_hidden_dim, embedding_dim),
        )
        self.add_n_norm = Add_n_Norm()
        self.dp = nn.Dropout(p=dropout_rate)

    def forward(self, x):
        # The untouched input x doubles as the residual branch.
        normed = self.add_n_norm(self.ff(x), x)
        return self.dp(normed)


def obtain_padding_mask(Q_src, K_src):
    '''
    Build the padding mask for the attention score matrix Q @ K.T.
    That matrix is [wordnum_in_Q, wordnum_in_K] per batch, i.e. the mask hides
    *words* rather than embedding dimensions, so it is vital to know whether
    each axis is len_src or len_tgt.
    :param Q_src: tokenized batch on the query side, [batch_num, wordnum_in_Q]
    :param K_src: tokenized batch on the key side, [batch_num, wordnum_in_K]
    :return: bool padding_mask, [batch_num, wordnum_in_Q, wordnum_in_K]
    '''
    q_len = Q_src.size(1)
    k_len = K_src.size(1)

    # Softmax runs over the last (key) dimension, so only padding positions in
    # K_src need hiding; build the mask on that single axis first and then
    # expand it — much simpler than constructing the full matrix directly.
    # True marks a padding token (id 0) that will be masked out later.
    key_is_pad = K_src.detach().eq(0).unsqueeze(1)
    # shape: [batch_num, 1, k_len]

    return key_is_pad.expand(-1, q_len, k_len)
    # shape: [batch_num, q_len, k_len]



def obtain_look_ahead_mask(Q_src):
    '''
    ***NOTE***: the look-ahead mask is only used in the decoder's
    self-attention, so a single sequence length describes both mask axes.
    :param Q_src: tokenized batch, [batch_size, sequence_num]
    :return: bool look_ahead_mask, True strictly above the diagonal
             (future positions), [batch_size, sequence_num, sequence_num]
    '''
    batch_size, seq_len = Q_src.shape
    ones = torch.ones([batch_size, seq_len, seq_len])
    # Keep the strictly-upper-triangular part: position i must not attend to j > i.
    return torch.triu(ones, diagonal=1).bool()


class ScaledDotProductAttention(nn.Module):
    '''
    *****NOTE*****
    ScaledDotProductAttention contains Softmax,
    so it's learnable and should be a module layer instead of a function
    '''

    def __init__(self):
        super(ScaledDotProductAttention, self).__init__()
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, Mat_Q, Mat_K, Mat_V, mask):
        """
        :param Mat_Q: [batch_size, head_num, len1, dim_q_n_k]
        :param Mat_K: [batch_size, head_num, len2, dim_q_n_k]
        :param Mat_V: [batch_size, head_num, len2, dim_v]
        :param mask: bool mask, True where attention must be blocked
        :return: [batch_size, head_num, len1, dim_v]
        """
        # scaled scores: [batch_size, head_num, len1, len2]
        Mat_attention = torch.matmul(Mat_Q, Mat_K.transpose(-1, -2)) / math.sqrt(Mat_K.size(-1))

        # Bug fix: masked_fill() is out-of-place and its return value was
        # discarded, so the mask never took effect; use in-place masked_fill_()
        # so masked scores become -1e9 (~zero weight after softmax).
        Mat_attention.masked_fill_(mask, -1e9)

        Mat_attention = self.softmax(Mat_attention)
        # [batch_size, head_num, len1, len2]
        return torch.matmul(Mat_attention, Mat_V)


class MultiHeadAttention(nn.Module):
    """Project Q/K/V, split into heads, run scaled dot-product attention per
    head, merge the heads and project back to embedding_dim."""

    def __init__(self, embedding_dim=embedding_dim, head_num=head_num, dim_q_n_k=dim_q_n_k, dim_v=dim_v):
        super(MultiHeadAttention, self).__init__()
        # Q @ K.T requires dim_q == dim_k, hence the single name dim_q_n_k.
        # K_src and V_src always come from the same source, and Q_src matches
        # them too whenever this module is used as self-attention.
        self.Q_Weight = nn.Linear(embedding_dim, head_num * dim_q_n_k)
        self.K_Weight = nn.Linear(embedding_dim, head_num * dim_q_n_k)
        self.V_Weight = nn.Linear(embedding_dim, head_num * dim_v)
        self.linear = nn.Linear(head_num * dim_v, embedding_dim)

    def forward(self, Q_src, K_src, V_src, mask, head_num=head_num, dim_q_n_k=dim_q_n_k, dim_v=dim_v):
        """
        :param Q_src: [batch_size, len1, embedding_dim]
        :param K_src: [batch_size, len2, embedding_dim]
        :param V_src: [batch_size, len2, embedding_dim]
        :param mask: [batch_size, len1, len2]
        :return: [batch_size, len1, embedding_dim]
        """
        batch_size = Q_src.size(0)

        # project then split into heads:
        # [batch, len, head_num * d] -> [batch, head_num, len, d]
        Mat_Q = self.Q_Weight(Q_src).view(batch_size, -1, head_num, dim_q_n_k).transpose(1, 2)
        Mat_K = self.K_Weight(K_src).view(batch_size, -1, head_num, dim_q_n_k).transpose(1, 2)
        Mat_V = self.V_Weight(V_src).view(batch_size, -1, head_num, dim_v).transpose(1, 2)

        # replicate the mask once per head:
        # [batch, len1, len2] -> [batch, head_num, len1, len2]
        per_head_mask = mask.unsqueeze(1).repeat(1, head_num, 1, 1)
        heads = ScaledDotProductAttention()(Mat_Q, Mat_K, Mat_V, per_head_mask)

        # merge the heads back together:
        # [batch, head_num, len1, dim_v] -> [batch, len1, head_num * dim_v]
        heads = heads.transpose(1, 2).reshape(batch_size, -1, head_num * dim_v)

        # final projection: [batch, len1, head_num * dim_v] -> [batch, len1, embedding_dim]
        return self.linear(heads)


class attention_sublayer(nn.Module):
    """Multi-head attention wrapped with residual add & layer-norm and dropout."""

    def __init__(self, embedding_dim=embedding_dim, dropout_rate=dropout_rate):
        super(attention_sublayer, self).__init__()
        self.backbone_layer = MultiHeadAttention()
        self.add_n_norm = Add_n_Norm(embedding_dim)
        self.dp = nn.Dropout(p=dropout_rate)

    def forward(self, Q_src, K_src, V_src, mask):
        # Q_src doubles as the residual branch.
        attended = self.backbone_layer(Q_src, K_src, V_src, mask)
        return self.dp(self.add_n_norm(attended, Q_src))



class Encoder_layer(nn.Module):
    """One encoder block: self-attention sublayer followed by feed-forward."""

    def __init__(self):
        super(Encoder_layer, self).__init__()
        self.encoder_self_attention = attention_sublayer()
        self.feed_forward = FeedForward()

    def forward(self, x, self_attention_mask):
        """
        :param x: input data, shape [batch_size, len_src, embedding_dim]
        :param self_attention_mask: only a padding mask is needed here
                                    (no look-ahead in the encoder)
        :return: tensor [batch_size, len_src, embedding_dim]
        """
        attended = self.encoder_self_attention(Q_src=x, K_src=x, V_src=x, mask=self_attention_mask)
        return self.feed_forward(attended)


class Decoder_layer(nn.Module):
    """One decoder block: masked self-attention, encoder-decoder (cross)
    attention, then the feed-forward sublayer."""

    def __init__(self):
        super(Decoder_layer, self).__init__()
        self.decoder_self_attention = attention_sublayer()
        self.decoder_encoder_attention = attention_sublayer()
        self.feed_forward = FeedForward()

    def forward(self, decoder_embedding, encoder_output, decoder_self_attention_mask, decoder_encoder_attention_mask):
        """
        :param decoder_embedding: [batch_size, len_tgt, embedding_dim]
        :param encoder_output: [batch_size, len_src, embedding_dim]
        :param decoder_self_attention_mask: combined padding + look-ahead mask
        :param decoder_encoder_attention_mask: padding mask for cross-attention
        :return: tensor [batch_size, len_tgt, embedding_dim]
        """
        # Bug fix: this previously called self.self_attention, an attribute that
        # was never defined (the module is stored as decoder_self_attention),
        # which raised AttributeError on the first forward pass.
        decoder_output = self.decoder_self_attention(Q_src=decoder_embedding, K_src=decoder_embedding,
                                                     V_src=decoder_embedding, mask=decoder_self_attention_mask)

        decoder_output = self.decoder_encoder_attention(Q_src=decoder_output, K_src=encoder_output,
                                                        V_src=encoder_output, mask=decoder_encoder_attention_mask)

        return self.feed_forward(decoder_output)


class Encoder(nn.Module):
    """Embedding + positional encoding followed by a stack of encoder layers."""

    def __init__(self, encode_layers=encode_layers):
        super(Encoder, self).__init__()
        self.embedding_layer = nn.Embedding(source_vocab_size, embedding_dim)
        self.positional_encoding = PositionalEncoding()
        # Bug fix: nn.Sequential.forward accepts only a single input, but each
        # Encoder_layer needs (x, mask) — calling self.blocks(x, x, mask) raised
        # a TypeError. Keep the layers in a ModuleList and thread the mask
        # through manually.
        self.blocks = nn.ModuleList([Encoder_layer() for _ in range(encode_layers)])

    def forward(self, x, encoder_self_attention_mask):
        '''
        :param x: [batch_size, len_src]
        :param encoder_self_attention_mask: padding mask used in the encoder's
                                            self-attention process
        :return: tensor [batch_size, len_src, embedding_dim]
        '''
        # x: [batch_size, len_src] -> embedding -> [batch_size, len_src, embedding_dim]
        x = self.embedding_layer(x)

        # add positional encoding
        x = self.positional_encoding(x)

        # run the stacked encoder layers, passing the mask to each one
        for block in self.blocks:
            x = block(x, encoder_self_attention_mask)

        # tensor: [batch_size, len_src, embedding_dim]
        return x


class Decoder(nn.Module):
    """Embedding + positional encoding followed by a stack of decoder layers."""

    def __init__(self, decode_layers=decode_layers):
        super(Decoder, self).__init__()
        self.decoder_embedding = nn.Embedding(target_vocab_size, embedding_dim)
        self.positional_encoding = PositionalEncoding()
        # Bug fix: nn.Sequential.forward accepts only a single input and cannot
        # pass the four arguments each Decoder_layer needs; use a ModuleList and
        # call the layers explicitly.
        self.blocks = nn.ModuleList([Decoder_layer() for _ in range(decode_layers)])

    def forward(self, decoder_input, encoder_output, decoder_self_attention_mask, decoder_encoder_attention_mask):
        """
        :param decoder_input: [batch_size, len_tgt]
        :param encoder_output: [batch_size, len_src, embedding_dim]
        :param decoder_self_attention_mask: combined padding + look-ahead mask
        :param decoder_encoder_attention_mask: padding mask for cross-attention
        :return: tensor [batch_size, len_tgt, embedding_dim]
        """
        x = self.decoder_embedding(decoder_input)
        x = self.positional_encoding(x)

        # run the stacked decoder layers, threading all masks through each one
        for block in self.blocks:
            x = block(x, encoder_output, decoder_self_attention_mask, decoder_encoder_attention_mask)

        return x


class Transformer(nn.Module):
    """Full encoder-decoder Transformer with a final linear projection to the
    target vocabulary (softmax is left to CrossEntropyLoss)."""

    def __init__(self):
        super(Transformer, self).__init__()
        self.encoder = Encoder()
        self.decoder = Decoder()
        self.projection = nn.Linear(embedding_dim, target_vocab_size)

    def forward(self, encoder_input, decoder_input):
        '''
        :param encoder_input: [batch_size, len_src]
        :param decoder_input: [batch_size, len_tgt]
        :return: tensor [batch_size * len_tgt, target_vocab_size]
        '''
        # Encoder side: hide its own padding tokens, then encode.
        # encoder_output: [batch_size, len_src, embedding_dim]
        encoder_padding_mask = obtain_padding_mask(Q_src=encoder_input, K_src=encoder_input)
        encoder_output = self.encoder(encoder_input, encoder_padding_mask)

        # Decoder self-attention must hide both padding and future tokens;
        # on bool tensors `|` is the union of the two masks.
        self_attn_pad_mask = obtain_padding_mask(decoder_input, decoder_input)
        self_attn_look_ahead_mask = obtain_look_ahead_mask(decoder_input)
        decoder_self_attention_mask = self_attn_pad_mask | self_attn_look_ahead_mask

        # Cross-attention only needs to hide the encoder side's padding.
        decoder_encoder_attention_mask = obtain_padding_mask(decoder_input, encoder_input)

        # decoder_output: [batch_size, len_tgt, embedding_dim]
        decoder_output = self.decoder(decoder_input, encoder_output, decoder_self_attention_mask,
                                      decoder_encoder_attention_mask)

        # [batch_size, len_tgt, target_vocab_size]
        decoder_projection = self.projection(decoder_output)

        # flatten for CrossEntropyLoss (which applies the softmax itself):
        # [batch_size * len_tgt, target_vocab_size]
        return decoder_projection.view(-1, target_vocab_size)


if __name__ == '__main__':
    # Quick smoke check: build the model and print its layer structure.
    model = Transformer()
    print(model)
