# -*- coding: utf-8 -*-  
'''
transformer零件


    - EncoderLayer 编码层
    - DecoderLayer 解码层


    - PositionalEncodingLayer 位置编码层
    
    
    - MutilHeadAttentionLayer 多头注意力层
    - MaskMutilHeadAttentionLayer 带掩码的多头注意力层
    
    
    - SelfAttentionLayer 自注意力层
    
    
    - FeedForwardkLayer 内部全连接层
    

@author: luoyi
Created on 2021年3月21日
'''
import tensorflow as tf

import utils.conf as conf
from math import ceil, floor, sqrt


#    编码层
class EncoderLayer(tf.keras.layers.Layer):
    '''Transformer encoder stack.

    Chains num_layers (multi-head attention, feed-forward) pairs; the output
    of each pair is fed to the next one.
    '''
    def __init__(self,
                 name='EncoderLayer',
                 num_layers=conf.TRANSFORMER.get_num_layers(),
                 mutil_attention_groups=conf.TRANSFORMER.get_mutil_head_attention_groups(),
                 sentence_maxlen=conf.TRANSFORMER.get_sentence_maxlen(),
                 embedding_dims=conf.TRANSFORMER.get_embedding_dims(),
                 self_attention_dims=conf.TRANSFORMER.get_self_attention_dims(),
                 feed_forward_dims=conf.TRANSFORMER.get_feed_forward_dims(),
                 batch_size=conf.DATASET.get_batch_size(),
                 **kwargs):
        '''
            @param num_layers: number of stacked encoder layers
            @param mutil_attention_groups: number of attention heads
            @param sentence_maxlen: maximum sentence length
            @param embedding_dims: word-vector dimension
            @param self_attention_dims: per-head attention dimension
            @param feed_forward_dims: hidden width of the feed-forward sub-layer
            @param batch_size: fixed batch size forwarded to the attention layers
        '''
        super(EncoderLayer, self).__init__(name=name, **kwargs)

        #    one attention layer and one feed-forward layer per stacked layer
        self._mutil_attentions = []
        self._feed_forwardks = []
        for idx in range(num_layers):
            self._mutil_attentions.append(
                MutilHeadAttentionLayer(name='encoder_mutil_head_attention_layer_' + str(idx),
                                        groups=mutil_attention_groups,
                                        sentence_maxlen=sentence_maxlen,
                                        embedding_dims=embedding_dims,
                                        self_attention_dims=self_attention_dims,
                                        batch_size=batch_size))
            self._feed_forwardks.append(
                FeedForwardkLayer(name='encoder_feed_forwardk_Layer_' + str(idx),
                                  feed_forward_dims=feed_forward_dims))
        pass

    def call(self, encoder_vec, padding_mask=None, **kwargs):
        '''Run the input through every (attention, feed-forward) pair in order.
            @param encoder_vec: encoder input
            @param padding_mask: optional padding mask forwarded to every attention layer
            @return: encoded representation, same shape as encoder_vec
        '''
        for layer_idx in range(len(self._mutil_attentions)):
            attention = self._mutil_attentions[layer_idx]
            feed_forward = self._feed_forwardks[layer_idx]
            #    self-attention: query, key and value are all the running vector
            encoder_vec = attention(Xq=encoder_vec, Xk=encoder_vec, Xv=encoder_vec, mask=padding_mask)
            encoder_vec = feed_forward(encoder_vec)
        return encoder_vec
    pass


#    解码层
class DecoderLayer(tf.keras.layers.Layer):
    '''Transformer decoder stack.

    Chains num_layers triples of
    (masked self-attention, encoder-decoder attention, feed-forward).
    '''
    def __init__(self,
                 name='DecoderLayer',
                 num_layers=conf.TRANSFORMER.get_num_layers(),
                 mutil_attention_groups=conf.TRANSFORMER.get_mutil_head_attention_groups(),
                 sentence_maxlen=conf.TRANSFORMER.get_sentence_maxlen(),
                 embedding_dims=conf.TRANSFORMER.get_embedding_dims(),
                 self_attention_dims=conf.TRANSFORMER.get_self_attention_dims(),
                 feed_forward_dims=conf.TRANSFORMER.get_feed_forward_dims(),
                 batch_size=conf.DATASET.get_batch_size(),
                 **kwargs):
        '''
            @param num_layers: number of stacked decoder layers
            @param mutil_attention_groups: number of attention heads
            @param sentence_maxlen: maximum sentence length
            @param embedding_dims: word-vector dimension
            @param self_attention_dims: per-head attention dimension
            @param feed_forward_dims: hidden width of the feed-forward sub-layer
            @param batch_size: fixed batch size forwarded to the attention layers
        '''
        super(DecoderLayer, self).__init__(name=name, **kwargs)
        
        #    per decoder layer: masked self-attention, encoder-decoder attention, feed-forward
        self._mask_mutil_attentions = [MutilHeadAttentionLayer(name='decoder_mask_mutil_head_attention_layer_' + str(i), 
                                                               groups=mutil_attention_groups, 
                                                               sentence_maxlen=sentence_maxlen, 
                                                               embedding_dims=embedding_dims, 
                                                               self_attention_dims=self_attention_dims, 
                                                               batch_size=batch_size)\
                                         for i in range(num_layers)]
        self._mutil_attentions = [MutilHeadAttentionLayer(name='decoder_mutil_head_attention_layer_' + str(i), 
                                                          groups=mutil_attention_groups, 
                                                          sentence_maxlen=sentence_maxlen, 
                                                          embedding_dims=embedding_dims, 
                                                          self_attention_dims=self_attention_dims, 
                                                          batch_size=batch_size)\
                                 for i in range(num_layers)]
        self._feed_forwardks = [FeedForwardkLayer(name='decoder_feed_forwardk_Layer_' + str(i), 
                                                  feed_forward_dims=feed_forward_dims)\
                                 for i in range(num_layers)]
        pass
    
    def call(self, encoder_vec, decoder_vec, padding_mask=None, sequence_mask=None, **kwargs):
        '''
            @param encoder_vec: encoder output (keys/values of the cross attention)
            @param decoder_vec: decoder input
            @param padding_mask: <PAD>-position mask, used in encoder-decoder attention
            @param sequence_mask: future-position mask, used in masked self-attention
            @return: decoded representation, same shape as decoder_vec
        '''
        for (mask_mutil_attention, mutil_attention, feed_forwardk) in zip(self._mask_mutil_attentions, self._mutil_attentions, self._feed_forwardks):
            #    masked self-attention over the decoder input
            #    NOTE(review): only sequence_mask is applied here; the padding mask
            #    is not combined into the decoder self-attention — confirm intended.
            decoder_vec = mask_mutil_attention(Xq=decoder_vec, Xk=decoder_vec, Xv=decoder_vec, mask=sequence_mask)
            #    encoder-decoder attention.
            #    BUGFIX: MutilHeadAttentionLayer.call takes the keyword `mask=`;
            #    the old `padding_mask=` keyword fell into **kwargs and was
            #    silently ignored, so the padding mask never reached the scores.
            decoder_vec = mutil_attention(Xq=decoder_vec, Xk=encoder_vec, Xv=encoder_vec, mask=padding_mask)
            decoder_vec = feed_forwardk(decoder_vec)
            pass
        return decoder_vec
    pass


#    PositionalEncodingLayer
class PositionalEncodingLayer(tf.keras.layers.Layer):
    '''Positional encoding layer (a periodic "positional texture").
        Position information is encoded through the period of trig functions:
            PE(pos, 2i)   = sin(pos / 10000^(2i / d))
            PE(pos, 2i+1) = cos(pos / 10000^(2i / d))
        where:
            pos: index of the word inside the sentence (sentence_maxlen axis)
            i:   index inside the word vector (embedding_dims axis)
            d:   word-vector dimension
        Output: X + PE (broadcast over the batch axis)
        NOTE(review): the implementation plugs the raw embedding index j into
        10000^(2j/d); the canonical transformer formula uses the pair index,
        i.e. 10000^(j/d) for even j — confirm this doubled exponent is intended.
    '''
    def __init__(self,
                 name='PositionalEncodingLayer',
                 sentence_maxlen=conf.TRANSFORMER.get_sentence_maxlen(),
                 embedding_dims=conf.TRANSFORMER.get_embedding_dims(),
                 **kwargs):
        '''
            @param sentence_maxlen: maximum sentence length
            @param embedding_dims: word-vector dimension
        '''
        super(PositionalEncodingLayer, self).__init__(name=name, **kwargs)
        
        #    pre-compute the (sentence_maxlen, embedding_dims) template matrix once
        self.init_template_matrix(sentence_maxlen, embedding_dims)
        pass
    
    #    build the positional-encoding template matrix
    def init_template_matrix(self, sentence_maxlen, embedding_dims):
        '''Fill a sentence_maxlen * embedding_dims matrix with
                PE(pos, 2i)   = sin(pos / 10000^(2i / d))
                PE(pos, 2i+1) = cos(pos / 10000^(2i / d))
            and store it as self._pe  Tensor(1, sentence_maxlen, embedding_dims)
        '''
        #    even dimension indices (0, 2, 4, ...) tiled per position,
        #    plus the matching position indices tiled per even column
        pe_even_i = tf.range(start=0, limit=embedding_dims, delta=2, dtype=tf.float32)
        pe_even_i = tf.repeat(tf.expand_dims(pe_even_i, axis=0), repeats=sentence_maxlen, axis=0)
        pe_even_pos = tf.range(sentence_maxlen, dtype=tf.float32)
        pe_even_pos = tf.repeat(tf.expand_dims(pe_even_pos, axis=-1), repeats=ceil(embedding_dims / 2), axis=-1)
        #    odd dimension indices (1, 3, 5, ...) and their positions
        #    (the original comment here said "even" — these are the odd slots)
        pe_odd_i = tf.range(start=1, limit=embedding_dims, delta=2, dtype=tf.float32)
        pe_odd_i = tf.repeat(tf.expand_dims(pe_odd_i, axis=0), repeats=sentence_maxlen, axis=0)
        pe_odd_pos = tf.range(sentence_maxlen, dtype=tf.float32)
        pe_odd_pos = tf.repeat(tf.expand_dims(pe_odd_pos, axis=-1), repeats=floor(embedding_dims / 2), axis=-1)
        
        #    even slots: PE(pos, 2i) = sin(pos / 10000^(2i / d))
        pe_even = tf.math.sin(pe_even_pos / (tf.math.pow(10000, 2 * pe_even_i / embedding_dims)))
        #    odd slots: PE(pos, 2i+1) = cos(pos / 10000^(2i / d))
        #    (pe_odd_i - 1 maps each odd index back to its even partner)
        pe_odd = tf.math.cos(pe_odd_pos / (tf.math.pow(10000, 2 * (pe_odd_i-1) / embedding_dims)))
        
        #    concat yields [even columns..., odd columns...]; gather re-interleaves them
        pe = tf.concat([pe_even, pe_odd], axis=-1)
        #    build the interleaving permutation [0, half, 1, half+1, ...]
        perm = []
        half = embedding_dims // 2 if embedding_dims % 2 == 0 else (embedding_dims + 1) // 2
        for i in range(half):
            perm += [i, i + half]
            pass
        #    odd embedding_dims: one more even column than odd columns,
        #    so the final (out-of-range) odd index is dropped
        if (embedding_dims % 2 > 0): del perm[-1]
        pe = tf.gather(pe, indices=perm, axis=-1)
        
        #    expand to Tensor(1, sentence_maxlen, embedding_dims); broadcasts at call time
        self._pe = tf.expand_dims(pe, axis=0)
        pass
    
    def call(self, inputs, **kwargs):
        #    add the positional template; broadcasts over the batch axis
        return inputs + self._pe
    
    pass


#    多头注意力层
class MutilHeadAttentionLayer(tf.keras.layers.Layer):
    '''Multi-head attention layer.

    Splits Q/K/V into `groups` heads, runs scaled dot-product attention per
    head, merges the heads back and applies a final linear projection,
    followed by a residual connection, layer normalization and optional
    dropout.

    Steps:
        step1: project Xq/Xk/Xv once, then split the result into heads
        step2: run self-attention on every head in parallel
        step3: merge heads and apply the output linear transform

    NOTE: the head split/merge reshapes assume
          embedding_dims == groups * self_attention_dims.
    '''
    def __init__(self,
                 name='MutilHeadAttentionLayer',
                 groups=conf.TRANSFORMER.get_mutil_head_attention_groups(),
                 embedding_dims=conf.TRANSFORMER.get_embedding_dims(),
                 sentence_maxlen=conf.TRANSFORMER.get_sentence_maxlen(),
                 self_attention_dims=conf.TRANSFORMER.get_self_attention_dims(),
                 attention_dropout=conf.TRANSFORMER.get_attention_dropout(),
                 batch_size=conf.DATASET.get_batch_size(),
                 **kwargs):
        '''
            @param groups: number of attention heads
            @param embedding_dims: word-vector dimension (= groups * self_attention_dims)
            @param sentence_maxlen: maximum sentence length
            @param self_attention_dims: per-head attention dimension
            @param attention_dropout: dropout rate; disabled unless 0 < rate < 1
            @param batch_size: fixed batch size (the mask matrix is pre-built with it)
        '''
        super(MutilHeadAttentionLayer, self).__init__(name=name, **kwargs)
        
        #    hyper-parameters needed by the reshapes in call()
        self._groups = groups
        self._batch_size = batch_size
        self._sentence_maxlen = sentence_maxlen
        self._self_attention_dims = self_attention_dims
        self._embedding_dims = embedding_dims
        
        #    scaled dot-product attention core (Q/K/V and masks supplied by call())
        self._self_attention = SelfAttentionLayer(name=name + '_self_attention',
                                                  self_attention_dims=self_attention_dims)
        
        #    Wq, Wk, Wv projections (one dense layer each, shared across heads)
        self._Wq = tf.keras.layers.Dense(name=name + '_fc_Q', units=embedding_dims)
        self._Wk = tf.keras.layers.Dense(name=name + '_fc_K', units=embedding_dims)
        self._Wv = tf.keras.layers.Dense(name=name + '_fc_V', units=embedding_dims)
         
        #    output linear transform applied after merging the heads
        self._liner = tf.keras.layers.Dense(name=name + '_liner', units=embedding_dims)
        #    layer normalization for the residual branch
        self._norm = tf.keras.layers.LayerNormalization()
        
        #    large-negative matrix added at masked positions (approximates -inf)
        #    Tensor(batch_size, groups, sentence_maxlen, sentence_maxlen)
        self._mask_matrix = tf.ones(shape=(batch_size, groups, sentence_maxlen, sentence_maxlen), dtype=tf.float32) * -1e8
        
        #    optional dropout, applied after the residual + norm
        self._dropout = None
        if (attention_dropout > 0 and  attention_dropout < 1):
            self._dropout = tf.keras.layers.Dropout(rate=attention_dropout)
        pass
    
    def call(self, Xq, Xk, Xv, mask=None, **kwargs):
        '''
            @param Xq: query input    Tensor(batch_size, sentence_maxlen, embedding_dims)
            @param Xk: key input      Tensor(batch_size, sentence_maxlen, embedding_dims)
            @param Xv: value input    Tensor(batch_size, sentence_maxlen, embedding_dims)
            @param mask: mask position matrix forwarded to the attention core
                         (assumed 1 at positions to hide — verify against caller)
            @return: Tensor(batch_size, sentence_maxlen, embedding_dims)
        '''
        #    project once, then split into heads    Tensor(batch_size, groups, sentence_maxlen, self_attention_dims)
        Q = self._Wq(Xq)
        Q = tf.reshape(Q, shape=(self._batch_size, self._sentence_maxlen, self._groups, self._self_attention_dims))
        Q = tf.transpose(Q, perm=[0, 2, 1, 3])
        K = self._Wk(Xk)
        K = tf.reshape(K, shape=(self._batch_size, self._sentence_maxlen, self._groups, self._self_attention_dims))
        K = tf.transpose(K, perm=[0, 2, 1, 3])
        V = self._Wv(Xv)
        V = tf.reshape(V, shape=(self._batch_size, self._sentence_maxlen, self._groups, self._self_attention_dims))
        V = tf.transpose(V, perm=[0, 2, 1, 3])
        
        #    per-head scaled dot-product attention    Tensor(batch_size, groups, sentence_maxlen, self_attention_dims)
        out = self._self_attention(Q=Q, K=K, V=V, mask=mask, mask_matrix=self._mask_matrix)
        
        #    merge the heads back
        out = tf.transpose(out, perm=[0, 2, 1, 3])                                                          #    Tensor(batch_size, sentence_maxlen, groups, self_attention_dims)
        out = tf.reshape(out, shape=(self._batch_size, self._sentence_maxlen, self._embedding_dims))        #    Tensor(batch_size, sentence_maxlen, embedding_dims)
        
        #    output linear transform    Tensor(batch_size, sentence_maxlen, embedding_dims)
        out = self._liner(out)
        
        #    residual + normalization.
        #    BUGFIX: the residual must wrap the sub-layer INPUT, i.e. the query Xq.
        #    The old code added Xv — identical for self-attention (Xq is Xv) but
        #    wrong for the decoder's encoder-decoder attention, where it dropped
        #    the decoder state from the residual path.
        out = tf.math.add(Xq, out)
        out = self._norm(out)
        
        #    dropout (note: applied after normalization)
        if (self._dropout is not None): out = self._dropout(out)
        return out
        
    pass


#    自注意力机制
class SelfAttentionLayer(tf.keras.layers.Layer):
    '''Scaled dot-product attention core.

    Given pre-computed Q, K, V:
        scores  = Q @ K^T / sqrt(self_attention_dims)
        masked cells (if a mask is given) are pushed towards -inf
        weights = softmax(scores)
        out     = weights @ V
    '''
    def __init__(self,
                 name='SelfAttentionLayer',
                 self_attention_dims=conf.TRANSFORMER.get_self_attention_dims(),
                 **kwargs):
        '''
            @param self_attention_dims: per-head dimension, used for the sqrt(d_k) scaling
        '''
        super(SelfAttentionLayer, self).__init__(name=name, **kwargs)
        #    pre-compute the scaling denominator sqrt(d_k)
        self._sqrt_d = sqrt(self_attention_dims)
        pass

    def call(self, Q, K, V, mask=None, mask_matrix=None, **kwargs):
        '''
            @param Q: query matrix    Tensor(batch_size, groups, sentence_maxlen, self_attention_dims)
            @param K: key matrix      Tensor(batch_size, groups, sentence_maxlen, self_attention_dims)
            @param V: value matrix    Tensor(batch_size, groups, sentence_maxlen, self_attention_dims)
            @param mask: mask position matrix (assumed 1 at cells to hide — verify
                         against caller)    Tensor(batch_size, groups, sentence_maxlen, sentence_maxlen)
            @param mask_matrix: matrix of large negative values added at masked cells
            @return: Tensor(batch_size, groups, sentence_maxlen, self_attention_dims)
        '''
        #    scaled dot-product scores: Q @ K^T / sqrt(d_k)
        #    (transpose_b flips the last two axes of K, same as perm [0,1,3,2])
        scores = tf.matmul(Q, K, transpose_b=True) / self._sqrt_d

        #    push masked cells towards -inf so softmax zeroes them out
        if mask is not None:
            scores = scores + mask * mask_matrix

        #    Tensor(batch_size, groups, sentence_maxlen, sentence_maxlen)
        weights = tf.nn.softmax(scores, axis=-1)

        #    weighted sum of the values
        return tf.matmul(weights, V)

    pass



#    FeedForwardkLayer（内部全连接层）
class FeedForwardkLayer(tf.keras.layers.Layer):
    '''Position-wise feed-forward sub-layer.

    Two dense layers (relu then linear) with a residual connection,
    layer normalization, and optional dropout applied last.
    '''
    def __init__(self,
                 name='FeedForwardkLayer',
                 embedding_dims=conf.TRANSFORMER.get_embedding_dims(),
                 feed_forward_dims=conf.TRANSFORMER.get_feed_forward_dims(),
                 attention_dropout=conf.TRANSFORMER.get_attention_dropout(),
                 **kwargs):
        '''
            @param embedding_dims: output width (matches the input width for the residual add)
            @param feed_forward_dims: width of the hidden relu layer
            @param attention_dropout: dropout rate; disabled unless 0 < rate < 1
        '''
        super(FeedForwardkLayer, self).__init__(name=name, **kwargs)

        #    expand to feed_forward_dims, then project back to embedding_dims
        self._fc1 = tf.keras.layers.Dense(name=name + '_fc1',
                                          units=feed_forward_dims,
                                          activation='relu')
        self._fc2 = tf.keras.layers.Dense(name=name + '_fc2',
                                          units=embedding_dims)

        #    optional dropout (applied after the residual + norm)
        self._dropout = tf.keras.layers.Dropout(rate=attention_dropout) if 0 < attention_dropout < 1 else None

        #    layer normalization for the residual branch
        self._norm = tf.keras.layers.LayerNormalization()
        pass

    def call(self, X, **kwargs):
        '''
            @param X: input    Tensor(..., embedding_dims)
            @return: same shape as X
        '''
        #    two dense layers, then residual add + normalization
        Y = self._norm(tf.math.add(X, self._fc2(self._fc1(X))))
        #    dropout runs last, mirroring the attention layer's ordering
        return Y if self._dropout is None else self._dropout(Y)

    pass



