# -*- coding: utf-8 -*-  
'''
GPT-2 building blocks:

    - MutilHeadAttentionLayer (multi-head self-attention layer)

    - EmbeddingLayer (word-embedding layer)
    - PositionEmbeddingLayer (position-embedding layer)

    - FeedForwordLayer (position-wise feed-forward layer)

    - TransformerBlockLayer (transformer block layer)

    - PreOutputLayer (pre-training output layer)


@author: luoyi
Created on 2021-04-07
'''
import tensorflow as tf
from enum import Enum

import utils.conf as conf
import utils.dictionaries as dictionaries


#    embedding operation type
#    (how the shared embedding matrix is applied)
class EmbeddingOpType(Enum):
    """Operation modes of the shared embedding matrix."""
    #    embedding_lookup: token ids -> vectors
    embedding = 0
    #    linear projection: vectors -> vocabulary logits
    projection = 1
#    word-embedding layer
class EmbeddingLayer(tf.keras.layers.Layer):
    '''Shared word-embedding matrix with two operation modes:
        - EmbeddingOpType.embedding: embedding_lookup (token ids -> vectors)
        - EmbeddingOpType.projection: linear projection back onto the
          vocabulary (vectors -> logits), reusing the same matrix (weight tying)
    '''
    def __init__(self, 
                 name='EmbeddingLayer', 
                 vocab_size=dictionaries.dict_size(), 
                 d_model=conf.GPT2.get_d_model(), 
                 op=EmbeddingOpType.embedding,
                 pre_weights=None,
                 embedding_trainable=conf.GPT2.get_embedding_trainable(),
                 **kwargs):
        '''
            @param vocab_size: number of rows of the embedding table
            @param d_model: embedding dimension
            @param op: default operation mode (can be overridden per call)
            @param pre_weights: optional pre-trained weights used as the
                                initial value of the embedding table
            @param embedding_trainable: whether the table is trainable
        '''
        super(EmbeddingLayer, self).__init__(name=name, **kwargs)
        
        self._name = name
        
        self._vocab_size = vocab_size
        self._d_model = d_model
        self._op = op
        self._pre_weights = pre_weights
        self._embedding_trainable = embedding_trainable
        pass
    
    #    create weights
    def build(self, input_shape):
        self._embedding_weights = self.add_weight(name=self._name + '_embedding_weights', 
                                                  shape=[self._vocab_size, self._d_model], 
                                                  dtype=tf.float32, 
                                                  initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01),
                                                  trainable=self._embedding_trainable)
        #    if pre-trained weights are given, use them as the initial value
        if (self._pre_weights is not None):
            tf.keras.backend.set_value(self._embedding_weights, self._pre_weights)
            pass
        super(EmbeddingLayer, self).build(input_shape)
        pass
    
    #    embedding_lookup
    def embedding(self, inputs):
        '''
            @param inputs: Tensor(batch_size, sentence_maxlen) token ids
            @return: Tensor(batch_size, sentence_maxlen, d_model)
        '''
        #    padding mask (0 where <PAD> id 0, 1 elsewhere)    Tensor(batch_size, sentence_maxlen)
        mask = tf.cast(tf.not_equal(inputs, 0), dtype=tf.float32)
        #    embedding_lookup              Tensor(batch_size, sentence_maxlen, d_model)
        embeddings = tf.nn.embedding_lookup(self._embedding_weights, inputs)
        #    zero out the vectors of <PAD> positions
        embeddings = embeddings * tf.expand_dims(mask, axis=-1)
        return embeddings
    #    linear projection onto the vocabulary
    def projection(self, inputs):
        '''
            @param inputs: Tensor(batch_size, sentence_maxlen, d_model)
            @return: Tensor(batch_size, sentence_maxlen, vocab_size) logits
        '''
        return tf.matmul(inputs, self._embedding_weights, transpose_b=True)
    
    #    forward
    def call(self, inputs, op=None, **kwargs):
        if (op is None): op = self._op
        
        #    embedding_lookup mode
        if (op == EmbeddingOpType.embedding): return self.embedding(inputs)
        #    linear-projection mode
        elif (op == EmbeddingOpType.projection): return self.projection(inputs)
        #    fix: report the op actually used (a per-call override may differ
        #    from self._op; the original always printed self._op)
        else: raise ValueError('无法识别的操作类型. op:' + str(op))
    pass


#    position-embedding layer
class PositionEmbeddingLayer(tf.keras.layers.Layer):
    '''
        Learned positional embedding: position ids starting from the given
        start are embedded through an inner EmbeddingLayer.
        <PAD> positions are padding-masked (forced to id 0) before lookup.

        NOTE(review): the inner embedding table has sentence_maxlen rows, but
        the ids looked up reach sentence_maxlen - 1 + start (start defaults
        to 1 in call), i.e. out of range whenever start >= 1 — confirm the
        intended table size / start convention.
        NOTE(review): self._position is built with a fixed batch_size, so a
        trailing partial batch would not match — verify callers always feed
        full batches.
    '''
    def __init__(self,
                 name='PositionEmbeddingLayer',
                 sentence_maxlen=conf.GPT2.get_pre_training_sentence_maxlen(),
                 batch_size=conf.GPT2.get_batch_size(),
                 d_model=conf.GPT2.get_d_model(), 
                 **kwargs):
        super(PositionEmbeddingLayer, self).__init__(name=name, **kwargs)
        
        self._name = name
        
        self._sentence_maxlen = sentence_maxlen
        self._batch_size = batch_size
        self._d_model = d_model
        pass
    
    def build(self, input_shape):
        #    embedding table over position ids (vocab_size = sentence_maxlen)
        self._embedding = EmbeddingLayer(name=self._name + '_embedding_layer', vocab_size=self._sentence_maxlen, d_model=self._d_model)
        
        #    fixed ids [0 .. sentence_maxlen-1], repeated for every batch row
        self._position = tf.cast(tf.range(self._sentence_maxlen), dtype=tf.int64)
        self._position = tf.repeat(tf.expand_dims(self._position, axis=0), repeats=self._batch_size, axis=0)
        
        super(PositionEmbeddingLayer, self).build(input_shape)
        pass
    
    def call(self, inputs, start=1, **kwargs):
        '''
            @param start: position ids start from this value
            @param inputs: Tensor(batch_size, sentence_maxlen) token ids
        '''
        #    padding mask (0 where <PAD> id 0, 1 elsewhere)    Tensor(batch_size, sentence_maxlen)
        mask = tf.cast(tf.not_equal(inputs, 0), dtype=tf.int64)
        #    position ids starting from start    Tensor(batch_size, sentence_maxlen)
        position = self._position + start
        #    force <PAD> positions to id 0
        position = position * mask
        
        pos_embedding = self._embedding(position)
        return pos_embedding
    
    pass


#    multi-head self-attention
class MutilHeadAttentionLayer(tf.keras.layers.Layer):
    '''Scaled dot-product multi-head self-attention.
        q,k,v are produced by one dense layer, split into n_head_attention
        heads, attended under an additive mask, merged back and passed
        through a final linear projection plus dropout.
    '''
    def __init__(self,
                 name='MutilHeadAttentionLayer',
                 n_head_attention=conf.GPT2.get_n_head_attention(),
                 sentence_maxlen=conf.GPT2.get_pre_training_sentence_maxlen(),
                 d_model=conf.GPT2.get_d_model(), 
                 batch_size=conf.GPT2.get_batch_size(),
                 dropout_rate=conf.GPT2.get_dropout_rate(),
                 **kwargs):
        super(MutilHeadAttentionLayer, self).__init__(name=name, **kwargs)

        self._name = name
                
        self._n_head_attention = n_head_attention
        self._sentence_maxlen = sentence_maxlen
        self._d_model = d_model
        self._batch_size = batch_size
        self._dropout_rate = dropout_rate
        
        #    per-head dimension
        #    fix: the assertion message interpolated sentence_maxlen where
        #    n_head_attention was meant
        assert (d_model % n_head_attention == 0), '多头注意力机制的d_model({})必须能整除n_head_attention({})'.format(d_model, n_head_attention)
        #    fix: keep the per-head dimension as a python int — the original
        #    stored a float32 tensor, which is not a valid tf.reshape dimension
        self._sub_d_model = d_model // n_head_attention
        self._sqrt_sub_d_model = tf.math.sqrt(tf.cast(self._sub_d_model, dtype=tf.float32))
        pass
    
    #    create sub-layers
    def build(self, input_shape):
        #    single dense layer producing q,k,v together
        #    (the misspelled '_qvk_dence' weight name is kept as-is for
        #    checkpoint compatibility)
        self._qkv_dense = tf.keras.layers.Dense(name=self._name + '_qvk_dence',
                                                units=self._d_model * 3,
                                                kernel_initializer=tf.initializers.he_normal(),
                                                bias_initializer=tf.initializers.zeros())
        
        #    dropout on the attention output
        self._dropout = tf.keras.layers.Dropout(self._dropout_rate)
        
        #    final linear projection
        self._linear_dense = tf.keras.layers.Dense(name=self._name + '_linear_dence',
                                                   units=self._d_model,
                                                   kernel_initializer=tf.initializers.he_normal(),
                                                   bias_initializer=tf.initializers.zeros())

        super(MutilHeadAttentionLayer, self).build(input_shape)
        pass
    
    #    split into heads
    def split_heads(self, x, sentence_maxlen, sub_d_model, n_head_attention):
        '''
            @param x: Tensor(batch_size, sentence_maxlen, d_model)
            @param sentence_maxlen: sentence length
            @param sub_d_model: per-head dimension
            @param n_head_attention: number of heads
            @return: Tensor(batch_size, n_head_attention, sentence_maxlen, sub_d_model)
        '''
        x = tf.reshape(x, shape=[-1, sentence_maxlen, n_head_attention, sub_d_model])
        x = tf.transpose(x, perm=[0, 2, 1, 3])              #    Tensor(batch_size, n_head_attention, sentence_maxlen, sub_d_model)
        return x
    #    merge heads
    def merge_heads(self, x, sentence_maxlen, d_model):
        '''
            @param x: Tensor(batch_size, n_head_attention, sentence_maxlen, sub_d_model)
            @param sentence_maxlen: sentence length
            @param d_model: model dimension
            @return: Tensor(batch_size, sentence_maxlen, d_model)
        '''
        x = tf.transpose(x, perm=[0, 2, 1, 3])      #    Tensor(batch_size, sentence_maxlen, n_head_attention, sub_d_model)
        x = tf.reshape(x, shape=[-1, sentence_maxlen, d_model])
        return x
    
    #    forward
    def call(self, inputs, mask=None, **kwargs):
        '''
            @param inputs: Tensor(batch_size, sentence_maxlen, d_model)
            @param mask: additive attention mask with 1 at positions to be
                         masked out and 0 elsewhere, broadcastable against
                         (batch_size, n_head_attention, sentence_maxlen, sentence_maxlen)
        '''
        #    compute q,k,v in a single pass
        qkv = self._qkv_dense(inputs)                                   #    Tensor(batch_size, sentence_maxlen, d_model * 3)
        q, k, v = tf.split(qkv, 3, axis=-1)                             #    Tensor(batch_size, sentence_maxlen, d_model)
        
        #    split into heads    Tensor(batch_size, n_head_attention, sentence_maxlen, sub_d_model)
        q = self.split_heads(q, sentence_maxlen=self._sentence_maxlen, sub_d_model=self._sub_d_model, n_head_attention=self._n_head_attention)
        k = self.split_heads(k, sentence_maxlen=self._sentence_maxlen, sub_d_model=self._sub_d_model, n_head_attention=self._n_head_attention)
        v = self.split_heads(v, sentence_maxlen=self._sentence_maxlen, sub_d_model=self._sub_d_model, n_head_attention=self._n_head_attention)
        
        #    scaled dot-product attention
        #    α    Tensor(batch_size, n_head_attention, sentence_maxlen, sentence_maxlen)
        alpha = tf.matmul(q, k, transpose_b=True) / self._sqrt_sub_d_model
        #    fix: add a large NEGATIVE value at masked positions so softmax
        #    assigns them ~0 probability (the original added mask * 1e-9, a
        #    tiny positive offset that left masked positions fully attended)
        alpha = alpha + mask * -1e9
        alpha = tf.nn.softmax(alpha)
        out = tf.matmul(alpha, v)                       #    Tensor(batch_size, n_head_attention, sentence_maxlen, sub_d_model)
        
        #    merge heads    Tensor(batch_size, sentence_maxlen, d_model)
        out = self.merge_heads(out, sentence_maxlen=self._sentence_maxlen, d_model=self._d_model)
        
        #    final linear projection
        out = self._linear_dense(out)
        
        #    dropout
        out = self._dropout(out)
        return out
    
    pass


#    position-wise feed-forward layer
class FeedForwordLayer(tf.keras.layers.Layer):
    '''Two dense layers — expand to f_model with ReLU, then project back to
        d_model — followed by dropout.
    '''
    def __init__(self,
                 name='FeedForwordLayer',
                 f_model=conf.GPT2.get_f_model(),
                 d_model=conf.GPT2.get_d_model(),
                 dropout_rate=conf.GPT2.get_dropout_rate(),
                 **kwargs):
        '''
            @param f_model: hidden (expanded) dimension
            @param d_model: output dimension
            @param dropout_rate: dropout rate applied to the output
        '''
        super(FeedForwordLayer, self).__init__(name=name, **kwargs)
        self._name = name
        self._f_model = f_model
        self._d_model = d_model
        self._dropout_rate = dropout_rate

    def build(self, input_shape):
        #    expansion layer (d_model -> f_model) with ReLU
        self._fc1 = tf.keras.layers.Dense(name=self._name + '_fc1',
                                          units=self._f_model,
                                          activation='relu',
                                          kernel_initializer=tf.initializers.he_normal(),
                                          bias_initializer=tf.initializers.zeros())
        #    projection layer (f_model -> d_model), no activation
        self._fc2 = tf.keras.layers.Dense(name=self._name + '_fc2',
                                          units=self._d_model,
                                          kernel_initializer=tf.initializers.he_normal(),
                                          bias_initializer=tf.initializers.zeros())
        #    dropout on the output
        self._dropout = tf.keras.layers.Dropout(self._dropout_rate)
        super(FeedForwordLayer, self).build(input_shape)

    def call(self, inputs, **kwargs):
        hidden = self._fc1(inputs)
        projected = self._fc2(hidden)
        return self._dropout(projected)

    pass


#    TransformerBlockLayer
class TransformerBlockLayer(tf.keras.layers.Layer):
    '''One pre-norm transformer block:
        LayerNormalization -> MutilHeadAttentionLayer -> residual add
        LayerNormalization -> FeedForwordLayer        -> residual add
    '''
    def __init__(self,
                 name='TransformerBlockLayer',
                 batch_size=conf.GPT2.get_batch_size(),
                 n_head_attention=conf.GPT2.get_n_head_attention(),
                 sentence_maxlen=conf.GPT2.get_pre_training_sentence_maxlen(),
                 f_model=conf.GPT2.get_f_model(),
                 d_model=conf.GPT2.get_d_model(),
                 dropout_rate=conf.GPT2.get_dropout_rate(),
                 **kwargs):
        super(TransformerBlockLayer, self).__init__(name=name, **kwargs)
        self._name = name
        self._batch_size = batch_size
        self._n_head_attention = n_head_attention
        self._sentence_maxlen = sentence_maxlen
        self._f_model = f_model
        self._d_model = d_model
        self._dropout_rate = dropout_rate

    def build(self, input_shape):
        #    attention sub-block
        self._norm1 = tf.keras.layers.LayerNormalization(name=self._name + '_norm1')
        self._mha = MutilHeadAttentionLayer(name=self._name + '_mha',
                                            n_head_attention=self._n_head_attention,
                                            sentence_maxlen=self._sentence_maxlen,
                                            d_model=self._d_model,
                                            batch_size=self._batch_size,
                                            dropout_rate=self._dropout_rate)
        #    feed-forward sub-block
        self._norm2 = tf.keras.layers.LayerNormalization(name=self._name + '_norm2')
        self._ff = FeedForwordLayer(name=self._name + '_ff',
                                    f_model=self._f_model,
                                    d_model=self._d_model,
                                    dropout_rate=self._dropout_rate)
        super(TransformerBlockLayer, self).build(input_shape)

    def call(self, inputs, mask=None, **kwargs):
        '''
            @param inputs: word-embedding matrix of the input sentence
            @param mask: attention mask (padding_mask + attention_mask)
        '''
        #    attention sub-block with residual connection
        attn_out = self._mha(self._norm1(inputs), mask=mask) + inputs
        #    feed-forward sub-block with residual connection
        return self._ff(self._norm2(attn_out)) + attn_out

    pass


#    pre-training output layer
class PreOutputLayer(tf.keras.layers.Layer):
    '''Projects hidden states onto the vocabulary through the shared
        word-embedding layer (projection mode, weight tying) and applies
        softmax over the last axis.
    '''
    def __init__(self,
                 name='PreOutputLayer',
                 word_embedding=None,
                 **kwargs):
        '''
            @param word_embedding: the shared EmbeddingLayer instance,
                                   used here in projection mode
        '''
        super(PreOutputLayer, self).__init__(name=name, **kwargs)
        self._word_embedding = word_embedding

    def build(self, input_shape):
        super(PreOutputLayer, self).build(input_shape)

    def call(self, inputs, **kwargs):
        #    project to vocab_size and softmax    Tensor(batch_size, sentence_maxlen, vocab_size)
        logits = self._word_embedding(inputs, op=EmbeddingOpType.projection)
        return tf.nn.softmax(logits, axis=-1)
    pass


#    cache of the k,v produced by the TransformerBlock layers
class KVRegister:
    '''Records the k,v each token produces while passing through a
       TransformerBlock, with <PAD> entries meant to be stripped.
        Data layout:
            register_k: Tensor(batch_size, num_headers, sen_len, depth)
            register_v: Tensor(batch_size, num_headers, sen_len, depth)
            where: num_blocks: number of TransformerBlock layers
                 sen_len: number of effective tokens (<PAD> removed), ordered
                          by when each token passed through the blocks
                 depth: per-head dimension
    '''
    def __init__(self):
        #    latest registered k / v (None until deposit is called)
        self._register_k = None
        self._register_v = None
        pass
    
    #    store the latest k,v; passes inputs through unchanged so it can sit
    #    inline in a call chain
    def deposit(self, inputs, register_k=None, register_v=None, **kwargs):
        self._register_k = register_k           #    Tensor(batch_size, num_headers, sen_maxlen, depth)
        self._register_v = register_v           #    Tensor(batch_size, num_headers, sen_maxlen, depth)
        return inputs
    #    drop <PAD> entries using the padding mask
    def remove_pad(self, padding_mask):
        '''
            @param padding_mask: padding mask, [batch_size, 1, 1, sentence_maxlen]
                                    0 = keep, 1 = mask out

            NOTE(review): unimplemented stub — both branches below are empty,
            so the <PAD> removal described in the class docstring never
            happens. TODO: implement or remove.
        '''
        #    
        if (self._register_k is not None):
            
            pass
        
        if (self._register_v is not None):
            pass
        pass
    
    
    pass

