# -*- coding: utf-8 -*-  
'''
bert相关零件

    - GeLU（激活函数）

    - WordEmbedding（词嵌入）
        embedding_lookup: 词编码 -> 词向量
        线性投影: 词向量 -> 词的每个词预测概率
    - PositionEmbedding（位置嵌入）
        [0,1,...max_sen_len-1] 做 embedding_lookup
    - SentenceEmbedding（句子嵌入）
        [1,1,...2,2,...] 做embedding_lookup
        
    - TransformerEncoderBlock
    - MutilHeadAttention
    - FeedForward
    
    - PreNSPOutput
        取第1个词向量过fc(hidden_dims, tanh) out=Tensor(batch_size, 1, hidden_dims)
        过fc(2, 线性变换) out=Tensor(batch_size, 1, 2)
    - PreMLMOutput
        从第1个往后取所有有效词向量，过WordEmbedding线性投影 out=Tensor(batch_size, max_sen_num, vocab_size) 
        -1维做softmax out=Tensor(batch_size, max_sen_num, vocab_size) 
    

    - BertLayer（Bert层，作为basebone与其他模型联合使用）
        - Embedding
            - WordEmbedding
            - PositionEmbedding
            - SentenceEmbedding
        - TransformerBlock * n（实际输出层）
        - PreNSPOutput（可选，结果保存在其crt_out属性中）
        - PreMLMOutput（可选，结果保存在其crt_out属性中）

    
    

@author: luoyi
Created on 2021年4月15日
'''
import tensorflow as tf
from enum import Enum

import utils.conf as conf
from utils.dictionary import Dictionaries
from models.bert.preporcess import padding_mask


#    GeLU activation function
class GeLU(tf.keras.layers.Layer):
    '''GeLU multiplies the input x by a stochastic [0,1] mask whose probability depends on x.
        With input X and mask m, m follows a Bernoulli distribution with p = Φ(x),
        where Φ(x) = P(X <= x) and X is standard normal.
        Chosen because neuron inputs tend toward a normal distribution: smaller inputs then
        get a higher chance of being dropped, so the activation depends stochastically on x.

        GeLU(x) = x * P(X <= x) = x * Φ(x)
        GELU(x) ≈ 0.5x * (1 + tanh((2/π)**0.5 * (x + 0.044715x**3)))
    '''
    def __init__(self, approximate=False, **kwargs):
        '''@param approximate: use the tanh approximation of GELU when True'''
        super(GeLU, self).__init__(**kwargs)
        self._approximate = approximate
        pass

    def call(self, inputs, **kwargs):
        #    delegate to the built-in implementation
        return tf.nn.gelu(inputs, self._approximate)
    pass


#    operation selector for WordEmbedding.call
#    NOTE: "Work" is a typo for "Word"; the name is kept because callers reference it
class WorkEmbeddingOpType(Enum):
    #    embedding_lookup: token ids -> vectors
    embedding = 0
    #    linear projection: vectors -> vocab-size scores
    projection = 1
    pass
#    word embedding
class WordEmbedding(tf.keras.layers.Layer):
    '''Token embedding with a tied weight table and two operations:
        - embedding_lookup: token ids -> d_model vectors; the [PAD] id 0 is zeroed out
        - projection: d_model vectors -> vocab_size scores (same table, transposed)
    '''
    def __init__(self, 
                 name='WordEmbedding', 
                 vocab_size=Dictionaries.instance().size(),
                 d_model=conf.BERT.get_d_model(),
                 **kwargs):
        '''
            @param name: layer name
            @param vocab_size: number of rows of the embedding table
            @param d_model: embedding dimension
        '''
        #    BUGFIX: `name` was passed positionally and landed on keras Layer's
        #    `trainable` parameter, leaving the layer with an auto-generated name
        super(WordEmbedding, self).__init__(name=name, **kwargs)
        
        self._vocab_size = vocab_size
        self._d_model = d_model
        pass
    
    def build(self, input_shape):
        #    embedding table Tensor(vocab_size, d_model)
        self._weights = self.add_weight(name=self.name + '_weights', 
                                        shape=[self._vocab_size, self._d_model], 
                                        dtype=tf.float32,
                                        initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01))
        super(WordEmbedding, self).build(input_shape)
        pass
    
    #    embedding_lookup
    def embedding_lookup(self, x):
        '''
            @param x: Tensor(batch_size, max_sen_len) token ids, 0 = [PAD]
            @return: Tensor(batch_size, max_sen_len, d_model), zeroed at padding positions
        '''
        #    mask Tensor(batch_size, max_sen_len, 1): 1 where a word is present, 0 at padding
        mask = tf.expand_dims(tf.cast(tf.not_equal(x, 0), dtype=tf.float32), axis=-1)
        embedding = tf.nn.embedding_lookup(self._weights, x)
        embedding = embedding * mask
        return embedding
    
    #    linear projection (weight tying)
    def projection(self, x):
        '''
            @param x: Tensor(batch_size, max_sen_len, d_model)
            @return: Tensor(batch_size, max_sen_len, vocab_size)
        '''
        return tf.matmul(x, self._weights, transpose_b=True)
    
    def call(self, inputs, op=WorkEmbeddingOpType.embedding, **kwargs):
        '''
            @param inputs: token ids for the embedding op, vectors for the projection op
            @param op: which of the two operations to run
        '''
        if (op == WorkEmbeddingOpType.projection): out = self.projection(inputs)
        else: out = self.embedding_lookup(inputs)
        
        return out
    pass


#    word position embedding
class PositionEmbedding(tf.keras.layers.Layer):
    '''Learned position embedding. Positions start at `start` (default 1) so that
        table row 0 stays reserved for [PAD].
    '''
    def __init__(self,
                 name='PositionEmbedding',
                 max_sen_len=conf.BERT.get_max_sen_len(),
                 d_model=conf.BERT.get_d_model(),
                 **kwargs):
        super(PositionEmbedding, self).__init__(name=name, **kwargs)
        self._max_sen_len = max_sen_len
        self._d_model = d_model
        pass
    
    def build(self, input_shape):
        #    BUGFIX: with start=1 positions run 1..max_sen_len, so the lookup table needs
        #    max_sen_len + 1 rows (row 0 reserved for [PAD]); the original table of
        #    max_sen_len rows made position max_sen_len an out-of-range index.
        #    Mirrors the `max_sen + 1` sizing used for SentenceEmbedding in BertLayer.
        self._embedding_layer = WordEmbedding(name=self.name + '_embedding_layer', vocab_size=self._max_sen_len + 1, d_model=self._d_model)
        
        #    position indices Tensor(1, max_sen_len); the leading 1 broadcasts against
        #    Tensor(batch_size, max_sen_len)
        self._position = tf.cast(tf.range(self._max_sen_len), dtype=tf.int64)
        self._position = tf.expand_dims(self._position, axis=0)
        super(PositionEmbedding, self).build(input_shape)
        pass
    
    def call(self, inputs, start=1, **kwargs):
        '''
            @param inputs: Tensor(batch_size, max_sen_len) token ids (used only for the padding mask)
            @param start: first position index; keep the default 1 so row 0 stays free for [PAD]
            @return: Tensor(batch_size, max_sen_len, d_model)
        '''
        #    mask Tensor(batch_size, max_sen_len): 1 where a word is present, 0 at padding
        mask = tf.cast(tf.not_equal(inputs, 0), dtype=tf.int64)
        #    positions start at 1; 0 is left for [PAD]
        position = self._position + start
        position = position * mask
        
        embedding = self._embedding_layer(position)
        return embedding
    pass


#    sentence (segment) position embedding
class SentenceEmbedding(tf.keras.layers.Layer):
    '''Looks up one learned vector per sentence-index token, e.g. [1,1,...,2,2,...].'''
    def __init__(self, 
                 name='SentenceEmbedding',
                 max_sen=conf.BERT.get_max_sen_len(),
                 d_model=conf.BERT.get_d_model(), 
                 **kwargs):
        '''
            @param max_sen: number of rows of the lookup table
            @param d_model: embedding dimension
        '''
        super(SentenceEmbedding, self).__init__(name=name, **kwargs)
        self._max_sen = max_sen
        self._d_model = d_model
        pass
    
    def build(self, input_shape):
        #    reuse WordEmbedding as a plain lookup table over sentence indices
        self._embedding = WordEmbedding(name=self.name + '_embedding_layer',
                                        vocab_size=self._max_sen,
                                        d_model=self._d_model)
        super(SentenceEmbedding, self).build(input_shape)
        pass
    
    def call(self, inputs, **kwargs):
        '''
            @param inputs: Tensor(batch_size, max_sen_len) sentence index per token
            @return: Tensor(batch_size, max_sen_len, d_model)
        '''
        return self._embedding(inputs)
    pass


#    multi-head attention
class MutilHeadAttention(tf.keras.layers.Layer):
    '''Multi-head self-attention ("Mutil" typo kept so existing callers keep working).
        One fused dense layer produces q/k/v; they are split into n_head heads, combined
        with scaled dot-product attention, merged back and linearly projected.
    '''
    def __init__(self,
                 name='MutilHeadAttention',
                 n_head=conf.BERT.get_n_head_attention(),
                 max_sen_len=conf.BERT.get_max_sen_len(),
                 d_model=conf.BERT.get_d_model(),
                 dropout_rate=conf.BERT.get_dropout_rate(),
                 **kwargs):
        assert (d_model % n_head == 0), 'd_model[{}]必须能乘除n_head[{}].'.format(d_model, n_head)
        
        super(MutilHeadAttention, self).__init__(name=name, **kwargs)
        
        self._n_head = n_head
        self._max_sen_len = max_sen_len
        self._d_model = d_model
        self._dropout_rate = dropout_rate
        self._sub_d_model = d_model // n_head       #    dimension of each head
        #    sqrt(d_k) used to scale the attention logits
        self._sqrt_model = tf.math.sqrt(tf.convert_to_tensor(self._sub_d_model, dtype=tf.float32))
        pass
    
    def build(self, input_shape):
        #    fused q/k/v projection (units = 3 * d_model, split after the matmul)
        self._qkv_dense = tf.keras.layers.Dense(name=self.name + '_qkv_dense',
                                                units=self._d_model * 3, 
                                                kernel_initializer=tf.initializers.he_normal(),
                                                bias_initializer=tf.initializers.zeros())
        
        #    final linear projection
        self._linear_dense = tf.keras.layers.Dense(name=self.name + '_linear_dence',
                                                   units=self._d_model,
                                                   kernel_initializer=tf.initializers.he_normal(),
                                                   bias_initializer=tf.initializers.zeros())
        
        #    dropout
        self._dropout = tf.keras.layers.Dropout(self._dropout_rate)
        super(MutilHeadAttention, self).build(input_shape)
        pass
    
    #    split into heads
    def split(self, x):
        '''(batch, max_sen_len, d_model) -> (batch, n_head, max_sen_len, sub_d_model)'''
        x = tf.reshape(x, shape=(-1, self._max_sen_len, self._n_head, self._sub_d_model))
        x = tf.transpose(x, perm=[0, 2, 1, 3])
        return x
    #    merge heads back
    def merge(self, x):
        '''(batch, n_head, max_sen_len, sub_d_model) -> (batch, max_sen_len, d_model)'''
        x = tf.transpose(x, perm=[0, 2, 1, 3])
        x = tf.reshape(x, shape=(-1, self._max_sen_len, self._d_model))
        return x
    
    def call(self, inputs, mask=None, **kwargs):
        '''
            @param inputs: Tensor(batch_size, max_sen_len, d_model)
            @param mask: padding mask, 0 where a word is present and 1 at padding; must
                         broadcast against (batch_size, n_head, max_sen_len, max_sen_len)
                         — NOTE(review): produced by preporcess.padding_mask; confirm its shape there
            @return: Tensor(batch_size, max_sen_len, d_model)
        '''
        #    q, k, v    each Tensor(batch_size, max_sen_len, d_model)
        q, k, v = tf.split(self._qkv_dense(inputs), num_or_size_splits=3, axis=-1)
        
        #    split into heads    Tensor(batch_size, n_head, max_sen_len, sub_d_model)
        q = self.split(q)
        k = self.split(k)
        v = self.split(v)
        
        #    scaled dot-product attention    Tensor(batch_size, n_head, max_sen_len, max_sen_len)
        alpha = tf.matmul(q, k, transpose_b=True) / self._sqrt_model
        #    BUGFIX: padding logits must be pushed towards -inf before the softmax; the
        #    original `mask * 1e-9` added a vanishing value and effectively masked nothing.
        #    Also guard mask=None, which the signature allows.
        if (mask is not None): alpha = alpha + mask * -1e9
        alpha = tf.nn.softmax(alpha, axis=-1)
        out = tf.matmul(alpha, v)                                           #    Tensor(batch_size, n_head, max_sen_len, sub_d_model)
        
        #    merge heads    Tensor(batch_size, max_sen_len, d_model)
        out = self.merge(out)
        
        #    final linear projection
        out = self._linear_dense(out)
        
        #    dropout
        out = self._dropout(out)
        
        return out
    pass


#    FeedForward
class FeedForward(tf.keras.layers.Layer):
    '''Position-wise feed-forward network: dense(f_model) + GeLU, dense(d_model), dropout.'''
    def __init__(self, 
                 name='FeedForward',
                 f_model=conf.BERT.get_f_model(),
                 d_model=conf.BERT.get_d_model(),
                 dropout_rate=conf.BERT.get_dropout_rate(),
                 **kwargs):
        '''
            @param f_model: inner (expanded) dimension
            @param d_model: model dimension restored on output
        '''
        super(FeedForward, self).__init__(name=name, **kwargs)
        self._f_model = f_model
        self._d_model = d_model
        self._dropout_rate = dropout_rate
        pass
    
    def build(self, input_shape):
        #    expansion layer, GeLU-activated
        self._dense1 = tf.keras.layers.Dense(name=self.name + '_dense1',
                                             units=self._f_model,
                                             kernel_initializer=tf.keras.initializers.he_normal(),
                                             bias_initializer=tf.initializers.zeros())
        self._active1 = GeLU()
        
        #    projection back to the model dimension
        self._dense2 = tf.keras.layers.Dense(name=self.name + '_dense2',
                                             units=self._d_model,
                                             kernel_initializer=tf.keras.initializers.he_normal(),
                                             bias_initializer=tf.initializers.zeros())
        
        self._dropout = tf.keras.layers.Dropout(self._dropout_rate)
        super(FeedForward, self).build(input_shape)
        pass
    
    def call(self, inputs, **kwargs):
        '''
            @param inputs: Tensor(batch_size, max_sen_len, d_model)
            @return: Tensor(batch_size, max_sen_len, d_model)
        '''
        #    expand -> activate -> project -> dropout
        hidden = self._active1(self._dense1(inputs))
        return self._dropout(self._dense2(hidden))
    pass


#    TransformerEncoderBlock
class TransformerEncoderBlock(tf.keras.layers.Layer):
    '''Pre-norm transformer encoder block:
        x -> LayerNorm -> MutilHeadAttention -> +x -> LayerNorm -> FeedForward -> +residual
    '''
    def __init__(self,
                 name='TransformerEncoderBlock',
                 n_head=conf.BERT.get_n_head_attention(),
                 max_sen_len=conf.BERT.get_max_sen_len(),
                 d_model=conf.BERT.get_d_model(),
                 f_model=conf.BERT.get_f_model(),
                 dropout_rate=conf.BERT.get_dropout_rate(),
                 **kwargs):
        #    BUGFIX: `name` was passed positionally and landed on keras Layer's
        #    `trainable` parameter, leaving the layer with an auto-generated name
        super(TransformerEncoderBlock, self).__init__(name=name, **kwargs)

        self._n_head = n_head
        self._max_sen_len = max_sen_len
        self._d_model = d_model
        self._f_model = f_model
        self._dropout_rate = dropout_rate
        pass
    
    def build(self, input_shape):
        #    multi-head attention sub-layer (pre-norm)
        self._norm_mha = tf.keras.layers.LayerNormalization()
        self._mha = MutilHeadAttention(name=self.name + '_mha', 
                                       n_head=self._n_head, 
                                       max_sen_len=self._max_sen_len, 
                                       d_model=self._d_model, 
                                       dropout_rate=self._dropout_rate)
        
        #    feed-forward sub-layer (pre-norm)
        self._norm_feedforward = tf.keras.layers.LayerNormalization()
        self._feedforward = FeedForward(name=self.name + '_feedforward', 
                                        f_model=self._f_model, 
                                        d_model=self._d_model, 
                                        dropout_rate=self._dropout_rate)
        
        super(TransformerEncoderBlock, self).build(input_shape)
        pass
    
    def call(self, inputs, mask=None, **kwargs):
        '''
            @param inputs: Tensor(batch_size, max_sen_len, d_model)
            @param mask: padding mask forwarded to the attention sub-layer
            @return: Tensor(batch_size, max_sen_len, d_model)
        '''
        #    attention sub-layer with residual connection
        x_mha = self._norm_mha(inputs)
        out_mha = self._mha(x_mha, mask=mask)
        out_mha = out_mha + inputs
        
        #    feed-forward sub-layer with residual connection
        x_feedforward = self._norm_feedforward(out_mha)
        out_feedforward = self._feedforward(x_feedforward)
        out_feedforward = out_feedforward + out_mha
        
        return out_feedforward
    pass


#    output head for the NSP pre-training task
class PreNSPOutput(tf.keras.layers.Layer):
    '''NSP head: takes the [CLS] vector, applies dense(d_model, tanh) then
        dense(2) + softmax; the latest forward result is kept in crt_out.
    '''
    def __init__(self,
                 name='PreNSPOutput',
                 d_model=conf.BERT.get_d_model(),
                 **kwargs):
        super(PreNSPOutput, self).__init__(name=name, **kwargs)
        
        self._d_model = d_model
        pass

    def build(self, input_shape):
        #    first dense, tanh-activated
        self._dense1 = tf.keras.layers.Dense(name=self.name + '_dense1',
                                             units=self._d_model,
                                             activation='tanh',
                                             kernel_initializer=tf.keras.initializers.he_normal(),
                                             bias_initializer=tf.initializers.zeros())
        
        #    second dense producing the 2-way is-next scores
        self._dense2 = tf.keras.layers.Dense(name=self.name + '_dense2',
                                             units=2,
                                             kernel_initializer=tf.keras.initializers.he_normal(),
                                             bias_initializer=tf.initializers.zeros())
        
        #    holds the latest forward result Tensor(batch_size, 2)
        #    BUGFIX: tf.Variable requires an initial_value, and -1 is not a valid shape
        #    spec; a partially-defined TensorShape allows assigning any batch size
        self._crt_out = tf.Variable(initial_value=tf.zeros((0, 2), dtype=tf.float32),
                                    trainable=False,
                                    name=self.name + '_out',
                                    dtype=tf.float32,
                                    shape=tf.TensorShape([None, 2]))
        
        super(PreNSPOutput, self).build(input_shape)
        pass
    
    def call(self, inputs, **kwargs):
        '''
            @param inputs: Tensor(batch_size, max_sen_len, d_model)
            @return: Tensor(batch_size, 2) is-next probabilities
        '''
        #    take the [CLS] word vector
        cls_vec = inputs[:, 0, :]           #    Tensor(batch_size, d_model)
        
        #    first dense
        out = self._dense1(cls_vec)         #    Tensor(batch_size, d_model)
        
        #    second dense + softmax
        out = self._dense2(out)             #    Tensor(batch_size, 2)
        out = tf.nn.softmax(out, axis=-1)
        
        #    keep the latest forward result
        self._crt_out.assign(out)
        
        return out
        
    #    latest forward result
    def get_crt_out(self):
        return self._crt_out
        
    pass


#    output head for the MLM pre-training task
class PreMLMOutput(tf.keras.layers.Layer):
    '''MLM head: drops the [CLS] vector, projects the remaining word vectors back
        through the tied WordEmbedding table and applies a softmax; the latest
        forward result is kept in crt_out.
    '''
    def __init__(self,
                 name='PreMLMOutput',
                 embedding=None,
                 max_sen_len=conf.BERT.get_max_sen_len(),
                 d_model=conf.BERT.get_d_model(),
                 **kwargs):
        '''
            @param embedding: WordEmbedding layer whose table is reused for the projection
        '''
        #    BUGFIX: `name` was passed positionally and landed on keras Layer's
        #    `trainable` parameter, leaving the layer with an auto-generated name
        super(PreMLMOutput, self).__init__(name=name, **kwargs)
        
        self._max_sen_len = max_sen_len
        self._d_model = d_model
        
        self._embedding = embedding
        pass
    
    def build(self, input_shape):
        
        #    holds the latest forward result Tensor(batch_size, max_sen_len-1, vocab_size)
        #    BUGFIX: tf.Variable requires an initial_value and -1 is not a valid shape
        #    spec. The declared shape also disagreed with the actual output: position 0
        #    ([CLS]) is dropped (middle dim max_sen_len-1) and the last dim is the
        #    vocabulary size (unknown here), not d_model — so it is left undefined.
        self._crt_out = tf.Variable(initial_value=tf.zeros((0, self._max_sen_len - 1, 0), dtype=tf.float32),
                                    trainable=False,
                                    name=self.name + '_out',
                                    dtype=tf.float32,
                                    shape=tf.TensorShape([None, self._max_sen_len - 1, None]))
        
        super(PreMLMOutput, self).build(input_shape)
        pass
    
    def call(self, inputs, op=WorkEmbeddingOpType.projection, **kwargs):
        '''
            @param inputs: Tensor(batch_size, max_sen_len, d_model)
            @return: Tensor(batch_size, max_sen_len-1, vocab_size) word probabilities
        '''
        #    drop position 0 ([CLS]); it takes no part in the MLM computation
        x = inputs[:, 1:, :]                #    Tensor(batch_size, max_sen_len-1, d_model)
        
        #    project onto the vocabulary and normalize
        x = self._embedding(x, op=op)       #    Tensor(batch_size, max_sen_len-1, vocab_size)
        out = tf.nn.softmax(x, axis=-1)
        
        #    keep the latest forward result
        self._crt_out.assign(out)
        
        return out
    
    #    latest forward result
    def get_crt_out(self):
        return self._crt_out
    
    pass


#    Bert layer (backbone; meant to be combined with other models)
class BertLayer(tf.keras.layers.Layer):
    '''Bert backbone: word + position + sentence embeddings, n TransformerEncoderBlocks,
        a final LayerNormalization, plus optional NSP/MLM pre-training heads that each
        stash their own result in their crt_out attribute.
    '''
    def __init__(self,
                 name='BertLayer',
                 
                 vocab_size=Dictionaries.instance().size(),
                 max_sen_len=conf.BERT.get_max_sen_len(),  
                 max_sen=conf.BERT.get_max_sen(),
                 n_block=conf.BERT.get_n_block(),
                 n_head=conf.BERT.get_n_head_attention(),
                 d_model=conf.BERT.get_d_model(),
                 f_model=conf.BERT.get_f_model(),
                 dropout_rate=conf.BERT.get_dropout_rate(),
                 
                 is_nsp=False,                                  #    whether to run the pre-training PreNSPOutput head
                 is_mlm=False,                                  #    whether to run the pre-training PreMLMOutput head
                 
                 **kwargs):
        super(BertLayer, self).__init__(name=name, **kwargs)
        
        self._name = name
        
        self._vocab_size = vocab_size
        self._max_sen_len = max_sen_len
        self._max_sen = max_sen
        self._n_block = n_block
        self._n_head = n_head
        self._d_model = d_model
        self._f_model = f_model
        self._dropout_rate = dropout_rate
        
        self._is_nsp = is_nsp
        self._is_mlm = is_mlm
        
        pass
    
    def build(self, input_shape):
        
        #    embedding part
        #    word embedding
        self._word_embedding = WordEmbedding(name=self._name + '_word_embedding', 
                                             vocab_size=self._vocab_size, 
                                             d_model=self._d_model)
        #    word position embedding
        self._position_embedding = PositionEmbedding(name=self._name + '_position_embedding', 
                                                     max_sen_len=self._max_sen_len, 
                                                     d_model=self._d_model)
        #    sentence position embedding
        self._sentence_embedding = SentenceEmbedding(name=self._name + '_sentence_embedding', 
                                                     max_sen=self._max_sen + 1,                         #    one extra row so that index 0 stays free for [PAD]
                                                     d_model=self._d_model)
        
        #    n TransformerEncoderBlocks (the actual output layers)
        self._transformer_encoder_blocks = [TransformerEncoderBlock(name=self._name + '_transformer_encoder_block_' + str(i), 
                                                                    n_head=self._n_head, 
                                                                    max_sen_len=self._max_sen_len, 
                                                                    d_model=self._d_model, 
                                                                    f_model=self._f_model, 
                                                                    dropout_rate=self._dropout_rate)\
                                            for i in range(self._n_block)]
        
        #    layer norm applied after the n blocks
        self._norm = tf.keras.layers.LayerNormalization()
        
        #    optional pre-training heads
        if (self._is_nsp): self._nsp_out = PreNSPOutput(name=self._name + '_nsp_out', d_model=self._d_model)
        if (self._is_mlm): self._mlm_out = PreMLMOutput(name=self._name + '_mlm_out', embedding=self._word_embedding)
        
        super(BertLayer, self).build(input_shape)
        pass
    
    def call(self, inputs, **kwargs):
        '''
            @param inputs: Tensor(batch_size, 2, max_sen_len)
                                    0: token ids of the randomly [MASK]ed sentence
                                    1: sentence position ids, starting from 1
            @return: Tensor(batch_size, max_sen_len, d_model) output of the last block + layer norm
        '''
        inputs = tf.cast(inputs, dtype=tf.int64)
        #    split token ids from sentence ids
        x, sen = tf.split(inputs, num_or_size_splits=2, axis=1)
        if (x.shape[1] == 1): x = tf.squeeze(x, axis=1)                       #    Tensor(batch_size, max_sen_len)
        if (sen.shape[1] == 1): sen = tf.squeeze(sen, axis=1)                   #    Tensor(batch_size, max_sen_len)
        
        #    padding mask — per the original note: 0 where a word is present, 1 at padding
        #    NOTE(review): exact shape/semantics live in preporcess.padding_mask; confirm there
        pad_mask = padding_mask(x)                      #    Tensor(batch_size, max_sen_len)
        
        #    sum of the three embeddings
        word_embedding = self._word_embedding(x)                    #    Tensor(batch_size, max_sen_len, d_model)
        pos_embedding = self._position_embedding(x)                 #    Tensor(batch_size, max_sen_len, d_model)
        sen_embedding = self._sentence_embedding(sen)               #    Tensor(batch_size, max_sen_len, d_model)
        x = word_embedding + pos_embedding + sen_embedding          #    Tensor(batch_size, max_sen_len, d_model)
        
        #    run the n transformer encoder blocks                   #    Tensor(batch_size, max_sen_len, d_model)
        for encoder_block in self._transformer_encoder_blocks:
            x = encoder_block(x, mask=pad_mask)
            pass
        out = self._norm(x)
        
        #    run the optional pre-training heads for their side effects only:
        #    each stores its result in its own crt_out attribute (nothing is returned here)
        if (self._is_nsp): self._nsp_out(out)                       #    Tensor(batch_size, 2)
        if (self._is_mlm): self._mlm_out(out)                       #    Tensor(batch_size, max_sen_len-1, vocab_size)
        
        #    output after the n blocks + layer norm
        return out
    
    pass


