# -*- coding: utf-8 -*-
'''
tBert components (model layers).

Created on 2021-09-12
@author: luoyi
'''
import tensorflow as tf

import utils.conf as conf
from utils.iexicon import WordsWarehouse
from models.bert.part import WordEmbedding, PositionEmbedding, SentenceEmbedding
from models.bert.part import TransformerEncoderBlock
from models.bert.preporcess import padding_mask


#    LDA layer
class LDALayer(tf.keras.layers.Layer):
    '''LDA layer
        - The LDA topic predictions are computed ahead of time and fed in.
        - The sequence's predicted topic id is exchanged for a dense semantic
          vector through a trainable topic-embedding table (bag-of-topics).
    '''
    def __init__(self,
                 name='lda_layer',
                 d_model=conf.BERT.get_d_model(),
                 K=conf.LDA.get_k(),
                 **kwargs):
        '''
            @param name: layer name, also used as the weight-name prefix
            @param d_model: dimensionality of the semantic vector
            @param K: number of LDA topics
        '''
        self._name = name

        self._d_model = d_model
        self._K = K

        super(LDALayer, self).__init__(name=name, **kwargs)

    def build(self, input_shape):
        #    trainable topic-embedding table: one d_model vector per topic
        self._embedding = self.add_weight(name=self._name + '_embedding',
                                          shape=(self._K, self._d_model),
                                          dtype=tf.float32,
                                          trainable=True)

        super(LDALayer, self).build(input_shape)

    def call(self, inputs, training=None, **kwargs):
        '''
            @param inputs: Tensor(batch_size, max_sen_len) topic id predicted
                           by LDA for each token; -1 marks padding
            @return: Tensor(batch_size, d_model) one semantic vector per sequence
        '''
        #    take the sequence-level topic id (first position of every row)
        topics = inputs[:, 0]

        #    exchange the topic id for its semantic vector
        th = tf.nn.embedding_lookup(params=self._embedding, ids=topics)
        return th


#    DMM layer
class DMMLayer(tf.keras.layers.Layer):
    '''DMM layer
        - The GSDMM model predicts one topic per sequence.
        - That topic id is mapped to a dense semantic vector through a
          trainable topic-embedding table (bag-of-topics).
    '''
    def __init__(self,
                 name='dmm_layer',
                 d_model=conf.BERT.get_d_model(),
                 K=conf.GSDMM.get_k(),
                 **kwargs):
        '''
            @param name: layer name, also used as the weight-name prefix
            @param d_model: width of the semantic vector
            @param K: number of GSDMM topics
        '''
        self._name = name
        self._d_model = d_model
        self._K = K
        super(DMMLayer, self).__init__(name=name, **kwargs)

    def build(self, input_shape):
        #    topic-embedding table, one trainable row per topic
        self._embedding = self.add_weight(name=f'{self._name}_embedding',
                                          shape=(self._K, self._d_model),
                                          dtype=tf.float32,
                                          trainable=True)
        super(DMMLayer, self).build(input_shape)

    def call(self, inputs, **kwargs):
        '''
            @param inputs: Tensor(batch_size, max_sen_len) topic id predicted
                           by DMM for each sentence; -1 marks padding
            @return: Tensor(batch_size, d_model) one semantic vector per sequence
        '''
        #    the sequence-level topic id sits at position 0 of every row
        sentence_topics = inputs[:, 0]
        #    look the topic id up in the embedding table
        return tf.nn.embedding_lookup(params=self._embedding, ids=sentence_topics)


#    Bert layer
class BertLayer(tf.keras.layers.Layer):
    '''BERT layer
        Produces contextual semantic vectors: word + position + sentence
        embeddings are summed, passed through n transformer encoder blocks,
        then normalized once more.
    '''
    def __init__(self,
                 name='bert_layer',
                 #    BERT hyper-parameters (defaults read from conf / vocabulary)
                 vocab_size=WordsWarehouse.instance().words_count(),
                 max_sen_len=conf.BERT.get_max_sen_len(),  
                 max_sen=3,
                 n_block=conf.BERT.get_n_block(),
                 n_head=conf.BERT.get_n_head_attention(),
                 d_model=conf.BERT.get_d_model(),
                 f_model=conf.BERT.get_f_model(),
                 dropout_rate=conf.BERT.get_dropout_rate(),
                 **kwargs):
        '''
            @param name: layer name, used as a prefix for sub-layer names
            @param vocab_size: vocabulary size for the word embedding
            @param max_sen_len: maximum sequence length
            @param max_sen: maximum number of sentence segments
            @param n_block: number of stacked transformer encoder blocks
            @param n_head: attention heads per encoder block
            @param d_model: embedding / hidden dimension
            @param f_model: feed-forward inner dimension of each block
            @param dropout_rate: dropout rate inside the encoder blocks
        '''
        self._name = name
        
        #    BERT hyper-parameters
        self._vocab_size = vocab_size
        self._max_sen_len = max_sen_len
        self._max_sen = max_sen
        self._n_block = n_block
        self._n_head = n_head
        self._d_model = d_model
        self._f_model = f_model
        self._dropout_rate = dropout_rate
        
        super(BertLayer, self).__init__(name=name, **kwargs)
        pass
    
    def build(self, input_shape):
        #    embedding part
        #    word (token) embedding
        self._word_embedding = WordEmbedding(name=self._name + '_word_embedding', 
                                             vocab_size=self._vocab_size, 
                                             d_model=self._d_model)
        #    token position embedding
        self._position_embedding = PositionEmbedding(name=self._name + '_position_embedding', 
                                                     max_sen_len=self._max_sen_len, 
                                                     d_model=self._d_model)
        #    sentence (segment) position embedding
        self._sentence_embedding = SentenceEmbedding(name=self._name + '_sentence_embedding', 
                                                     max_sen=self._max_sen, 
                                                     d_model=self._d_model)
        
        #    n stacked transformer_encoder_blocks
        self._transformer_encoder_blocks = [TransformerEncoderBlock(name=self._name + '_transformer_encoder_block_' + str(i), 
                                                                    n_head=self._n_head, 
                                                                    max_sen_len=self._max_sen_len, 
                                                                    d_model=self._d_model, 
                                                                    f_model=self._f_model, 
                                                                    dropout_rate=self._dropout_rate)\
                                            for i in range(self._n_block)]
        
        #    layer_norm applied after the n blocks
        self._norm = tf.keras.layers.LayerNormalization()
        
        super(BertLayer, self).build(input_shape)
        pass
    
    def call(self, inputs, **kwargs):
        '''
            @param inputs: Tensor(batch_size, 2, max_sen_len)
                                    0: token ids of the sentence after random [MASK]ing
                                    1: sentence position ids, starting from 1
            @return: Tensor(batch_size, max_sen_len, d_model) contextual vectors
        '''
        inputs = tf.cast(inputs, dtype=tf.int64)
        #    split the two input channels (token ids / sentence ids)
        x, sen = tf.split(inputs, num_or_size_splits=2, axis=1)
        x = tf.squeeze(x, axis=1)                       #    Tensor(batch_size, max_sen_len)
        sen = tf.squeeze(sen, axis=1)                   #    Tensor(batch_size, max_sen_len)
        
        #    padding mask first: 0 where a token is present, 1 where padding
        pad_mask = padding_mask(x)                      #    Tensor(batch_size, max_sen_len)
        
        #    embeddings (per original author: padded positions are all zeros)
        word_embedding = self._word_embedding(x)                    #    Tensor(batch_size, max_sen_len, d_model)    all zeros at padded positions
        pos_embedding = self._position_embedding(x)                 #    Tensor(batch_size, max_sen_len, d_model)    all zeros at padded positions
        sen_embedding = self._sentence_embedding(sen)               #    Tensor(batch_size, max_sen_len, d_model)    all zeros at padded positions
        x = word_embedding + pos_embedding + sen_embedding          #    Tensor(batch_size, max_sen_len, d_model)    all zeros at padded positions
        
        #    run the n transformer_encoder_blocks                   #    Tensor(batch_size, max_sen_len, d_model)    all zeros at padded positions
        for encoder_block in self._transformer_encoder_blocks:
            x = encoder_block(x, mask=pad_mask)
            pass
        
        #    final layer normalization
        x = self._norm(x)
        
        return x
    
    pass


#    Classification layer
class ClassifyLayer(tf.keras.layers.Layer):
    '''Classification head
        Two fully-connected layers (each followed by dropout and tanh),
        then a softmax output over the K classes.
    '''
    def __init__(self,
                 name='classify_layer',
                 classify_fc1_hidden=conf.TBERT.get_classify_fc1_hidden(),
                 classify_fc1_dropout=conf.TBERT.get_classify_fc1_dropout(),
                 classify_fc2_hidden=conf.TBERT.get_classify_fc2_hidden(),
                 classify_fc2_dropout=conf.TBERT.get_classify_fc2_dropout(),
                 K=conf.TBERT.get_k(),
                 **kwargs):
        '''
            @param name: layer name, used as a prefix for sub-layer names
            @param classify_fc1_hidden: units of the first fc layer
            @param classify_fc1_dropout: dropout rate after the first fc layer
            @param classify_fc2_hidden: units of the second fc layer
            @param classify_fc2_dropout: dropout rate after the second fc layer
            @param K: number of output classes
        '''
        self._name = name

        self._classify_fc1_hidden = classify_fc1_hidden
        self._classify_fc1_dropout = classify_fc1_dropout
        self._classify_fc2_hidden = classify_fc2_hidden
        self._classify_fc2_dropout = classify_fc2_dropout

        self._K = K

        super(ClassifyLayer, self).__init__(name=name, **kwargs)

    def build(self, input_shape):
        #    NOTE(review): He-normal on biases (not the usual zeros) is unusual —
        #    kept as-is to preserve behavior; confirm it is intended.
        #    first fc layer + dropout
        self._fc1 = tf.keras.layers.Dense(name=self._name + '_fc1',
                                          units=self._classify_fc1_hidden,
                                          use_bias=True,
                                          kernel_initializer=tf.initializers.HeNormal(),
                                          bias_initializer=tf.initializers.HeNormal())
        self._dropout1 = tf.keras.layers.Dropout(rate=self._classify_fc1_dropout)

        #    second fc layer + dropout
        self._fc2 = tf.keras.layers.Dense(name=self._name + '_fc2',
                                          units=self._classify_fc2_hidden,
                                          use_bias=True,
                                          kernel_initializer=tf.initializers.HeNormal(),
                                          bias_initializer=tf.initializers.HeNormal())
        self._dropout2 = tf.keras.layers.Dropout(rate=self._classify_fc2_dropout)

        #    output projection to K logits
        self._output = tf.keras.layers.Dense(name=self._name + '_output',
                                             units=self._K,
                                             kernel_initializer=tf.initializers.HeNormal(),
                                             bias_initializer=tf.initializers.HeNormal(),
                                             )

        super(ClassifyLayer, self).build(input_shape)

    def call(self, inputs, training=None, **kwargs):
        '''
            @param inputs: Tensor(batch_size, 768 * 3) concatenated feature vector
            @param training: forwarded explicitly to the dropout sub-layers so
                             dropout is active only during training (previously
                             relied on implicit Keras call-context propagation)
            @return: Tensor(batch_size, K) softmax class probabilities
        '''
        #    first fc -> dropout -> tanh
        out = self._fc1(inputs)
        out = self._dropout1(out, training=training)
        out = tf.nn.tanh(out)

        #    second fc -> dropout -> tanh
        out = self._fc2(out)
        out = self._dropout2(out, training=training)
        out = tf.nn.tanh(out)

        #    output logits -> softmax probabilities
        out = self._output(out)
        out = tf.nn.softmax(out)

        return out
