# -*- coding: utf-8 -*-  
'''
BERT network structure.

@author: luoyi
Created on 2021-04-16
'''
import tensorflow as tf


import utils.conf as conf
import utils.dictionaries_bert as dict_bert
from models.abstract_nets import AbstractModel
from models.bert.part import WordEmbedding, PositionEmbedding, SentenceEmbedding
from models.bert.part import TransformerEncoderBlock
from models.bert.part import PreMLMOutput, PreNSPOutput
from models.bert.losses import PreLoss
from models.bert.metrics import PreMLMMetric, PreNSPMetric
from models.bert.preporcess import padding_mask


#    BERT network structure
class BertModel(AbstractModel):
    '''BERT pre-training model: embeddings -> N transformer encoder blocks -> NSP/MLM heads.'''

    def __init__(self,
                 name='bert',
                 learning_rate=conf.BERT.get_learning_rate(),
                 input_shape=(None, 2, conf.BERT.get_pre_training_sentence_maxlen()),
                 auto_assembling=True,
                 is_build=True,

                 vocab_size=dict_bert.dict_size(),
                 max_sen_len=conf.BERT.get_pre_training_sentence_maxlen(),
                 max_sen=conf.BERT.get_pre_training_max_sentences(),
                 n_block=conf.BERT.get_n_block(),
                 n_head=conf.BERT.get_n_head_attention(),
                 d_model=conf.BERT.get_d_model(),
                 f_model=conf.BERT.get_f_model(),
                 dropout_rate=conf.BERT.get_dropout_rate(),

                 lamud_loss_pre_nsp=conf.BERT.get_lamud_loss_pre_nsp(),
                 lamud_loss_pre_mlm=conf.BERT.get_lamud_loss_pre_mlm(),
                 ):
        '''
            @param name: model name, also used as the prefix of layer names
            @param learning_rate: learning rate handed to the Adam optimizer
            @param input_shape: expected input shape (batch, 2, max_sen_len)
            @param auto_assembling: forwarded to AbstractModel (assemble on construction)
            @param is_build: forwarded to AbstractModel (build on construction)
            @param vocab_size: vocabulary size shared by the word embedding and the MLM output head
            @param max_sen_len: maximum number of words in one sentence
            @param max_sen: maximum number of sentences in one sample
            @param n_block: number of stacked transformer encoder blocks
            @param n_head: number of attention heads per block
            @param d_model: hidden dimension of the model
            @param f_model: inner dimension of the feed-forward sublayer
            @param dropout_rate: dropout rate inside the encoder blocks
            @param lamud_loss_pre_nsp: weight of the NSP term in the pre-training loss
            @param lamud_loss_pre_mlm: weight of the MLM term in the pre-training loss
        '''
        self._vocab_size = vocab_size
        self._max_sen_len = max_sen_len
        self._max_sen = max_sen
        self._n_block = n_block
        self._n_head = n_head
        self._d_model = d_model
        self._f_model = f_model
        self._dropout_rate = dropout_rate

        self._lamud_loss_pre_nsp = lamud_loss_pre_nsp
        self._lamud_loss_pre_mlm = lamud_loss_pre_mlm

        #    Attributes must be set before super().__init__, which may trigger
        #    assembling()/build() depending on auto_assembling / is_build.
        super(BertModel, self).__init__(learning_rate, name, auto_assembling, is_build, input_shape)

    #    Optimizer
    def create_optimizer(self, learning_rate=0.001):
        '''Adam with the beta values commonly used for transformer training.'''
        return tf.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98)

    #    Loss function
    def create_loss(self):
        '''Combined pre-training loss weighting the NSP and MLM objectives.'''
        return PreLoss(lamud_loss_pre_nsp=self._lamud_loss_pre_nsp,
                       lamud_loss_pre_mlm=self._lamud_loss_pre_mlm,
                       max_sen_len=self._max_sen_len,
                       vocab_size=self._vocab_size)

    #    Metrics
    def create_metrics(self):
        '''One metric per pre-training objective: MLM and NSP.'''
        return [PreMLMMetric(name='mlm_metric', max_sen_len=self._max_sen_len, vocab_size=self._vocab_size),
                PreNSPMetric(name='nsp_metric')]

    #    Assemble the model
    def assembling(self):
        '''Create all sub-layers; called by AbstractModel when auto_assembling is on.'''
        #    Embedding part
        #    Word embedding
        #    NOTE: use self._vocab_size (not dict_bert.dict_size()) so a
        #    caller-supplied vocab_size stays consistent with the MLM head/loss.
        self._word_embedding = WordEmbedding(name=self._name + '_word_embedding',
                                             vocab_size=self._vocab_size,
                                             d_model=self._d_model)
        #    Word position embedding
        self._position_embedding = PositionEmbedding(name=self._name + '_position_embedding',
                                                     max_sen_len=self._max_sen_len,
                                                     d_model=self._d_model)
        #    Sentence position embedding
        self._sentence_embedding = SentenceEmbedding(name=self._name + '_sentence_embedding',
                                                     max_sen=self._max_sen,
                                                     d_model=self._d_model)

        #    n stacked transformer encoder blocks
        self._transformer_encoder_blocks = [TransformerEncoderBlock(name=self._name + '_transformer_encoder_block_' + str(i),
                                                                    n_head=self._n_head,
                                                                    max_sen_len=self._max_sen_len,
                                                                    d_model=self._d_model,
                                                                    f_model=self._f_model,
                                                                    dropout_rate=self._dropout_rate)
                                            for i in range(self._n_block)]

        #    Layer norm applied after the n blocks
        self._norm = tf.keras.layers.LayerNormalization()

        #    Pre-training output heads
        self._nsp_out = PreNSPOutput(name=self._name + '_nsp_out',
                                     d_model=self._d_model)
        #    MLM head ties its projection to the word-embedding weights
        self._mlm_out = PreMLMOutput(name=self._name + '_mlm_out',
                                     embedding=self._word_embedding)

    #    Forward pass
    def call(self, inputs, training=None, mask=None):
        '''
            @param inputs: Tensor(batch_size, 2, max_sen_len)
                                    0: word ids of the sentence after random [MASK]ing
                                    1: sentence position ids, starting from 1
            @return: RaggedTensor stacking
                        [0] NSP logits Tensor(batch_size, 2)
                        [1] MLM logits Tensor(batch_size * (max_sen_len - 1), vocab_size)
        '''
        inputs = tf.cast(inputs, dtype=tf.int64)
        #    Split the two channels
        x, sen = tf.split(inputs, num_or_size_splits=2, axis=1)
        x = tf.squeeze(x, axis=1)                       #    Tensor(batch_size, max_sen_len)
        sen = tf.squeeze(sen, axis=1)                   #    Tensor(batch_size, max_sen_len)

        #    Padding mask first: 0 where a word exists, 1 where padded
        pad_mask = padding_mask(x)                      #    Tensor(batch_size, max_sen_len)

        #    Embeddings (word + position + sentence) are summed
        word_embedding = self._word_embedding(x)                    #    Tensor(batch_size, max_sen_len, d_model)
        pos_embedding = self._position_embedding(x)                 #    Tensor(batch_size, max_sen_len, d_model)
        sen_embedding = self._sentence_embedding(sen)               #    Tensor(batch_size, max_sen_len, d_model)
        x = word_embedding + pos_embedding + sen_embedding          #    Tensor(batch_size, max_sen_len, d_model)

        #    Through the n transformer encoder blocks                #    Tensor(batch_size, max_sen_len, d_model)
        for encoder_block in self._transformer_encoder_blocks:
            x = encoder_block(x, mask=pad_mask)

        x = self._norm(x)

        #    Compute both pre-training outputs and stack them into a RaggedTensor
        nsp_out = self._nsp_out(x)                      #    Tensor(batch_size, 2)
        mlm_out = self._mlm_out(x)                      #    Tensor(batch_size, max_sen_len-1, vocab_size)
        #    RaggedTensor
        #        Tensor(batch_size, 2)
        #        Tensor(batch_size * (max_sen_len - 1), vocab_size)
        mlm_out = tf.reshape(mlm_out, shape=(-1, self._vocab_size))
        pre_out = tf.ragged.stack([nsp_out, mlm_out])
        #    (dead code removed: a y_pred reshape of pre_out[1] was computed
        #     here but never used — the loss/metrics receive pre_out directly)
        return pre_out