# -*- coding: utf-8 -*-  
'''
TBert network structure.

Created on 2021-09-12
@author: luoyi
'''
import tensorflow as tf

import utils.conf as conf
from utils.iexicon import WordsWarehouse
from models.abstract_nets import AbstractModel
from models.tbert.parts import LDALayer, DMMLayer, BertLayer, ClassifyLayer
from models.tbert.losses import TBertLoss
from models.tbert.metrics import TBertMetric


#    TBert model: fuses topic-model semantics (LDA + GSDMM) with BERT
#    sentence semantics and classifies the concatenated representation.
class TBert(AbstractModel):
    def __init__(self,
                 name='tbert',
                 #    topic-model configuration
                 K_lda=None,
                 K_dmm=None,
                 #    bert configuration
                 bert_vocab_size=None,
                 max_sen_len=None,
                 max_sen=None,
                 n_block=None,
                 n_head=None,
                 d_model=None,
                 f_model=None,
                 dropout_rate=None,
                 #    tbert configuration
                 K=None,
                 classify_fc1_hidden=None,
                 classify_fc1_dropout=None,
                 classify_fc2_hidden=None,
                 classify_fc2_dropout=None,
                 learning_rate=None,
                 input_shape=None):
        '''
            Every parameter defaults to None and is then resolved from the
            project configuration (conf.*) at construction time.  Resolving
            lazily -- instead of in the signature defaults -- avoids running
            the conf getters and WordsWarehouse.instance() (which loads the
            lexicon) as a side effect of merely importing this module.
            
            @param K_lda: LDA topic count (default conf.LDA.get_k())
            @param K_dmm: GSDMM topic count (default conf.GSDMM.get_k())
            @param bert_vocab_size: vocabulary size of the bert embedding
            @param max_sen_len: max token count per sentence
            @param max_sen: max sentence count per sample
            @param n_block: number of bert transformer blocks
            @param n_head: number of attention heads
            @param d_model: model (embedding) dimension
            @param f_model: feed-forward hidden dimension
            @param dropout_rate: bert dropout rate
            @param K: number of output classes
            @param learning_rate: optimizer learning rate passed to AbstractModel
            @param input_shape: default (None, 4, max_sen_len); derived from the
                                resolved max_sen_len so the two stay consistent
                                when the caller overrides max_sen_len only
        '''
        #    bert configuration
        self._bert_vocab_size = WordsWarehouse.instance().words_count() if bert_vocab_size is None else bert_vocab_size
        self._max_sen_len = conf.TBERT.get_max_sen_len() if max_sen_len is None else max_sen_len
        self._max_sen = conf.TBERT.get_max_sen() if max_sen is None else max_sen
        self._n_block = conf.BERT.get_n_block() if n_block is None else n_block
        self._n_head = conf.BERT.get_n_head_attention() if n_head is None else n_head
        self._d_model = conf.BERT.get_d_model() if d_model is None else d_model
        self._f_model = conf.BERT.get_f_model() if f_model is None else f_model
        self._dropout_rate = conf.BERT.get_dropout_rate() if dropout_rate is None else dropout_rate
        #    tbert configuration
        self._K = conf.TBERT.get_k() if K is None else K
        self._classify_fc1_hidden = conf.TBERT.get_classify_fc1_hidden() if classify_fc1_hidden is None else classify_fc1_hidden
        self._classify_fc1_dropout = conf.TBERT.get_classify_fc1_dropout() if classify_fc1_dropout is None else classify_fc1_dropout
        self._classify_fc2_hidden = conf.TBERT.get_classify_fc2_hidden() if classify_fc2_hidden is None else classify_fc2_hidden
        self._classify_fc2_dropout = conf.TBERT.get_classify_fc2_dropout() if classify_fc2_dropout is None else classify_fc2_dropout
        #    topic-model configuration
        self._K_lda = conf.LDA.get_k() if K_lda is None else K_lda
        self._K_dmm = conf.GSDMM.get_k() if K_dmm is None else K_dmm
        
        if learning_rate is None:
            learning_rate = conf.TBERT.get_learning_rate()
        #    tie the default input_shape to the resolved max_sen_len
        if input_shape is None:
            input_shape = (None, 4, self._max_sen_len)
        
        super().__init__(learning_rate, name, auto_assembling=True, is_build=True, input_shape=input_shape)
    
    def create_optimizer(self, learning_rate=0.001):
        '''Adam optimizer used for training.'''
        return tf.optimizers.Adam(learning_rate)
    
    def create_loss(self):
        '''Task-specific loss for TBert.'''
        return TBertLoss()
    
    def create_metrics(self):
        '''Metrics for TBert, parameterized by the class count K.'''
        return [TBertMetric(K=self._K)]
    
    #    assemble the network
    def assembling(self):
        '''Instantiate the sub-layers (invoked via AbstractModel, auto_assembling=True).'''
        #    topic-model layers: map topic inputs to d_model-sized semantic vectors
        #    lda layer
        self._lda_layer = LDALayer(name='lda_layer', d_model=self._d_model, K=self._K_lda)
        #    dmm layer
        self._dmm_layer = DMMLayer(name='dmm_layer', d_model=self._d_model, K=self._K_dmm)
        
        #    bert encoder
        self._bert_layer = BertLayer(name='bert_layer',
                                     vocab_size=self._bert_vocab_size,
                                     max_sen_len=self._max_sen_len,
                                     max_sen=self._max_sen,
                                     n_block=self._n_block,
                                     n_head=self._n_head,
                                     d_model=self._d_model,
                                     f_model=self._f_model,
                                     dropout_rate=self._dropout_rate)
        
        #    classification head over the concatenated topic + bert vectors
        self._classify_layer = ClassifyLayer(name='classify_layer',
                                             classify_fc1_hidden=self._classify_fc1_hidden,
                                             classify_fc1_dropout=self._classify_fc1_dropout,
                                             classify_fc2_hidden=self._classify_fc2_hidden,
                                             classify_fc2_dropout=self._classify_fc2_dropout,
                                             K=self._K)
    
    #    forward pass
    def call(self, inputs, training=None, mask=None):
        '''
            @param inputs: Tensor(batch_size, 4, max_sen_len), -1 marks padding.
                           Per the split below: rows 0-1 feed bert, row 2 feeds
                           lda, row 3 feeds dmm.
            @return: Tensor(batch_size, K) classification output
        '''
        #    split the stacked per-layer training data apart
        inputs = tf.cast(inputs, dtype=tf.int64)
        bert_inputs, lda_inputs, dmm_inputs = tf.split(value=inputs, num_or_size_splits=[2, 1, 1], axis=1)
        lda_inputs = tf.squeeze(lda_inputs, axis=1)
        dmm_inputs = tf.squeeze(dmm_inputs, axis=1)
        
        #    topic semantic vectors: Tensor(batch_size, d_model) each
        #    NOTE(review): `training` is not forwarded explicitly; Keras
        #    auto-propagates it to sub-layers that declare a `training`
        #    argument -- confirm the custom layers do, so dropout is
        #    disabled at inference time.
        lda_topic = self._lda_layer(lda_inputs)
        dmm_topic = self._dmm_layer(dmm_inputs)
        
        #    bert output: Tensor(batch_size, max_sen_len, d_model), zeros at padded positions
        bert_out = self._bert_layer(bert_inputs)
        #    first position as the sentence-level vector: Tensor(batch_size, d_model)
        bert_topic = bert_out[:, 0, :]
        
        #    merge the three model outputs: Tensor(batch_size, 3 * d_model)
        classify_inputs = tf.concat([lda_topic, dmm_topic, bert_topic], axis=-1)
        
        #    classification head: Tensor(batch_size, K)
        return self._classify_layer(classify_inputs)


