# -*- coding: utf-8 -*-  
'''
TPLinker network.

@author: luoyi
Created on 2021-06-28
'''
import datetime
import tensorflow as tf

import utils.logger_factory as logf
import utils.conf as conf
import utils.relationships as rel
import utils.dictionaries as dicts
from models.abstract_nets import AbstractModel
from models.bert.part import BertLayer
from models.tplinker.part import HandshakingKernelLayer, FCLayer
from models.tplinker.losses import TPLinkerLosser
from models.tplinker.metrics import NERMetrics, REMetrics


#    TPLinker network
class TPLinkerNet(AbstractModel):
    """TPLinker joint entity/relation extraction network.

    Forward pipeline:
        token ids -> BertLayer (contextual token vectors)
                  -> HandshakingKernelLayer (token-pair "handshaking" vectors)
                  -> FCLayer (NER and RE predictions).

    NOTE(review): all keyword defaults below are evaluated once at import
    time from the project config modules — a config reload after import will
    not be reflected in these defaults; confirm that is intended.
    """

    def __init__(self,
                 name='TPLinker',

                 #    BERT-related hyper-parameters
                 vocab_size=dicts.dict_size(),
                 n_block=conf.BERT.get_n_block(),
                 n_head=conf.BERT.get_n_head_attention(),
                 d_model=conf.BERT.get_d_model(),
                 f_model=conf.BERT.get_f_model(),
                 dropout_rate=conf.BERT.get_dropout_rate(),
                 #    TPLinker-related hyper-parameters
                 max_sen_len=conf.TPLINKER.get_max_sentence_len(),
                 rel_size=len(rel.id_rel),

                 #    training-related hyper-parameters
                 batch_size=conf.DATASET_BAIDU.get_batch_size(),
                 loss_lamda_ner=conf.TPLINKER.get_loss_lamda_ner(),
                 loss_lamda_re=conf.TPLINKER.get_loss_lamda_re(),
                 learning_rate=0.001,
                 input_shape=(None, 2, conf.TPLINKER.get_max_sentence_len()),

                 auto_assembling=True,
                 is_build=True,
                 **kwargs):
        #    Store hyper-parameters BEFORE calling super(): the parent
        #    constructor triggers assembling(), which reads these attributes.
        self._vocab_size = vocab_size
        self._max_sen_len = max_sen_len
        self._n_block = n_block
        self._n_head = n_head
        self._d_model = d_model
        self._f_model = f_model
        self._dropout_rate = dropout_rate
        self._batch_size = batch_size
        self._rel_size = rel_size
        self._loss_lamda_ner = loss_lamda_ner
        self._loss_lamda_re = loss_lamda_re

        super(TPLinkerNet, self).__init__(learning_rate, name, auto_assembling, is_build, input_shape)

    #    Create the optimizer.
    def create_optimizer(self, learning_rate=0.001):
        """Return an Adam optimizer with the given learning rate."""
        return tf.optimizers.Adam(learning_rate=learning_rate)

    #    Loss function.
    def create_loss(self):
        """Return the combined TPLinker loss (weighted NER + RE terms)."""
        return TPLinkerLosser(fc_layer=self._fc_layer,
                              max_sen_len=self._max_sen_len,
                              rel_size=self._rel_size,
                              loss_lamda_ner=self._loss_lamda_ner,
                              loss_lamda_re=self._loss_lamda_re)

    #    Evaluation metrics.
    def create_metrics(self):
        """Return the NER and RE metric objects used during training."""
        return [NERMetrics(name='ner_metric',
                           max_sen_len=self._max_sen_len,
                           fc_layer=self._fc_layer),
                REMetrics(name='re_metric',
                          max_sen_len=self._max_sen_len,
                          fc_layer=self._fc_layer,
                          rel_size=self._rel_size)]

    #    Assemble the network.
    def assembling(self):
        """Instantiate the three sub-layers (BERT, handshaking kernel, FC).

        Invoked via the AbstractModel constructor; relies on the attributes
        assigned in __init__.
        """
        #    BERT layer: produces contextual token vectors (NSP/MLM heads off).
        self._bert_layer = BertLayer(name='bert_layer',
                                     vocab_size=self._vocab_size,
                                     max_sen_len=self._max_sen_len,
                                     max_sen=1,
                                     n_block=self._n_block,
                                     n_head=self._n_head,
                                     d_model=self._d_model,
                                     f_model=self._f_model,
                                     dropout_rate=self._dropout_rate,
                                     is_nsp=False, is_mlm=False)

        #    Handshaking layer: builds token-pair vectors from the BERT output.
        self._handshaking_layer = HandshakingKernelLayer(name=self.name + '_handshaking_kernel_layer',
                                                         d_model=self._d_model,
                                                         max_sen_len=self._max_sen_len)

        #    FC layer: maps handshaking vectors to entity/relation predictions.
        self._fc_layer = FCLayer(name=self.name + '_fc_layer',
                                 max_sen_len=self._max_sen_len,
                                 rel_size=self._rel_size,
                                 batch_size=self._batch_size)

    #    Forward pass.
    def call(self, inputs, training=None, mask=None):
        """Run the forward pass, logging per-layer wall-clock timings.

        NOTE(review): datetime-based timing measures Python-side execution
        only; if this method gets traced into a tf.function the deltas
        reflect trace time, not per-step runtime — confirm eager execution
        is expected here.
        """
        #    Hoisted loop-invariant lookup: the log path was previously
        #    re-evaluated for every tf.print call.
        log_path = logf.get_logger_filepath('tplinker_time_consum')

        tf.print('----------------------------------------------------------------', output_stream=log_path)

        start = datetime.datetime.now()
        #    BERT layer: semantic vectors.
        h = self._bert_layer(inputs)
        tf.print('bert_layer time_consum:', (datetime.datetime.now() - start), output_stream=log_path)

        start = datetime.datetime.now()
        #    Handshaking layer: converts to a vector sequence of length Σ max_sen_len.
        vec = self._handshaking_layer(h)
        tf.print('handshaking_layer time_consum:', (datetime.datetime.now() - start), output_stream=log_path)

        start = datetime.datetime.now()
        #    FC layer: entity and relation predictions.
        out = self._fc_layer(vec)
        tf.print('fc_layer time_consum:', (datetime.datetime.now() - start), output_stream=log_path)
        return out

