# -*- coding: utf-8 -*-  
'''
NLU网络

Created on 2021年10月2日
@author: luoyi
'''
import tensorflow as tf

import utils.conf as conf
from utils.dictionary import Dictionaries
from utils.relationship import Relationship
from models.abstract_nets import AbstractModel
from models.nlu.losses import NLULoss
from models.nlu.metrics import CRFMetric, InformMetric
from models.bert.part import BertLayer
from models.nlu.parts import InformLayer
from models.crf.part import CRFLayer


#    NLU网络
#    NLU model: a BERT encoder feeding two heads — intent recognition
#    (inform) and entity tagging (CRF)
class NLU(AbstractModel):
    def __init__(self,
                 #    BERT encoder configuration
                 max_sen_len=conf.NLU.get_max_sen_len(),
                 max_sen=conf.BERT.get_max_sen(),
                 vocab_size=Dictionaries.instance().size(),
                 n_block=conf.BERT.get_n_block(),
                 n_head=conf.BERT.get_n_head_attention(),
                 d_model=conf.BERT.get_d_model(),
                 f_model=conf.BERT.get_f_model(),
                 dropout_rate=conf.BERT.get_dropout_rate(),
                 #    intent-recognition head
                 inform_size=Relationship.instance().rel_size(),
                 #    entity-tagging (CRF) head
                 pos_size=Relationship.instance().sot_size(),
                 #    model-level configuration
                 learning_rate=conf.NLU.get_learning_rate(),
                 input_shape=(None, 2, conf.NLU.get_max_sen_len()),
                 loss_lamda_crf=conf.NLU.get_loss_lamda_crf(),
                 loss_lamda_inform=conf.NLU.get_loss_lamda_inform(),
                 auto_assembling=True,
                 is_build=True,
                 name='nlu'):
        self._name = name
        self._learning_rate = learning_rate
        
        #    head output sizes
        self._inform_size = inform_size
        self._pos_size = pos_size
        
        #    BERT hyper-parameters, consumed later by assembling()
        self._vocab_size = vocab_size
        self._max_sen_len = max_sen_len
        self._max_sen = max_sen
        self._n_block = n_block
        self._n_head = n_head
        self._d_model = d_model
        self._f_model = f_model
        self._dropout_rate = dropout_rate
        
        #    per-head loss mixing weights
        self._loss_lamda_crf = loss_lamda_crf
        self._loss_lamda_inform = loss_lamda_inform
        
        #    AbstractModel presumably drives assembling()/building from the
        #    auto_assembling/is_build flags, so every attribute above must be
        #    set before this call — keep it last.
        super().__init__(learning_rate, name, auto_assembling, is_build, input_shape)
    
    #    optimizer factory
    def create_optimizer(self, learning_rate=0.001):
        return tf.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98)
    
    #    loss factory (reads back the layers created in assembling())
    def create_loss(self):
        return NLULoss(name='nlu_loss',
                       loss_lamda_crf=self._loss_lamda_crf,
                       loss_lamda_inform=self._loss_lamda_inform,
                       inform_layer=self._inform_layer,
                       crf_layer=self._crf_layer)
    
    #    metric factories, one per head
    def create_metrics(self):
        crf_metric = CRFMetric(name='crf_metric',
                               crf_layer=self._crf_layer)
        inform_metric = InformMetric(name='inform_metric',
                                     inform_layer=self._inform_layer,
                                     inform_size=self._inform_size)
        return [crf_metric, inform_metric]
    
    #    build the sub-layers
    def assembling(self):
        #    BERT encoder, with the NSP and MLM pre-training heads disabled
        self._bert_layer = BertLayer(name='bert_layer',
                                     vocab_size=self._vocab_size,
                                     max_sen_len=self._max_sen_len,
                                     max_sen=self._max_sen,
                                     n_block=self._n_block,
                                     n_head=self._n_head,
                                     d_model=self._d_model,
                                     f_model=self._f_model,
                                     dropout_rate=self._dropout_rate,
                                     is_nsp=False, is_mlm=False)
        #    intent-recognition head
        self._inform_layer = InformLayer(name='inform_layer', inform_size=self._inform_size)
        #    CRF entity-tagging head
        self._crf_layer = CRFLayer(name='crf_layer', pos_size=self._pos_size)
    
    #    forward pass
    def call(self, inputs, training=None, mask=None):
        '''
            @param inputs: Tensor(batch_size, 2, max_sen_len)
            @return: inputs, unchanged — the two heads record their outputs
                     internally; the loss/metric objects hold references to
                     the layers and read the results back from them.
        '''
        #    semantic vectors from BERT    Tensor(batch_size, max_sen_len, d_model)
        sem = self._bert_layer(inputs)
        
        #    [CLS]-position vector (batch_size, d_model) through the intent head,
        #    which maps it to (batch_size, inform_size)
        self._inform_layer(sem[:, 0, :])
        
        #    full sequence through the CRF head    Tensor(batch_size, max_sen_len, pos_size)
        self._crf_layer(sem)
        
        return inputs
