# -*- coding: utf-8 -*-  
'''
Transformer网络结构

    ------ 输入 ------
    输入：词编码序列[batch_size, maxlen]
    ------ Encoder Input preprocess ------
    Embedding: out=[batch_size, maxlen, embedding_dims]
    Positional_Encoding: out=[batch_size, maxlen, embedding_dims]
    ------ Encoder ------
    Encoder * 6：
        Mutil_Head_Attention: out=[batch_size, maxlen, embedding_dims]
        Res & Norm
        Feed_Forward_Neural_Network: out=[batch_size, maxlen, semantics_dims]
        Res & Norm
    ------ Decoder Input preprocess ------
    Embedding: out=[batch_size, maxlen, embedding_dims]
    Positional_Encoding: out=[batch_size, maxlen, embedding_dims]
    ------ Decoder ------
    Decoder * 6：
        Masked_Mutil_Head_Attention: out=[batch_size, maxlen, embedding_dims]
        Res & Norm
        Mutil_Head_Attention: out=[batch_size, maxlen, embedding_dims]
        Res & Norm
        Feed_Forward_Neural_Network: out=[batch_size, maxlen, semantics_dims]
        Res & Norm
    ------ FC ------
    FC: unit=[字典长度] active=softmax out=[batch_size, maxlen, 字典长度]
        输出序列中每个词在字典中的概率


@author: luoyi
Created on 2021年3月22日
'''
import tensorflow as tf

import utils.conf as conf
import utils.dictionaries as dictionaries
from models.abstract_nets import AbstractModel
from models.transformer.part import PositionalEncodingLayer, EncoderLayer, DecoderLayer
from models.transformer.losses import TransformerLoss
from models.transformer.metrics import TransformerMetric
from models.transformer.preprocess import padding_mask_matrix, sequence_mask_matrix


#    Transformer模型
#    Transformer model: encoder/decoder stacks plus a softmax vocabulary projection.
class TransformerModel(AbstractModel):
    """Sequence-to-sequence Transformer.

    Data flow (shapes per the module docstring):
        encoder side: token ids -> embedding -> positional encoding -> encoder stack
        decoder side: token ids -> embedding -> positional encoding -> decoder stack
        output      : Dense(dict_size, softmax) -> per-position word probabilities
    """

    def __init__(self,
                 name='TransformerModel',
                 num_layers=conf.TRANSFORMER.get_num_layers(),
                 mutil_attention_groups=conf.TRANSFORMER.get_mutil_head_attention_groups(),
                 sentence_maxlen=conf.TRANSFORMER.get_sentence_maxlen(),
                 embedding_dims=conf.TRANSFORMER.get_embedding_dims(),
                 self_attention_dims=conf.TRANSFORMER.get_self_attention_dims(),
                 feed_forward_dims=conf.TRANSFORMER.get_feed_forward_dims(),
                 dict_size=dictionaries.dict_size(),
                 batch_size=conf.DATASET.get_batch_size(),
                 **kwargs):
        #    Hyper-parameters are stored before super().__init__() runs, because
        #    the parent constructor presumably invokes assembling(), which reads
        #    them — TODO confirm against AbstractModel.
        self._num_layers = num_layers
        self._mutil_attention_groups = mutil_attention_groups
        self._sentence_maxlen = sentence_maxlen
        self._embedding_dims = embedding_dims
        self._self_attention_dims = self_attention_dims
        self._feed_forward_dims = feed_forward_dims
        self._dict_size = dict_size
        self._batch_size = batch_size

        super(TransformerModel, self).__init__(name=name, **kwargs)

    #    Optimizer used for gradient updates.
    def create_optimizer(self, learning_rate=0.001):
        return tf.optimizers.Adam(learning_rate=learning_rate)

    #    Loss function.
    def create_loss(self):
        return TransformerLoss(batch_size=self._batch_size,
                               sentence_maxlen=self._sentence_maxlen)

    #    Evaluation metric.
    def create_metrics(self):
        return TransformerMetric(batch_size=self._batch_size,
                                 sentence_maxlen=self._sentence_maxlen)

    #    Build every sub-layer of the network.
    def assembling(self):
        #    --- encoder side ---
        #    token-id -> dense word vector
        self._encoder_embedding = tf.keras.layers.Embedding(name='encoder_embedding',
                                                            input_dim=self._dict_size,
                                                            output_dim=self._embedding_dims,
                                                            input_length=self._sentence_maxlen)
        #    adds the fixed positional signal to the embeddings
        self._encoder_positional_encoding = PositionalEncodingLayer(name='encoder_positional_encoding',
                                                                    sentence_maxlen=self._sentence_maxlen,
                                                                    embedding_dims=self._embedding_dims)
        #    stack of num_layers encoder blocks
        self._encoder_layer = EncoderLayer(name='encoder_layer',
                                           num_layers=self._num_layers,
                                           mutil_attention_groups=self._mutil_attention_groups,
                                           sentence_maxlen=self._sentence_maxlen,
                                           embedding_dims=self._embedding_dims,
                                           self_attention_dims=self._self_attention_dims,
                                           feed_forward_dims=self._feed_forward_dims,
                                           batch_size=self._batch_size)

        #    --- decoder side ---
        #    token-id -> dense word vector
        self._decoder_embedding = tf.keras.layers.Embedding(name='decoder_embedding',
                                                            input_dim=self._dict_size,
                                                            output_dim=self._embedding_dims,
                                                            input_length=self._sentence_maxlen)
        #    adds the fixed positional signal to the embeddings
        self._decoder_positional_encoding = PositionalEncodingLayer(name='decoder_positional_encoding',
                                                                    sentence_maxlen=self._sentence_maxlen,
                                                                    embedding_dims=self._embedding_dims)
        #    stack of num_layers decoder blocks
        self._decoder_layer = DecoderLayer(name='decoder_layer',
                                           num_layers=self._num_layers,
                                           mutil_attention_groups=self._mutil_attention_groups,
                                           sentence_maxlen=self._sentence_maxlen,
                                           embedding_dims=self._embedding_dims,
                                           self_attention_dims=self._self_attention_dims,
                                           feed_forward_dims=self._feed_forward_dims,
                                           batch_size=self._batch_size)

        #    final projection onto the vocabulary, softmax per position
        self._fc = tf.keras.layers.Dense(name='fc_output',
                                         units=self._dict_size,
                                         activation='softmax',
                                         kernel_initializer=tf.initializers.he_normal(),
                                         bias_initializer=tf.initializers.zeros())

        #    precomputed look-ahead (sequence) mask, reused on every forward pass
        self._sequence_mask = sequence_mask_matrix(batch_size=self._batch_size,
                                                   sentence_maxlen=self._sentence_maxlen,
                                                   mutil_groups=self._mutil_attention_groups)

    #    Forward pass.
    def call(self, inputs, training=None, mask=None):
        """inputs packs both token sequences along axis 1:
        inputs[:, 0, :] feeds the encoder, inputs[:, 1, :] feeds the decoder.
        Returns (batch_size, sentence_maxlen, dict_size) word probabilities.
        """
        src_seq, tgt_seq = tf.split(inputs, num_or_size_splits=[1, 1], axis=1)
        src_seq = tf.squeeze(src_seq, axis=1)       # (batch_size, sentence_maxlen)
        tgt_seq = tf.squeeze(tgt_seq, axis=1)       # (batch_size, sentence_maxlen)

        #    masks: padding tokens never take part in attention; the decoder
        #    self-attention additionally blocks future positions
        src_padding_mask = padding_mask_matrix(seq_src=src_seq)
        tgt_padding_mask = padding_mask_matrix(seq_src=tgt_seq)
        look_ahead_mask = tf.maximum(self._sequence_mask, tgt_padding_mask)

        #    encoder: embed, add positions, run the encoder stack
        enc_vec = self._encoder_positional_encoding(self._encoder_embedding(src_seq))
        enc_vec = self._encoder_layer(encoder_vec=enc_vec, padding_mask=src_padding_mask)

        #    decoder: embed, add positions, attend over the encoder output
        #    (the right-shift / <go> prepend is assumed to be done by the
        #    dataset preprocessing — TODO confirm)
        dec_vec = self._decoder_positional_encoding(self._decoder_embedding(tgt_seq))
        dec_vec = self._decoder_layer(encoder_vec=enc_vec,
                                      decoder_vec=dec_vec,
                                      padding_mask=src_padding_mask,
                                      sequence_mask=look_ahead_mask)

        #    project onto the vocabulary
        return self._fc(dec_vec)

    pass
