# -*- coding: utf-8 -*-
'''
Seq2seq network layer.

@author: luoyi
Created on 2021-03-18
'''
import tensorflow as tf

import utils.conf as conf


#    seq2seq
class Seq2SeqLayer(tf.keras.layers.Layer):
    """GRU-based encoder-decoder (seq2seq) layer with dot-product attention.

    Expects ``inputs`` shaped ``(batch, 2, seq_len)``: ``inputs[:, 0]`` holds
    the encoder token ids and ``inputs[:, 1]`` the decoder token ids (teacher
    forcing). Token id 0 is treated as padding (``mask_zero=True``).
    Returns per-step vocabulary probabilities shaped
    ``(batch, seq_len, worddict_size)``.
    """

    def __init__(self,
                 name='Seq2SeqLayer',
                 worddict_size=-1,
                 embedding_dims=conf.MODELS.get_embedding_dims(),
                 hidden_units=conf.MODELS.get_hidden_units(),
                 **kwargs):
        """
        :param name: layer name
        :param worddict_size: vocabulary size; must be a positive integer
        :param embedding_dims: dimensionality of the token embeddings
        :param hidden_units: number of GRU units in encoder and decoder
        :raises ValueError: if ``worddict_size`` is not positive
        """
        super(Seq2SeqLayer, self).__init__(name=name, **kwargs)

        #    Fail fast: the placeholder default (-1) would otherwise surface
        #    as an obscure error deep inside the Embedding layer.
        if worddict_size <= 0:
            raise ValueError('worddict_size must be a positive integer, got: %s' % worddict_size)

        #    Encoder: embedding (id 0 masked as padding) + GRU.
        #    return_state=True so the final hidden state can seed the decoder.
        self._encoder_embedding = tf.keras.layers.Embedding(name='encoder_embedding', input_dim=worddict_size, output_dim=embedding_dims, mask_zero=True)
        self._encoder_grus = tf.keras.layers.GRU(name='encoder_grus',
                                                 units=hidden_units,
                                                 return_sequences=True,
                                                 return_state=True,
                                                 kernel_initializer=tf.initializers.he_normal(),
                                                 bias_initializer=tf.initializers.zeros())

        #    Decoder: embedding + GRU, initialized from the encoder's final state.
        self._decoder_embedding = tf.keras.layers.Embedding(name='decoder_embedding', input_dim=worddict_size, output_dim=embedding_dims, mask_zero=True)
        self._decoder_grus = tf.keras.layers.GRU(name='decoder_grus',
                                                 units=hidden_units,
                                                 return_sequences=True,
                                                 return_state=True,
                                                 kernel_initializer=tf.initializers.he_normal(),
                                                 bias_initializer=tf.initializers.zeros())
        self._attention = tf.keras.layers.Attention()

        #    Output projection (a dense softmax over the vocabulary; letting
        #    the RNN emit the full dictionary directly would be too large).
        self._fc = tf.keras.layers.Dense(worddict_size, activation='softmax',
                                         kernel_initializer=tf.initializers.he_normal(),
                                         bias_initializer=tf.initializers.zeros())

    def call(self, inputs, **kwargs):
        """Run one teacher-forced encode/decode pass.

        :param inputs: int tensor shaped ``(batch, 2, seq_len)``;
                       index 0 along axis 1 = encoder ids, index 1 = decoder ids
        :return: float tensor ``(batch, seq_len, worddict_size)`` of
                 per-step vocabulary probabilities
        """
        #    Separate encoder and decoder token ids (unstack drops axis 1,
        #    equivalent to split + squeeze).
        encoder_inputs, decoder_inputs = tf.unstack(inputs, num=2, axis=1)

        #    Encode; the final hidden state h acts as the context vector C.
        encoder_embedded = self._encoder_embedding(encoder_inputs)
        encoder_outputs, encoder_state = self._encoder_grus(encoder_embedded)

        #    Decode with teacher forcing, seeded by the encoder's final state.
        decoder_embedded = self._decoder_embedding(decoder_inputs)
        decoder_outputs, _ = self._decoder_grus(decoder_embedded, initial_state=encoder_state)

        #    Attention: query = decoder outputs, value (and key) = encoder outputs.
        attention_outputs = self._attention([decoder_outputs, encoder_outputs])

        #    Project each step onto the vocabulary.
        return self._fc(attention_outputs)

