import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras import layers
from tensorflow.keras.layers import Layer
#from tensorflow.keras.callbacks import Callback
from transformer.attention import PositionEncoding, MultiHeadAttention, positional_encoding


class PositionWiseFeedForward(Layer):
    """Position-wise feed-forward sublayer: relu(x . W1 + b1) . W2 + b2.

    Projects the last axis from ``model_dim`` up to ``inner_dim`` and back,
    applied identically at every sequence position.

    Args:
        model_dim: output feature size (last axis of the layer output).
        inner_dim: hidden size of the expansion projection.
        trainable: whether the four weight tensors are trainable.
    """

    def __init__(self, model_dim, inner_dim, trainable=True, **kwargs):
        self._model_dim = model_dim
        self._inner_dim = inner_dim
        self._trainable = trainable
        super(PositionWiseFeedForward, self).__init__(**kwargs)

    def get_config(self):
        # Keys must match the __init__ argument names: Keras deserializes
        # with cls(**config), so the old "_model_dim"-style keys raised a
        # TypeError on reload.
        config = super().get_config().copy()
        config.update({
            "model_dim": self._model_dim,
            "inner_dim": self._inner_dim,
            "trainable": self._trainable
        })
        return config

    def build(self, input_shape):
        # NOTE: "bais_*" typo fixed to "bias_*"; this renames the saved
        # variables, so checkpoints written by the old code will not load
        # by name.
        self.weights_inner = self.add_weight(
            shape=(input_shape[-1], self._inner_dim),
            initializer='glorot_uniform',
            trainable=self._trainable,
            name="weights_inner")
        self.weights_out = self.add_weight(
            shape=(self._inner_dim, self._model_dim),
            initializer='glorot_uniform',
            trainable=self._trainable,
            name="weights_out")
        self.bias_inner = self.add_weight(
            shape=(self._inner_dim,),
            initializer='uniform',
            trainable=self._trainable,
            name="bias_inner")
        self.bias_out = self.add_weight(
            shape=(self._model_dim,),
            initializer='uniform',
            trainable=self._trainable,
            name="bias_out")
        super(PositionWiseFeedForward, self).build(input_shape)

    def call(self, inputs):
        # Weights are float32, so cast integer/half inputs up front.
        if K.dtype(inputs) != 'float32':
            inputs = K.cast(inputs, 'float32')
        inner_out = K.relu(K.dot(inputs, self.weights_inner) + self.bias_inner)
        outputs = K.dot(inner_out, self.weights_out) + self.bias_out
        return outputs

    def compute_output_shape(self, input_shape):
        # Keep all leading dims, map the feature axis to model_dim.
        # (The old code returned a bare int, which is not a valid shape.)
        return tuple(input_shape[:-1]) + (self._model_dim,)

class LayerNormalization(Layer):
    """Layer normalization over the last axis: gamma * (x - mean) / std + beta.

    Args:
        epsilon: small constant added to the variance for numerical stability.
    """

    def __init__(self, epsilon=1e-8, **kwargs):
        self._epsilon = epsilon
        super(LayerNormalization, self).__init__(**kwargs)

    def get_config(self):
        # Key must match the __init__ argument name ("epsilon", not
        # "_epsilon") or cls(**config) fails on deserialization.
        config = super().get_config().copy()
        config.update({
            "epsilon": self._epsilon
        })
        return config

    def build(self, input_shape):
        # beta (shift) starts at 0, gamma (scale) starts at 1 — the
        # identity transform. 'zeros'/'ones' are the canonical aliases.
        self.beta = self.add_weight(
            shape=(input_shape[-1],),
            initializer='zeros',
            name='beta')
        self.gamma = self.add_weight(
            shape=(input_shape[-1],),
            initializer='ones',
            name='gamma')
        super(LayerNormalization, self).build(input_shape)

    def call(self, inputs):
        # Normalize each position independently over its feature axis.
        mean, variance = tf.nn.moments(inputs, [-1], keepdims=True)
        normalized = (inputs - mean) / ((variance + self._epsilon) ** 0.5)
        outputs = self.gamma * normalized + self.beta
        return outputs

    def compute_output_shape(self, input_shape):
        # Shape-preserving transform.
        return input_shape
        
class Transformer(Layer):
    """Encoder-decoder Transformer operating on pre-embedded inputs.

    ``call`` takes ``[encoder_inputs, decoder_inputs]`` (both already
    projected to ``model_dim``; the embedding is done by the caller) and
    returns the decoder output sequence of shape
    ``(batch, decoder_len, model_dim)`` — the final vocabulary projection
    is left to the caller.

    Args:
        vocab_size: target vocabulary size (kept for config/compatibility;
            the output projection itself lives outside this layer).
        model_dim: feature size of the encoder/decoder streams.
        n_heads: number of attention heads (must divide model_dim).
        encoder_stack: number of encoder layers.
        decoder_stack: number of decoder layers.
        feed_forward_size: hidden size of the position-wise FFN.
        dropout_rate: dropout applied to the position-encoded inputs.
        max_seq_len: length of the precomputed positional-encoding table
            (previously hard-coded to 15); sequences must not exceed it.
    """

    def __init__(self, vocab_size, model_dim,
            n_heads=8, encoder_stack=6, decoder_stack=6, feed_forward_size=2048,
            dropout_rate=0.1, max_seq_len=15, **kwargs):
        self._vocab_size = vocab_size
        self._model_dim = model_dim
        self._n_heads = n_heads
        self._encoder_stack = encoder_stack
        self._decoder_stack = decoder_stack
        self._feed_forward_size = feed_forward_size
        self._dropout_rate = dropout_rate
        self._max_seq_len = max_seq_len
        super(Transformer, self).__init__(**kwargs)

    def get_config(self):
        # Keys match the __init__ argument names so cls(**config) works
        # on deserialization (the old "_vocab_size"-style keys did not).
        config = super().get_config().copy()
        config.update({
            "vocab_size": self._vocab_size,
            "model_dim": self._model_dim,
            "n_heads": self._n_heads,
            "encoder_stack": self._encoder_stack,
            "decoder_stack": self._decoder_stack,
            "feed_forward_size": self._feed_forward_size,
            "dropout_rate": self._dropout_rate,
            "max_seq_len": self._max_seq_len
        })
        return config

    def build(self, input_shape):
        # Fixed (non-trainable) sinusoidal position table, sliced per batch
        # in encoder()/decoder().
        self.position_encodings = positional_encoding(self._max_seq_len, self._model_dim)
        # Per-layer sublayers for the encoder stack.
        self.en_mha = []
        self.en_ln_1 = []
        self.en_pw = []
        self.en_ln_2 = []
        for i in range(self._encoder_stack):
            self.en_mha.append(MultiHeadAttention(self._n_heads, self._model_dim // self._n_heads, masking=False))
            self.en_ln_1.append(LayerNormalization())
            self.en_pw.append(PositionWiseFeedForward(self._model_dim, self._feed_forward_size))
            self.en_ln_2.append(LayerNormalization())
        # Per-layer sublayers for the decoder stack; the first attention is
        # causal (future=True), the second attends over encoder output.
        self.de_mha_1 = []
        self.de_ln_1 = []
        self.de_mha_2 = []
        self.de_ln_2 = []
        self.de_pw = []
        self.de_ln_3 = []
        for i in range(self._decoder_stack):
            self.de_mha_1.append(MultiHeadAttention(self._n_heads, self._model_dim // self._n_heads, masking=False, future=True))
            self.de_ln_1.append(LayerNormalization())
            self.de_mha_2.append(MultiHeadAttention(self._n_heads, self._model_dim // self._n_heads, masking=False))
            self.de_ln_2.append(LayerNormalization())
            self.de_pw.append(PositionWiseFeedForward(self._model_dim, self._feed_forward_size))
            self.de_ln_3.append(LayerNormalization())
        super(Transformer, self).build(input_shape)

    def encoder(self, inputs, training=None):
        # Inputs are pre-embedded; add position encodings for the actual
        # sequence length.
        encodings = inputs + self.position_encodings[:, :tf.shape(inputs)[1], :]
        # Dropout only while training (the old code also dropped at
        # inference, which injects noise into predictions).
        if training:
            encodings = K.dropout(encodings, self._dropout_rate)

        for i in range(self._encoder_stack):
            # Self-attention.
            attention_input = [encodings, encodings, encodings]
            attention_out = self.en_mha[i](attention_input)
            # Add & Norm (residual connection).
            attention_out += encodings
            attention_out = self.en_ln_1[i](attention_out)
            # Position-wise feed-forward.
            ff_out = self.en_pw[i](attention_out)
            # Add & Norm.
            ff_out += attention_out
            encodings = self.en_ln_2[i](ff_out)

        return encodings

    def decoder(self, inputs, training=None):
        decoder_inputs, encoder_encodings = inputs
        # Pre-embedded decoder inputs + position encodings.
        encodings = decoder_inputs + self.position_encodings[:, :tf.shape(decoder_inputs)[1], :]
        if training:
            encodings = K.dropout(encodings, self._dropout_rate)

        for i in range(self._decoder_stack):
            # Causal self-attention over the decoder stream.
            masked_attention_input = [encodings, encodings, encodings]
            masked_attention_out = self.de_mha_1[i](masked_attention_input)
            # Add & Norm.
            masked_attention_out += encodings
            masked_attention_out = self.de_ln_1[i](masked_attention_out)

            # Cross-attention: queries from decoder, keys/values from encoder.
            attention_input = [masked_attention_out, encoder_encodings, encoder_encodings]
            attention_out = self.de_mha_2[i](attention_input)
            # Add & Norm.
            attention_out += masked_attention_out
            attention_out = self.de_ln_2[i](attention_out)

            # Position-wise feed-forward.
            ff_out = self.de_pw[i](attention_out)
            # Add & Norm.
            ff_out += attention_out
            encodings = self.de_ln_3[i](ff_out)

        # The pre-softmax vocabulary projection is intentionally left to
        # the caller (see create_model_*).
        return encodings

    def call(self, inputs, training=None):
        encoder_inputs, decoder_inputs = inputs
        # Encode once and feed the result to the decoder (the old code
        # redundantly ran the encoder a second time and discarded it).
        encoder_encodings = self.encoder(encoder_inputs, training=training)
        decoder_outputs = self.decoder([decoder_inputs, encoder_encodings], training=training)
        return decoder_outputs

    def compute_output_shape(self, input_shape):
        # (batch of encoder input, decoder sequence length, model_dim).
        return (input_shape[0][0], input_shape[1][1], self._model_dim)


def create_model_1(dynamic):
    """Build a seq2seq classifier: GRU-encoded features drive a Transformer
    whose decoder consumes embedded token ids; output is a softmax over 5
    classes per decoder step.

    Args:
        dynamic: shape tuple of the encoder input (excluding batch axis).
    Returns:
        A compiled-ready tf.keras.Model taking [encoder_input, decoder_ids].
    """
    encoder_in = layers.Input(shape=dynamic)
    decoder_in = layers.Input(shape=(None, ))

    # Encoder side: project dynamic features to model_dim=16 with a GRU.
    encoded = layers.GRU(16, return_sequences=True)(encoder_in)
    # Decoder side: embed token ids into the same 16-dim space.
    embedded = layers.Embedding(5, 16, embeddings_initializer="glorot_uniform")(decoder_in)
    # NOTE(review): scale factor is sqrt(5) (vocab size), not sqrt(16)
    # (model_dim) as in the original Transformer paper — confirm intent.
    embedded = embedded * (5 ** 0.5)

    decoded = Transformer(5, 16)([encoded, embedded])
    probs = layers.Dense(5, activation="softmax")(decoded)

    return tf.keras.Model(inputs=[encoder_in, decoder_in], outputs=probs)

def create_model_2(dynamic, out_length):
    """Build a seq2seq regressor: the decoder input is the known target
    sequence prefixed with a value predicted from the encoder's last state;
    output is one softplus-activated value per decoder step.

    Args:
        dynamic: shape tuple of the encoder input (excluding batch axis).
        out_length: number of known decoder-input steps.
    Returns:
        A tf.keras.Model taking [encoder_input, decoder_values].
    """
    encoder_in = layers.Input(shape=dynamic)
    decoder_in = layers.Input(shape=(out_length, 1))

    # Encoder side: project dynamic features to model_dim=16 with a GRU.
    encoded = layers.GRU(16, return_sequences=True)(encoder_in)

    # Seed the decoder with a first step predicted from the encoder's
    # final hidden state, then append the supplied target values.
    seed = encoded[:, -1, :]
    seed = layers.Dense(1, activation="softplus")(seed)
    seed = tf.expand_dims(seed, 2)
    decoder_seq = layers.Concatenate(axis=1)([seed, decoder_in])
    decoder_seq = layers.GRU(16, return_sequences=True)(decoder_seq)

    decoded = Transformer(5, 16)([encoded, decoder_seq])
    values = layers.Dense(1, activation="softplus")(decoded)

    return tf.keras.Model(inputs=[encoder_in, decoder_in], outputs=values)
