# encoding=utf8
# -*- coding: utf-8 -*-
__author__ = 'mmfu.cn@gmail.com'


import tensorflow as tf
from tensorflow.contrib.layers.python.layers import initializers
# from tensorflow.contrib.layers import xavier_initializer
import tensorflow.contrib.layers as layers
import numpy as np
import math

L2_REG = 1e-4

class Transformer(object):
    """Transformer encoder for joint two-label sequence classification.

    Encodes a padded token-id sequence with ``num_blocks`` stacked
    multi-head self-attention blocks (Vaswani et al., 2017 style, built on
    Kyubyong's TF1 implementation), then pools the encoder output with two
    independent learned attention vectors to predict an "action" class and
    a "target" class per example.

    The graph is constructed eagerly in ``__init__`` (TF1 static-graph
    idiom); callers feed the ``input_*`` / ``dropout_keep_prob``
    placeholders and run ``train_op`` / ``loss`` / ``*_predictions``.
    """

    def __init__(self, config):
        # --- hyper-parameters --------------------------------------------
        self.config = config
        self.action_num_classes = config['action_num_classes']  # size of the action label set
        self.target_num_classes = config['target_num_classes']  # size of the target label set
        self.rnn_size = config['rnn_size']          # hidden size of the pre-softmax projection
        self.sequence_length = config['sequence_length']  # fixed (padded) input length
        self.vocab_size = config['vocab_size']
        self.embed_size = config['embed_size']      # token-embedding / model dimension
        self.num_blocks = config['num_blocks']      # number of encoder blocks
        self.num_heads = config['num_heads']        # attention heads per block
        self.learning_rate = config['learning_rate']
        self.initializer = initializers.xavier_initializer()

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")

        # --- placeholders ------------------------------------------------
        self.input_x = tf.placeholder(tf.int32, [None, self.sequence_length], name="input_x")  # token ids
        # NOTE(review): input_pos and input_position are never consumed by
        # this graph; they are kept only so existing feed dicts keep working.
        self.input_pos = tf.placeholder(tf.int32, [None, None], name="input_pos")
        self.input_position = tf.placeholder(tf.int32, [None, self.sequence_length], name="input_position")
        self.input_action = tf.placeholder(tf.int32, [None, ], name="input_action")  # action label ids, [batch]
        self.input_target = tf.placeholder(tf.int32, [None, ], name="input_target")  # target label ids, [batch]
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        with tf.variable_scope("encoder"):
            # Token embedding plus a fixed sinusoidal position signal.
            self.embed = self.embedding(scope='wordEmbed')
            self.posEmbed = positional_timing_signal(self.embed)
            self.enc = self.embed + self.posEmbed

            # Input dropout (contrib dropout takes keep_prob, not rate).
            self.enc = layers.dropout(self.enc, keep_prob=self.dropout_keep_prob)

            # Stacked encoder blocks: self-attention followed by a
            # position-wise feed-forward sublayer, each with residual + LN.
            for i in range(self.num_blocks):
                with tf.variable_scope("num_blocks_{}".format(i)):
                    self.enc = self.multihead_attention(self.enc,
                                                        self.enc,
                                                        self.embed_size,
                                                        self.num_heads,
                                                        scope='multihead_attention')
                    self.enc = self.feedforward(self.enc,
                                                num_units=[4 * self.embed_size, self.embed_size],
                                                scope='feedforward')

        self.action_logits, self.target_logits = self.logitsOutput(self.enc, scope='logits')

        with tf.variable_scope("loss"):
            self.loss = self.lossLayer(self.action_logits, self.target_logits)

        with tf.variable_scope("train_op"):
            self.opt = tf.train.AdamOptimizer(self.learning_rate)
            grads_vars = self.opt.compute_gradients(self.loss)
            # FIX: compute_gradients yields (None, v) for any variable the
            # loss does not depend on; tf.clip_by_value(None, ...) would
            # crash, so such pairs are skipped.
            capped_grads_vars = [[tf.clip_by_value(g, -self.config["clip"], self.config["clip"]), v]
                                 for g, v in grads_vars if g is not None]
            self.train_op = self.opt.apply_gradients(capped_grads_vars, self.global_step)

        # --- inference / evaluation ops ----------------------------------
        self.action_predictions = tf.argmax(self.action_logits, 1, name="action_predictions")
        self.target_predictions = tf.argmax(self.target_logits, 1, name="target_predictions")

        self.action_prob = tf.nn.softmax(self.action_logits, 1, name="action_prob")
        self.target_prob = tf.nn.softmax(self.target_logits, 1, name="target_prob")

        action_correct_prediction = tf.equal(tf.cast(self.action_predictions, tf.int32), self.input_action)
        self.action_accuracy = tf.reduce_mean(tf.cast(action_correct_prediction, tf.float32), name="action_Accuracy")

        target_correct_prediction = tf.equal(tf.cast(self.target_predictions, tf.int32), self.input_target)
        self.target_accuracy = tf.reduce_mean(tf.cast(target_correct_prediction, tf.float32), name="target_Accuracy")

        # "Intent" is counted correct only when BOTH heads are correct.
        intent_correct_prediction = tf.logical_and(tf.equal(tf.cast(self.target_predictions, tf.int32), self.input_target),
                                                   tf.equal(tf.cast(self.action_predictions, tf.int32), self.input_action))
        self.intent_accuracy = tf.reduce_mean(tf.cast(intent_correct_prediction, tf.float32), name="intent_Accuracy")

    def embedding(self, scope=None):
        """Look up token embeddings for input_x.

        Returns:
            Tensor of shape [batch, sequence_length, embed_size].
        """
        with tf.variable_scope(scope):
            self.Embedding = tf.get_variable(name="Embedding", shape=[self.vocab_size, self.embed_size],
                                             initializer=self.initializer)
            embedded_words = tf.nn.embedding_lookup(self.Embedding, self.input_x)
        return embedded_words

    def multihead_attention(self, queries,
                            keys,
                            num_units,
                            num_heads=8,
                            causality=False,
                            scope="multihead_attention"):
        """Scaled dot-product multi-head attention with residual + layer norm.

        Args:
            queries: Tensor [N, T_q, C].
            keys: Tensor [N, T_k, C]; also used as values (self-attention here).
            num_units: total attention dimension C (must be divisible by num_heads).
            num_heads: number of parallel attention heads.
            causality: if True, mask out attention to future positions.
            scope: variable scope name.

        Returns:
            Tensor [N, T_q, C].
        """
        with tf.variable_scope(scope):

            # Linear projections
            Q = tf.layers.dense(queries, num_units, activation=tf.nn.relu)  # (N, T_q, C)
            K = tf.layers.dense(keys, num_units, activation=tf.nn.relu)  # (N, T_k, C)
            V = tf.layers.dense(keys, num_units, activation=tf.nn.relu)  # (N, T_k, C)

            # Split into heads and stack along batch: (h*N, T, C/h)
            Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0)  # (h*N, T_q, C/h)
            K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0)  # (h*N, T_k, C/h)
            V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0)  # (h*N, T_k, C/h)

            # Attention scores, scaled by sqrt(d_k)
            outputs = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1]))  # (h*N, T_q, T_k)
            outputs = outputs / (K_.get_shape().as_list()[-1] ** 0.5)

            # Key masking: positions whose key vector sums to exactly 0 get -inf.
            # NOTE(review): keys already include the positional signal, so
            # padded positions are generally non-zero and this mask may never
            # fire; masking from the raw token ids would be safer -- confirm.
            key_masks = tf.sign(tf.abs(tf.reduce_sum(keys, axis=-1)))  # (N, T_k)
            key_masks = tf.tile(key_masks, [num_heads, 1])  # (h*N, T_k)
            key_masks = tf.tile(tf.expand_dims(key_masks, 1), [1, tf.shape(queries)[1], 1])  # (h*N, T_q, T_k)

            paddings = tf.ones_like(outputs) * (-2 ** 32 + 1)
            outputs = tf.where(tf.equal(key_masks, 0), paddings, outputs)  # (h*N, T_q, T_k)

            # Causality = future blinding (unused by this model: causality=False).
            if causality:
                diag_vals = tf.ones_like(outputs[0, :, :])  # (T_q, T_k)
                tril = tf.contrib.linalg.LinearOperatorTriL(diag_vals).to_dense()  # (T_q, T_k)
                masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(outputs)[0], 1, 1])  # (h*N, T_q, T_k)

                paddings = tf.ones_like(masks) * (-2 ** 32 + 1)
                outputs = tf.where(tf.equal(masks, 0), paddings, outputs)  # (h*N, T_q, T_k)

            # Attention distribution over keys
            outputs = tf.nn.softmax(outputs)  # (h*N, T_q, T_k)

            # Query masking: zero out rows belonging to all-zero query vectors.
            query_masks = tf.sign(tf.abs(tf.reduce_sum(queries, axis=-1)))  # (N, T_q)
            query_masks = tf.tile(query_masks, [num_heads, 1])  # (h*N, T_q)
            query_masks = tf.tile(tf.expand_dims(query_masks, -1), [1, 1, tf.shape(keys)[1]])  # (h*N, T_q, T_k)
            outputs *= query_masks

            # Attention dropout
            outputs = layers.dropout(outputs, keep_prob=self.dropout_keep_prob)

            # Weighted sum of values
            outputs = tf.matmul(outputs, V_)  # (h*N, T_q, C/h)

            # Merge heads back: (N, T_q, C)
            outputs = tf.concat(tf.split(outputs, num_heads, axis=0), axis=2)

            # Residual connection + layer normalization
            outputs += queries
            outputs = normalize(outputs)  # (N, T_q, C)

        return outputs

    def feedforward(self, inputs, num_units, scope=None):
        """Position-wise feed-forward sublayer (two 1x1 convolutions).

        Args:
            inputs: Tensor [N, T, C].
            num_units: [inner_dim, output_dim]; output_dim must equal C for
                the residual connection.

        Returns:
            Tensor [N, T, C].
        """
        with tf.variable_scope(scope):
            # Inner layer with ReLU
            params = {"inputs": inputs, "filters": num_units[0], "kernel_size": 1,
                      "activation": tf.nn.relu, "use_bias": True}
            outputs = tf.layers.conv1d(**params)

            # Linear readout layer
            params = {"inputs": outputs, "filters": num_units[1], "kernel_size": 1,
                      "activation": None, "use_bias": True}
            outputs = tf.layers.conv1d(**params)

            # Residual connection + layer normalization
            outputs += inputs
            outputs = normalize(outputs)

        return outputs

    def logitsOutput(self, inputs, scope='logits'):
        """Pool the encoder output and project to the two label spaces.

        Each head learns an attention vector, softmaxes its similarity with
        every time step, and takes the attention-weighted sum over time,
        then projects through a dense layer to class logits.

        Args:
            inputs: encoder output, [batch, sequence_length, embed_size].

        Returns:
            (action_logits [batch, action_num_classes],
             target_logits [batch, target_num_classes])
        """
        # NOTE(review): the l2_regularizer arguments below register losses in
        # the REGULARIZATION_LOSSES collection, but lossLayer never collects
        # them -- it computes its own L2 term -- so they are effectively inert.
        with tf.variable_scope(scope):
            ## attention pooling per head
            with tf.variable_scope("action_attention"):
                attention_action_vector = tf.get_variable(name='attention_action_vector',
                                                          shape=[self.embed_size],
                                                          regularizer=layers.l2_regularizer(scale=L2_REG),
                                                          dtype=tf.float32)

                action_vector_attn = tf.reduce_sum(tf.multiply(inputs, attention_action_vector), axis=2, keep_dims=True)
                action_attention_weights = tf.nn.softmax(action_vector_attn, dim=1)
                action_weighted_projection = tf.multiply(inputs, action_attention_weights)
                action_outputs = tf.reduce_sum(action_weighted_projection, axis=1)  # [batch, embed_size]

            with tf.variable_scope("target_attention"):
                attention_target_vector = tf.get_variable(name='attention_target_vector',
                                                          shape=[self.embed_size],
                                                          regularizer=layers.l2_regularizer(scale=L2_REG),
                                                          dtype=tf.float32)

                target_vector_attn = tf.reduce_sum(tf.multiply(inputs, attention_target_vector), axis=2, keep_dims=True)
                target_attention_weights = tf.nn.softmax(target_vector_attn, dim=1)
                target_weighted_projection = tf.multiply(inputs, target_attention_weights)
                target_outputs = tf.reduce_sum(target_weighted_projection, axis=1)  # [batch, embed_size]

            ## output linear projections
            with tf.variable_scope("action_output"):

                action_outputs = layers.dropout(action_outputs, keep_prob=self.dropout_keep_prob)
                action_outputs = layers.fully_connected(inputs=action_outputs, num_outputs=self.rnn_size,
                                                        weights_regularizer=layers.l2_regularizer(scale=L2_REG))

                action_W_projection = tf.get_variable("action_W_projection",
                                                      shape=[self.rnn_size, self.action_num_classes],
                                                      initializer=self.initializer)  # [rnn_size, action_num_classes]
                action_b_projection = tf.get_variable("action_b_projection", shape=[self.action_num_classes])
                action_logits = tf.matmul(action_outputs, action_W_projection) + action_b_projection

            with tf.variable_scope("target_output"):

                target_outputs = layers.dropout(target_outputs, keep_prob=self.dropout_keep_prob)
                target_outputs = layers.fully_connected(inputs=target_outputs, num_outputs=self.rnn_size,
                                                        weights_regularizer=layers.l2_regularizer(scale=L2_REG))

                target_W_projection = tf.get_variable("target_W_projection",
                                                      shape=[self.rnn_size, self.target_num_classes],
                                                      initializer=self.initializer)  # [rnn_size, target_num_classes]
                target_b_projection = tf.get_variable("target_b_projection", shape=[self.target_num_classes])
                target_logits = tf.matmul(target_outputs, target_W_projection) + target_b_projection

        return action_logits, target_logits

    def lossLayer(self, action_logits, target_logits):
        """Sum of per-head cross-entropy losses plus manual L2 on weights."""
        with tf.variable_scope("loss"):
            action_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.input_action, logits=action_logits))
            target_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.input_target, logits=target_logits))

            loss_at = action_loss + target_loss
            # FIX: the bias filter used to test the FULL scoped name
            # ('b' not in v.name), so any variable under a scope whose name
            # contains a 'b' (e.g. "num_blocks_0", "wordEmbed/Embedding") was
            # silently dropped from L2 -- removing nearly every weight matrix
            # from regularization. Test only the variable's leaf name instead;
            # layer-norm scale ('gamma') is excluded explicitly. The embedding
            # matrix stays unregularized ("Embedding" contains 'b'), matching
            # the previous behavior.
            l2_losses = tf.add_n(
                [tf.nn.l2_loss(v) for v in tf.trainable_variables()
                 if 'b' not in v.name.split('/')[-1] and 'gamma' not in v.name]) * L2_REG
            loss = loss_at + l2_losses
        return loss




def normalize(inputs,
              epsilon = 1e-8,
              scope="ln",
              reuse=None):
    '''Applies layer normalization over the last dimension.

    Args:
      inputs: A tensor with 2 or more dimensions, where the first dimension has
        `batch_size`.
      epsilon: A floating number. A very small number for preventing ZeroDivision Error.
      scope: Optional scope for `variable_scope`.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.

    Returns:
      A tensor with the same shape and data dtype as `inputs`.
    '''
    with tf.variable_scope(scope, reuse=reuse):
        inputs_shape = inputs.get_shape()
        params_shape = inputs_shape[-1:]

        mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
        # FIX: the learned shift/scale were created with tf.Variable, which
        # always allocates fresh parameters and silently ignores the scope's
        # reuse flag -- making the `reuse` argument a no-op. tf.get_variable
        # participates in variable_scope reuse as documented.
        beta = tf.get_variable("beta", shape=params_shape,
                               initializer=tf.zeros_initializer())
        gamma = tf.get_variable("gamma", shape=params_shape,
                                initializer=tf.ones_initializer())
        normalized = (inputs - mean) / ( (variance + epsilon) ** (.5) )
        outputs = gamma * normalized + beta

    return outputs

### Positional-Timing-Signal ###
def positional_timing_signal(x, min_timescale=1.0, max_timescale=1.0e4, scope=None):
    """Adds a bunch of sinusoids of different frequencies to a Tensor.

    Each channel of the input Tensor is incremented by a sinusoid of a
    different frequency and phase.

    This allows attention to learn to use absolute and relative positions.
    Timing signals should be added to some precursors of both the query and the
    memory inputs to attention.

    The use of relative position is possible because sin(x+y) and cos(x+y) can
    be experessed in terms of y, sin(x) and cos(x).

    In particular, we use a geometric sequence of timescales starting with
    min_timescale and ending with max_timescale.  The number of different
    timescales is equal to channels / 2. For each timescale, we
    generate the two sinusoidal signals sin(timestep/timescale) and
    cos(timestep/timescale).  All of these sinusoids are concatenated in
    the channels dimension.

    Args:
        x: a Tensor with shape [batch, length, channels]
        min_timescale: a float
        max_timescale: a float
    Returns:
        a Tensor the same shape as x.

    """
    with tf.name_scope(scope or "add_timing_signal"):
        length   = tf.shape(x)[1]
        channels = tf.shape(x)[2]
        position = tf.to_float(tf.range(length))
        num_timescales = channels // 2

        log_timescale_increment = (
                math.log(float(max_timescale) / float(min_timescale)) /
                (tf.to_float(num_timescales) - 1)
        )
        inv_timescales = min_timescale * tf.exp(
            tf.to_float(tf.range(num_timescales)) * -log_timescale_increment
        )

        scaled_time = (tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0))
        signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
        signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])
        signal = tf.reshape(signal, [1, length, channels])

        return signal