# encoding=utf8
# -*- coding: utf-8 -*-
__author__ = 'mmfu.cn@gmail.com'

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers import variance_scaling_initializer
import tensorflow.contrib.layers as layers
from tensorflow.python.ops import nn
import numpy as np

L2_REG = 1e-4

class TextRNN(object):
    """Bi-LSTM text classifier with two softmax heads ("action" and "target").

    Graph built in ``__init__`` (TF1 static graph):
      word (+ optional POS / position) embeddings -> dropout ->
      bidirectional LSTM -> per-head attention pooling (or mean pooling) ->
      dropout + fully-connected -> two independent softmax classifiers.
    A joint cross-entropy loss plus L2 on non-bias weights is minimized with
    value-clipped gradients; accuracy tensors are exposed for evaluation.
    """

    def __init__(self, config):
        """Build the whole graph from a flat ``config`` dict.

        Keys read: action_num_classes, target_num_classes, rnn_size,
        rnn_type, rnn_layer, rnn_attention_size, sequence_length,
        vocab_size, embed_size, pos_vocab_size, pos_embed_size,
        position_embed_size, learning_rate, l2_lambda, use_POS,
        use_position, rnn_attention, optimizer, clip.
        """
        self.config = config
        self.action_num_classes = config['action_num_classes']
        self.target_num_classes = config['target_num_classes']
        self.rnn_size = config['rnn_size']
        self.rnn_type = config['rnn_type']
        self.rnn_layer = config['rnn_layer']
        self.rnn_attention_size = config['rnn_attention_size']

        self.sequence_length = config['sequence_length']
        self.vocab_size = config['vocab_size']
        self.embed_size = config['embed_size']
        self.pos_vocab_size = config['pos_vocab_size']
        self.pos_embed_size = config['pos_embed_size']
        self.position_embed_size = config['position_embed_size']
        self.learning_rate = config['learning_rate']
        self.l2_lambda = config['l2_lambda']
        self.use_POS = config['use_POS']
        self.use_position = config['use_position']
        self.initializer = initializers.xavier_initializer()

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")

        # Feed tensors. input_action / input_target are the two label vectors.
        self.input_x = tf.placeholder(tf.int32, [None, self.sequence_length], name="input_x")  # word ids
        self.input_pos = tf.placeholder(tf.int32, [None, None], name="input_pos")  # POS-tag ids
        self.input_position = tf.placeholder(tf.int32, [None, self.sequence_length], name="input_position")  # position ids
        self.input_action = tf.placeholder(tf.int32, [None, ], name="input_action")  # y: [batch] action class ids
        self.input_target = tf.placeholder(tf.int32, [None, ], name="input_target")  # y: [batch] target class ids
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        self.batch_size = tf.shape(self.input_x)[0]

        # Collect all enabled embeddings and concatenate on the feature axis.
        embedding = []
        with tf.variable_scope("embedding"):
            self.Embedding = tf.get_variable(name="Embedding", shape=[self.vocab_size, self.embed_size],
                                             initializer=self.initializer)
            embedded_words = tf.nn.embedding_lookup(self.Embedding, self.input_x)
            embedding.append(embedded_words)

            if self.use_POS:
                self.PosEmbedding = tf.get_variable(name="PosEmbedding", shape=[self.pos_vocab_size, self.pos_embed_size],
                                                    initializer=self.initializer)
                embedded_poses = tf.nn.embedding_lookup(self.PosEmbedding, self.input_pos)
                embedding.append(embedded_poses)

            if self.use_position:
                # Learned (not sinusoidal) position embedding, one row per
                # time step up to sequence_length.
                self.positionEmbedding = tf.get_variable(name="positionEmbedding",
                                                         shape=[self.sequence_length, self.position_embed_size],
                                                         initializer=self.initializer)
                embedded_position = tf.nn.embedding_lookup(self.positionEmbedding, self.input_position)
                embedding.append(embedded_position)

            embed_all = layers.dropout(tf.concat(embedding, axis=-1), self.dropout_keep_prob)

        # Single-layer bidirectional LSTM encoder.
        # NOTE(review): config['rnn_type'] / config['rnn_layer'] are read but
        # not used here — the encoder is always one Bi-LSTM layer.
        lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(self.rnn_size)  # forward direction cell
        lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(self.rnn_size)  # backward direction cell
        if self.dropout_keep_prob is not None:
            lstm_fw_cell = tf.contrib.rnn.DropoutWrapper(lstm_fw_cell, output_keep_prob=self.dropout_keep_prob)
            lstm_bw_cell = tf.contrib.rnn.DropoutWrapper(lstm_bw_cell, output_keep_prob=self.dropout_keep_prob)
        outputs, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell, embed_all, dtype=tf.float32)
        # [batch, time, 2 * rnn_size]: forward/backward outputs concatenated.
        output_rnn = tf.concat(outputs, axis=2)

        if config["rnn_attention"]:
            # One learned attention vector per head; scores are the dot
            # product with each time step, softmaxed over time (dim=1).
            # NOTE(review): attention_*_vector must have size 2 * rnn_size to
            # broadcast against output_rnn — confirm rnn_attention_size config.
            with tf.variable_scope("action_attention"):
                attention_action_vector = tf.get_variable(name='attention_action_vector',
                                                          shape=[self.rnn_attention_size],
                                                          regularizer=layers.l2_regularizer(scale=config['l2_lambda']),
                                                          dtype=tf.float32)

                action_vector_attn = tf.reduce_sum(tf.multiply(output_rnn, attention_action_vector), axis=2, keep_dims=True)
                action_attention_weights = tf.nn.softmax(action_vector_attn, dim=1, name='action_attention_weights')
                action_weighted_projection = tf.multiply(output_rnn, action_attention_weights)
                action_outputs = tf.reduce_sum(action_weighted_projection, axis=1)

            with tf.variable_scope("target_attention"):
                attention_target_vector = tf.get_variable(name='attention_target_vector',
                                                          shape=[self.rnn_attention_size],
                                                          regularizer=layers.l2_regularizer(scale=config['l2_lambda']),
                                                          dtype=tf.float32)

                target_vector_attn = tf.reduce_sum(tf.multiply(output_rnn, attention_target_vector), axis=2, keep_dims=True)
                target_attention_weights = tf.nn.softmax(target_vector_attn, dim=1, name='target_attention_weights')
                target_weighted_projection = tf.multiply(output_rnn, target_attention_weights)
                target_outputs = tf.reduce_sum(target_weighted_projection, axis=1)
        else:
            # No attention: mean-pool over time; both heads share the pooling.
            action_outputs = tf.reduce_mean(output_rnn, axis=1)
            target_outputs = action_outputs

        with tf.variable_scope("action_output"):
            action_outputs = layers.dropout(action_outputs, keep_prob=self.dropout_keep_prob)
            action_outputs = layers.fully_connected(inputs=action_outputs, num_outputs=self.rnn_size,
                                                    weights_regularizer=layers.l2_regularizer(scale=L2_REG))

            self.action_W_projection = tf.get_variable("action_W_projection",
                                                       shape=[self.rnn_size, self.action_num_classes],
                                                       initializer=self.initializer)  # [rnn_size, label_size]
            self.action_b_projection = tf.get_variable("action_b_projection", shape=[self.action_num_classes])
            self.action_logits = tf.matmul(action_outputs, self.action_W_projection) + self.action_b_projection

        with tf.variable_scope("target_output"):
            target_outputs = layers.dropout(target_outputs, keep_prob=self.dropout_keep_prob)
            target_outputs = layers.fully_connected(inputs=target_outputs, num_outputs=self.rnn_size,
                                                    weights_regularizer=layers.l2_regularizer(scale=L2_REG))

            self.target_W_projection = tf.get_variable("target_W_projection",
                                                       shape=[self.rnn_size, self.target_num_classes],
                                                       initializer=self.initializer)  # [rnn_size, label_size]
            self.target_b_projection = tf.get_variable("target_b_projection", shape=[self.target_num_classes])
            self.target_logits = tf.matmul(target_outputs, self.target_W_projection) + self.target_b_projection

        with tf.variable_scope("loss"):
            action_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.input_action, logits=self.action_logits))
            target_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.input_target, logits=self.target_logits))

            loss = action_loss + target_loss
            # BUGFIX: the previous filter ("'b' not in v.name") excluded every
            # variable whose name merely contains the letter "b" — including
            # Embedding/PosEmbedding and the attention vectors — from L2.
            # Exclude only bias terms.
            l2_losses = tf.add_n(
                [tf.nn.l2_loss(v) for v in tf.trainable_variables()
                 if 'bias' not in v.name.lower() and 'b_projection' not in v.name]) * self.l2_lambda
            self.loss = loss + l2_losses

        with tf.variable_scope("optimizer"):
            optimizer = config["optimizer"]
            if optimizer == "sgd":
                self.opt = tf.train.GradientDescentOptimizer(self.learning_rate)
            elif optimizer == "adam":
                self.opt = tf.train.AdamOptimizer(self.learning_rate)
            elif optimizer in ("adgrad", "adagrad"):  # "adgrad" kept for legacy configs
                self.opt = tf.train.AdagradOptimizer(self.learning_rate)
            else:
                raise KeyError("unsupported optimizer: %r" % (optimizer,))

        # Clip gradients element-wise to [-clip, clip]; skip variables with a
        # None gradient (compute_gradients returns None for unused variables).
        grads_vars = self.opt.compute_gradients(self.loss)
        capped_grads_vars = [[tf.clip_by_value(g, -self.config["clip"], self.config["clip"]), v]
                             for g, v in grads_vars if g is not None]
        self.train_op = self.opt.apply_gradients(capped_grads_vars, self.global_step)

        # Predictions, probabilities and accuracies for both heads.
        self.action_predictions = tf.argmax(self.action_logits, 1, name="action_predictions")
        self.target_predictions = tf.argmax(self.target_logits, 1, name="target_predictions")

        self.action_prob = tf.nn.softmax(self.action_logits, 1, name="action_prob")
        self.target_prob = tf.nn.softmax(self.target_logits, 1, name="target_prob")

        action_correct_prediction = tf.equal(tf.cast(self.action_predictions, tf.int32), self.input_action)
        self.action_accuracy = tf.reduce_mean(tf.cast(action_correct_prediction, tf.float32), name="action_Accuracy")

        target_correct_prediction = tf.equal(tf.cast(self.target_predictions, tf.int32), self.input_target)
        self.target_accuracy = tf.reduce_mean(tf.cast(target_correct_prediction, tf.float32), name="target_Accuracy")

        # "Intent" is correct only when BOTH heads are correct for a sample.
        intent_correct_prediction = tf.logical_and(tf.equal(tf.cast(self.target_predictions, tf.int32), self.input_target),
                                                   tf.equal(tf.cast(self.action_predictions, tf.int32), self.input_action))
        self.intent_accuracy = tf.reduce_mean(tf.cast(intent_correct_prediction, tf.float32), name="intent_Accuracy")


def positional_encoding(inputs,
                        num_units,
                        zero_pad=True,
                        scale=True,
                        scope="positional_encoding",
                        reuse=None):
    '''Sinusoidal Positional_Encoding.
    Args:
      inputs: A 2d Tensor with shape of (N, T).
      num_units: Output dimensionality
      zero_pad: Boolean. If True, all the values of the first row (id = 0) should be constant zero
      scale: Boolean. If True, the output will be multiplied by sqrt num_units(check details from paper)
      scope: Optional scope for `variable_scope`.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.
    Returns:
        A 'Tensor' with one more rank than inputs's, with the dimensionality should be 'num_units'
    '''

    # BUGFIX: the static batch dimension is usually None for a [None, T]
    # placeholder, which made tf.tile(..., [N, 1]) fail. Use the dynamic
    # batch size instead; T must still be statically known.
    T = inputs.get_shape().as_list()[1]
    N = tf.shape(inputs)[0]
    with tf.variable_scope(scope, reuse=reuse):
        position_ind = tf.tile(tf.expand_dims(tf.range(T), 0), [N, 1])

        # First part of the PE function: sin and cos argument, computed with
        # broadcasting instead of a Python double loop (same formula).
        pos = np.arange(T, dtype=np.float64)[:, np.newaxis]      # (T, 1)
        dim = np.arange(num_units, dtype=np.float64)[np.newaxis, :]  # (1, num_units)
        position_enc = pos / np.power(10000, 2. * dim / num_units)

        # Second part, apply the cosine to even columns and sin to odds.
        position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])  # dim 2i
        position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])  # dim 2i+1

        # Convert to a tensor
        lookup_table = tf.convert_to_tensor(position_enc, dtype=tf.float32)

        if zero_pad:
            # Row 0 (the padding position id) encodes to all zeros.
            lookup_table = tf.concat((tf.zeros(shape=[1, num_units]),
                                      lookup_table[1:, :]), 0)
        outputs = tf.nn.embedding_lookup(lookup_table, position_ind)

        if scale:
            # Scale by sqrt(num_units) as in "Attention Is All You Need".
            outputs = outputs * num_units**0.5

        return outputs