# -*- coding: utf-8 -*-
__author__ = 'mmfu.cn@gmail.com'

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers import variance_scaling_initializer
import tensorflow.contrib.layers as layers

class TextRNN(object):
    """Bi-LSTM text classifier (TensorFlow 1.x graph mode).

    The whole graph is built in ``__init__``: embedding lookup, a
    bidirectional LSTM encoder pooled either by a learned attention vector
    or by mean-pooling, a linear softmax projection for the "target" label,
    cross-entropy + L2 loss, and a gradient-clipped train op.

    Placeholders / tensors exposed to callers:
        input_x            int32 [batch, time] token ids; id 0 is padding
        input_action       int32 [batch] action labels (declared but not
                           wired into the graph in this version)
        input_target       int32 [batch] target labels
        dropout_keep_prob  float32 scalar keep-probability
        loss, train_op, target_predictions, target_prob, target_accuracy
    """

    def __init__(self, config):
        self.config = config
        self.action_num_classes = config['action_num_classes']
        self.target_num_classes = config['target_num_classes']
        self.rnn_size = config['rnn_size']
        self.rnn_type = config['rnn_type']
        self.rnn_layer = config['rnn_layer']
        self.rnn_attention_size = config['rnn_attention_size']

        self.vocab_size = config['vocab_size']
        self.embed_size = config['embed_size']
        self.learning_rate = config['learning_rate']
        self.l2_lambda = config['l2_lambda']
        self.initializer = initializers.xavier_initializer()

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")

        self.input_x = tf.placeholder(tf.int32, [None, None], name="input_x")  # X: [batch, time]
        self.input_action = tf.placeholder(tf.int32, [None, ], name="input_action")  # y: [batch]
        self.input_target = tf.placeholder(tf.int32, [None, ], name="input_target")  # y: [batch]
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        self.batch_size = tf.shape(self.input_x)[0]
        # True (un-padded) length per sequence: count positions whose token
        # id is non-zero, i.e. id 0 is assumed to be the padding token.
        used = tf.sign(tf.abs(self.input_x))
        length = tf.reduce_sum(used, reduction_indices=1)
        self.sequence_length = tf.cast(length, tf.int32)

        with tf.variable_scope("embedding"):
            self.Embedding = tf.get_variable(name="Embedding",
                                             shape=[self.vocab_size, self.embed_size],
                                             initializer=self.initializer)
        self.embedded_words = tf.nn.embedding_lookup(self.Embedding, self.input_x)

        lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(self.rnn_size)  # forward direction cell
        lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(self.rnn_size)  # backward direction cell
        # dropout_keep_prob is a placeholder (never None); feed 1.0 at
        # inference time to disable dropout.
        lstm_fw_cell = tf.contrib.rnn.DropoutWrapper(lstm_fw_cell,
                                                     output_keep_prob=self.dropout_keep_prob)
        lstm_bw_cell = tf.contrib.rnn.DropoutWrapper(lstm_bw_cell,
                                                     output_keep_prob=self.dropout_keep_prob)
        # FIX: pass sequence_length so the RNN stops at each sequence's real
        # end and the backward pass starts there; previously the computed
        # lengths were never used and padding steps polluted the encoding.
        outputs, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell,
                                                     self.embedded_words,
                                                     sequence_length=self.sequence_length,
                                                     dtype=tf.float32)
        output_rnn = tf.concat(outputs, axis=2)  # [batch, time, 2*rnn_size]

        if config["rnn_attention"]:
            with tf.variable_scope("target_attention"):
                # NOTE(review): broadcasting in tf.multiply below requires
                # rnn_attention_size == 2*rnn_size — verify the config keeps
                # these in sync.
                attention_target_vector = tf.get_variable(
                    name='attention_target_vector',
                    shape=[self.rnn_attention_size],
                    regularizer=layers.l2_regularizer(scale=config['l2_lambda']),
                    dtype=tf.float32)
                # NOTE(review): this regularizer only populates the
                # REGULARIZATION_LOSSES collection, which is never added to
                # self.loss below — it currently has no effect.

                # Per-timestep score -> softmax over time -> weighted sum.
                target_vector_attn = tf.reduce_sum(
                    tf.multiply(output_rnn, attention_target_vector),
                    axis=2, keep_dims=True)  # [batch, time, 1]
                target_attention_weights = tf.nn.softmax(target_vector_attn, dim=1)
                target_weighted_projection = tf.multiply(output_rnn, target_attention_weights)
                target_outputs = tf.reduce_sum(target_weighted_projection, axis=1)
        else:
            # Mean-pool over time (includes padded steps' zero outputs).
            action_outputs = tf.reduce_mean(output_rnn, axis=1)
            target_outputs = action_outputs

        with tf.variable_scope("target_output"):
            self.target_W_projection = tf.get_variable(
                "target_W_projection",
                shape=[self.rnn_size * 2, self.target_num_classes],
                initializer=self.initializer)  # [2*rnn_size, label_size]
            self.target_b_projection = tf.get_variable(
                "target_b_projection", shape=[self.target_num_classes])
            self.target_logits = tf.matmul(target_outputs,
                                           self.target_W_projection) + self.target_b_projection

        with tf.variable_scope("loss"):
            target_loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=self.input_target, logits=self.target_logits))

            loss = target_loss
            # FIX: regularize all weights but skip bias terms. The old filter
            # ("'b' not in v.name") accidentally excluded the Embedding matrix
            # and every bidirectional_rnn kernel because their names contain
            # the letter 'b'.
            l2_losses = tf.add_n(
                [tf.nn.l2_loss(v) for v in tf.trainable_variables()
                 if 'bias' not in v.name.lower()
                 and 'b_projection' not in v.name]) * self.l2_lambda
            self.loss = loss + l2_losses

        with tf.variable_scope("optimizer"):
            optimizer = config["optimizer"]
            if optimizer == "sgd":
                self.opt = tf.train.GradientDescentOptimizer(self.learning_rate)
            elif optimizer == "adam":
                self.opt = tf.train.AdamOptimizer(self.learning_rate)
            elif optimizer in ("adgrad", "adagrad"):
                # "adgrad" kept for backward compatibility with old configs.
                self.opt = tf.train.AdagradOptimizer(self.learning_rate)
            else:
                raise KeyError("unknown optimizer: %r" % optimizer)

        # Element-wise gradient clipping to [-clip, clip] before applying.
        grads_vars = self.opt.compute_gradients(self.loss)
        capped_grads_vars = [
            [tf.clip_by_value(g, -self.config["clip"], self.config["clip"]), v]
            for g, v in grads_vars]
        self.train_op = self.opt.apply_gradients(capped_grads_vars, self.global_step)

        self.target_predictions = tf.argmax(self.target_logits, 1, name="target_predictions")

        self.target_prob = tf.nn.softmax(self.target_logits, 1, name="target_prob")

        target_correct_prediction = tf.equal(
            tf.cast(self.target_predictions, tf.int32), self.input_target)
        self.target_accuracy = tf.reduce_mean(
            tf.cast(target_correct_prediction, tf.float32), name="target_Accuracy")
