# encoding=utf8
# -*- coding: utf-8 -*-
__author__ = 'mmfu.cn@gmail.com'

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers import variance_scaling_initializer
import tensorflow.contrib.layers as layers

# embedding average

class TextEA(object):
    """Embedding-average ("EA") text model with two attention heads.

    Builds a TensorFlow 1.x graph that embeds a token-id sequence, pools the
    embeddings with two independently learned attention vectors, and predicts
    two labels per example: an "action" class and a "target" class (their
    conjunction is reported as "intent" accuracy).

    Config keys read here: action_num_classes, target_num_classes, vocab_size,
    embed_size, learning_rate, l2_lambda, optimizer, clip.

    Feeds expected at run time:
        input_x:            int32 [batch, seq_len] token ids
        input_action:       int32 [batch] action label ids
        input_target:       int32 [batch] target label ids
        dropout_keep_prob:  scalar float keep probability
    """

    def __init__(self, config):
        self.config = config
        self.action_num_classes = config['action_num_classes']
        self.target_num_classes = config['target_num_classes']
        self.vocab_size = config['vocab_size']
        self.embed_size = config['embed_size']
        self.learning_rate = config['learning_rate']
        self.l2_lambda = config['l2_lambda']
        self.initializer = initializers.xavier_initializer()

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")

        # Placeholders: batch and sequence length are both dynamic.
        self.input_x = tf.placeholder(tf.int32, [None, None], name="input_x")  # X
        self.input_action = tf.placeholder(tf.int32, [None,], name="input_action")  # y:[None,input_action]
        self.input_target = tf.placeholder(tf.int32, [None,], name="input_target")  # y:[None,input_target]
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        self.batch_size = tf.shape(self.input_x)[0]

        with tf.variable_scope("embedding"):
            self.Embedding = tf.get_variable(name="Embedding",
                                             shape=[self.vocab_size, self.embed_size],
                                             initializer=self.initializer)
        self.embedded_words = tf.nn.embedding_lookup(self.Embedding, self.input_x)
        self.embedded_words = tf.nn.dropout(self.embedded_words, self.dropout_keep_prob)

        # Two independent attention-weighted poolings over the same embedded
        # sequence (previously duplicated inline; variable scopes/names are
        # unchanged, so existing checkpoints still load).
        action_outputs = self._attention_pool("action_attention", "attention_action_vector")
        target_outputs = self._attention_pool("target_attention", "attention_target_vector")

        with tf.variable_scope("action_output"):
            self.action_W_projection = tf.get_variable(
                "action_W_projection",
                shape=[self.embed_size, self.action_num_classes],
                initializer=self.initializer)  # [embed_size,label_size]
            self.action_b_projection = tf.get_variable("action_b_projection",
                                                       shape=[self.action_num_classes])
            self.action_logits = tf.matmul(action_outputs, self.action_W_projection) + self.action_b_projection

        with tf.variable_scope("target_output"):
            self.target_W_projection = tf.get_variable(
                "target_W_projection",
                shape=[self.embed_size, self.target_num_classes],
                initializer=self.initializer)  # [embed_size,label_size]
            self.target_b_projection = tf.get_variable("target_b_projection",
                                                       shape=[self.target_num_classes])
            self.target_logits = tf.matmul(target_outputs, self.target_W_projection) + self.target_b_projection

        with tf.variable_scope("loss"):
            action_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=self.input_action, logits=self.action_logits))
            target_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=self.input_target, logits=self.target_logits))
            loss = action_loss + target_loss
            # FIX: the l2_regularizer attached to the attention vectors only
            # *registers* losses in the REGULARIZATION_LOSSES collection; they
            # were never added to the objective, so l2_lambda had no effect.
            reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
            if reg_losses:
                loss += tf.add_n(reg_losses)
            self.loss = loss

        with tf.variable_scope("optimizer"):
            optimizer = config["optimizer"]
            if optimizer == "sgd":
                self.opt = tf.train.GradientDescentOptimizer(self.learning_rate)
            elif optimizer == "adam":
                self.opt = tf.train.AdamOptimizer(self.learning_rate)
            elif optimizer in ("adgrad", "adagrad"):
                # "adgrad" is the historical misspelling used by old configs;
                # both spellings map to Adagrad for backward compatibility.
                self.opt = tf.train.AdagradOptimizer(self.learning_rate)
            else:
                raise KeyError("unsupported optimizer: %r" % optimizer)

        # FIX: compute_gradients may yield (None, var) pairs for variables that
        # do not feed the loss; tf.clip_by_value(None, ...) would raise, so
        # skip those pairs before clipping.
        clip = self.config["clip"]
        grads_vars = self.opt.compute_gradients(self.loss)
        capped_grads_vars = [(tf.clip_by_value(g, -clip, clip), v)
                             for g, v in grads_vars if g is not None]
        self.train_op = self.opt.apply_gradients(capped_grads_vars, self.global_step)

        self.action_predictions = tf.argmax(self.action_logits, 1, name="action_predictions")
        self.target_predictions = tf.argmax(self.target_logits, 1, name="target_predictions")

        self.action_prob = tf.nn.softmax(self.action_logits, 1, name="action_prob")
        self.target_prob = tf.nn.softmax(self.target_logits, 1, name="target_prob")

        # Per-head accuracies, plus joint "intent" accuracy that requires both
        # heads to be correct on the same example.
        action_correct = tf.equal(tf.cast(self.action_predictions, tf.int32), self.input_action)
        self.action_accuracy = tf.reduce_mean(tf.cast(action_correct, tf.float32), name="action_Accuracy")

        target_correct = tf.equal(tf.cast(self.target_predictions, tf.int32), self.input_target)
        self.target_accuracy = tf.reduce_mean(tf.cast(target_correct, tf.float32), name="target_Accuracy")

        intent_correct = tf.logical_and(target_correct, action_correct)
        self.intent_accuracy = tf.reduce_mean(tf.cast(intent_correct, tf.float32), name="intent_Accuracy")

    def _attention_pool(self, scope_name, vector_name):
        """Attention-weighted sum of self.embedded_words over the time axis.

        Learns a single [embed_size] attention vector under `scope_name`,
        scores each token by its dot product with that vector, softmaxes the
        scores along the sequence axis, and returns the weighted sum of the
        token embeddings: a [batch, embed_size] tensor.
        """
        with tf.variable_scope(scope_name):
            attn_vector = tf.get_variable(
                name=vector_name,
                shape=[self.embed_size],
                regularizer=layers.l2_regularizer(scale=self.l2_lambda),
                dtype=tf.float32)
            # keep_dims/dim are the pre-TF-1.5 spellings of keepdims/axis;
            # kept as-is for compatibility with the contrib-era TF this
            # file targets.
            scores = tf.reduce_sum(tf.multiply(self.embedded_words, attn_vector),
                                   axis=2, keep_dims=True)
            weights = tf.nn.softmax(scores, dim=1)
            return tf.reduce_sum(tf.multiply(self.embedded_words, weights), axis=1)
