# encoding=utf8
# -*- coding: utf-8 -*-
__author__ = 'mmfu.cn@gmail.com'

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers import variance_scaling_initializer
import tensorflow.contrib.layers as layers

class TextSWEM(object):
    """Simple Word-Embedding Model (SWEM-concat) for joint intent classification.

    Builds a TF1 static graph: embedding lookup, then the concatenation of a
    padding-masked average pooling and a max pooling over the time axis, one
    shared fully-connected layer, and two independent softmax heads
    ("action" and "target") trained with the summed cross-entropy losses.

    Expected ``config`` keys: ``action_num_classes``, ``target_num_classes``,
    ``sequence_length``, ``vocab_size``, ``embed_size``, ``learning_rate``,
    ``l2_lambda``, ``optimizer`` ("sgd" | "adam" | "adagrad"), ``clip``.
    """

    def __init__(self, config):
        self.config = config
        self.action_num_classes = config['action_num_classes']
        self.target_num_classes = config['target_num_classes']

        self.sequence_length = config['sequence_length']
        self.vocab_size = config['vocab_size']
        self.embed_size = config['embed_size']
        self.learning_rate = config['learning_rate']
        # NOTE(review): l2_lambda is read from config but never applied to the
        # loss below -- confirm whether L2 regularization was intended.
        self.l2_lambda = config['l2_lambda']
        self.initializer = initializers.xavier_initializer()

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")

        # Inputs: token ids (0 = padding) and sparse class-index labels.
        self.input_x = tf.placeholder(tf.int32, [None, self.sequence_length], name="input_x")
        self.input_action = tf.placeholder(tf.int32, [None, ], name="input_action")
        self.input_target = tf.placeholder(tf.int32, [None, ], name="input_target")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        self.batch_size = tf.shape(self.input_x)[0]
        # Padding mask derived from the ids themselves: 1 where id != 0.
        self.input_x_mask = tf.sign(tf.abs(self.input_x))

        with tf.variable_scope("embedding"):
            self.Embedding = tf.get_variable(name="Embedding",
                                             shape=[self.vocab_size, self.embed_size],
                                             initializer=self.initializer,
                                             dtype=tf.float32)
        embedded_words = tf.nn.embedding_lookup(self.Embedding, self.input_x)

        # [batch, L, emb, 1] so NHWC max_pool can pool over the time axis.
        x_emb = tf.expand_dims(embedded_words, 3)
        x_emb = tf.nn.dropout(x_emb, self.dropout_keep_prob)

        x_mask = tf.expand_dims(self.input_x_mask, axis=-1)
        x_mask = tf.expand_dims(x_mask, axis=-1)  # [batch, L, 1, 1]
        x_mask = tf.cast(x_mask, dtype=tf.float32)

        # SWEM-aver: zero out padding, sum over time, divide by true length.
        x_sum = tf.multiply(x_emb, x_mask)  # [batch, L, emb, 1]
        H_enc = tf.reduce_sum(x_sum, axis=1, keep_dims=True)  # [batch, 1, emb, 1]
        H_enc = tf.squeeze(H_enc, [1, 3])  # [batch, emb]
        x_mask_sum = tf.reduce_sum(x_mask, axis=1, keep_dims=True)  # [batch, 1, 1, 1]
        x_mask_sum = tf.squeeze(x_mask_sum, [2, 3])  # [batch, 1]
        # NOTE(review): an all-padding row makes x_mask_sum zero -> NaN; the
        # feeder is assumed to supply at least one real token per example.
        H_enc_1 = H_enc / x_mask_sum  # [batch, emb]

        # SWEM-max: max over (dropped-out) embeddings across the full sequence.
        H_enc_2 = tf.nn.max_pool(x_emb, [1, self.sequence_length, 1, 1], [1, 1, 1, 1], 'VALID')
        H_enc_2 = tf.squeeze(H_enc_2, [1, 3])  # [batch, emb]

        # SWEM-concat: [batch, 2*emb]
        H_enc = tf.concat([H_enc_1, H_enc_2], 1)

        biasInit = tf.constant_initializer(0.001, dtype=tf.float32)
        # Shared hidden layer feeding both classification heads.
        H_dis = layers.fully_connected(H_enc, num_outputs=self.embed_size,
                                       biases_initializer=biasInit, activation_fn=tf.nn.relu)

        with tf.variable_scope("action_output"):
            self.action_logits = layers.linear(
                tf.nn.dropout(H_dis, keep_prob=self.dropout_keep_prob),
                num_outputs=self.action_num_classes, biases_initializer=biasInit)

        with tf.variable_scope("target_output"):
            self.target_logits = layers.linear(
                tf.nn.dropout(H_dis, keep_prob=self.dropout_keep_prob),
                num_outputs=self.target_num_classes, biases_initializer=biasInit)

        with tf.variable_scope("loss"):
            # Multi-task loss: unweighted sum of the two cross-entropies.
            action_loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=self.input_action, logits=self.action_logits))
            target_loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=self.input_target, logits=self.target_logits))
            self.loss = action_loss + target_loss

        with tf.variable_scope("optimizer"):
            optimizer = config["optimizer"]
            if optimizer == "sgd":
                self.opt = tf.train.GradientDescentOptimizer(self.learning_rate)
            elif optimizer == "adam":
                self.opt = tf.train.AdamOptimizer(self.learning_rate)
            elif optimizer in ("adagrad", "adgrad"):
                # "adgrad" (historical misspelling) kept for backward compat.
                self.opt = tf.train.AdagradOptimizer(self.learning_rate)
            else:
                raise KeyError("unsupported optimizer: %r" % optimizer)

        # Element-wise gradient clipping to [-clip, clip] before applying.
        grads_vars = self.opt.compute_gradients(self.loss)
        capped_grads_vars = [[tf.clip_by_value(g, -self.config["clip"], self.config["clip"]), v]
                             for g, v in grads_vars]
        self.train_op = self.opt.apply_gradients(capped_grads_vars, self.global_step)

        self.action_predictions = tf.argmax(self.action_logits, 1, name="action_predictions")
        self.target_predictions = tf.argmax(self.target_logits, 1, name="target_predictions")

        self.action_prob = tf.nn.softmax(self.action_logits, 1, name="action_prob")
        self.target_prob = tf.nn.softmax(self.target_logits, 1, name="target_prob")

        # Per-head accuracies, plus a joint "intent" accuracy requiring both
        # heads to be correct on the same example.
        action_correct = tf.equal(tf.cast(self.action_predictions, tf.int32), self.input_action)
        self.action_accuracy = tf.reduce_mean(tf.cast(action_correct, tf.float32), name="action_Accuracy")

        target_correct = tf.equal(tf.cast(self.target_predictions, tf.int32), self.input_target)
        self.target_accuracy = tf.reduce_mean(tf.cast(target_correct, tf.float32), name="target_Accuracy")

        intent_correct = tf.logical_and(target_correct, action_correct)
        self.intent_accuracy = tf.reduce_mean(tf.cast(intent_correct, tf.float32), name="intent_Accuracy")
