# encoding=utf8
# -*- coding: utf-8 -*-
__author__ = 'mmfu.cn@gmail.com'

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import initializers

def Convolutional_Block(input_, filter_num, scope, training=False):
    """Two stacked 3x1 convolutions, each followed by batch norm and ReLU.

    Args:
        input_: 4-D NHWC tensor [batch, height, width, channels].
        filter_num: number of output channels for both conv layers.
        scope: variable-scope name for this block.
        training: whether batch normalization runs in training mode
            (batch statistics + moving-average updates). Defaults to False,
            which matches the previous behavior where `training` was never
            passed and BN always used its (never-updated) moving statistics.

    Returns:
        4-D tensor with `filter_num` channels; SAME padding keeps the
        spatial dimensions of `input_`.

    NOTE(review): when training=True, tf.layers.batch_normalization creates
    update ops in tf.GraphKeys.UPDATE_OPS that must be run alongside the
    train op (e.g. via tf.control_dependencies) for the moving averages to
    be maintained — verify the training loop does this.
    """
    norm = tf.random_normal_initializer(stddev=0.05)
    filter_shape1 = [3, 1, input_.get_shape()[3], filter_num]

    with tf.variable_scope(scope):
        filter_1 = tf.get_variable('filter1', filter_shape1, initializer=norm)
        # Stride 1 in every dimension. (The original read the width stride
        # from filter_shape1[1], which is always 1 — spelled out explicitly.)
        conv1 = tf.nn.conv2d(input_, filter_1, strides=[1, 1, 1, 1], padding="SAME")
        batch_normal1 = tf.layers.batch_normalization(conv1, training=training)
        batch_normal1_relu = tf.nn.relu(batch_normal1)

        # Second conv: input channels now equal filter_num from the first conv.
        filter_shape2 = [3, 1, batch_normal1_relu.get_shape()[3], filter_num]
        filter_2 = tf.get_variable('filter2', filter_shape2, initializer=norm)
        conv2 = tf.nn.conv2d(batch_normal1_relu, filter_2, strides=[1, 1, 1, 1], padding="SAME")
        batch_normal2 = tf.layers.batch_normalization(conv2, training=training)
        batch_normal2_relu = tf.nn.relu(batch_normal2)

        return batch_normal2_relu

def Conv(input_, filter_shape, strides, scope, training=False):
    """Single convolution followed by batch normalization (no activation).

    Args:
        input_: 4-D NHWC tensor.
        filter_shape: [height, width, in_channels, out_channels] filter spec.
        strides: conv2d strides, [1, h_stride, w_stride, 1].
        scope: variable-scope name for this layer.
        training: BN mode flag; False (the default) preserves the previous
            behavior where batch_normalization always ran in inference mode.

    Returns:
        The batch-normalized convolution output (caller applies activation).
    """
    norm = tf.random_normal_initializer(stddev=0.05)
    with tf.variable_scope(scope):
        filter_1 = tf.get_variable('filter1', filter_shape, initializer=norm)
        conv = tf.nn.conv2d(input_, filter_1, strides=strides, padding="SAME")
        # NOTE(review): with training=True the BN update ops in
        # tf.GraphKeys.UPDATE_OPS must be attached to the train op.
        batch_normal = tf.layers.batch_normalization(conv, training=training)
        return batch_normal


def linear(input, output_dim, scope=None, stddev=0.1):
    """Fully connected layer: returns (input @ w + b, l2_penalty).

    Args:
        input: 2-D tensor [batch, in_dim].
        output_dim: number of output units.
        scope: optional variable-scope name (defaults to 'linear').
        stddev: stddev of the normal initializer for the weight matrix.

    Returns:
        A pair (output, l2_loss) where output is [batch, output_dim] and
        l2_loss is the summed L2 penalty over both w and b.
    """
    weight_init = tf.random_normal_initializer(stddev=stddev)
    bias_init = tf.constant_initializer(0.0)
    with tf.variable_scope(scope or 'linear'):
        in_dim = input.get_shape()[1]
        w = tf.get_variable('w', [in_dim, output_dim], initializer=weight_init)
        b = tf.get_variable('b', [output_dim], initializer=bias_init)
        penalty = tf.nn.l2_loss(w) + tf.nn.l2_loss(b)
        projected = tf.matmul(input, w) + b
        return projected, penalty

class TextDCNN(object):
    """Very-deep CNN (VDCNN-style) text classifier with two softmax heads.

    Graph layout: embedding lookup -> one conv that collapses the embedding
    axis -> four Convolutional_Blocks (64/128/256/512 filters) interleaved
    with temporal max-pooling -> k-max pooling -> two independent 3-layer
    fully connected heads ("action" and "target") -> joint cross-entropy
    loss plus L2 penalties from the FC layers.
    """

    def __init__(self, config):
        """Build the full graph from a config dict.

        Expected config keys: action_num_classes, target_num_classes,
        sequence_length, vocab_size, embed_size, learning_rate,
        dcnn_filter_sizes, dcnn_num_filters, dcnn_k_max_pooling, l2_lambda,
        optimizer ("sgd" | "adam" | "adagrad"/"adgrad"), clip.

        Raises:
            KeyError: if config["optimizer"] is not a recognized name.
        """
        # Accumulators for the L2 penalties returned by each linear() layer.
        action_l2_loss = tf.constant(0.0)
        target_l2_loss = tf.constant(0.0)

        self.config = config
        self.action_num_classes = config['action_num_classes']
        self.target_num_classes = config['target_num_classes']

        self.sequence_length = config['sequence_length']
        self.vocab_size = config['vocab_size']
        self.embed_size = config['embed_size']
        # self.is_training = config['is_training']
        self.learning_rate = config['learning_rate']
        self.filter_size = config['dcnn_filter_sizes']
        self.num_filter = config['dcnn_num_filters']
        self.k_max_pool = config['dcnn_k_max_pooling']
        # NOTE(review): l2_lambda is stored but never multiplied into the
        # loss below — the raw l2 penalties are added unscaled. Confirm intent.
        self.l2_lambda = config['l2_lambda']
        self.initializer = initializers.xavier_initializer()

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")

        # Inputs: token ids [batch, seq_len] plus one label id per head.
        self.input_x = tf.placeholder(tf.int32, [None, None], name="input_x")  # X
        self.input_action = tf.placeholder(tf.int32, [None, ], name="input_action")  # y:[None,input_action]
        self.input_target = tf.placeholder(tf.int32, [None, ], name="input_target")  # y:[None,input_target]
        # NOTE(review): dropout_keep_prob is declared but unused in this graph.
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        self.batch_size = tf.shape(self.input_x)[0]

        with tf.variable_scope("embedding"):
            self.Embedding = tf.get_variable(name="Embedding", shape=[self.vocab_size, self.embed_size],
                                             initializer=self.initializer)

        # [None, seq_len, embed_size] -> add channel dim for conv2d (NHWC).
        self.embedded_words = tf.nn.embedding_lookup(self.Embedding, self.input_x)
        self.sentence_embeddings_expanded = tf.expand_dims(self.embedded_words, -1)

        with tf.variable_scope("layer-0"):
            # First conv collapses the embedding axis (width stride == embed_size),
            # leaving a [batch, seq_len, 1, num_filter] feature map.
            filter_shape0 = [self.filter_size, self.embed_size, 1, self.num_filter]
            strides0 = [1, 1, self.embed_size, 1]
            self.h0 = Conv(self.sentence_embeddings_expanded, filter_shape0, strides0, 'layer_0')
        with tf.variable_scope("layer_1-9"):
            # Conv blocks with doubling filter counts; each max_pool halves
            # the temporal dimension (stride 2 on the height axis).
            self.h1 = Convolutional_Block(self.h0, 64, 'layer_1-2')
            pooled_1 = tf.nn.max_pool(self.h1, ksize=[1, self.filter_size, 1, 1], strides=[1, 2, 1, 1], padding='SAME',
                                      name="pool1")

            self.h2 = Convolutional_Block(pooled_1, 128, 'layer_3-4')

            pooled_2 = tf.nn.max_pool(self.h2, ksize=[1, self.filter_size, 1, 1], strides=[1, 2, 1, 1], padding='SAME',
                                      name="pool2")

            self.h3 = Convolutional_Block(pooled_2, 256, 'layer_5-6')
            pooled_3 = tf.nn.max_pool(self.h3, ksize=[1, self.filter_size, 1, 1], strides=[1, 2, 1, 1],
                                      padding='SAME', name="pool3")

            self.h4 = Convolutional_Block(pooled_3, 512, 'layer_7-8')

            # k-max pooling: move channels to the last-iterated axis, then keep
            # the k largest activations along time. top_k returns (values, indices).
            self.h5 = tf.transpose(self.h4, [0, 3, 2, 1])
            self.pooled = tf.nn.top_k(self.h5, k=self.k_max_pool, name='k-maxpooling')
            self.h6 = tf.reshape(self.pooled[0], (-1, 512 * self.k_max_pool))

        # Two independent FC heads; each linear() also returns its L2 penalty.
        with tf.variable_scope("action_fc-1-2-3"):
            action_fc1_out, action_fc1_loss = linear(self.h6, 2048, scope='fc1', stddev=0.1)
            action_l2_loss += action_fc1_loss
            action_fc2_out, action_fc2_loss = linear(tf.nn.relu(action_fc1_out), 2048, scope='fc2', stddev=0.1)
            action_l2_loss += action_fc2_loss
            self.action_logits, action_fc3_loss = linear(tf.nn.relu(action_fc2_out), self.action_num_classes, scope='fc3', stddev=0.1)
            action_l2_loss += action_fc3_loss
        with tf.variable_scope("target_fc-1-2-3"):
            target_fc1_out, target_fc1_loss = linear(self.h6, 2048, scope='fc1', stddev=0.1)
            target_l2_loss += target_fc1_loss
            target_fc2_out, target_fc2_loss = linear(tf.nn.relu(target_fc1_out), 2048, scope='fc2', stddev=0.1)
            target_l2_loss += target_fc2_loss
            self.target_logits, target_fc3_loss = linear(tf.nn.relu(target_fc2_out), self.target_num_classes, scope='fc3', stddev=0.1)
            target_l2_loss += target_fc3_loss

        with tf.variable_scope("loss"):
            # Joint loss: sum of both heads' cross-entropies plus the
            # (unscaled) L2 penalties accumulated above.
            action_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.input_action, logits=self.action_logits))
            target_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.input_target, logits=self.target_logits))
            self.loss = action_loss + target_loss + action_l2_loss + target_l2_loss

        with tf.variable_scope("optimizer"):
            optimizer = config["optimizer"]
            if optimizer == "sgd":
                self.opt = tf.train.GradientDescentOptimizer(self.learning_rate)
            elif optimizer == "adam":
                self.opt = tf.train.AdamOptimizer(self.learning_rate)
            elif optimizer in ("adagrad", "adgrad"):  # "adgrad" kept for old configs
                self.opt = tf.train.AdagradOptimizer(self.learning_rate)
            else:
                raise KeyError("unknown optimizer: %r" % optimizer)

        # Element-wise gradient clipping to avoid gradient explosion.
        # Variables with no gradient (g is None) would crash clip_by_value,
        # so they are skipped — they would not be updated anyway.
        grads_vars = self.opt.compute_gradients(self.loss)
        capped_grads_vars = [[tf.clip_by_value(g, -self.config["clip"], self.config["clip"]), v]
                             for g, v in grads_vars if g is not None]
        self.train_op = self.opt.apply_gradients(capped_grads_vars, self.global_step)

        self.action_predictions = tf.argmax(self.action_logits, 1, name="action_predictions")
        self.target_predictions = tf.argmax(self.target_logits, 1, name="target_predictions")

        self.action_prob = tf.nn.softmax(self.action_logits, 1, name="action_prob")
        self.target_prob = tf.nn.softmax(self.target_logits, 1, name="target_prob")

        # Per-head accuracies plus a joint "intent" accuracy that requires
        # both heads to be correct for the same example.
        action_correct_prediction = tf.equal(tf.cast(self.action_predictions, tf.int32), self.input_action)
        self.action_accuracy = tf.reduce_mean(tf.cast(action_correct_prediction, tf.float32), name="action_Accuracy")

        target_correct_prediction = tf.equal(tf.cast(self.target_predictions, tf.int32), self.input_target)
        self.target_accuracy = tf.reduce_mean(tf.cast(target_correct_prediction, tf.float32), name="target_Accuracy")

        intent_correct_prediction = tf.logical_and(tf.equal(tf.cast(self.target_predictions, tf.int32), self.input_target),
                                                   tf.equal(tf.cast(self.action_predictions, tf.int32), self.input_action))
        # Fixed: this op was named "target_Accuracy", colliding with the one
        # above (TF would silently rename it to "target_Accuracy_1").
        self.intent_accuracy = tf.reduce_mean(tf.cast(intent_correct_prediction, tf.float32), name="intent_Accuracy")
