#-*- coding: utf-8 -*-
import tensorflow as tf
import math
from tensorflow.contrib import layers


class EMNet(object):
    """
    Embedding Matching Net.

    Pools a variable-length list of item ids into a single vector with an
    attention layer, concatenates dense side features, runs a 2-layer MLP,
    and trains with sampled NCE loss against the full item pool (the
    embedding table doubles as the NCE output weights).

    TF 1.x graph-mode model: constructing an instance builds the graph and
    exposes `self.loss`, `self.train_op` (side effects of the build) and
    `self.out` (softmax scores over the item pool).
    """
    def __init__(self, pool_size, embedding_size, num_sampled, learning_rate, attention_size, item_input_length, other_input_length):
        # model hyper-parameters
        self.pool_size = pool_size                    # number of items / output classes
        self.embedding_size = embedding_size          # item embedding dimension
        self.num_sampled = num_sampled                # negative samples per step for NCE
        self.learning_rate = learning_rate            # SGD learning rate
        self.attention_size = attention_size          # attention projection dimension
        self.item_input_length = item_input_length    # max item ids per example (padded)
        self.other_input_length = other_input_length  # dense side-feature width

        # input placeholders
        with tf.name_scope('placeholder'):
            self.input_other = tf.placeholder(tf.float32, shape=[None, self.other_input_length])
            self.input_item = tf.placeholder(tf.int64, shape=[None, self.item_input_length])
            self.input_label = tf.placeholder(tf.int64, shape=[None, 1])
            # NOTE(review): currently unused — embedding_load() derives a 0/1
            # mask from input_item instead. Kept for the commented-out
            # explicit per-item-weight path.
            self.input_weight = tf.placeholder(tf.float32, shape=[None, self.item_input_length])

        # embedding table, also used as the NCE output weight matrix
        with tf.name_scope('embedding'):
            self.nce_weight = tf.Variable(tf.truncated_normal([self.pool_size, self.embedding_size],
                                                              stddev=1.0 / math.sqrt(self.embedding_size)))
            self.nce_biases = tf.Variable(tf.zeros([self.pool_size]))

        # network building
        embed, train_weight_reshape = self.embedding_load()
        atten_embed = self.attention_layer(embed, train_weight_reshape)
        # avg_embed = self.weight_layer(embed, train_weight_reshape)
        concat_embed = self.embedding_concat(atten_embed)
        layer_out = self.mlp(concat_embed)
        out = self.multi_classifer(layer_out)

        # network output: softmax scores over the item pool
        self.out = out

    def embedding_load(self):
        """Look up item embeddings and build the padding mask.

        Returns:
            embed: [batch, item_input_length, embedding_size] embeddings.
            train_weight_reshape: [batch, item_input_length, 1] float 0/1
                mask; 1 where the item id is non-zero. Assumes id 0 is the
                padding token — TODO confirm against the data pipeline.
        """
        with tf.name_scope("embedding_load"):
            embed = tf.nn.embedding_lookup(self.nce_weight, self.input_item)
            # uniform weight (= 1) per real item, i.e. average pooling mask
            train_weight_reshape = tf.expand_dims(tf.to_float(tf.abs(self.input_item) > 0), 2)
            # explicit per-item input weight (alternative path, unused):
            # train_weight_reshape = tf.expand_dims(self.input_weight, 2)
        return embed, train_weight_reshape

    def weight_layer(self, input, train_weight_reshape):
        """Masked mean-pooling over the item axis (alternative to attention).

        Args:
            input: [batch, item_input_length, embedding_size] embeddings.
            train_weight_reshape: [batch, item_input_length, 1] 0/1 mask.
        Returns:
            [batch, embedding_size] masked average of the item embeddings.
        """
        with tf.name_scope("weight_layer"):
            # [batch, 1] count of real (non-padding) items; broadcasts over
            # the embedding dim. NOTE(review): divides by zero for an
            # all-padding row — callers must guarantee >= 1 real item.
            train_weight_num = tf.cast(tf.count_nonzero(train_weight_reshape, axis=1), tf.float32)
            # BUGFIX: the original reduce_sum had no `axis`, collapsing the
            # whole batch to a scalar; sum over the item axis (axis=1) so the
            # output is [batch, embedding_size] as intended.
            weight_output = tf.div(
                tf.reduce_sum(tf.multiply(input, train_weight_reshape), axis=1),
                train_weight_num)

        return weight_output

    def attention_layer(self, inputs, train_weight_reshape):
        """Masked additive attention pooling over the item axis.

        Args:
            inputs: [batch, item_input_length, embedding_size] embeddings.
            train_weight_reshape: [batch, item_input_length, 1] 0/1 mask;
                padded positions get zero attention weight.
        Returns:
            [batch, embedding_size] attention-weighted sum of embeddings.
        """
        with tf.name_scope("attention_layer"):
            # learned importance (context) vector
            u_context = tf.Variable(tf.truncated_normal([self.attention_size]))

            # per-position tanh projection: [batch, length, attention_size]
            h = layers.fully_connected(inputs, self.attention_size, activation_fn=tf.nn.tanh)

            # attention logits: [batch, length, 1]
            hu_sum = tf.reduce_sum(tf.multiply(h, u_context), axis=2, keep_dims=True)
            # FIX: subtract the per-example max before exp for numerical
            # stability; the constant cancels in the division, so alpha is
            # mathematically unchanged.
            exp = tf.exp(hu_sum - tf.reduce_max(hu_sum, axis=1, keep_dims=True))
            # zero out padded positions, then normalize to a masked softmax
            exp_adapt = tf.multiply(exp, train_weight_reshape)
            exp_adapt_sum = tf.reduce_sum(exp_adapt, axis=1, keep_dims=True)
            alpha = tf.div(exp_adapt, exp_adapt_sum)

            # weighted sum over the item axis: [batch, embedding_size]
            attention_output = tf.reduce_sum(tf.multiply(inputs, alpha), axis=1)

        return attention_output

    def embedding_concat(self, input):
        """Concatenate pooled item embedding with the dense side features.

        Returns a [batch, embedding_size + other_input_length] tensor.
        """
        with tf.name_scope("embedding_concat"):
            embedding_concat = tf.concat([input, self.input_other], 1)
        return embedding_concat

    def mlp(self, embedding_concat):
        """Two-layer ReLU MLP projecting back to embedding_size.

        Output must match embedding_size so it can be matched against the
        NCE weight matrix in multi_classifer().
        """
        with tf.name_scope("mlp"):
            weights = {
                'h1': tf.Variable(tf.random_normal([self.embedding_size + self.other_input_length, self.embedding_size])),
                'h2': tf.Variable(tf.random_normal([self.embedding_size, self.embedding_size]))
            }
            biases = {
                'b1': tf.Variable(tf.random_normal([self.embedding_size])),
                'out': tf.Variable(tf.random_normal([self.embedding_size]))
            }
            layer_1 = tf.add(tf.matmul(embedding_concat, weights['h1']), biases['b1'])
            layer_1 = tf.nn.relu(layer_1)
            layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['out'])
            layer_out = tf.nn.relu(layer_2)
        return layer_out

    def multi_classifer(self, layer_out):
        """Build the NCE training loss and the full-softmax prediction head.

        Side effects: defines `self.loss` (mean sampled NCE loss) and
        `self.train_op` (SGD minimizer).

        Returns:
            [batch, pool_size] softmax probabilities over the item pool,
            computed with the shared embedding table as output weights.
        """
        with tf.name_scope("multi_classifer"):
            # sampled NCE loss for training (cheap vs. full softmax)
            self.loss = tf.reduce_mean(
                tf.nn.nce_loss(
                    weights=self.nce_weight,
                    biases=self.nce_biases,
                    labels=self.input_label,
                    inputs=layer_out,
                    num_sampled=self.num_sampled,
                    num_classes=self.pool_size
                )
            )
            # minimize loss with plain gradient descent
            self.train_op = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(self.loss)

            # prediction: full softmax over all items for inference
            out = tf.nn.softmax(tf.matmul(layer_out, tf.transpose(self.nce_weight)) + self.nce_biases, dim=1)
        return out







