import tensorflow as tf
import numpy as np
import sys
import os
from slim.nets import vgg

# Alias for the TF 1.x contrib slim API; used below for the VGG arg scope,
# variable restoration, and checkpoint-loading helpers.
slim = tf.contrib.slim

#sys.path.append(os.path.expanduser("./slim/"))

class Model():
    """Image-captioning model (TensorFlow 1.x graph mode).

    A VGG-16 encoder produces an image feature vector that is fed as the
    first time step of an LSTM; during training the ground-truth caption
    token embeddings follow as subsequent steps (teacher forcing). A softmax
    projection over a fixed pre-trained embedding vocabulary produces
    per-step word predictions.
    """

    def __init__(self, learning_rate=0.001, batch_size=16, state_size=512, default_image_size=224):
        # learning_rate: Adam step size.
        # batch_size: static batch dimension baked into the graph placeholders.
        # state_size: LSTM hidden size; also the width VGG's fc8 is resized to,
        #   so the image feature can serve directly as one RNN input step.
        # default_image_size: square input resolution expected by VGG-16 (224).
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.state_size = state_size
        self.default_image_size = default_image_size

    def build(self, embedding_file=None, vgg_checkpoint_path=None, is_training=True):
        """Construct the full training/inference graph.

        Args:
            embedding_file: path to a .npy matrix of pre-trained word
                embeddings; row count defines the vocabulary size. Assumes
                the embedding width equals state_size so caption embeddings
                can be concatenated with the image feature — TODO confirm.
            vgg_checkpoint_path: checkpoint with pre-trained VGG-16 weights;
                everything except the re-sized fc8 layer is restored by the
                returned self.init_fn.
            is_training: forwarded to VGG (controls dropout).
        """
        # --- Input placeholders -------------------------------------------
        self.image_x = tf.placeholder(
            dtype=tf.float32,
            shape=[self.batch_size, self.default_image_size, self.default_image_size, 3],
            name='input_image_placeholder')
        self.caption_x = tf.placeholder(tf.int32, [None, None], name='input_caption_placeholder')
        self.has_caption = tf.placeholder(tf.bool, name='input_has_caption_placeholder')
        self.y = tf.placeholder(tf.int32, [None, None], name='labels_placeholder')

        # --- Image encoder: VGG-16 with fc8 resized to state_size ---------
        with slim.arg_scope(vgg.vgg_arg_scope()):
            image_features, end_points = vgg.vgg_16(
                self.image_x, num_classes=self.state_size, is_training=is_training)

        # Restore all pre-trained weights except fc8 (its shape changed) and
        # any Adam optimizer slots.
        vgg_except_fc8_weights = slim.get_variables_to_restore(exclude=['vgg_16/fc8', 'adam_vars'])
        self.init_fn = slim.assign_from_checkpoint_fn(vgg_checkpoint_path, vgg_except_fc8_weights)

        # --- Word embeddings -----------------------------------------------
        embedding_matrix = np.load(embedding_file)
        self.num_classes = len(embedding_matrix)
        # FIX: wrap the numpy array in an explicit float32 constant. np.load
        # commonly yields float64, and concatenating float64 caption
        # embeddings with the float32 image feature below would raise a
        # dtype-mismatch error in tf.concat.
        embeddings = tf.constant(embedding_matrix, dtype=tf.float32)

        cell = tf.nn.rnn_cell.BasicLSTMCell(self.state_size)
        self.init_state = cell.zero_state(self.batch_size, dtype=tf.float32)

        # Image feature becomes time step 0 of the RNN input sequence.
        image_inputs = tf.expand_dims(image_features, 1)

        # Training (has_caption=True): teacher forcing — caption token
        # embeddings follow the image step. Inference: image step only.
        # tf.cond takes the bool predicate directly; the original
        # tf.equal(has_caption, True) wrapper was redundant.
        rnn_inputs = tf.cond(
            self.has_caption,
            lambda: tf.concat(
                [image_inputs, tf.nn.embedding_lookup(embeddings, self.caption_x)], axis=1),
            lambda: image_inputs)

        rnn_outputs, self.final_state = tf.nn.dynamic_rnn(
            cell, rnn_inputs, initial_state=self.init_state)

        # NOTE(review): the variable name 'self.global_step' looks accidental,
        # but it is kept as-is so existing checkpoints still restore by name.
        self.global_step = tf.Variable(0, trainable=False, name='self.global_step', dtype=tf.int64)

        with tf.variable_scope('softmax'):
            W_o = tf.get_variable(name='W_o', shape=[self.state_size, self.num_classes],
                                  initializer=tf.random_normal_initializer(stddev=0.01))
            b_o = tf.get_variable(name='b_o', shape=[self.num_classes],
                                  initializer=tf.constant_initializer(0.0))

            # Project every RNN step to vocabulary logits:
            # [batch, time, state] -> [batch*time, state] -> [batch, time, vocab].
            logits = tf.reshape(
                tf.matmul(tf.reshape(rnn_outputs, [-1, self.state_size]), W_o) + b_o,
                [self.batch_size, -1, self.num_classes])
            self.predictions = tf.nn.softmax(logits)

            tf.summary.histogram('logits', logits)

            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=self.y, logits=logits)
            self.loss = tf.reduce_mean(cross_entropy)
            tf.summary.scalar('logits_loss', self.loss)

            # Regularizer that penalizes a flat (low-variance) logit
            # distribution per step.
            # FIX: tf.nn.moments expects `axes` as a list of ints, not a
            # bare int.
            mean, var = tf.nn.moments(logits, [-1])
            var_loss = tf.divide(10.0, 1.0 + tf.reduce_mean(var))
            tf.summary.scalar('var_loss', var_loss)

            self.loss = self.loss + var_loss
            tf.summary.scalar('total_loss', self.loss)

            optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
            # FIX: differentiate the TOTAL loss (cross-entropy + var_loss).
            # The original passed the raw per-token cross-entropy tensor, so
            # the variance regularizer was reported but never trained on.
            gradients, variables = zip(*optimizer.compute_gradients(self.loss))
            # Clip to a global norm of 5.0 to guard against exploding
            # gradients through the RNN.
            gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
            self.train_step = optimizer.apply_gradients(
                zip(gradients, variables), global_step=self.global_step)

            tf.summary.scalar('loss', self.loss)
            self.merged_summary_op = tf.summary.merge_all()