import tensorflow as tf
from configuration import ModelConfig


class Img2Text:
    """Image-captioning model (TF1 graph mode): an image embedding primes an
    LSTM, which is then unrolled over caption tokens to predict the next word.

    Builds the full graph at construction time and exposes, as attributes:
    placeholders (`input_seqs`, `target_seqs`, `keep_prob`, `seq_len`,
    `image_feature`), `initial_state`/`final_state`, `logits`, `preds`
    (greedy argmax word ids), and `loss` (mean cross-entropy).
    """

    def __init__(self, mode, inference_batch=None):
        """Construct the graph.

        Args:
            mode: 'train' (batch size from config) or 'inference'
                (batch size from `inference_batch`).
            inference_batch: batch size used for the LSTM zero state when
                mode == 'inference'; ignored in 'train' mode.

        Raises:
            ValueError: if `mode` is not 'train' or 'inference', or if
                'inference' mode is requested without `inference_batch`.
        """
        # Fail fast: previously an unrecognized mode left `zero_state`
        # undefined and crashed later with a confusing NameError.
        if mode not in ('train', 'inference'):
            raise ValueError("mode must be 'train' or 'inference', got %r" % (mode,))
        if mode == 'inference' and inference_batch is None:
            raise ValueError("inference_batch must be provided when mode == 'inference'")

        config = ModelConfig()
        initializer = tf.random_uniform_initializer(
            minval=-config.initializer_scale,
            maxval=config.initializer_scale
        )

        with tf.name_scope('input'):
            # Token-id sequences, shape [batch, time]; targets are the
            # inputs shifted by one position (standard LM setup — the loss
            # below compares step t's output against target_seqs[t]).
            self.input_seqs = tf.placeholder(tf.int32, [None, None], name='input_seqs')
            self.target_seqs = tf.placeholder(tf.int32, [None, None], name='target_seqs')
            self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
            # Per-example valid lengths for dynamic_rnn, shape [batch].
            self.seq_len = tf.placeholder(tf.int32, [None], name='seq_len')
            # Precomputed image features (e.g. CNN output), shape
            # [batch, image_feature_size].
            self.image_feature = tf.placeholder(tf.float32, [None, config.image_feature_size], name='image_feature')

        with tf.variable_scope('seq_embedding'):
            embedding_map = tf.get_variable(name='embedding_map', shape=[config.vocab_size, config.embedding_size],
                                            initializer=initializer)
            # [batch, time, embedding_size]
            self.seq_embedding = tf.nn.embedding_lookup(embedding_map, self.input_seqs)

        cell = tf.nn.rnn_cell.LSTMCell(config.num_lstm_units, state_is_tuple=True)
        # Same keep probability on both input and output of the cell;
        # feed 1.0 at inference time to disable dropout.
        cell = tf.nn.rnn_cell.DropoutWrapper(cell, input_keep_prob=self.keep_prob, output_keep_prob=self.keep_prob)

        with tf.variable_scope('lstm', initializer=initializer) as lstm_scope:
            if mode == 'train':
                zero_state = cell.zero_state(batch_size=config.batch_size, dtype=tf.float32)
            else:  # mode == 'inference' (validated above)
                zero_state = cell.zero_state(batch_size=inference_batch, dtype=tf.float32)

            with tf.variable_scope('image_embeddings'):
                # Project image features into the word-embedding space so
                # they can be fed to the LSTM as a pseudo first token.
                self.image_embedding = tf.contrib.layers.fully_connected(
                    inputs=self.image_feature,
                    num_outputs=config.embedding_size,
                    activation_fn=None,
                    weights_initializer=initializer,
                    biases_initializer=None
                )

            # One step on the image embedding initializes the LSTM state;
            # the step's output itself is discarded.
            _, self.initial_state = cell(self.image_embedding, zero_state)
            # Reuse the variables created by the step above when dynamic_rnn
            # unrolls the same cell over the caption tokens.
            lstm_scope.reuse_variables()

            self.outputs, self.final_state = tf.nn.dynamic_rnn(cell,
                                                               inputs=self.seq_embedding,
                                                               sequence_length=self.seq_len,
                                                               initial_state=self.initial_state,
                                                               dtype=tf.float32,
                                                               scope=lstm_scope)
            # Flatten [batch, time, units] -> [batch*time, units] for the
            # shared output projection.
            lstm_outputs = tf.reshape(self.outputs, [-1, cell.output_size])

        # NOTE(review): scope name 'logtis' is a typo for 'logits'; kept
        # as-is because renaming it would change checkpoint variable names.
        with tf.variable_scope('logtis'):
            W = tf.get_variable('W', [cell.output_size, config.vocab_size], initializer=initializer)
            b = tf.get_variable('b', [config.vocab_size], initializer=tf.constant_initializer(0.0))
            self.logits = tf.matmul(lstm_outputs, W) + b

        # for inference & validation only: greedy next-word prediction.
        softmax = tf.nn.softmax(self.logits)
        self.preds = tf.argmax(softmax, 1)

        # for training only: flatten targets to align with flattened logits.
        targets = tf.reshape(self.target_seqs, [-1])

        with tf.name_scope('loss'):
            # NOTE(review): padded positions are not masked out of the mean;
            # presumably acceptable for this training setup — confirm.
            losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets,
                                                                    logits=self.logits)
            self.loss = tf.reduce_mean(losses)
