import os
import tensorflow as tf
import datetime
from data_helpers import Data
from model import Img2Text
from configuration import ModelConfig, TrainingConfig

# Pin this process to GPU #2; must be set before TensorFlow creates a session.
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
# Build the model graph in training mode (presumably switches dropout/loss wiring
# inside Img2Text — confirm against model.py).
mode = 'train'
# Hyperparameter containers shared by train(); defined in configuration.py.
model_config = ModelConfig()
training_config = TrainingConfig()


def train():
    """Train the Img2Text model.

    Iterates over batches from Data.batch_iter(), minimizing model.loss with
    tf.contrib.layers.optimize_loss (optional exponential LR decay), writing
    loss summaries and periodic checkpoints under ./model.
    """
    data = Data()
    batches = data.batch_iter()

    model = Img2Text(mode)
    global_step = tf.Variable(0, trainable=False, name='global_step')

    # Build an exponential-decay schedule only when a decay factor is configured;
    # optimize_loss accepts None to mean "constant learning rate".
    learning_rate_decay_fn = None
    if training_config.learning_rate_decay_factor > 0:
        num_batches_per_epoch = (training_config.num_examples_per_epoch / model_config.batch_size)
        decay_steps = int(num_batches_per_epoch * training_config.num_epochs_per_decay)

        def _learning_rate_decay_fn(learning_rate, global_step):
            # staircase=True drops the rate in discrete jumps every `decay_steps`.
            return tf.train.exponential_decay(
                learning_rate,
                global_step,
                decay_steps=decay_steps,
                decay_rate=training_config.learning_rate_decay_factor,
                staircase=True
            )

        learning_rate_decay_fn = _learning_rate_decay_fn

    # NOTE(review): `initial_learning_rete` looks like a typo for "rate", but the
    # attribute name must match what configuration.TrainingConfig declares — the
    # previously removed dead code used the same spelling, so confirm there before renaming.
    train_op = tf.contrib.layers.optimize_loss(
        loss=model.loss,
        global_step=global_step,
        learning_rate=training_config.initial_learning_rete,
        optimizer=training_config.optimizer,
        clip_gradients=training_config.clip_gradients,
        learning_rate_decay_fn=learning_rate_decay_fn
    )

    with tf.Session() as sess:
        out_dir = './model'
        print('Writing to {}\n'.format(out_dir))

        # Create the output directory BEFORE the FileWriter/Saver use the path
        # (the original created it only after constructing the FileWriter).
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

        tf.summary.scalar('loss', model.loss)
        train_summary_op = tf.summary.merge_all()
        train_summary_writer = tf.summary.FileWriter(out_dir, sess.graph)

        checkpoint_prefix = os.path.join(out_dir, 'model')
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=training_config.num_checkpoints)

        sess.run(tf.global_variables_initializer())
        step = 0  # guard against an empty batch iterator
        for batch in batches:
            img_feature, x_batch, y_batch, seq_len = batch
            feed_dict = {
                model.input_seqs: x_batch,
                model.target_seqs: y_batch,
                model.seq_len: seq_len,
                # NOTE(review): keep_prob=1.0 disables dropout during training —
                # confirm this is intended rather than a leftover from eval code.
                model.keep_prob: 1.0,
                model.image_feature: img_feature
            }
            _, step, summaries, loss = sess.run([train_op, global_step, train_summary_op, model.loss],
                                                feed_dict=feed_dict)
            train_summary_writer.add_summary(summaries, global_step=step)

            if step % training_config.display_every == 0:
                time_str = datetime.datetime.now().isoformat()
                print('{}, step {}, loss {}'.format(time_str, step, loss))

            # TODO, eval display
            if step % training_config.checkpoint_every == 0:
                path = saver.save(sess, checkpoint_prefix, global_step=step)
                print('Saving model checkpoint to\n{}'.format(path))

            # TODO: learning rate decay

        # Persist the tail of training: the original lost any steps after the last
        # `checkpoint_every` boundary and never flushed the summary writer.
        if step > 0:
            path = saver.save(sess, checkpoint_prefix, global_step=step)
            print('Saving model checkpoint to\n{}'.format(path))
        train_summary_writer.close()


# Script entry point: run a full training session when executed directly.
if __name__ == '__main__':
    train()
