
import json
import logging
import os
import numpy as np

import tensorflow as tf

from flags import parse_args
from model import Model
from slim.datasets import dataset_utils

# Verbose logging with file/line info so DEBUG traces below are attributable.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s', level=logging.DEBUG)

# Command-line configuration (paths and hyper-parameters) parsed by flags.py.
FLAGS, unparsed = parse_args()

# Locations of the pre-trained word-embedding matrix and the VGG-16 checkpoint.
embedding_file = FLAGS.embedding_dir + '/embeddings.npy'
vgg_checkpoint_path = FLAGS.checkpoint_path + '/vgg_16.ckpt'

print(embedding_file)
print(vgg_checkpoint_path)

# Load the token->id vocabulary and its inverse mapping.
# Fix: json.load() takes no `encoding` argument in Python 3 -- it was a
# silently-ignored kwarg before 3.9 and raises TypeError from 3.9 on.
# The file encoding belongs on open() instead.
with open(FLAGS.embedding_dir + '/dictionary.json', 'r', encoding='utf-8') as f:
    char_to_ix = json.load(f)

with open(FLAGS.embedding_dir + '/reversed_dictionary.json', 'r', encoding='utf-8') as f:
    ix_to_char_temp = json.load(f)

# JSON object keys are always strings; convert them back to integer ids.
ix_to_char = {int(key): val for key, val in ix_to_char_temp.items()}

# Build the captioning model graph in training mode. The VGG encoder weights
# are restored later via model.init_fn(sess) -- presumably from
# vgg_checkpoint_path passed here; confirm in model.py.
model = Model(learning_rate=FLAGS.learning_rate, batch_size=FLAGS.batch_size, state_size=FLAGS.state_size)
model.build(embedding_file = embedding_file, vgg_checkpoint_path=vgg_checkpoint_path, is_training=True)

def get_raw_captions(caption_arr, vocab=None, seq_len=None):
    """Map caption tokens to vocabulary ids, padded/truncated to a fixed length.

    Args:
        caption_arr: iterable of string tokens (one caption, already split).
        vocab: token -> id mapping; defaults to the module-level char_to_ix.
        seq_len: target sequence length; defaults to FLAGS.seq_nums.

    Returns:
        A list of exactly seq_len ids. Unknown tokens map to vocab['UNK'],
        which doubles as the padding id (matching the original behaviour).
    """
    if vocab is None:
        vocab = char_to_ix
    if seq_len is None:
        seq_len = FLAGS.seq_nums
    unk = vocab['UNK']
    ids = [vocab.get(token, unk) for token in caption_arr]
    # Truncate long captions first, then pad short ones with UNK.
    ids = ids[:seq_len]
    ids.extend([unk] * (seq_len - len(ids)))
    return ids


# Ten sharded TFRecord files: flickr_train_00000-of-00010 .. 00009-of-00010.
filenames = [
    '{0}/flickr_train_{1:05d}-of-00010.tfrecord'.format(FLAGS.dataset_train, shard)
    for shard in range(10)
]

# Schema of each serialized example: raw JPEG bytes plus the caption string.
feature = {
    'image/encoded': tf.FixedLenFeature([], tf.string),
    'image/class/label': tf.FixedLenFeature([], tf.string),
}

# Grow GPU memory on demand instead of reserving it all up front.
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True

# Training session: build the TF1 queue-runner input pipeline, initialise /
# restore weights, then run the optimisation loop with periodic logging,
# sample decoding, and checkpointing.
with tf.Session(config=sess_config) as sess:
    summary_string_writer = tf.summary.FileWriter(FLAGS.output_dir, sess.graph)
    
    # Queue-based input pipeline (TF1 style): read serialized examples from
    # the sharded TFRecord files, decode and resize the JPEG, then batch.
    filename_queue = tf.train.string_input_producer(filenames, num_epochs=FLAGS.num_epochs)
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example, features=feature)
    image_tensor = tf.image.decode_jpeg(features['image/encoded'], channels=3)
    resized_images = tf.image.resize_images(image_tensor, [model.default_image_size, model.default_image_size])
    label = features['image/class/label']
    images, labels = tf.train.batch([resized_images, label], batch_size=FLAGS.batch_size, capacity=80, num_threads=5)
    
    logging.debug('begin running the init op')
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    logging.debug('init global and local variables')
    sess.run(init_op)
    logging.debug('restoring the vgg weights')
    # init_fn presumably restores the pre-trained VGG weights from
    # vgg_checkpoint_path supplied to model.build() -- confirm in model.py.
    model.init_fn(sess)
    logging.debug('done running the init op')
    
    # Initial RNN state fed into the first step; updated after every step.
    training_state = sess.run(model.init_state)
    
    saver = tf.train.Saver(max_to_keep=3)

    # Resume from the newest checkpoint in output_dir, if one exists.
    latest_ckp = tf.train.latest_checkpoint(FLAGS.output_dir)
    if latest_ckp:
        logging.debug('restore checkpoint {0}'.format(latest_ckp))
        saver.restore(sess, latest_ckp)

    # Start the queue-runner threads that feed the pipeline defined above.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    for step in range(FLAGS.max_steps):
        #logging.debug('begin running the first batch')
        img_val, lbl_val = sess.run([images, labels])
            
        #logging.debug('fetched the batch data')
        # Convert each raw caption (space-separated tokens inside the label
        # bytes) into a fixed-length list of vocabulary ids.
        # NOTE(review): the loop variable shadows the `label` tensor defined
        # above; harmless here since the tensor is no longer used, but worth
        # renaming.
        caption_raw_X = []
        for label in lbl_val:
            caption_raw_X.append(get_raw_captions(label.decode().split(' ')))

        image_X = img_val
        caption_raw_X = np.array(caption_raw_X)
        # Decoder input drops the last token, while the target keeps the full
        # sequence (X has seq_nums-1 columns, Y has seq_nums) -- presumably
        # model.build() aligns these internally; verify in model.py.
        caption_X = caption_raw_X[:,:-1]
        caption_Y = caption_raw_X
        feed_dict_to_use = {model.image_x : image_X, model.caption_x: caption_X, model.y:caption_Y, model.init_state:training_state, model.has_caption:True}
        #logging.debug('running the loss calculation for input data')
        # One optimisation step; the RNN state is carried across steps.
        gs, training_loss_, training_state, _, summary_string = sess.run([model.global_step, model.loss,\
             model.final_state, model.train_step, model.merged_summary_op],feed_dict=feed_dict_to_use)

        summary_string_writer.add_summary(summary_string, gs)
        
        if(step % 100 == 0):
            #logging.debug("step {0}".format(step))
            # NOTE(review): this logs the single-step loss, not an average
            # over the last 50 steps as the message text claims.
            logging.debug("Average loss at step {0} for last 50 steps {1}".format(step, training_loss_))

        if step % 1000 == 0:
            # Periodically decode one sample prediction for eyeballing, and
            # save a checkpoint tagged with the global step.
            prediction_values = sess.run([model.predictions], feed_dict=feed_dict_to_use)
            logging.debug('input data: {0}'.format(' '.join(ix_to_char[ix] for ix in caption_X[0,:])))
            char_results = np.argmax(prediction_values[0], axis=-1)
            logging.debug('predict data: {0}'.format(' '.join(ix_to_char[ix] for ix in char_results.ravel())))
            logging.debug('ground true: {0}'.format(' '.join(ix_to_char[ix] for ix in caption_Y[0,:])))
            save_path = saver.save(sess, FLAGS.output_dir + "/model.ckpt", global_step=gs)
            logging.debug('Save checkpoint to path {0}'.format(save_path))

    summary_string_writer.close()
    # NOTE(review): if the loop raises (e.g. OutOfRangeError once the input
    # epochs are exhausted), this cleanup is skipped; consider wrapping the
    # loop in try/finally.
    coord.request_stop()
    coord.join(threads)