import os
import pickle
import tarfile
import numpy as np
import tensorflow as tf

from model import Img2Text
from scipy.misc import imread
from configuration import ExtractionConfig, ModelConfig, DataConfig

# Pin inference to the second GPU; adjust if the host layout differs.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# Inception tensor whose squeezed activations serve as the image embedding.
layer_to_extract = 'pool_3:0'
extraction_config = ExtractionConfig()
model_config = ModelConfig()
data_config = DataConfig()

# Load the vocabulary with a context manager so the file handle is closed
# promptly (the original left the open file to the garbage collector).
with open(data_config.vocab_file, 'rb') as vocab_file:
    vocab = pickle.load(vocab_file)
vocab_size = len(vocab)
print('vocab_size', vocab_size)

# Directory scanned for images to caption.
inference_images_folder = './inference/'


def extract():
    """Unpack the pre-trained Inception model archive.

    Extracts `inception-2015-12-05.tgz` from
    `extraction_config.model_dir` into
    `extraction_config.pre_trained_folder`.
    """
    file_name = 'inception-2015-12-05.tgz'
    file_path = os.path.join(extraction_config.model_dir, file_name)
    # Context manager closes the archive even on error; the original
    # leaked the open TarFile object.
    with tarfile.open(file_path, 'r:gz') as archive:
        archive.extractall(extraction_config.pre_trained_folder)


def create_graph():
    """Load the frozen Inception GraphDef into the default TF graph."""
    # NOTE: the pre-trained folder must already be populated by extract().
    pre_trained_model_name = 'classify_image_graph_def.pb'
    graph_path = os.path.join(extraction_config.pre_trained_folder,
                              pre_trained_model_name)
    with tf.gfile.FastGFile(graph_path, 'rb') as model_file:
        serialized_graph = model_file.read()
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(serialized_graph)
    # Empty name prefix keeps original tensor names like 'pool_3:0'.
    tf.import_graph_def(graph_def, name='')


def extract_features(image_dir):
    """Encode every file in `image_dir` with the Inception `pool_3` layer.

    Args:
        image_dir: directory containing the images (JPEG bytes are fed
            directly to the 'DecodeJpeg/contents:0' placeholder).

    Returns:
        A tuple `(all_image_vectors, all_image_names)` where the i-th
        vector is the squeezed activation for the i-th listed file, or
        None when `image_dir` does not exist.
    """
    if not os.path.exists(image_dir):
        # Fixed typo in the original message: "exit" -> "exist".
        print('image_dir does not exist!')
        return None
    extract()
    create_graph()
    with tf.Session() as sess:
        extract_tensor = sess.graph.get_tensor_by_name(layer_to_extract)
        print('Processing image...')

        all_image_vectors = []
        # NOTE(review): listdir returns every entry, not just images —
        # non-image files would make DecodeJpeg fail; confirm the folder
        # only ever holds images.
        all_image_names = os.listdir(image_dir)
        for image in all_image_names:
            temp_path = os.path.join(image_dir, image)
            # Context manager closes each file handle deterministically
            # (the original leaked one handle per image).
            with tf.gfile.FastGFile(temp_path, 'rb') as image_file:
                image_data = image_file.read()

            prediction = sess.run(extract_tensor, {'DecodeJpeg/contents:0': image_data})
            # Drop singleton batch/spatial dims to get a flat feature vector.
            prediction = np.squeeze(prediction)

            all_image_vectors.append(prediction)
        return all_image_vectors, all_image_names


def main(_):
    """Run end-to-end caption inference.

    Extracts Inception features for every image in
    `inference_images_folder`, restores the trained Img2Text model from
    './model', then greedily decodes a caption token by token starting
    from the '<start>' symbol.

    Args:
        _: unused positional argument supplied by tf.app.run().
    """
    # NOTE(review): extract_features returns None when the folder is
    # missing, in which case this tuple unpack raises TypeError.
    all_image_vectors, all_images_names = extract_features(inference_images_folder)
    print('vectors:', all_image_vectors)
    print('names:', all_images_names)

    # One batch entry per image found in the inference folder.
    batch_size = len(all_image_vectors)
    model = Img2Text(model_config,
                     'inference',
                     inference_batch=batch_size)

    def run_step(input_x, state, seq_len):
        # One decoding step: feed the previous token(s) and RNN state,
        # return the predicted token id(s) reshaped to (-1, 1) and the
        # new state. `sess` is the Session bound in the `with` block
        # below; run_step is only called after that binding exists.
        feed_dict = {
            model.input_seqs: input_x,
            model.initial_state: state,
            model.seq_len: seq_len,
            model.keep_prob: 1.0  # no dropout at inference time
        }
        prob, state = sess.run([model.preds, model.final_state], feed_dict=feed_dict)
        prob = np.reshape(prob, (-1, 1))
        print('shape of prob:', np.shape(prob))
        print('prob:', prob)
        return prob, state

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)

        # Restore the latest trained checkpoint over the freshly
        # initialized variables.
        saver = tf.train.Saver(tf.global_variables())
        ckpt_file = tf.train.latest_checkpoint('./model')
        saver.restore(sess, ckpt_file)
        print('Model restored!')

        seq_len = np.ones(batch_size)  # one token per step, per image
        final_preds = []  # NOTE(review): never used below — dead variable
        # Prime the RNN state with the extracted image features.
        feed_dict = {model.image_feature: all_image_vectors,
                     model.keep_prob: 1.0}
        state = sess.run(model.initial_state, feed_dict=feed_dict)

        char_to_id = pickle.load(open(data_config.char_to_id_file, 'rb'))
        start = '<start>'
        start = [char_to_id[start]]
        # Every image in the batch begins decoding from the same
        # '<start>' token id.
        start_inputs = []
        for i in range(batch_size):
            start_inputs.append(start)
        print('start_inputs:', start_inputs)

        next_word, next_state = run_step(start_inputs, state, seq_len)
        id_to_char = pickle.load(open(data_config.id_to_char_file, 'rb'))

        # Greedy decode until the accumulated text reaches 50 characters.
        # NOTE(review): only next_word[0][0] (the first image) is decoded,
        # and later steps pass seq_len=[1] regardless of batch_size —
        # confirm this loop is only meant for a single image.
        text = ''
        while len(text) < 50:
            text += id_to_char.get(next_word[0][0], 'unk')+' '
            print('word:', id_to_char.get(next_word[0][0], 'unk'))
            input_x = next_word
            next_word, next_state = run_step(input_x, next_state, [1])
        # text += next_word  # end_char??

        print('text:', text)

        # --- Dead code below: earlier fixed-length decoding and image
        # --- annotation experiments, retained for reference.
        # current_pred = np.ones((batch_size, 1))

        # for t in range(model_config.padded_length):
        #     feed_dict = {
        #         model.input_seqs: start_inputs,
        #         model.initial_state: state,
        #         model.seq_len: seq_len,
        #         model.keep_prob: 1.0
        #     }
        #     current_pred, state = sess.run([model.preds, model.final_state], feed_dict=feed_dict)
        #     current_pred = np.reshape(current_pred, (-1, 1))
        #
        #     final_preds.append(current_pred)
        #
        # captions_pred = [unpack.reshape(-1, 1) for unpack in final_preds]
        # captions_pred = np.concatenate(captions_pred, 1)
        #
        # id_to_char = pickle.load(open(data_config.id_to_char_file, 'rb'))
        # print('captions_pred:', captions_pred)
        # captions_deco = decode_captions(captions_pred, id_to_char)
        #
        # print('captions:', captions_deco)

        # saved the images with captions written on them
        # results_dir = './results/'
        # if not os.path.exists(results_dir):
        #     os.makedirs(results_dir)
        # for j in range(len(captions_deco)):
        #     this_image_name = all_images_names['file_name'].values[j]
        #     img_name = os.path.join(results_dir, this_image_name)
        #     img = imread(os.path.join(test_dir, this_image_name))
        #     write_text_on_image(img, img_name, captions_deco[j])

    # if not os.path.exists(extraction_config.img_dir):
    #     print("image_dir does not exit!")
    #     return None
    #
    # extract()
    # create_graph()
    #
    # with tf.Session() as sess:
    #     # Some useful tensors:
    #     # 'softmax:0': A tensor containing the normalized prediction across
    #     #   1000 labels.
    #     # 'pool_3:0': A tensor containing the next-to-last layer containing 2048
    #     #   float description of the image.
    #     # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
    #     #   encoding of the image.
    #     # Runs the softmax tensor by feeding the image_data as input to the graph.
    #     image_path_to_vector = {}
    #     extract_tensor = sess.graph.get_tensor_by_name(layer_to_extract)
    #     print('Processing image...')
    #
    #     image_data = tf.gfile.FastGFile(os.path.join(inference_images_folder, 'inference.jpg'), 'rb').read()
    #
    #     image_vector = sess.run(extract_tensor, {'DecodeJpeg/contents:0': image_data})
    #     image_vector = np.squeeze(image_vector)
    #
    #     print('image vector: ', image_vector)


def decode_captions(captions, idx_to_word):
    """Turn arrays of token ids back into space-joined caption strings.

    Args:
        captions: int ndarray of shape (N, T), or (T,) for one caption.
        idx_to_word: mapping from token id to word string.

    Returns:
        A list of N decoded strings, or a single string when the input
        was one-dimensional. '<NULL>' tokens are skipped; decoding of a
        row stops after its first '<END>' token (which is included).
    """
    was_single = captions.ndim == 1
    batch = captions[None] if was_single else captions
    decoded = []
    for row in batch:
        tokens = []
        for token_id in row:
            word = idx_to_word[token_id]
            if word != '<NULL>':
                tokens.append(word)
            if word == '<END>':
                break
        decoded.append(' '.join(tokens))
    return decoded[0] if was_single else decoded


if __name__ == '__main__':
    # tf.app.run() parses TF flags and then invokes main(_) above.
    tf.app.run()
