import os
import pickle
import numpy as np
import tensorflow as tf

from model import Img2Text
from configuration import ExtractionConfig, ModelConfig, DataConfig

os.environ['CUDA_VISIBLE_DEVICES'] = '1'  # restrict TensorFlow to GPU 1
# Name of the tensor in the pre-trained graph whose activations are used
# as the image feature vector.
layer_to_extract = 'pool_3:0'
extraction_config = ExtractionConfig()
model_config = ModelConfig()
data_config = DataConfig()

# Vocabulary produced at training time (trusted, project-local pickle file).
vocab = pickle.load(open(data_config.vocab_file, 'rb'))
vocab_size = len(vocab)
# print('vocab_size', vocab_size)

# Directory containing the images to caption.
inference_images_folder = './inference/'


def extract_features(image_dir):
    """Extract a feature vector for every image in ``image_dir``.

    Imports the frozen pre-trained graph (``classify_image_graph_def.pb``)
    and runs each image file through it, collecting the squeezed activations
    of the ``layer_to_extract`` tensor.

    Args:
        image_dir: directory containing the images to process.

    Returns:
        A ``(feature_vectors, image_names)`` tuple, or ``None`` when
        ``image_dir`` does not exist.
    """
    if not os.path.exists(image_dir):
        # Fixed typo in the original message ("does not exit!").
        print('image_dir does not exist!')
        return None

    pre_trained_model_name = 'classify_image_graph_def.pb'
    graph_path = os.path.join(extraction_config.pre_trained_folder, pre_trained_model_name)
    with tf.gfile.FastGFile(graph_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')

    with tf.Session() as sess:
        extract_tensor = sess.graph.get_tensor_by_name(layer_to_extract)
        print('Processing image...')

        all_image_vectors = []
        all_image_names = os.listdir(image_dir)
        for image in all_image_names:
            temp_path = os.path.join(image_dir, image)
            # Use a context manager so the file handle is closed promptly;
            # the original FastGFile(...).read() leaked one handle per image.
            with tf.gfile.FastGFile(temp_path, 'rb') as img_file:
                image_data = img_file.read()

            # Feed the raw encoded image bytes to the graph's decode input.
            prediction = sess.run(extract_tensor, {'DecodeJpeg/contents:0': image_data})
            prediction = np.squeeze(prediction)

            all_image_vectors.append(prediction)
        return all_image_vectors, all_image_names


def main(_):
    """Generate and print a caption for every image in ``inference_images_folder``.

    Extracts image features, restores the trained Img2Text model from
    ``./model``, then greedily decodes one word per step until the captions
    reach 70 characters, truncating each at its ``<end>`` token.
    """
    features = extract_features(inference_images_folder)
    if features is None:
        # extract_features() already reported the missing directory;
        # the original code crashed here on tuple unpacking.
        return
    all_image_vectors, all_images_names = features

    batch_size = len(all_image_vectors)
    model = Img2Text('inference',
                     inference_batch=batch_size)

    # Run one decoding step: feed the previous word ids and RNN state,
    # return the predicted next-word ids (shape (batch, 1)) and new state.
    def run_step(input_x, state, seq_len):
        feed_dict = {
            model.input_seqs: input_x,
            model.initial_state: state,
            model.seq_len: seq_len,
            model.keep_prob: 1.0
        }
        prob, state = sess.run([model.preds, model.final_state], feed_dict=feed_dict)
        prob = np.reshape(prob, (-1, 1))
        return prob, state

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)

        saver = tf.train.Saver(tf.global_variables())
        ckpt_file = tf.train.latest_checkpoint('./model')
        if ckpt_file is None:
            # latest_checkpoint returns None when no checkpoint exists;
            # fail with a clear message instead of an opaque restore error.
            print('No checkpoint found in ./model')
            return
        saver.restore(sess, ckpt_file)
        print('Model restored!')

        # One token is fed per step for every image in the batch.
        seq_len = np.ones(batch_size)
        # Prime the decoder's initial state from the image features.
        feed_dict = {model.image_feature: all_image_vectors,
                     model.keep_prob: 1.0}
        state = sess.run(model.initial_state, feed_dict=feed_dict)

        # Trusted, project-local pickle files produced at training time.
        char_to_id = pickle.load(open(data_config.char_to_id_file, 'rb'))
        start = '<start>'
        start_id = [char_to_id[start]]
        start_inputs = [start_id for _ in range(batch_size)]

        next_word, next_state = run_step(start_inputs, state, seq_len)
        id_to_char = pickle.load(open(data_config.id_to_char_file, 'rb'))

        all_texts = ['' for _ in range(batch_size)]

        # Greedy decoding until the first caption reaches 70 characters;
        # unknown ids map to 'unk'.
        while len(all_texts[0]) < 70:
            for i in range(batch_size):
                text_temp = id_to_char.get(next_word[i][0], 'unk')
                all_texts[i] += text_temp + ' '
            next_word, next_state = run_step(next_word, next_state, seq_len)

        # Keep only the text before the <end> token.
        for i in range(batch_size):
            all_texts[i] = all_texts[i].split('<end>')[0]

        print('image name && generated caption: \n')
        for i in range(len(all_images_names)):
            print('{}th -- {} -- {}'.format(i + 1, all_images_names[i], all_texts[i]))

if __name__ == '__main__':
    # TF1 app wrapper: parses command-line flags and calls main(_) above.
    tf.app.run()