# -*- coding: utf-8 -*-
# @Author: lidongdong
# @time  : 19-1-24 上午10:07
# @file  : beam_search_inference.py

from model import CaptionModel
from data_loader import CocoLoader
from data_utils import *
import argparse
import tensorflow as tf


parser = argparse.ArgumentParser()

# (flag, default, type) argument specs, grouped as in the training script.
# model parameters
_MODEL_ARG_SPECS = [
    ("--vocab_size", 10000, int),
    ("--embed_dim", 300, int),
    ("--vocab_file", "../data/vocab.json", str),
    ("--train_image_file", "../data/train.h5", str),
    ("--val_image_file", "../data/val.h5", str),
    ("--train_caption_file", "../data/train.json", str),
    ("--val_caption_file", "../data/val.json", str),
]

# training parameters
_TRAIN_ARG_SPECS = [
    ("--epochs", 100, int),
    ("--learning_rate", 1e-4, float),
    ("--learning_decay", 0.99, float),
    ("--epochs_per_decay", 1, int),
    ("--epochs_per_val", 1, int),
    ("--epochs_per_save", 10, int),
]

# Register every flag with its default and type; registration order matches
# the original one-call-per-flag layout.
for _flag, _default, _type in _MODEL_ARG_SPECS + _TRAIN_ARG_SPECS:
    parser.add_argument(_flag, default=_default, type=_type)


def main():
    """Run beam-search caption inference over the validation set.

    Restores a trained CaptionModel from ``../checkpoints/mscoco-caption-0``,
    feeds every validation batch through the beam-search decoder, and writes
    the top-beam caption for each image to ``val.txt`` (a running index line
    followed by one space-separated caption line per image).
    """
    args = parser.parse_args()

    # load validation data (images plus reference captions)
    val_dataset = CocoLoader(args.val_image_file, args.val_caption_file, mode="val")

    vocab = load_vocab(vocab_size=args.vocab_size, vocab_file=args.vocab_file)
    # Build the word/index columns from a single pass over the vocab so the
    # two parallel lists cannot fall out of alignment (the original iterated
    # vocab.items() twice).
    if vocab:
        words, indices = zip(*vocab.items())
    else:
        words, indices = (), ()
    word2index = [list(words), list(indices)]
    index2word = [word2index[1], word2index[0]]

    session = tf.InteractiveSession()
    # NOTE: at inference time the model uses the MutableHashTable stored in
    # the checkpoint (not a freshly built vocab table).
    print("[*] constructing model")
    model = CaptionModel(args.vocab_size, args.embed_dim, None, word2index, index2word, mode="infer", beam_width=4)

    saver = tf.train.Saver()
    saver.restore(session, "../checkpoints/mscoco-caption-0")

    # placeholders
    caption_lens = model.caption_lens           # [N,]
    caption_targets = model.caption_targets     # [N, None(sequence_length)]
    image_inputs = model.image_inputs           # [N, H, W, 3]  image inputs

    # outputs: decoder ids cast to int64 for the int64-keyed lookup table
    final_outputs = tf.cast(model.predicted_ids, dtype=tf.int64)
    predict_words = model.index2word.lookup(final_outputs)

    valfile = open("val.txt", "w")
    index = 0
    try:
        while True:
            images, captions, lens = next(val_dataset)
            gen_captions = session.run(predict_words, feed_dict={
                caption_lens: lens,
                image_inputs: images,
                caption_targets: captions
            })

            # NOTE(review): assumes predicted_ids is [N, max_time, beam_width]
            # (the TF BeamSearchDecoder layout), so [:, :, 0] keeps the full
            # highest-scoring beam per image — the old comment claimed
            # [N, beam, seq]; confirm against CaptionModel.
            gen_captions = gen_captions[:, :, 0]
            gen_captions = gen_captions.tolist()
            for gen_caption in gen_captions:
                valfile.write(str(index))
                index += 1
                valfile.write("\n")
                for cap in gen_caption:
                    valfile.write(cap)
                    valfile.write(" ")
                valfile.write("\n")

    except StopIteration:
        # CocoLoader signals end-of-data by raising StopIteration
        print("inference done")
    finally:
        # Close the output file on every exit path; the original leaked the
        # handle when session.run (or the loader) raised anything else.
        valfile.close()


# Script entry point: run inference only when executed directly, not on import.
if __name__ == '__main__':
    main()
