# -*- coding: utf-8 -*-
# @Author: lidongdong
# @time  : 19-1-24 上午10:07
# @file  : train.py

import argparse
from data_loader import *
from model import *
from nltk.translate.bleu_score import corpus_bleu


# Command-line configuration. Defaults assume the ../data layout produced
# by the preprocessing scripts.
parser = argparse.ArgumentParser()

# --- model / data options ---
parser.add_argument("--vocab_size", type=int, default=10000)
parser.add_argument("--embed_dim", type=int, default=300)
for _flag, _path in [
    ("--vocab_file", "../data/vocab.json"),
    ("--train_image_file", "../data/train.h5"),
    ("--val_image_file", "../data/val.h5"),
    ("--train_caption_file", "../data/train.json"),
    ("--val_caption_file", "../data/val.json"),
]:
    parser.add_argument(_flag, type=str, default=_path)

# --- training schedule ---
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--learning_rate", type=float, default=1e-4)
parser.add_argument("--learning_decay", type=float, default=0.99)
for _flag, _every in [
    ("--epochs_per_decay", 1),
    ("--epochs_per_val", 1),
    ("--epochs_per_save", 10),
]:
    parser.add_argument(_flag, type=int, default=_every)


def main():
    """Train the caption model on MS-COCO and periodically evaluate BLEU.

    Per epoch: one full pass over the training set, optional learning-rate
    decay, a checkpoint save, and every ``epochs_per_val`` epochs a corpus
    BLEU-1/BLEU-2 evaluation on the validation set.
    """
    args = parser.parse_args()

    # TF session with on-demand GPU memory growth (avoids grabbing all VRAM up front).
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    session = tf.Session(config=config)

    # Data loaders; their .next() raises StopIteration at the end of each epoch.
    train_dataset = CocoLoader(args.train_image_file, args.train_caption_file, mode="train")
    val_dataset = CocoLoader(args.val_image_file, args.val_caption_file, mode="val")
    vocab = load_vocab(vocab_size=args.vocab_size, vocab_file=args.vocab_file)
    # NOTE(review): these are parallel [words, indices] lists, not dicts;
    # presumably CaptionModel expects this layout -- confirm against model.py.
    vocab_items = list(vocab.items())
    word2index = [[w for w, _ in vocab_items], [i for _, i in vocab_items]]
    index2word = [word2index[1], word2index[0]]
    print("[*] constructing model")
    model = CaptionModel(args.vocab_size, args.embed_dim, session, word2index, index2word)

    # Placeholders exposed by the model.
    caption_lens = model.caption_lens           # [N,]
    caption_targets = model.caption_targets     # [N, None(sequence_length)]
    image_inputs = model.image_inputs           # [N, H, W, 3]  denotes image inputs

    # Model outputs.
    output_logits = model.output_logits         # [N, None(sequence_len)]
    cross_entropy = model.cross_entropy

    # Mask out padding positions so they do not contribute to the loss.
    sequence_mask = tf.sequence_mask(caption_lens, maxlen=tf.reduce_max(caption_lens))
    sequence_mask = tf.cast(sequence_mask, tf.float32)  # [N, sequence]

    # Mean cross-entropy over the non-padded tokens only (element-wise mask).
    loss = tf.reduce_sum(cross_entropy * sequence_mask) / tf.reduce_sum(sequence_mask)

    # Optimizer, with optional exponential learning-rate decay.
    # (Renamed the assign op from `learning_decay` -- the original shadowed
    # the CLI argument name with a TF op, which was confusing.)
    learning_rate = args.learning_rate
    decay_op = None
    epochs_per_decay = None
    if args.learning_decay < 1.:
        learning_rate = tf.Variable(initial_value=learning_rate)
        epochs_per_decay = args.epochs_per_decay
        decay_op = tf.assign(learning_rate, learning_rate * args.learning_decay)

    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
    session.run(tf.global_variables_initializer())

    # Checkpointing: keep one checkpoint per epoch.
    saver = tf.train.Saver(max_to_keep=args.epochs)
    train_step = 0
    saver.save(session, save_path="../checkpoints/mscoco-caption", global_step=train_step)

    for epoch in range(1, args.epochs + 1):
        # ---- train one epoch (loader signals the end via StopIteration) ----
        try:
            print("[*] epoch {:0>3} start".format(epoch))
            while True:
                train_step += 1
                images, captions, lens = train_dataset.next()
                _, loss_value = session.run([optimizer, loss], feed_dict={
                    caption_lens: lens,
                    caption_targets: captions,
                    image_inputs: images
                })
                print("[step {:>6}] loss={}".format(train_step, loss_value))
        except StopIteration:
            print("[*] epoch {:0>3} finish".format(epoch))

        # ---- learning-rate decay ----
        if decay_op is not None and epoch % epochs_per_decay == 0:
            lr = session.run(decay_op)
            print("lr = {}".format(lr))

        # ---- save checkpoint ----
        saver.save(session, save_path="../checkpoints/mscoco-caption", global_step=train_step)

        # ---- validation: corpus BLEU over the whole val set ----
        if epoch % args.epochs_per_val == 0:
            print("[*] eval start")
            bleu1_score = []
            bleu2_score = []
            try:
                while True:
                    # captions: batch_size, sequence_len
                    images, captions, lens = val_dataset.next()
                    outputs = session.run(output_logits, feed_dict={
                        caption_lens: lens,
                        caption_targets: captions,
                        image_inputs: images
                    })

                    # BUG FIX: the original scored the `caption_targets`
                    # placeholder tensor; score the batch's actual ground-truth
                    # captions instead. corpus_bleu expects, per hypothesis, a
                    # LIST of reference sequences -- we have one reference each.
                    # NOTE(review): assumes `captions` rows and `outputs` rows
                    # are comparable token sequences -- confirm the model emits
                    # token ids here, not raw logits.
                    references = [[c] for c in captions]
                    bleu1_score.append(corpus_bleu(references, outputs, weights=(1., )))
                    bleu2_score.append(corpus_bleu(references, outputs, weights=(0.5, 0.5)))

            except StopIteration:
                print("[*] bleu_1: {}".format(np.mean(bleu1_score)))
                print("[*] bleu_2: {}".format(np.mean(bleu2_score)))
                print("[*] eval finish")

# Script entry point: parse CLI arguments and run the full training loop.
if __name__ == '__main__':
    main()
