import os
import time

import matplotlib
matplotlib.use('agg')  # select a non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

from dataload import *
from text8 import *

# Example invocation:
# python main.py --data_path=data --save_path=tmp --mode=std
flags = tf.flags
flags.DEFINE_string("data_path", None, "where data is stored")
flags.DEFINE_string("save_path", None, "model output directory")
flags.DEFINE_string("mode", None, "LSTM mode of std, bn and bren")
FLAGS = flags.FLAGS
# Checkpoint/log directory: one subdirectory per LSTM mode.
# NOTE(review): evaluated at import time -- if --save_path is not given this
# becomes the literal string "None/<mode>" and the Supervisor below will log
# there anyway; confirm callers always pass --save_path.
save_path = "%s/%s" % (FLAGS.save_path, FLAGS.mode)

def run_epoch(session, model, eval_op=None, verbose=False):
    """Run one full pass over the model's input and return (perplexity, accuracy).

    Args:
        session: active TF session used for all fetches.
        model: model object exposing ``cost``, ``acc``, ``initial_state``,
            ``final_state`` and ``input`` (with ``epoch_size``, ``num_steps``,
            ``batch_size``).
        eval_op: optional extra op (e.g. the training op) run alongside the
            fetches; its result is discarded, so passing it makes this a
            training epoch and omitting it makes it pure evaluation.
        verbose: if True, print progress/perplexity roughly 10 times per epoch.

    Returns:
        Tuple ``(perplexity, mean_acc)`` where perplexity is
        ``exp(total_cost / total_steps)`` and ``mean_acc`` is the summed
        per-step accuracy divided by total steps.
    """
    start_time = time.time()
    costs = 0.0
    accs = 0.0
    iters = 0
    state = session.run(model.initial_state)

    fetches = {
            "cost": model.cost,
            "acc": model.acc,
            "final_state": model.final_state,
    }
    if eval_op is not None:
        fetches["eval_op"] = eval_op

    # Print roughly 10 progress lines per epoch. The max(1, ...) guards the
    # modulo below against epochs with fewer than 10 steps, which would
    # otherwise raise ZeroDivisionError.
    log_interval = max(1, model.input.epoch_size // 10)

    for step in range(model.input.epoch_size):
        feed_dict = {}
        # Carry the recurrent state across batches: feed the previous batch's
        # final state in as this batch's initial state.
        # NOTE(review): each element of initial_state unpacks into three
        # values (t, c, h) although only c/h are fed -- confirm the model's
        # per-layer state really has three components (a plain LSTMStateTuple
        # would have only c and h).
        for i, (t, c, h) in enumerate(model.initial_state):
            feed_dict[c] = state[i].c
            feed_dict[h] = state[i].h

        vals = session.run(fetches, feed_dict)
        cost = vals["cost"]
        acc = vals["acc"]
        state = vals["final_state"]

        costs += cost
        accs += acc
        iters += model.input.num_steps

        if verbose and step % log_interval == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                        (step * 1.0 / model.input.epoch_size, np.exp(costs / iters),
                         iters * model.input.batch_size / (time.time() - start_time)))

    # Empty epoch (epoch_size == 0): avoid a division by zero; an untrained
    # model over no data has undefined perplexity.
    if iters == 0:
        return float("inf"), 0.0
    return np.exp(costs / iters), accs / iters

def _save_curve(values, ylabel, filename):
    """Plot one per-epoch metric curve and save it as a PNG at *filename*."""
    # Make sure the output directory exists; savefig does not create it.
    out_dir = os.path.dirname(filename)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    fig, ax = plt.subplots()
    ax.plot(range(0, len(values)), values, label='%s LSTM' % (FLAGS.mode))
    ax.set_xlabel('Epochs')
    ax.set_ylabel(ylabel)
    ax.set_title(ylabel)
    ax.legend(loc=4)
    fig.savefig(filename)

def main(_):
    """Entry point: load data, build train/valid/test graphs, train, evaluate.

    Reads FLAGS.data_path / FLAGS.save_path / FLAGS.mode (defined at module
    level). Trains for config.num_epochs epochs, reports validation
    perplexity/accuracy each epoch, evaluates on the test set once, then
    writes accuracy and perplexity curves as PNGs.
    """
    #-------------------------------------------------------------------
    # data and params
    #-------------------------------------------------------------------
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")
    raw_data = fetch_data(FLAGS.data_path)
    train_data, valid_data, test_data, vocab_size = raw_data

    config = get_config()
    config.vocab_size = vocab_size
    eval_config = get_config()
    # Size the eval batch so the whole test set is consumed in one step.
    eval_config.batch_size = len(test_data) // (eval_config.num_steps + 1)
    eval_config.vocab_size = vocab_size

    with tf.Graph().as_default():
        #---------------------------------------------------------------
        # build graph
        #---------------------------------------------------------------
        with tf.name_scope("Train"):
            # Prepare feeding data and info
            train_input = Text8Input(config=config, data=train_data, name="TrainInput")
            # Build Model; reuse=None so this construction creates the variables.
            with tf.variable_scope("Model", reuse=None):
                m = Text8Model(is_training=True, config=config,
                    input_=train_input, mode=FLAGS.mode)
            # Prepare summary
            tf.summary.scalar("Training Loss", m.cost)
            tf.summary.scalar("Training Acc", m.acc)

        with tf.name_scope("Valid"):
            # Prepare feeding data and info
            valid_input = Text8Input(config=config, data=valid_data, name="ValidInput")
            # Build Model; reuse=True shares the variables created in "Train".
            with tf.variable_scope("Model", reuse=True):
                mvalid = Text8Model(is_training=False, config=config,
                    input_=valid_input, mode=FLAGS.mode)
            # Prepare summary
            tf.summary.scalar("Validation Loss", mvalid.cost)
            tf.summary.scalar("Validation Acc", mvalid.acc)
        with tf.name_scope("Test"):
            # Prepare feeding data and info
            test_input = Text8Input(config=eval_config, data=test_data, name="TestInput")
            # Build Model (shared variables, eval-sized batches)
            with tf.variable_scope("Model", reuse=True):
                mtest = Text8Model(is_training=False, config=eval_config,
                    input_=test_input, mode=FLAGS.mode)

        #---------------------------------------------------------------
        # Start to train
        # --------------------------------------------------------------
        sv = tf.train.Supervisor(logdir=save_path)
        acc_list = []
        loss_list = []
        with sv.managed_session() as session:
            print("Method: ", FLAGS.mode, " LSTM")
            for i in range(config.num_epochs):
                print("\nEpoch: %d\nTraining: %s" % (i + 1, m.training))
                train_perplexity, _ = run_epoch(session, m,
                    eval_op=m.train_op, verbose=True)
                print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))

                print("Training: %s" % mvalid.training)
                valid_perplexity, valid_acc = run_epoch(session, mvalid)
                print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

                acc_list.append(valid_acc)
                loss_list.append(valid_perplexity)

            print("Training: %s" % mtest.training)
            test_perplexity, _ = run_epoch(session, mtest)
            print("Test Perplexity: %.3f" % test_perplexity)

            if FLAGS.save_path:
                print("Saving model to %s." % save_path)
                sv.saver.save(session, save_path, global_step=sv.global_step)

        # Persist the validation curves for later comparison across modes.
        _save_curve(acc_list, 'Accuracy',
                    '../../files/text8-acc-%s.png' % FLAGS.mode)
        _save_curve(loss_list, 'Perplexity',
                    '../../files/text8-perplexity-%s.png' % FLAGS.mode)

if __name__ == "__main__":
    # tf.app.run() parses the command-line flags defined above, then calls main().
    tf.app.run()