from __future__ import division
import tensorflow as tf
import pprint
import random
import numpy as np
from SfMLearner import SfMLearner
from data_loader import DataLoader
import os
import math
import  time

# Command-line flags (TF1 tf.app.flags). Defaults and flag names are
# unchanged; only the help strings have been corrected — several were
# copy-pasted from "seq_length" or contained typos.
flags = tf.app.flags
flags.DEFINE_string("dataset_dir", "", "Dataset directory")
flags.DEFINE_string("checkpoint_dir", "./checkpoints/", "Directory name to save the checkpoints")
flags.DEFINE_string("init_checkpoint_file", None, "Specific checkpoint file to initialize from")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate for Adam")
flags.DEFINE_float("beta1", 0.9, "Momentum term of adam")
flags.DEFINE_float("smooth_weight", 0.6, "Weight for smoothness")
flags.DEFINE_float("explain_reg_weight", 0, "Weight for explainability regularization")
flags.DEFINE_integer("batch_size", 16, "The size of a sample batch")
flags.DEFINE_integer("img_height", 128, "Image height")
flags.DEFINE_integer("img_width", 416, "Image width")
flags.DEFINE_integer("seq_length", 5, "Sequence length for each example")
flags.DEFINE_integer("num_source", 4, "Number of source images per example (seq_length - 1)")
flags.DEFINE_integer("num_scales", 4, "Number of scales used for the multi-scale loss")
flags.DEFINE_integer("max_steps", 160000, "Maximum number of training iterations")
flags.DEFINE_integer("epoch", 60, "Maximum epoch of training iterations")
flags.DEFINE_integer("summary_freq", 50, "Logging every summary_freq iterations")
flags.DEFINE_integer("save_latest_freq", 5000, \
                     "Save the latest model every save_latest_freq iterations (overwrites the previous latest model)")
flags.DEFINE_boolean("continue_train", False, "Continue training from previous checkpoint")
flags.DEFINE_string("filenames_file", '/media/wuqi/ubuntu/dataset/kitti/resulting_odometry_data/train.txt', "path to the filenames text file'")
opt = flags.FLAGS


def main(_):
    """Train SfMLearner on the configured dataset.

    Builds the training graph, restores a checkpoint when requested,
    then alternates training steps with periodic summary logging,
    validation evaluation, and checkpoint saving.
    """
    # Fix all RNG sources so runs are reproducible.
    seed = 8964
    tf.set_random_seed(seed)
    np.random.seed(seed)
    random.seed(seed)

    # Dump the effective flag values for the run log.
    pp = pprint.PrettyPrinter()
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(opt.checkpoint_dir):
        os.makedirs(opt.checkpoint_dir)

    loader = DataLoader(opt.dataset_dir,
                        opt.filenames_file,
                        opt.batch_size,
                        opt.img_height,
                        opt.img_width,
                        opt.num_source,
                        opt.num_scales)
    image_concat_train, intrinsics_train, rel_pose_train = loader.load_train_batch()
    image_concat_val, intrinsics_val, rel_pose_val = loader.load_val_batch()

    sfm = SfMLearner(opt=opt)
    sfm.build_train_graph()
    sfm.collect_summaries()

    # Total number of trainable scalars, for the startup report below.
    with tf.name_scope("parameter_count"):
        parameter_count = tf.reduce_sum([tf.reduce_prod(tf.shape(v)) \
                                         for v in tf.trainable_variables()])
    sfm.saver = tf.train.Saver([var for var in tf.model_variables()] + \
                                [sfm.global_step],
                                max_to_keep=10)

    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.7

    with tf.Session(config=config) as sess:
        summary_op = tf.summary.merge_all()
        # Separate writers so train/val curves show as two runs in TensorBoard.
        summary_writer1 = tf.summary.FileWriter(opt.checkpoint_dir + '/' +'train', sess.graph)
        summary_writer2 = tf.summary.FileWriter(opt.checkpoint_dir + '/' + 'val')

        print('Trainable variables: ')
        for var in tf.trainable_variables():
            print(var.name)
        print("parameter_count =", sess.run(parameter_count))

        # INIT
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        coordinator = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coordinator)

        if opt.continue_train:
            if opt.init_checkpoint_file is None:
                checkpoint = tf.train.latest_checkpoint(opt.checkpoint_dir)
            else:
                checkpoint = opt.init_checkpoint_file
            print("Resume training from previous checkpoint: %s" % checkpoint)
            sfm.saver.restore(sess, checkpoint)

        # Derive the true step budget from the dataset size; overrides the
        # max_steps flag default.
        opt.max_steps = loader.steps_per_epoch * opt.epoch
        print('step_max:', opt.max_steps)
        print(loader.steps_per_epoch_val)
        start_time = time.time()

        try:
            # Inclusive upper bound: step == max_steps must run so the final
            # epoch's `step % steps_per_epoch == 0` checkpoint is saved.
            for step in range(1, opt.max_steps + 1):

                image_concat, intrinsics, rel_pose = sess.run([image_concat_train, intrinsics_train, rel_pose_train])
                fetches = {
                    "train": sfm.train_op,
                    "global_step": sfm.global_step,
                    "incr_global_step": sfm.incr_global_step,
                }

                if step % opt.summary_freq == 0:
                    fetches["loss"] = sfm.total_loss
                    fetches["summary"] = summary_op

                # NOTE: "intrincis" is the attribute name declared by
                # SfMLearner (typo preserved — it is the model's interface).
                results = sess.run(fetches,
                                   feed_dict={sfm.image_concat: image_concat,
                                              sfm.intrincis: intrinsics,
                                              sfm.rel_pose: rel_pose})
                gs = results["global_step"]

                if step % opt.summary_freq == 0:
                    summary_writer1.add_summary(results["summary"], gs)
                    time_sofar = (time.time() - start_time) / 3600
                    training_time_left = (opt.max_steps / step - 1.0) * time_sofar
                    train_epoch = math.ceil(gs / loader.steps_per_epoch)
                    train_step = gs - (train_epoch - 1) * loader.steps_per_epoch
                    print("Epoch: [%2d] [%5d/%5d] time: %4.4f/it time_left: %4.2fh loss: %.5f" \
                          % (train_epoch, train_step, loader.steps_per_epoch, \
                             (time.time() - start_time) / step,
                             training_time_left,
                             results["loss"]))

                # Validation pass on the same cadence as training summaries.
                if step % opt.summary_freq == 0:
                    image_concat, intrinsics, rel_pose = sess.run([image_concat_val, intrinsics_val, rel_pose_val])
                    fetches = {
                        "val_loss": sfm.total_loss,
                        "summary": summary_op
                    }
                    results = sess.run(fetches,
                                       feed_dict={sfm.image_concat: image_concat,
                                                  sfm.intrincis: intrinsics,
                                                  sfm.rel_pose: rel_pose}
                                       )
                    summary_writer2.add_summary(results["summary"], gs)
                    train_epoch = math.ceil(gs / loader.steps_per_epoch)
                    train_step = gs - (train_epoch - 1) * loader.steps_per_epoch
                    print("Epoch: [%2d] [%5d/%5d]  val_loss: %.5f" \
                          % (train_epoch, train_step, loader.steps_per_epoch, results["val_loss"]))

                if step % opt.save_latest_freq == 0:
                    sfm.save(sess, opt.checkpoint_dir, 'latest')

                if step % loader.steps_per_epoch == 0:
                    sfm.save(sess, opt.checkpoint_dir, gs)
        finally:
            # Shut down input-queue threads and flush summaries even if
            # training aborts; otherwise the process can hang on exit.
            coordinator.request_stop()
            coordinator.join(threads)
            summary_writer1.close()
            summary_writer2.close()




if __name__ == '__main__':
    # tf.app.run parses flags, then invokes the given entry point.
    tf.app.run(main=main)
