#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: liang kang
@contact: gangkanli1219@gmail.com
@time: 12/28/17 7:51 PM
@desc: train the network
"""
import argparse
import os

import tensorflow as tf

from crowdcounting.mtl.crowd_count import model_fn
from crowdcounting.mtl.data_loader import input_fn


def parse_args(argv=None):
    """Build and run the command-line argument parser for training.

    Parameters
    ----------
    argv : list of str, optional
        Argument strings to parse. Defaults to ``None``, in which case
        ``sys.argv[1:]`` is used (the original behavior).

    Returns
    -------
    argparse.Namespace
        Parsed options (data_dir, model_dir, batch_size, ...).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data-dir', type=str, default='', dest='data_dir',
        help='The directory where the crowd-counting input data is stored.')
    parser.add_argument(
        '--model-dir', type=str, default='', dest='model_dir',
        help='The directory where the model will be stored.')
    parser.add_argument(
        '--postfix', help='postfix to file', default='mtl', type=str)
    parser.add_argument(
        '--prefix', help='prefix to file', default='crowdCounting', type=str)
    parser.add_argument(
        '--train-epochs', type=int, default=100,
        help='The number of epochs to use for training.')
    parser.add_argument(
        '--class-num', type=int, default=10, dest='class_num',
        help='The number of class to use for training.')
    parser.add_argument(
        '--train-number', type=int, default=42591,
        help='The number of training dataset instance.')
    parser.add_argument(
        '--epochs-per-eval', type=int, default=1, dest='epochs_per_eval',
        help='The number of training epochs to run between evaluations.')
    parser.add_argument(
        '--batch-size', type=int, default=32, dest='batch_size',
        help='Batch size for training and evaluation.')
    # Per-channel means to subtract from input images; all-zero means
    # "no mean subtraction" (see main()). The values in the help strings
    # are the dataset's measured means, kept for reference.
    parser.add_argument(
        '--r-mean', type=float, default=0.0, dest='R',
        help='the mean value the color channel r. 80.197326')
    parser.add_argument(
        '--g-mean', type=float, default=0.0, dest='G',
        help='the mean value the color channel g. 75.26667')
    parser.add_argument(
        '--b-mean', type=float, default=0.0, dest='B',
        help='the mean value the color channel b. 72.822747')
    parser.add_argument(
        '--data-format', type=str, default='channels_last', dest='data_format',
        choices=['channels_first', 'channels_last'],
        help='A flag to override the data format used in the model. channels_first '
             'provides a performance boost on GPU but is not always compatible '
             'with CPU. If left unspecified, the data format will be chosen '
             'automatically based on whether TensorFlow was built for CPU or GPU.')
    return parser.parse_args(argv)


def main(_):
    """Run alternating train/evaluate cycles for the crowd-counting model.

    Parameters
    ----------
    _ : list
        Unused argument supplied by ``tf.app.run``.
    """
    # Using the Winograd non-fused algorithms provides a small performance boost.
    os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'

    # Set up a RunConfig to only save checkpoints once per training cycle.
    config = tf.estimator.RunConfig(log_step_count_steps=10,
                                    keep_checkpoint_max=10,
                                    save_summary_steps=100)
    estimator = tf.estimator.Estimator(
        model_fn=model_fn, model_dir=FLAGS.model_dir, config=config,
        params={
            'train_number': FLAGS.train_number,
            'batch_size': FLAGS.batch_size,
            'bn': False,
            'class_num': FLAGS.class_num
        })

    # All-zero channel means signal "no mean subtraction" to input_fn.
    channel_means = [FLAGS.R, FLAGS.G, FLAGS.B]
    means = None if channel_means == [0.0, 0.0, 0.0] else channel_means

    cycles = FLAGS.train_epochs // FLAGS.epochs_per_eval
    for _cycle in range(cycles):
        # A fresh pair of hooks is built for every cycle, matching the
        # original per-iteration construction.
        logging_hook = tf.train.LoggingTensorHook(
            tensors={
                'learning_rate': 'learning_rate',
                'cross_entropy': 'cross_entropy',
                'train_accuracy': 'train_accuracy',
                'mse_loss': 'mse_loss'
            },
            every_n_iter=10)
        saving_hook = tf.train.CheckpointSaverHook(
            checkpoint_dir=FLAGS.model_dir, save_steps=100)

        print('Starting a training cycle.')
        estimator.train(
            input_fn=lambda: input_fn(
                True, FLAGS.data_dir, FLAGS.batch_size, FLAGS.epochs_per_eval,
                class_num=FLAGS.class_num, prefix=FLAGS.prefix,
                postfix=FLAGS.postfix, means=means),
            hooks=[logging_hook, saving_hook])

        print('Starting to evaluate.')
        results = estimator.evaluate(
            input_fn=lambda: input_fn(
                False, FLAGS.data_dir, FLAGS.batch_size,
                class_num=FLAGS.class_num, prefix=FLAGS.prefix,
                postfix=FLAGS.postfix, means=means))
        print(results)


if __name__ == '__main__':
    # Raise logging to INFO so estimator progress (loss, steps/sec) is shown.
    tf.logging.set_verbosity(tf.logging.INFO)
    # FLAGS is a module-level global read by main().
    FLAGS = parse_args()
    # tf.app.run() dispatches to main() (TF1-style entry point).
    tf.app.run()
