"""

"""

import os
import time
from datetime import datetime

import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from six.moves import xrange

from preprocessing import preprocessing_factory
from nets import nets_factory
from datasets import dataset_classification


# Command-line flags.  Defined uniformly through tf.compat.v1.app.flags (the
# original mixed tf.app.flags and tf.compat.v1.app.flags, which alias the same
# module in TF 1.x); help strings fixed where they were copy-pasted wrong.
tf.compat.v1.app.flags.DEFINE_string(
    'train_log_dir',
    '../my-data/t/5/',
    'Directory where checkpoints and event logs are written to.')
tf.compat.v1.app.flags.DEFINE_string(
    'dataset_dir',
    '../my-data/',
    'The directory where the dataset files are stored.')
tf.compat.v1.app.flags.DEFINE_string(
    'labels_file_path', '../labels.txt', 'Label names file path.')
tf.compat.v1.app.flags.DEFINE_string(
    'model_name', 'inception_v4', 'The name of the model to train.')
tf.compat.v1.app.flags.DEFINE_integer(
    'num_classes', 3, 'Number of classes.')
tf.compat.v1.app.flags.DEFINE_integer(
    'num_samples', 40000, 'Number of training samples.')
tf.compat.v1.app.flags.DEFINE_integer(
    'batch_size', 8, 'Number of samples per batch.')
tf.compat.v1.app.flags.DEFINE_integer(
    'num_readers', 8, 'Number of parallel dataset readers.')
tf.compat.v1.app.flags.DEFINE_integer(
    'max_steps', 200000, 'Number of training steps.')
tf.compat.v1.app.flags.DEFINE_float(
    'weight_decay', 0.00004, 'The weight decay on the model weights.')
tf.compat.v1.app.flags.DEFINE_integer(
    'num_preprocessing_threads', 4,
    'The number of threads used to create the batches.')
tf.compat.v1.app.flags.DEFINE_float(
    'learning_rate', 0.0001, 'Initial learning rate.')


FLAGS = tf.compat.v1.app.flags.FLAGS


def main(_):
    """Build the classification graph, then train with periodic validation.

    Flow:
      1. Create train/val dataset prefetch queues (slim queue runners, CPU).
      2. Build the network once over placeholders shared by the train and
         val paths, with an ``is_training`` placeholder so batch-norm /
         dropout style layers can be switched per ``sess.run``.
      3. Optimize the total (cross-entropy + regularization) loss with
         momentum SGD at a fixed learning rate.
      4. Every 1000 steps (and on the final step): write a checkpoint, run
         the validation set, and save a dedicated "best" checkpoint whenever
         validation accuracy reaches a new high above 0.75.
    """
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        # Global step, incremented by the gradient-update op below.
        global_step = slim.create_global_step()

        # Fed True during training and False during validation.
        is_training = tf.placeholder(tf.bool, name='is_training')

        # Create the datasets.
        train_dataset = dataset_classification.get_dataset(
            FLAGS.dataset_dir + '/train/',
            FLAGS.num_samples,
            FLAGS.num_classes,
            FLAGS.labels_file_path)
        # NOTE(review): the validation split size (500) is hard-coded;
        # confirm it matches the actual number of samples under /val/.
        val_dataset = dataset_classification.get_dataset(
            FLAGS.dataset_dir + '/val/',
            500,
            FLAGS.num_classes,
            FLAGS.labels_file_path)

        # Select the network and its expected input resolution.
        network_fn = nets_factory.get_network_fn(
            FLAGS.model_name,
            FLAGS.num_classes,
            FLAGS.weight_decay,
            is_training=is_training)
        train_image_size = network_fn.default_image_size

        # Select the preprocessing function (training-mode augmentation is
        # applied to both queues, as in the original).
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(
            FLAGS.model_name,
            is_training=True)

        def get_dataset_queue(dataset):
            """Return a prefetch queue yielding (images, one-hot labels) batches."""
            provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                num_readers=FLAGS.num_readers,
                common_queue_capacity=20 * FLAGS.batch_size,
                common_queue_min=10 * FLAGS.batch_size)
            [image, label] = provider.get(['image', 'label'])

            image = image_preprocessing_fn(image, train_image_size, train_image_size)
            images, labels = tf.train.batch(
                [image, label],
                batch_size=FLAGS.batch_size,
                num_threads=FLAGS.num_preprocessing_threads,
                capacity=5 * FLAGS.batch_size)
            # one_hot_encoding yields float32 labels of shape
            # [batch_size, num_classes].
            labels = slim.one_hot_encoding(labels, dataset.num_classes)
            return slim.prefetch_queue.prefetch_queue(
                [images, labels], capacity=8)

        # Keep the input pipeline on the CPU.
        with tf.device('/device:CPU:0'):
            train_batch_queue = get_dataset_queue(train_dataset)
            val_batch_queue = get_dataset_queue(val_dataset)

        ####################
        # Define the model #
        ####################

        # Placeholders shared by the training and validation paths.
        input_pl = tf.placeholder(
            tf.float32, [None, train_image_size, train_image_size, 3],
            name='input')
        # BUGFIX: this placeholder was tf.int8 with a hard-coded width of 3;
        # the pipeline above feeds float32 one-hot labels of width
        # num_classes, so any --num_classes other than 3 broke here.
        label_pl = tf.placeholder(
            tf.float32, [None, FLAGS.num_classes], name='label')

        logits, end_points = network_fn(input_pl)

        # Cross-entropy losses; the auxiliary head is included when the
        # network provides one (Inception-style architectures).
        if 'AuxLogits' in end_points:
            slim.losses.softmax_cross_entropy(
                end_points['AuxLogits'], label_pl,
                label_smoothing=0., weights=0.4,
                scope='aux_loss')
        slim.losses.softmax_cross_entropy(
            logits, label_pl, label_smoothing=0., weights=1.0)

        # Optimizer with a fixed learning rate.
        # TODO: add a learning-rate schedule / optimizer factory.
        with tf.device('/device:CPU:0'):
            learning_rate = FLAGS.learning_rate
            optimizer = tf.train.MomentumOptimizer(
                learning_rate, momentum=.9, name='Momentum')
            tf.summary.scalar('learning_rate', learning_rate)

        # Cross-entropy plus regularization losses.
        total_loss = tf.losses.get_total_loss()
        tf.summary.scalar('total_loss', total_loss)

        # Batch-norm moving-average updates and similar collection ops.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='')

        variables_to_train = tf.trainable_variables()

        # Create gradient updates.
        grads = optimizer.compute_gradients(total_loss, var_list=variables_to_train)
        grad_updates = optimizer.apply_gradients(grads, global_step=global_step)

        update_ops.append(grad_updates)
        update_op = tf.group(*update_ops)

        images, labels = train_batch_queue.dequeue()
        val_images, val_labels = val_batch_queue.dequeue()

        # train_op returns the loss value once the gradient update has run.
        with tf.control_dependencies([update_op]):
            train_op = tf.identity(total_loss, name='train_op')

        summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))

        accuracy = tf.reduce_mean(tf.cast(
            tf.equal(tf.argmax(logits, 1), tf.argmax(label_pl, 1)),
            tf.float32))

        saver = tf.train.Saver(max_to_keep=5)
        saver_best = tf.train.Saver()

        with tf.Session() as sess:
            sess.run([tf.global_variables_initializer(),
                      tf.local_variables_initializer()])

            summary_op = tf.summary.merge(list(summaries), name='summary_op')
            summary_writer = tf.summary.FileWriter(FLAGS.train_log_dir, sess.graph)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess, coord)

            print('\n----------- start to train -----------\n')
            best_mAp = -np.Inf
            # BUGFIX: the original tested `step == FLAGS.max_steps`, which is
            # never true because xrange stops at max_steps - 1, so the final
            # step was never checkpointed or validated.
            last_step = FLAGS.max_steps - 1
            # BUGFIX: one validation pass needs num_samples / batch_size
            # *batches*; the original looped num_samples times over batches,
            # evaluating the whole split ~batch_size times per pass.
            val_batches = max(1, val_dataset.num_samples // FLAGS.batch_size)

            for step in xrange(FLAGS.max_steps):
                start_time = time.time()
                feed_images, feed_labels = sess.run([images, labels])

                _, loss_value, summary_str = sess.run(
                    [train_op, total_loss, summary_op],
                    feed_dict={input_pl: feed_images,
                               label_pl: feed_labels,
                               is_training: True})
                duration = time.time() - start_time
                if step % 50 == 0:
                    print('Step %d: train_loss = %.2f (%.3f sec)' % (step, loss_value, duration))
                    summary_writer.add_summary(summary_str, step)
                    summary_writer.flush()

                # Periodic checkpoint (and one on the final step).
                if (step % 1000 == 0 and step > 0) or step == last_step:
                    print('-------save: {}-{}'.format('model.ckpt', step))
                    saver.save(sess, FLAGS.train_log_dir + '/model.ckpt-%s' % step)

                # Periodic validation; the best-model save only makes sense
                # right after a validation pass, so it lives in this branch.
                if (step % 1000 == 0 and step > 0) or step == last_step:
                    print('\n----------- start to val -----------\n')
                    val_losses = []
                    val_accs = []
                    for _ in xrange(val_batches):
                        val_feed_images, val_feed_labels = sess.run([val_images, val_labels])
                        val_loss, val_acc = sess.run(
                            [total_loss, accuracy],
                            feed_dict={input_pl: val_feed_images,
                                       label_pl: val_feed_labels,
                                       is_training: False})
                        val_losses.append(val_loss)
                        val_accs.append(val_acc)
                    mean_acc = np.array(val_accs, dtype=np.float32).mean()
                    mean_loss = np.array(val_losses, dtype=np.float32).mean()
                    print("%s: Step [%d]  val Loss : %f, val accuracy :  %g"
                          % (datetime.now(), step, mean_loss, mean_acc))

                    # Save a dedicated checkpoint whenever validation
                    # accuracy reaches a new high above the 0.75 floor.
                    if mean_acc > best_mAp and mean_acc > 0.75:
                        best_mAp = mean_acc
                        model_bester = FLAGS.train_log_dir + '/model.ckpt-%s-%s' % (mean_acc, step)
                        saver_best.save(sess, model_bester)

            coord.request_stop()
            coord.join(threads)




# Entry point: tf.app.run() parses the flags defined above, then calls main().
if __name__ == '__main__':
    tf.app.run()