"""Train an image-classification network with TF-slim.

Builds a dataset input pipeline, a network from ``nets_factory``, and a
training loop with periodic validation, summaries, and checkpointing.
Hyper-parameters come from ``tools.args``.
"""

import os
import time
from datetime import datetime

import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from six.moves import xrange

from preprocessing import preprocessing_factory
from nets import nets_factory
from datasets import dataset_classification
import tools.args as args


def _config_learning_reate(global_step):
    """Build the learning-rate tensor selected by ``args.LEARNING_TYPE``.

    Args:
        global_step: integer tensor counting optimizer steps, used by the
            exponential-decay schedule.

    Returns:
        A scalar learning-rate tensor: constant for ``'fixed'``, a staircase
        exponential decay for ``'exponential'``.

    Raises:
        ValueError: if ``args.LEARNING_TYPE`` is not a supported type.
    """
    # Number of optimizer steps per decay period.
    decay_steps = int(
        args.NUM_TRAIN_SAMPLES * args.NUM_EPOCHS_PER_DECAY / args.BATCH_SIZE)

    lr_type = args.LEARNING_TYPE
    if lr_type == 'fixed':
        return tf.constant(args.LEARING_RATE, name='fixed_learning_rate')
    if lr_type == 'exponential':
        # staircase=True decays in discrete jumps every decay_steps batches
        # instead of continuously.
        return tf.train.exponential_decay(
            args.LEARING_RATE,
            global_step,
            decay_steps,
            args.lr_decay_factor,
            staircase=True,
            name='exponential_decay_learning_rate')
    raise ValueError('没有这个lr type')


def _config_optimizer(learning_rate):
    """Instantiate the optimizer named by ``args.OPTIMIZER``.

    Args:
        learning_rate: scalar tensor (or float) handed to the optimizer.

    Returns:
        A ``tf.train.Optimizer`` instance.

    Raises:
        ValueError: if ``args.OPTIMIZER`` names an unsupported optimizer.
    """
    opt_name = args.OPTIMIZER
    if opt_name == 'adam':
        return tf.train.AdamOptimizer(learning_rate)
    if opt_name == 'momentum':
        # Momentum coefficient fixed at the conventional 0.9.
        return tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
    if opt_name == 'rmsprop':
        return tf.train.RMSPropOptimizer(learning_rate)
    if opt_name == 'sgd':
        return tf.train.GradientDescentOptimizer(learning_rate)
    raise ValueError('没有这个opt 哦！！！')


def main():
    """Build the training graph and run the train/validate/checkpoint loop.

    Reads all hyper-parameters from ``tools.args``.  Trains for
    ``args.NUM_MAX_STEPS`` steps, logging every ``NUM_LOG_INTERVAL`` steps,
    snapshotting every ``SNAPSHOT`` steps, and validating every
    ``NUM_VAL_INTERVAL`` steps; the best-validating model (accuracy > 0.75)
    is saved separately.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        # Global step, incremented by apply_gradients below.
        global_step = slim.create_global_step()

        # Fed at run time.  It both switches the network's train/eval mode
        # and (see tf.cond below) selects which input queue feeds it.
        is_training = tf.placeholder(tf.bool, name='is_training')

        # Datasets for training and validation.
        train_dataset = dataset_classification.get_dataset(
            args.DATASETS_DIR,
            args.NUM_TRAIN_SAMPLES,
            args.NUM_CLASSES,
            args.LABEL_PATH)

        val_dataset = dataset_classification.get_dataset(
            args.VAL_DATASETS_DIR,
            args.NUM_VAL_SAMPLES,
            args.NUM_CLASSES,
            args.LABEL_PATH)

        # Select the network and its expected input resolution.
        network_fn = nets_factory.get_network_fn(
            args.MODEL_NAME,
            args.NUM_CLASSES,
            args.WEIGHT_DECAY,
            is_training=is_training)
        train_image_size = network_fn.default_image_size

        # Select the preprocessing function matching the model.
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(args.MODEL_NAME)

        def get_dataset_queue(dataset, is_training=False):
            """Return a prefetch queue yielding (images, one-hot labels) batches."""
            provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                num_readers=args.NUM_READERS,
                common_queue_capacity=20 * args.BATCH_SIZE,
                common_queue_min=10 * args.BATCH_SIZE)
            [image, label] = provider.get(['image', 'label'])

            image = image_preprocessing_fn(
                image, train_image_size, train_image_size,
                is_training=is_training)
            images, labels = tf.train.batch(
                [image, label],
                batch_size=args.BATCH_SIZE,
                num_threads=args.NUM_PREPROCESSIN_THREADS,
                capacity=5 * args.BATCH_SIZE)
            labels = slim.one_hot_encoding(labels, dataset.num_classes)
            batch_queue = slim.prefetch_queue.prefetch_queue(
                [images, labels], capacity=8)
            return batch_queue

        with tf.device('/device:CPU:0'):
            train_batch_queue = get_dataset_queue(train_dataset, is_training=True)
            val_batch_queue = get_dataset_queue(val_dataset)

        # BUG FIX: the original always fed the network from the training
        # queue, so the validation run computed loss/accuracy on a *train*
        # batch while comparing against an independently dequeued val-label
        # batch.  Route the batch source through the same is_training
        # placeholder the network uses; the dequeue ops are created inside
        # the tf.cond branch functions, so only the taken branch pops its
        # queue at each step.
        images, labels = tf.cond(
            is_training,
            train_batch_queue.dequeue,
            val_batch_queue.dequeue)

        ####################
        # Define the model #
        ####################
        logits, end_points = network_fn(images)

        # Define the loss function.  Inception-style nets expose an
        # auxiliary head that contributes a down-weighted loss term.
        if 'AuxLogits' in end_points:
            slim.losses.softmax_cross_entropy(
                end_points['AuxLogits'], labels,
                label_smoothing=0.1, weights=0.4,
                scope='aux_loss')
        slim.losses.softmax_cross_entropy(
            logits, labels, label_smoothing=0.1, weights=1.0)

        # Define the optimization (learning-rate schedule lives on the CPU).
        with tf.device('/device:CPU:0'):
            learning_rate = _config_learning_reate(global_step)
            optimizer = _config_optimizer(learning_rate)

            tf.summary.scalar('learning_rate', learning_rate)

        # Add summaries for losses (aux_loss and softmax_cross_entropy_loss).
        for loss in tf.get_collection(tf.GraphKeys.LOSSES, ''):
            tf.summary.scalar('losses/%s' % loss.op.name, loss)

        regularization_losses = tf.losses.get_regularization_losses()
        cross_entry_loss = tf.losses.get_losses()
        total_loss = tf.losses.get_total_loss()
        tf.summary.scalar('Losses/regularization_losses', tf.add_n(regularization_losses))
        tf.summary.scalar('Losses/cross_entry_loss', tf.add_n(cross_entry_loss))
        tf.summary.scalar('total_loss', total_loss)

        # Batch-norm moving-average updates must run with every train step.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='')
        variables_to_train = tf.trainable_variables()

        # Create gradient updates.
        grad = optimizer.compute_gradients(total_loss, var_list=variables_to_train)
        grad_updates = optimizer.apply_gradients(grad, global_step=global_step)

        update_ops.append(grad_updates)
        update_op = tf.group(*update_ops)

        # Create train op: fetching it forces the grouped updates first.
        with tf.control_dependencies([update_op]):
            train_op = tf.identity(total_loss, name='train_op')

        summaries = [summary for summary in tf.get_collection(tf.GraphKeys.SUMMARIES)
                     if summary.name in args.SUMMARY_TENSORS]

        # BUG FIX: accuracy previously compared train-batch logits against
        # val_labels from a different dequeue.  With the tf.cond routing
        # above, logits and labels always come from the same batch, so this
        # accuracy is valid in both train and val mode.
        accuracy = tf.reduce_mean(
            tf.cast(tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1)),
                    tf.float32))

        saver = tf.train.Saver(max_to_keep=5)
        saver_best = tf.train.Saver()

        config = tf.ConfigProto()
        with tf.Session(config=config) as sess:
            sess.run([tf.global_variables_initializer(),
                      tf.local_variables_initializer()])

            summary_op = tf.summary.merge(summaries, name='summary_op')
            summary_writer = tf.summary.FileWriter(args.TRAIN_DIR, sess.graph)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess, coord)

            print('\n----------- start to train -----------\n')
            best_mAp = -np.Inf
            # BUG FIX: xrange stops *before* NUM_MAX_STEPS, so the original
            # `step == args.NUM_MAX_STEPS` save/val triggers never fired.
            last_step = args.NUM_MAX_STEPS - 1
            # BUG FIX: the original val loop ran once per *sample*; each
            # sess.run consumes a whole batch, so iterate once per batch.
            num_val_batches = max(1, val_dataset.num_samples // args.BATCH_SIZE)
            for step in xrange(args.NUM_MAX_STEPS):
                start_time = time.time()
                _, loss_value, summary_str = sess.run(
                    [train_op, total_loss, summary_op],
                    feed_dict={is_training: True})
                duration = time.time() - start_time

                if step % args.NUM_LOG_INTERVAL == 0:
                    print('Step %d: train_loss = %.2f (%.3f sec)' % (step, loss_value, duration))
                    summary_writer.add_summary(summary_str, step)
                    summary_writer.flush()

                # Save the model
                if (step % args.SNAPSHOT == 0 and step > 0) or step == last_step:
                    print('-------save: {}-{}'.format('model.ckpt', step))
                    saver.save(sess, os.path.join(args.TRAIN_DIR, 'model.ckpt-%s' % step))

                # Val the model
                mean_acc = 0
                if (step % args.NUM_VAL_INTERVAL == 0 and step > 0) or step == last_step:
                    print('\n----------- start to val -----------\n')
                    val_losses = []
                    val_accs = []
                    # BUG FIX: the original dequeued a val batch it never fed
                    # anywhere; is_training=False now routes the val queue
                    # into the network directly.
                    for _ in xrange(num_val_batches):
                        val_loss, val_acc = sess.run(
                            [total_loss, accuracy],
                            feed_dict={is_training: False})
                        val_losses.append(val_loss)
                        val_accs.append(val_acc)
                    mean_acc = np.array(val_accs, dtype=np.float32).mean()
                    mean_loss = np.array(val_losses, dtype=np.float32).mean()
                    print("%s: Step [%d]  val Loss : %f, val accuracy :  %g"
                          % (datetime.now(), step, mean_loss, mean_acc))

                # Save the best model (threshold skips early noisy steps).
                if mean_acc > best_mAp and mean_acc > 0.75:
                    best_mAp = mean_acc
                    model_bester = os.path.join(
                        args.TRAIN_DIR, 'model.ckpt-%s-%s' % (mean_acc, step))
                    saver_best.save(sess, model_bester)

            coord.request_stop()
            coord.join(threads)


# Script entry point: run training when executed directly.
if __name__ == '__main__':
    main()