# -*- coding: utf-8 -*-
#
#    Powered by AIgames of machine intelligence lab
#    Author: Justin Xu
# =================================================================
"""
    Compared to go_train_sync, this module uses tf.train.Supervisor to train
    the model.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import os.path
import sys
import re
import time
import argparse

from datetime import datetime
import numpy as np
import psigma_go
import go_input
import test_go

FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('train_dir', '/tmp/psigma_go_train',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
                            """Number of batches to run.""")
tf.app.flags.DEFINE_integer('num_gpus',2,
                            """How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            """Whether to log device placement.""")
# NOTE(review): run_training() reads FLAGS.batch_size (L147/L251 of the
# original), but the definition below is commented out — presumably the flag
# is defined inside psigma_go (re-defining it here would raise a duplicate
# flag error). Confirm; otherwise training crashes on the first step.
# tf.app.flags.DEFINE_integer('batch_size', 4,
#                             """How examples for computing one bat""")

# Go board geometry: 19x19 intersections, one output class per intersection.
IMAGE_SIZE = 19

NUM_CLASSES =361
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 2000000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000

# TFRecord files consumed by inputs().
TRAIN_FILE = 'train.tfrecords'
VALIDATION_FILE = 'validation.tfrecords'

def read_and_decode(filename_queue):
  """Read and parse one serialized Example from a TFRecord filename queue.

  Args:
    filename_queue: A queue of TFRecord filename strings, e.g. produced by
      tf.train.string_input_producer().

  Returns:
    A tuple (image, label):
    * image: float32 tensor of shape [IMAGE_SIZE, IMAGE_SIZE, 2] — the two
      board feature planes decoded from the raw bytes of the record.
    * label: int32 scalar tensor — the move label stored in the record.
  """
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example,
      # Defaults are not specified since both keys are required.
      features={
          'image_raw': tf.FixedLenFeature([], tf.string),
          'label': tf.FixedLenFeature([], tf.int64),
      })

  # Decode the raw byte string into a flat uint8 vector of IMAGE_PIXELS
  # elements, then reshape into [height, width, planes]. Uses the module
  # constant IMAGE_SIZE instead of the hard-coded 19s of the original.
  image = tf.decode_raw(features['image_raw'], tf.uint8)
  image.set_shape([psigma_go.IMAGE_PIXELS])
  image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, 2])

  # Cast to float32 for the model. (No mean/range normalization is applied
  # here, despite the original comment mentioning [-0.5, 0.5].)
  image = tf.cast(image, tf.float32)
  # Convert the label from the stored int64 to an int32 scalar.
  label = tf.cast(features['label'], tf.int32)

  return image, label

def inputs(train, batch_size, num_epochs):
  """Reads input data num_epochs times.

  Args:
    train: Selects between the training (True) and validation (False) data.
    batch_size: Number of examples per returned batch.
    num_epochs: Number of times to read the input data, or 0/None to
       train forever.

  Returns:
    A tuple (images, labels), where:
    * images is a float32 tensor with shape
      [batch_size, IMAGE_SIZE, IMAGE_SIZE, 2].
    * labels is an int32 tensor with shape [batch_size] with the true label,
      a number in the range [0, NUM_CLASSES).
    Note that a tf.train.QueueRunner is added to the graph, which
    must be run using e.g. tf.train.start_queue_runners().
  """
  if not num_epochs: num_epochs = None
  filename = os.path.join('./',
                          TRAIN_FILE if train else VALIDATION_FILE)

  with tf.name_scope('input'):
    filename_queue = tf.train.string_input_producer(
        [filename], num_epochs=num_epochs)

    # Even when reading in multiple threads, share the filename queue.
    image, label = read_and_decode(filename_queue)
    image.set_shape([IMAGE_SIZE, IMAGE_SIZE, 2])
    # BUG FIX: `label` is a scalar (parsed with FixedLenFeature([])), so the
    # original label.set_shape([1]) would raise an incompatible-shape error
    # at graph-construction time. Assert the scalar shape instead.
    label.set_shape([])

    # Shuffle the examples and collect them into batch_size batches.
    # (Internally uses a RandomShuffleQueue; min_after_dequeue ensures a
    # minimum amount of shuffling of examples.)
    images, sparse_labels = tf.train.shuffle_batch(
        [image, label], batch_size=batch_size, num_threads=24,
        capacity=1000 + 2 * batch_size,
        min_after_dequeue=1000)

    # BUG FIX: tf.summary.images does not exist — the API is
    # tf.summary.image; the original raised AttributeError here.
    tf.summary.image('images', images)
    return images, tf.reshape(sparse_labels, [batch_size])

def run_training():
  """Train the psigma_go model for FLAGS.max_steps steps on FLAGS.num_gpus GPUs.

  Builds one model "tower" per GPU (variables shared across towers),
  averages the per-tower gradients on the CPU, and runs the training loop
  inside a tf.train.Supervisor managed session, which restores from and
  checkpoints to FLAGS.train_dir.
  """
  with tf.Graph().as_default(), tf.device('/cpu:0'):
    # Counts the number of train() calls. This equals the number of batches
    # processed * FLAGS.num_gpus.
    global_step = tf.get_variable(
        'global_step', [],
        initializer=tf.constant_initializer(0), trainable=False)

    # Decay the learning rate exponentially based on the number of steps.
    decay_steps = 300000
    lr = tf.train.exponential_decay(psigma_go.INITIAL_LEARNING_RATE,
                                    global_step,
                                    decay_steps,
                                    psigma_go.LEARNING_RATE_DECAY_FACTOR,
                                    staircase=True)

    # Create an optimizer that performs gradient descent.
    opt = tf.train.GradientDescentOptimizer(lr)

    # Build one tower per GPU. tower_loss() constructs the entire model;
    # reuse_variables() makes every tower after the first share variables.
    tower_grads = []
    tower_acc = []
    with tf.variable_scope(tf.get_variable_scope()):
        for i in range(FLAGS.num_gpus):  # range, not py2-only xrange
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('%s_%d' % (psigma_go.TOWER_NAME, i)) as scope:
                    loss, acc = tower_loss(scope)
                    tf.get_variable_scope().reuse_variables()

                    # Retain the summaries from the final tower only.
                    summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)

                    # Gradients for this tower's batch.
                    grads = opt.compute_gradients(loss)
                    tower_grads.append(grads)
                    tower_acc.append(acc)

    # NOTE: `loss` below is the LAST tower's loss (loop variable escapes the
    # loop); `accuracy` is the mean accuracy over all towers.
    grads = average_gradients(tower_grads)
    accuracy = tf.reduce_mean(tower_acc)

    # Track the learning rate and per-variable gradient histograms.
    summaries.append(tf.summary.scalar('learning_rate', lr))
    for grad, var in grads:
        if grad is not None:
            summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))

    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        summaries.append(tf.summary.histogram(var.op.name, var))

    train_op = tf.group(apply_gradient_op)

    # Saver over all variables, merged summary op, and explicit init op.
    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge(summaries)
    init = tf.global_variables_initializer()

    # allow_soft_placement lets ops without a GPU kernel fall back to CPU.
    # (The original built a second, unused ConfigProto; removed.)
    sv = tf.train.Supervisor(logdir=FLAGS.train_dir)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1)
    conf = tf.ConfigProto(allow_soft_placement=True,
                          log_device_placement=False,
                          gpu_options=gpu_options)
    conf.gpu_options.allow_growth = True
    with sv.managed_session(config=conf) as sess:
        # The Supervisor already initializes variables; running `init` again
        # is harmless and kept from the original for robustness.
        sess.run(init)
        tf.train.start_queue_runners(sess=sess)

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

        t1 = time.time()
        for step in range(FLAGS.max_steps):
            start_time = time.time()
            _, loss_value, accuracy_value = sess.run([train_op, loss, accuracy])
            duration = time.time() - start_time
            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            if step % 100 == 0:
                num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = duration

                format_str = ('%s: step %d, loss = %.2f, acc = %.4f,(%.1f examples/sec; %.3f '
                                'sec/batch)')
                print(format_str % (datetime.now(), step, loss_value, accuracy_value,
                                    examples_per_sec, sec_per_batch))

            if step % 1000 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)

            # Save the model checkpoint periodically.
            if step % 10000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

        # BUG FIX: the original `"..." % time.time()-t1` applied `%` before
        # `-` (str % float, then subtract), raising TypeError at runtime.
        print("Total time is %d" % (time.time() - t1))

def tower_loss(scope):
    """Build the loss and accuracy graph for a single model tower.

    Args:
        scope: Unique name-scope prefix identifying this tower; used to
            select only this tower's entries from the 'losses' collection.

    Returns:
        A tuple (total_loss, acc): the sum of this tower's collected loss
        terms, and the tower's evaluation-accuracy op.
    """
    images, labels = psigma_go.distorted_inputs()

    logits = psigma_go.inference(images)

    # psigma_go.loss() registers its loss terms in the 'losses' collection;
    # its return value itself is not needed here.
    psigma_go.loss(logits, labels)

    acc = psigma_go.evaluation(logits, labels)

    # Collect every loss term attached under this tower's scope and sum them.
    tower_losses = tf.get_collection('losses', scope)
    total_loss = tf.add_n(tower_losses, name='total_loss')

    # One scalar summary per loss term. Stripping the 'tower_N/' prefix from
    # the op name makes the same loss share a single plot across all towers.
    for loss_tensor in tower_losses + [total_loss]:
        summary_tag = re.sub('%s_[0-9]*/' % psigma_go.TOWER_NAME, '',
                             loss_tensor.op.name)
        tf.summary.scalar(summary_tag, loss_tensor)

    return total_loss, acc

def calculate_accuracy():
    """Build an accuracy op over a fresh psigma_go input pipeline.

    Returns:
        A scalar float32 tensor: the fraction of examples in the batch whose
        predicted class (argmax of logits) equals the label's argmax.

    NOTE(review): tf.argmax(labels, 1) requires `labels` to be rank-2
    (one-hot), but the training path elsewhere in this file uses sparse int
    labels of shape [batch_size] — confirm what psigma_go.inputs() returns
    before relying on this function.
    """
    images,labels = psigma_go.inputs()

    logits = psigma_go.inference(images)

    # Per-example correctness: predicted class vs. (assumed one-hot) label.
    correct_prediction = tf.equal(tf.argmax(labels, 1), tf.argmax(logits, 1))

    # Mean over the batch gives the accuracy fraction.
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    return accuracy

def average_gradients(tower_grads):
    """Average each variable's gradients across all towers.

    Args:
        tower_grads: List with one entry per tower; each entry is the list of
            (gradient, variable) tuples returned by
            Optimizer.compute_gradients() for that tower.

    Returns:
        A single list of (gradient, variable) tuples where each gradient is
        the mean of that variable's gradients over all towers.
    """
    averaged = []
    # zip(*...) regroups the tuples so each `per_var` holds the (grad, var)
    # pairs that refer to the SAME variable, one pair per tower.
    for per_var in zip(*tower_grads):
        # Stack the per-tower gradients along a new leading "tower" axis,
        # then reduce that axis to its mean.
        stacked = tf.concat(axis=0,
                            values=[tf.expand_dims(g, 0) for g, _ in per_var])
        mean_grad = tf.reduce_mean(stacked, 0)

        # Variables are shared across towers, so the first tower's variable
        # reference stands in for all of them.
        averaged.append((mean_grad, per_var[0][1]))
    return averaged

def main(_):
  """Entry point invoked by tf.app.run(); starts the training loop."""
  run_training()

# tf.app.run() parses command-line flags into FLAGS, then calls main().
if __name__ == '__main__':
    tf.app.run()
