# -*- coding: utf-8 -*-
#
#    Powered by AIgames of machine intelligence lab
#    Author: Justin Xu
# ==============================================================================

"""Builds the PSIGAM network for go.
Implements the inference/loss/training pattern for model building.
1. inference() - Builds the model as far as is required for running the network
forward to make predictions.
2. loss() - Adds to the inference model the layers required to generate loss.
3. training() - Adds to the loss model the Ops required to generate and
apply gradients.
This file is used by the various "fully_connected_*.py" files and not meant to
be run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import re
import tensorflow as tf
import go_input

FLAGS = tf.app.flags.FLAGS

# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 8,
                            """Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', './',
                           """Path to the go data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
                            """Train the model using fp16.""")

# The go dataset has 361 classes -- one per intersection of the 19x19
# board -- representing the possible moves 0 through 360.
NUM_CLASSES = 361

# The go data matrices are always 19x19, with 2 feature planes per position.
IMAGE_SIZE = 19
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE * 2

# Name-scope prefix used per tower in multi-GPU training (stripped from
# summary names by _activation_summary). Was accidentally defined twice.
TOWER_NAME = 'tower'

# Decay to use for the moving average of trainable variables.
MOVING_AVERAGE_DECAY = 0.9999

# Constants describing the training process.
INITIAL_LEARNING_RATE = 0.003   # Initial learning rate.
LEARNING_RATE_DECAY_FACTOR = 1  # Learning rate decay factor (1 = no decay).
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 2000000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
NUM_EPOCHS_PER_DECAY = 4

def _activation_summary(x):
  """Helper to create summaries for activations.

  NOTE(review): this function is redefined byte-for-byte later in this file;
  since the later definition wins at import time, this first copy is dead
  code. Consider deleting one of the two copies.

  Creates a summary that provides a histogram of activations.
  Creates a summary that measures the sparsity of activations.

  Args:
    x: Tensor
  Returns:
    nothing
  """
  # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
  # session. This helps the clarity of presentation on tensorboard.
  tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
  tf.summary.histogram(tensor_name + '/activations', x)
  tf.summary.scalar(tensor_name + '/sparsity',
                                       tf.nn.zero_fraction(x))

def _variable_on_cpu(name, shape, initializer):
  """Create a Variable pinned to CPU memory.

  Args:
    name: name of the variable.
    shape: list of ints giving the variable's shape.
    initializer: initializer for the Variable.

  Returns:
    Variable Tensor placed on /cpu:0.
  """
  # Pick the storage dtype from the fp16 flag before entering the device
  # context; the dtype itself is an ordinary Python value.
  if FLAGS.use_fp16:
    dtype = tf.float16
  else:
    dtype = tf.float32
  with tf.device('/cpu:0'):
    return tf.get_variable(name, shape, initializer=initializer, dtype=dtype)

def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution
  and stored on CPU memory (see _variable_on_cpu). A weight decay term is
  added to the 'losses' collection only if one is specified, so loss()
  includes it when summing the total loss.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  # Honor the fp16 flag like _variable_on_cpu does; the original hard-coded
  # `if False`, which silently disabled fp16 initialization here.
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  # Restore the weight-decay term promised by the docstring; it was commented
  # out, so the `wd` argument was silently ignored. All current call sites
  # pass wd=0.0, so the numeric total loss is unchanged.
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var

def _activation_summary(x):
  """Create TensorBoard summaries for a layer's activations.

  Attaches a histogram summary of the raw activations and a scalar summary
  of their sparsity (fraction of zeros).

  Args:
    x: Tensor
  Returns:
    nothing
  """
  # Strip any 'tower_N/' prefix so multi-GPU runs present one clean name
  # per op on TensorBoard.
  tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
  tf.summary.histogram(tensor_name + '/activations', x)
  sparsity = tf.nn.zero_fraction(x)
  tf.summary.scalar(tensor_name + '/sparsity', sparsity)

def _conv_relu(inputs, scope_name, kernel_shape):
  """Build one conv + ReLU layer (stride 1, SAME padding) with summaries.

  Args:
    inputs: 4-D input tensor [batch, height, width, in_channels].
    scope_name: variable scope name; kept identical to the original
      hand-written layer names ('conv1'..'conv13') so existing checkpoints
      still load.
    kernel_shape: [filter_height, filter_width, in_channels, out_channels].

  Returns:
    The ReLU activation tensor of the layer.
  """
  with tf.variable_scope(scope_name) as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=kernel_shape,
                                         stddev=0.04,
                                         wd=0.0)
    conv = tf.nn.conv2d(inputs, kernel, [1, 1, 1, 1], padding='SAME')
    activation = tf.nn.relu(conv, name=scope.name)
    _activation_summary(activation)
  return activation

def inference(images):
  """Build the psigma model up to where it may be used for inference.

  The network is 13 stacked conv+ReLU layers (no pooling, stride 1, SAME
  padding) followed by a single bias-free linear layer producing one logit
  per board intersection. The original body spelled out all 13 layers by
  hand; they are identical except for kernel shape, so they are built by
  one helper in a loop.

  Args:
    images: Images placeholder, from inputs(); assumed shape
      [batch, IMAGE_SIZE, IMAGE_SIZE, 2] given conv1's 2 input channels --
      TODO(review): confirm against go_input.

  Returns:
    softmax_linear: Output tensor with the computed logits,
      shape [batch, NUM_CLASSES].
  """
  # conv1 widens the 2 input feature planes to 192 channels with a 5x5
  # receptive field; conv2..conv12 are identical 3x3, 192-channel layers;
  # conv13 collapses back down to a single 19x19 plane.
  net = _conv_relu(images, 'conv1', [5, 5, 2, 192])
  for i in range(2, 13):
    net = _conv_relu(net, 'conv%d' % i, [3, 3, 192, 192])
  net = _conv_relu(net, 'conv13', [3, 3, 192, 1])

  # Linear output layer: flatten the 19x19x1 map (361 values) and multiply
  # by a 361x361 weight matrix (no bias), yielding one logit per move.
  with tf.name_scope('softmax_linear'):
    fc = tf.contrib.layers.flatten(net)
    weights = _variable_with_weight_decay('weights', [NUM_CLASSES, NUM_CLASSES],
                                          0.04, wd=0.0)
    logits = tf.matmul(fc, weights)

  return logits

def loss(logits, labels):
  """Calculate the total loss from the logits and the labels.

  Adds the batch-mean cross-entropy to the 'losses' collection and returns
  the sum of everything in that collection, so any weight-decay terms added
  elsewhere are included in the total.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size].

  Returns:
    loss: Loss tensor of type float.
  """
  # sparse_softmax_cross_entropy_with_logits wants int32/int64 labels.
  labels_int64 = tf.cast(labels, tf.int64)
  per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels_int64, logits=logits, name='cross_entropy_per_example')
  mean_cross_entropy = tf.reduce_mean(per_example, name='cross_entropy')
  tf.add_to_collection('losses', mean_cross_entropy)
  return tf.add_n(tf.get_collection('losses'), name='total_loss')

def _add_loss_summaries(total_loss):
  """Add summaries for the losses in the model.

  Maintains an exponential moving average of each individual loss and of
  the total loss, and attaches scalar summaries for both the raw and the
  averaged values.

  Args:
    total_loss: Total loss from loss().

  Returns:
    loss_averages_op: op for generating moving averages of losses.
  """
  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
  all_losses = tf.get_collection('losses') + [total_loss]
  loss_averages_op = loss_averages.apply(all_losses)

  for each_loss in all_losses:
    # The raw value keeps a ' (raw)' suffix; the smoothed moving-average
    # version takes the original loss name.
    tf.summary.scalar(each_loss.op.name + ' (raw)', each_loss)
    tf.summary.scalar(each_loss.op.name, loss_averages.average(each_loss))

  return loss_averages_op

def training(loss, learning_rate):
  """Sets up the training Ops.

  Creates a summarizer to track the loss over time in TensorBoard, an
  exponentially decayed learning rate, a gradient-descent optimizer, and
  moving averages of all trainable variables. The Op returned by this
  function is what must be passed to the `sess.run()` call to cause the
  model to train.

  Args:
    loss: Loss tensor, from loss().
    learning_rate: Initial learning rate for the decay schedule.

  Returns:
    train_op: The Op for training.
  """
  # The original body referenced the undefined names `global_step` and
  # `total_loss` (a NameError at graph-build time); bind them here.
  global_step = tf.train.get_or_create_global_step()
  total_loss = loss

  # Variables that affect the learning rate.
  num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
  decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)

  # Decay the learning rate exponentially based on the number of steps,
  # starting from the caller-supplied rate (the original ignored the
  # `learning_rate` argument in favor of INITIAL_LEARNING_RATE).
  lr = tf.train.exponential_decay(learning_rate,
                                  global_step,
                                  decay_steps,
                                  LEARNING_RATE_DECAY_FACTOR,
                                  staircase=True)
  # Add a scalar summary for the snapshot learning rate.
  tf.summary.scalar('learning_rate', lr)

  # Generate moving averages of all losses and associated summaries.
  loss_averages_op = _add_loss_summaries(total_loss)

  # Compute gradients only after the loss averages have been updated.
  with tf.control_dependencies([loss_averages_op]):
    opt = tf.train.GradientDescentOptimizer(lr)
    grads = opt.compute_gradients(total_loss)

  # Apply gradients; this also increments global_step.
  apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

  # Add histograms for trainable variables.
  for var in tf.trainable_variables():
    tf.summary.histogram(var.op.name, var)

  # Track the moving averages of all trainable variables.
  variable_averages = tf.train.ExponentialMovingAverage(
      MOVING_AVERAGE_DECAY, global_step)
  variables_averages_op = variable_averages.apply(tf.trainable_variables())

  with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
    train_op = tf.no_op(name='train')
  return train_op

def distorted_inputs():
  """Construct distorted input for go training using the Reader ops.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, C]
      size (C presumably 2, matching conv1's input channels -- verify
      against go_input).
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: If no data_dir.
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  images, labels = go_input.distorted_inputs(batch_size=FLAGS.batch_size)
  if not FLAGS.use_fp16:
    return images, labels
  # fp16 mode: down-cast both tensors (loss() re-casts labels to int64).
  return tf.cast(images, tf.float16), tf.cast(labels, tf.float16)

def inputs():
  """Construct input for go evaluation using the Reader ops.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, C]
      size (C presumably 2, matching conv1's input channels -- verify
      against go_input).
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: If no data_dir
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  # Use the configured batch size. This was hard-coded to 256, which
  # disagreed with evaluation(), whose result is normalized by
  # FLAGS.batch_size (default 8) -- making the eval metric wrong.
  images, labels = go_input.inputs(batch_size=FLAGS.batch_size)

  if FLAGS.use_fp16:
    images = tf.cast(images, tf.float16)
    labels = tf.cast(labels, tf.float16)
  return images, labels

def evaluation(logits, labels):
  """Evaluate the quality of the logits at predicting the label.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size], with values in the
      range [0, NUM_CLASSES).
  Returns:
    A scalar tensor with the FRACTION of examples (out of FLAGS.batch_size)
    that were predicted correctly. (The original docstring said "number of
    examples", but the count is divided by the batch size below; with
    `from __future__ import division` in effect, `/` is true division.)
  """
  # For a classifier model, we can use the in_top_k Op.
  # It returns a bool tensor with shape [batch_size] that is true for
  # the examples where the label is in the top k (here k=1)
  # of all logits for that example.
  correct = tf.nn.in_top_k(logits, labels, 1)
  # Sum the true entries, then normalize by the flag-configured batch size.
  # NOTE(review): if the incoming batch is not FLAGS.batch_size (see
  # inputs(), which uses a different size), this fraction is wrong.
  return tf.reduce_sum(tf.cast(correct, tf.int32)) / FLAGS.batch_size
