import reader
import rhn
from tensorflow.python.ops import math_ops
import tensorflow as tf
import argparse
import os.path
import sys
import time

# Parsed command-line flags; populated by parse_known_args() in the
# __main__ block at the bottom of this file. Stays None when the module
# is imported rather than run as a script.
FLAGS = None

def data_type():
  """Return the floating-point dtype used for model variables.

  Always returns tf.float16, which matches SmallConfig.dtype.
  """
  # The original `tf.float16 if True else tf.float32` was a dead
  # conditional (presumably a leftover flag check): the float32 branch
  # could never be taken, so return the constant directly.
  return tf.float16

class SmallConfig(object):
  """Hyperparameters for the small RHN configuration."""

  # Optimization settings.
  init_scale = 0.1
  learning_rate = 1.0
  max_grad_norm = 5
  lr_decay = 0.5
  max_epoch = 4
  max_max_epoch = 13

  # Network shape.
  num_layers = 10
  inner_layers = 10
  num_steps = 32
  hidden_size = 830
  vocab_size = 10000
  bias = 1
  stddev = 0.04

  # Batching and regularization.
  batch_size = 20
  keep_prob = 1.0

  # Floating-point precision for model variables (see data_type()).
  dtype = tf.float16

  def activation(self, value):
      """Apply the recurrent nonlinearity (sigmoid) to `value`."""
      return math_ops.sigmoid(value)

def run_training(is_training=True, learning_rate=0.1):
    """Train the RHN model until the input queue raises OutOfRangeError.

    Args:
      is_training: If True and the config's keep_prob < 1, apply dropout
        to the embedded inputs. (With SmallConfig.keep_prob == 1.0,
        dropout is effectively disabled.)
      learning_rate: Step size forwarded to rhn.rhn_update. Defaults to
        0.1 to preserve the previously hard-coded value. NOTE(review):
        this ignores both SmallConfig.learning_rate and the
        --learning_rate flag — confirm which one is intended.
    """
    # Build the whole model into the default graph.
    with tf.Graph().as_default():
        # Token ids and shifted targets from the PTB reader.
        raw_data = reader.ptb_raw_data("./data")
        train_data, valid_data, test_data, _ = raw_data
        config = SmallConfig()
        # Number of optimizer steps that make up one pass over the data.
        epoch_size = ((len(train_data) // config.batch_size) - 1) // config.num_steps
        input_data, targets = rhn.pdb_inputs(batch_size=config.batch_size,
                                             num_steps=config.num_steps,
                                             epoch_size=epoch_size,
                                             data=train_data,
                                             name="TrainInput")

        # Embedding lookup is pinned to the CPU.
        with tf.device("/cpu:0"):
            embedding = tf.get_variable(
                "embedding", [config.vocab_size, config.hidden_size], dtype=data_type())
            inputs = tf.nn.embedding_lookup(embedding, input_data)

        if is_training and config.keep_prob < 1:
            inputs = tf.nn.dropout(inputs, config.keep_prob)

        # Inference, loss, and the training op.
        outputs = rhn.rhn(inputs=inputs, config=config)
        loss = rhn.rhn_loss(targets, outputs, config)
        train_op = rhn.rhn_update(loss, learning_rate=learning_rate)

        # The op for initializing the variables.
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        # Use a distinct name for the session config so the model config
        # above is not shadowed (the original code rebound `config` here).
        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = True
        sess = tf.Session(config=sess_config)

        # Initialize all variables (trained variables and epoch counter).
        sess.run(init_op)

        # Start the input enqueue threads.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            step = 0
            while not coord.should_stop():
                start_time = time.time()

                # The train_op's return value is discarded; only the loss
                # value is fetched for logging. Add more ops to the list
                # passed to sess.run() to inspect other tensors.
                _, loss_value = sess.run([train_op, loss])

                duration = time.time() - start_time

                # Print an overview fairly often.
                if step % 10 == 0:
                    print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value,
                                                               duration))
                step += 1
        except tf.errors.OutOfRangeError:
            # FLAGS is only populated when this file runs as a script;
            # guard against calling run_training() directly after import.
            num_epochs = FLAGS.num_epochs if FLAGS is not None else -1
            print('Done training for %d epochs, %d steps.' % (num_epochs, step))
        finally:
            # When done, ask the enqueue threads to stop.
            coord.request_stop()

        # Wait for threads to finish before closing the session.
        coord.join(threads)
        sess.close()

def main(_):
    """Entry point handed to tf.app.run; the unused arg is its argv."""
    return run_training(is_training=True)

if __name__ == '__main__':
    # Flag table: (flag name, type, default, help text).
    _ARG_SPECS = [
        ('--learning_rate', float, 0.01, 'Initial learning rate.'),
        ('--num_epochs', int, 2, 'Number of epochs to run trainer.'),
        ('--hidden1', int, 128, 'Number of units in hidden layer 1.'),
        ('--hidden2', int, 32, 'Number of units in hidden layer 2.'),
        ('--batch_size', int, 100, 'Batch size.'),
        ('--train_dir', str, '/tmp/data', 'Directory with the training data.'),
    ]

    parser = argparse.ArgumentParser()
    for flag, flag_type, default, help_text in _ARG_SPECS:
        parser.add_argument(flag, type=flag_type, default=default, help=help_text)

    # Known flags land in the module-level FLAGS; anything unrecognized
    # is forwarded untouched to tf.app.run.
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
