import argparse
import os
import sys
import time

import input_data
import tensorflow as tf
import mnist
import math

# MNIST has 10 digit classes; images are 28x28 grayscale, flattened to 784 pixels.
NUM_CLASSES = 10
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
# Pipeline overview: placeholders -> weight/bias variables -> graph (x*W + b)
# -> loss, optimizer (minimize loss), metric -> checkpoint saving -> summary
# writing -> training -> evaluation.

def placeholder_inputs(batch_size):
    """Create the feed placeholders for one batch of examples.

    Args:
        batch_size: Number of examples per batch.

    Returns:
        A (images_placeholder, labels_placeholder) pair: float32 images of
        shape [batch_size, IMAGE_PIXELS] and int32 labels of shape
        [batch_size].
    """
    images_ph = tf.placeholder(tf.float32, shape=[batch_size, IMAGE_PIXELS])
    labels_ph = tf.placeholder(tf.int32, shape=[batch_size])
    return images_ph, labels_ph


# 输出结果
def inference(images, hidden1_units, hidden2_units):
    """Build the forward pass of a two-hidden-layer MLP and return logits.

    Args:
        images: Float tensor of shape [batch_size, IMAGE_PIXELS].
        hidden1_units: Width of the first hidden layer.
        hidden2_units: Width of the second hidden layer.

    Returns:
        Unnormalized logits tensor of shape [batch_size, NUM_CLASSES].
    """
    def _layer(scope_name, inputs, fan_in, fan_out, activation=None):
        # One fully-connected layer. Weights are drawn from a truncated
        # normal scaled by 1/sqrt(fan_in); biases start at zero. The
        # activation is applied inside the name_scope so op names match
        # the conventional layout.
        with tf.name_scope(scope_name):
            weights = tf.Variable(
                tf.truncated_normal([fan_in, fan_out],
                                    stddev=1.0 / math.sqrt(float(fan_in))),
                name='weights')
            biases = tf.Variable(tf.zeros([fan_out]), name='biases')
            pre_activation = tf.matmul(inputs, weights) + biases
            return activation(pre_activation) if activation else pre_activation

    hidden1 = _layer("hidden1", images, IMAGE_PIXELS, hidden1_units,
                     activation=tf.nn.relu)
    hidden2 = _layer('hidden2', hidden1, hidden1_units, hidden2_units,
                     activation=tf.nn.relu)
    # Final layer is linear; softmax is folded into the loss op.
    return _layer('softmax_linear', hidden2, hidden2_units, NUM_CLASSES)


def dloss(logits, labels):
    """Compute the mean sparse softmax cross-entropy loss.

    Args:
        logits: Float tensor of shape [batch_size, NUM_CLASSES] from
            inference().
        labels: Integer tensor of shape [batch_size] with class ids in
            [0, NUM_CLASSES - 1].

    Returns:
        A scalar float tensor with the mean cross-entropy over the batch.
    """
    # sparse_softmax_cross_entropy takes integer class ids directly, so no
    # one-hot encoding of the labels is needed (that would be the dense
    # softmax_cross_entropy_with_logits variant). Labels are widened to
    # int64, which the op expects.
    labels = tf.to_int64(labels)
    return tf.losses.sparse_softmax_cross_entropy(labels, logits)


def training(loss, learning_rate):
    """Attach the ops needed to minimize `loss` and return the train op.

    Args:
        loss: Scalar loss tensor from dloss().
        learning_rate: Step size for gradient descent.

    Returns:
        The op that, when run, applies one gradient-descent update.
    """
    # Surface the loss in TensorBoard.
    tf.summary.scalar('loss', loss)
    # Non-trainable counter, incremented once per optimizer step.
    step_counter = tf.Variable(0, name='global_step', trainable=False)
    sgd = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    return sgd.minimize(loss, global_step=step_counter)


def evaluation(logits, labels):
    """Return an op counting how many predictions match their labels.

    Args:
        logits: Logits tensor of shape [batch_size, NUM_CLASSES].
        labels: Integer labels tensor of shape [batch_size].

    Returns:
        An int32 scalar tensor: the number of correct top-1 predictions.
    """
    # in_top_k with k=1 marks each row whose arg-max equals its label.
    hits = tf.nn.in_top_k(logits, labels, 1)
    return tf.reduce_sum(tf.cast(hits, tf.int32))


def fill_feed_dict(data_set, images_pl, labels_pl):
    """Build the feed_dict mapping placeholders to the next batch.

    Args:
        data_set: Image/label dataset from input_data.read_data_sets().
        images_pl: Images placeholder from placeholder_inputs().
        labels_pl: Labels placeholder from placeholder_inputs().

    Returns:
        A dict of the form {<placeholder>: <batch of values>} suitable for
        sess.run(..., feed_dict=...).
    """
    # next_batch honours --fake_data so unit tests can run without the
    # real MNIST files.
    images, labels = data_set.next_batch(FLAGS.batch_size, FLAGS.fake_data)
    return {images_pl: images, labels_pl: labels}


def do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            data_set):
    """Run one evaluation epoch over `data_set` and print precision@1.

    Args:
        sess: The session the model was trained in.
        eval_correct: The correct-count op from evaluation().
        images_placeholder: Images placeholder from placeholder_inputs().
        labels_placeholder: Labels placeholder from placeholder_inputs().
        data_set: The dataset (train/validation/test) to evaluate on.
    """
    # Only full batches are evaluated; any remainder examples are dropped.
    batches = data_set.num_examples // FLAGS.batch_size
    num_examples = batches * FLAGS.batch_size
    true_count = 0
    for _ in range(batches):
        feed = fill_feed_dict(data_set,
                              images_placeholder,
                              labels_placeholder)
        true_count += sess.run(eval_correct, feed_dict=feed)
    precision = float(true_count) / num_examples
    print('Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
          (num_examples, true_count, precision))


def run_training():
    """Train the MNIST MLP, writing summaries and checkpoints to FLAGS.log_dir.

    Builds the graph (inference -> loss -> train op -> eval op), then runs
    FLAGS.max_steps SGD steps, logging the loss every 100 steps and saving a
    checkpoint plus train/validation/test evaluations every 1000 steps and at
    the end of training.
    """
    data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)

    with tf.Graph().as_default():
        # Placeholders sized for a fixed batch.
        images_placeholder, labels_placeholder = placeholder_inputs(
            FLAGS.batch_size)

        # Build a Graph that computes predictions from the inference model,
        # then attach loss, training and evaluation ops.
        logits = inference(images_placeholder,
                           FLAGS.hidden1,
                           FLAGS.hidden2)
        loss = dloss(logits, labels_placeholder)
        train_op = training(loss, FLAGS.learning_rate)
        eval_correct = evaluation(logits, labels_placeholder)

        # Collect all summaries into one op; prepare variable initialization.
        summary = tf.summary.merge_all()
        init = tf.global_variables_initializer()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # `with` guarantees the session is released even if training raises
        # (the original leaked it).
        with tf.Session() as sess:
            # Instantiate a SummaryWriter to output summaries and the Graph.
            summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)

            sess.run(init)

            for step in range(FLAGS.max_steps):
                start_time = time.time()

                feed_dict = fill_feed_dict(data_sets.train,
                                           images_placeholder,
                                           labels_placeholder)
                _, loss_value = sess.run([train_op, loss],
                                         feed_dict=feed_dict)
                duration = time.time() - start_time

                if step % 100 == 0:
                    # '.2f' (not '2f'): two decimal places, rather than a
                    # width-2 field with the default six decimals.
                    print(f'Step {step}: loss = {loss_value:.2f} ({duration:.3f} sec)')

                    summary_str = sess.run(summary, feed_dict=feed_dict)
                    summary_writer.add_summary(summary_str, step)
                    summary_writer.flush()

                # Save a checkpoint and evaluate the model periodically.
                if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                    checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_file, global_step=step)
                    print('Training Data Eval:')
                    do_eval(sess,
                            eval_correct,
                            images_placeholder,
                            labels_placeholder,
                            data_sets.train)
                    # Evaluate against the validation set.
                    print('Validation Data Eval:')
                    do_eval(sess,
                            eval_correct,
                            images_placeholder,
                            labels_placeholder,
                            data_sets.validation)
                    # Evaluate against the test set.
                    print('Test Data Eval:')
                    do_eval(sess,
                            eval_correct,
                            images_placeholder,
                            labels_placeholder,
                            data_sets.test)


def main(_):
    """Entry point for tf.app.run: reset the log directory, then train."""
    # Start from an empty log dir so summaries/checkpoints from earlier
    # runs do not mix with this one.
    if tf.gfile.Exists(FLAGS.log_dir):
        tf.gfile.DeleteRecursively(FLAGS.log_dir)
    tf.gfile.MakeDirs(FLAGS.log_dir)
    run_training()


if __name__ == '__main__':
    # Command-line flags; anything unparsed is forwarded to tf.app.run.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--learning_rate',
        type=float,
        default=0.01,
        help='Initial learning rate.'
    )
    parser.add_argument(
        '--max_steps',
        type=int,
        default=2000,
        help='Number of steps to run trainer.'
    )
    parser.add_argument(
        '--hidden1',
        type=int,
        default=128,
        help='Number of units in hidden layer 1.'
    )
    parser.add_argument(
        '--hidden2',
        type=int,
        default=32,
        help='Number of units in hidden layer 2.'
    )
    parser.add_argument(
        '--batch_size',
        type=int,
        default=128,
        help='Batch size.  Must divide evenly into the dataset sizes.'
    )
    # NOTE(review): these Windows-specific absolute default paths tie the
    # script to one machine — consider relative defaults.
    parser.add_argument(
        '--input_data_dir',
        type=str,
        default=r'E:\redldw\net\tensorflow\MNIST__\MNIST_data',
        help='Directory to put the input data.'
    )
    parser.add_argument(
        '--log_dir',
        type=str,
        default=r'F:\Resources\log\tensortest\\',
        help='Directory to put the log data.'
    )
    parser.add_argument(
        '--fake_data',
        default=False,
        help='If true, uses fake data for unit testing.',
        action='store_true'
    )
    # FLAGS is read as a module-level global by the functions above.
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
