from resnet import * 
import tensorflow as tf

# SGD momentum coefficient handed to tf.train.MomentumOptimizer in train().
MOMENTUM = 0.9

# Command-line flags (legacy tf.app.flags API); read via FLAGS.<name>.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/resnet_train',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_float('learning_rate', 0.01, "learning rate.")
tf.app.flags.DEFINE_integer('batch_size', 16, "batch size")
tf.app.flags.DEFINE_integer('max_steps', 500000, "max steps")
tf.app.flags.DEFINE_boolean('resume', False,
                            'resume from latest saved state')
tf.app.flags.DEFINE_boolean('minimal_summaries', True,
                            'produce fewer summaries to save HD space')


def top_k_error(predictions, labels, k):
    """Return the fraction of the batch whose label is NOT in the top-k predictions.

    Args:
        predictions: [batch, num_classes] score/probability tensor.
        labels: [batch] integer class-id tensor.
        k: how many top predictions count as "correct".

    Returns:
        Scalar tensor in [0, 1]: (batch_size - num_correct) / batch_size.
    """
    # Uses the static flag rather than tf.shape(predictions)[0] so the
    # result is a plain float ratio of a known batch size.
    batch_size = float(FLAGS.batch_size)
    # BUG FIX: the original hardcoded k=1 here, ignoring the `k` argument,
    # so any caller asking for top-5 error silently got top-1 error.
    in_top_k = tf.to_float(tf.nn.in_top_k(predictions, labels, k=k))
    num_correct = tf.reduce_sum(in_top_k)
    return (batch_size - num_correct) / batch_size


def train(is_training, logits, images, labels):

    # 初始化参数
    # Gets an existing variable with these parameters or create a new one
    # tf.constant_initializer -> Initializer that generates tensors with constant values.
    global_step = tf.get_variable('global_step', [],
                                  initializer=tf.constant_initializer(0),
                                  trainable=False)
    val_step = tf.get_variable('val_step', [],
                                  initializer=tf.constant_initializer(0),
                                  trainable=False)

    # 计算损失函数, logits是前向计算的结果
    # loss ???
    loss_ = loss(logits, labels)

    # 计算每个类别的概率 & top_k error
    predictions = tf.nn.softmax(logits)
    top1_error = top_k_error(predictions, labels, 1)


    # loss_avg ???
    #ExponentialMovingAverage -> 通过使用指数衰减来保持变量的移动平均值 ??? 
    #一些训练优化算法，比如GradientDescent 和Momentum 在优化过程中便可以使用到移动平均方法。使用移动平均常常可以较明显地改善结果。
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    #add_to_collection -> tf.add_to_collection 的作用是将value以name的名称存储在收集器
    #add_to_collection -> 关于获取保存的模型中的tensor或者输出，还有一种办法就是用tf.add_to_collection()，
    #假如上面每次定义一次运算后，可以在后面添加tf.add_to_collection()：恢复模型后，通过tf.get_collection()来获取tensor
    tf.add_to_collection(UPDATE_OPS_COLLECTION, ema.apply([loss_]))
    #scalar_summary -> Outputs a Summary protocol buffer with scalar values -> 保存到日志
    tf.scalar_summary('loss_avg', ema.average(loss_))


    # validation stats
    ema = tf.train.ExponentialMovingAverage(0.9, val_step)
    val_op = tf.group(val_step.assign_add(1), ema.apply([top1_error]))
    top1_error_avg = ema.average(top1_error)
    tf.scalar_summary('val_top1_error_avg', top1_error_avg)

    # log learning_rate
    tf.scalar_summary('learning_rate', FLAGS.learning_rate)


    #准备 optimizer
    opt = tf.train.MomentumOptimizer(FLAGS.learning_rate, MOMENTUM)
    
    #记录梯度的变化
    grads = opt.compute_gradients(loss_)
    for grad, var in grads:
        if grad is not None and not FLAGS.minimal_summaries:
            tf.histogram_summary(var.op.name + '/gradients', grad)
    #Apply gradients to variables. This is the second part of minimize(). It returns an Operation that applies gradients.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)


    if not FLAGS.minimal_summaries:
        # Display the training images in the visualizer.
        tf.image_summary('images', images)

        for var in tf.trainable_variables():
            tf.histogram_summary(var.op.name, var)

    #???
    batchnorm_updates = tf.get_collection(UPDATE_OPS_COLLECTION)
    batchnorm_updates_op = tf.group(*batchnorm_updates)
    train_op = tf.group(apply_gradient_op, batchnorm_updates_op)

    #Saves and restores variables
    saver = tf.train.Saver(tf.all_variables())

    #merge summaries
    summary_op = tf.merge_all_summaries()



    # graph init varialbles
    init = tf.initialize_all_variables()
    #get sess
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    #执行 init
    sess.run(init)
    #queue -> Starts all queue runners collected in the graph.
    tf.train.start_queue_runners(sess=sess)
    #log to dir
    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)


    # start from checkpoint
    if FLAGS.resume:
        latest = tf.train.latest_checkpoint(FLAGS.train_dir)
        if not latest:
            print "No checkpoint to continue from in", FLAGS.train_dir
            sys.exit(1)
        print "resume", latest
        saver.restore(sess, latest)


    # iterration ecpo
    for x in xrange(FLAGS.max_steps + 1):
        start_time = time.time()

        # run -> global_step
        step = sess.run(global_step)
        i = [train_op, loss_]

        write_summary = step % 100 and step > 1
        if write_summary:
            i.append(summary_op)


        #run -> i??
        o = sess.run(i, { is_training: True })
        loss_value = o[1]

        duration = time.time() - start_time
        assert not np.isnan(loss_value), 'Model diverged with loss = NaN'


        #每隔5轮 输出
        if step % 5 == 0:
            examples_per_sec = FLAGS.batch_size / float(duration)
            format_str = ('step %d, loss = %.2f (%.1f examples/sec; %.3f '
                          'sec/batch)')
            print(format_str % (step, loss_value, examples_per_sec, duration))


        if write_summary:
            summary_str = o[2]
            summary_writer.add_summary(summary_str, step)


        # Save the model checkpoint periodically.
        # 每100轮 保存结果
        if step > 1 and step % 100 == 0:
            checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=global_step)


        # Run validation periodically
        if step > 1 and step % 100 == 0:
            _, top1_error_value = sess.run([val_op, top1_error], { is_training: False })
            print('Validation top1 error %.2f' % top1_error_value)



