import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import os

# Training hyperparameters and checkpoint locations.
# (Names are lowercase to match the tutorial style; referenced by backward().)
batch_size=200  # number of samples drawn per training step
learning_rate_base=0.1  # initial learning rate for the exponential-decay schedule
learning_rate_decay=0.99  # multiplicative decay factor, applied once per epoch
regularizer=0.0001  # L2 regularization coefficient passed to mnist_forward.forward()
steps=50000  # total number of training iterations
moving_average_decay=0.99  # decay for the ExponentialMovingAverage shadow variables
model_save_path="./model/"  # directory where checkpoints are written/restored
model_name="mnist_model"  # checkpoint file name prefix


def backward(mnist):
    """Build the MNIST training graph and run the training loop.

    Args:
        mnist: a dataset object whose ``train`` split provides
            ``num_examples`` and ``next_batch(batch_size)`` returning
            (images, one-hot labels).

    Side effects:
        Saves a checkpoint to ``model_save_path`` every 1000 steps and
        resumes from the latest checkpoint there if one exists.
    """
    # Placeholders: each sample is a flattened 784-value image with a
    # 10-class one-hot label. y_ is the ground truth, y the network output.
    x = tf.placeholder(tf.float32, [None, mnist_forward.input_node])
    y_ = tf.placeholder(tf.float32, [None, mnist_forward.output_node])
    y = mnist_forward.forward(x, regularizer)
    # Non-trainable step counter; minimize() increments it each update.
    global_step = tf.Variable(0, trainable=False)

    # Cross-entropy loss. The sparse variant expects class indices, so the
    # one-hot labels are converted with argmax along axis 1.
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)
    # Total loss = mean cross entropy + the L2 weight penalties that
    # mnist_forward registered in the 'losses' collection.
    loss = cem + tf.add_n(tf.get_collection('losses'))

    # Decay the learning rate once per epoch (staircase=True makes the
    # decay step-wise rather than continuous).
    learning_rate_step = mnist.train.num_examples // batch_size
    learning_rate = tf.train.exponential_decay(
        learning_rate_base, global_step, learning_rate_step,
        learning_rate_decay, staircase=True)

    # BUG FIX: the original passed the hard-coded constant 0.001 here, so
    # the decay schedule above (and learning_rate_base/learning_rate_decay)
    # was computed but never used. Feed the decayed tensor instead.
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # Maintain shadow (moving-average) copies of all trainable variables;
    # intended to improve generalization at inference time.
    ema = tf.train.ExponentialMovingAverage(moving_average_decay, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        # train_op itself is a no-op; it only forces train_step and ema_op
        # to run whenever it is executed.
        train_op = tf.no_op(name="train")

    # Saver for checkpointing the trained variables.
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Resume from the most recent checkpoint, if any exists.
        ckpt = tf.train.get_checkpoint_state(model_save_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        for i in range(steps):
            xs, ys = mnist.train.next_batch(batch_size)
            # run() returns: ignored no-op result, loss value, step count.
            _, loss_value, step = sess.run(
                [train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("after %d training steps,loss on all data is %g"
                      % (step, loss_value))
                saver.save(sess, os.path.join(model_save_path, model_name),
                           global_step=global_step)
                
def main():
    """Load the MNIST dataset with one-hot labels and run training."""
    dataset = input_data.read_data_sets('./data/', one_hot=True)
    backward(dataset)


if __name__ == '__main__':
    main()

# Worked example of tf.argmax: it returns the index of the largest
# element along the given axis (here axis 1, i.e. per row).
# y_=[[1,0,0],[0,0,1],[0,1,0]]
# labels=tf.argmax(y_,1)
# with tf.Session() as sess:
#     print(sess.run(labels))