# -*- coding: utf-8 -*-
"""
Create Time:  2020/3/28 11:32
Author:       jinas
Email:        jinasuo@163.com
Description:  CSDN人工智能工程师课程--深度学习入门模块--第一课
                识别MNIST数据集
"""

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
data_dir = '../dataset/MNIST'  # directory holding the MNIST .gz files (downloaded if absent)
batch_size = 100               # examples per SGD mini-batch
learning_rate = 0.8            # initial learning rate, decayed exponentially in main()
learning_rate_decay = 0.99     # multiplicative LR decay factor per decay period
max_steps = 30000              # total number of training iterations
# Non-trainable global step counter, shared by the EMA (num_updates) and the LR decay schedule.
train_step = tf.Variable(0,trainable=False)
# Load MNIST with one-hot encoded labels (side effect: downloads data on first run).
mnist = input_data.read_data_sets(data_dir,one_hot=True)
def hidden_layer(x,w1,b1,w2,b2):
    """Forward pass of a single-hidden-layer (ReLU) network.

    Args:
        x:  input batch; as used in this file, shape (batch, 784).
        w1: hidden-layer weights, here (784, 500).
        b1: hidden-layer biases, here (500,).
        w2: output-layer weights, here (500, 10).
        b2: output-layer biases, here (10,).

    Returns:
        Unnormalized logits (here shape (batch, 10)); softmax is applied
        by the loss function, not by this forward pass.
    """
    hidden_out = tf.nn.relu(tf.matmul(x, w1) + b1)
    # Note: no unreachable `pass` after return (removed dead statement).
    return tf.matmul(hidden_out, w2) + b2
def main(learning_rate=learning_rate):
    """Train a 784-500-10 MLP on MNIST and print test accuracy periodically.

    Builds the graph (forward pass, EMA-shadowed evaluation pass,
    L2-regularized softmax cross-entropy loss, exponentially decayed SGD),
    then runs `max_steps` mini-batch updates, reporting full-test-set
    accuracy every 1000 steps.

    Args:
        learning_rate: initial learning rate before exponential decay.
    """
    x = tf.placeholder(tf.float32, [None, 784], name="x")
    y = tf.placeholder(tf.float32, [None, 10], name="y")
    # 500 hidden units: roughly the mean of input (784) and output (10) sizes.
    w1 = tf.Variable(tf.truncated_normal(shape=[784, 500], stddev=0.1), name='w1')
    b1 = tf.Variable(tf.constant(0.1, shape=[500]))
    w2 = tf.Variable(tf.truncated_normal(shape=[500, 10], stddev=0.1), name='w2')
    b2 = tf.Variable(tf.constant(0.1, shape=[10]))
    y_ = hidden_layer(x, w1, b1, w2, b2)

    # Exponential moving average (decay 0.99) over all trainable variables;
    # evaluation uses the smoothed shadow weights for a more robust model.
    average_class = tf.train.ExponentialMovingAverage(0.99, num_updates=train_step)
    average_op = average_class.apply(tf.trainable_variables())
    average_y = hidden_layer(x, average_class.average(w1), average_class.average(b1),
                             average_class.average(w2), average_class.average(b2))

    # Sparse cross-entropy expects integer class ids, so convert the one-hot y.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=tf.argmax(y, 1), logits=y_)
    regularization = tf.contrib.layers.l2_regularizer(0.0001)
    # Decay the LR once per epoch (num_examples / batch_size steps).
    learning_rate = tf.train.exponential_decay(
        learning_rate, train_step,
        mnist.train.num_examples / batch_size, learning_rate_decay)
    loss = tf.reduce_mean(cross_entropy) + regularization(w1) + regularization(w2)
    # BUGFIX: pass global_step so train_step actually increments each update;
    # without it the LR decay and the EMA num_updates were stuck at step 0.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=train_step)
    # Accuracy is measured against the EMA-smoothed predictions.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(average_y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # Run the SGD update and the EMA shadow update together each step.
    train_op = tf.group(optimizer, average_op)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(max_steps):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            sess.run(train_op, feed_dict={x: batch_x, y: batch_y})
            if i % 1000 == 0:
                # Evaluate on the full test set only when logging; the original
                # recomputed it on every one of the 30000 steps for no benefit.
                test_accuracy = sess.run(
                    accuracy,
                    feed_dict={x: mnist.test.images, y: mnist.test.labels})
                print('current step {},test accuracy is {}'.format(i, test_accuracy))
if __name__ == "__main__":
    # Train the model only when this file is executed directly.
    main()

'''
总结：
本模型结构是单隐层模型，隐层维度是500，（根据常规经验，隐层维度大概在输入维度与输出维度总和的平均，即（784+10）/2
针对权重初始化，采用标准差为0.1的高斯分布，对比初始化为0的方式，此方式效果较好
然后，使用relu作为激活函数，利用随机梯度作为优化器。增加非线性能力。
同时，为了防止过拟合现象，本模型采用超参数为0.0001 的L2正则项。
此外，为了得到强壮的模型，本模型添加了两种操作，分别是 滑动模型处理权重参数与偏置和指数衰减学习率。

结果显示，当迭代到4000时候，模型测试准确率达到98%，但是后面准确率没有明显提升了，进一步可能改善点有：正则化的超参数调优；隐层个数以及维度；学习率进一步优化
'''