import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt

# Load the MNIST dataset from the local directory.
mnist = input_data.read_data_sets("E:/VC_project/data/Mnist",one_hot=True) # one-hot encode the labels

# Weight initialization helper.
def initialize(shape,stddev):
    """Return a truncated-normal weight tensor of `shape` with the given stddev.

    Bug fix: the original called `tf.truncated_normal(shape, stddev)`, which
    passes `stddev` positionally into the `mean` parameter (the signature is
    `(shape, mean=0.0, stddev=1.0)`), yielding weights centered at `stddev`
    with stddev 1.0. Passing it by keyword restores the intended
    zero-mean, `stddev`-scaled initialization.
    """
    return tf.truncated_normal(shape, stddev=stddev)

# Activation functions.
def swish(x):
    """Swish activation: x scaled by its own sigmoid gate."""
    gate = tf.nn.sigmoid(x)
    return x * gate
def selu(x):
    """Scaled Exponential Linear Unit (Klambauer et al., 2017).

    selu(x) = scale * x                     for x >  0
            = scale * alpha * (exp(x) - 1)  for x <= 0   (via alpha * elu(x))

    Fixes: the name scope was mislabeled "elu" and the scale constant was
    misspelled `sacle`. The computation itself is unchanged.
    """
    with tf.name_scope("selu") as scope:
        alpha = 1.6732632423543772848170429916717
        scale = 1.0507009873554804934193349852946
        return scale * tf.where(x > 0.0, x, alpha * tf.nn.elu(x))
def activation(x):
    """Hidden-layer activation used by the network.

    Currently ReLU; swap in selu/swish/sigmoid here to experiment with
    alternatives without touching the layer definitions.
    """
    return tf.nn.relu(x)



# Build the neural network with TensorFlow.
# 1. Computation graph: define the network structure.
# learning_rate = tf.placeholder(dtype=tf.float32)  # superseded by the decay schedule below

# Global training-step counter; excluded from gradient updates.
global_step = tf.Variable(0, trainable=False)

# Exponentially decaying learning rate:
#   lr = 0.8 * 0.99 ** (global_step / (num_examples / 100))
# i.e. roughly one decay factor per epoch at batch size 100.
# NOTE(review): the schedule only advances when the optimizer's minimize()
# is given this global_step to increment — verify that is wired up.
learning_rate = tf.train.exponential_decay(
        0.8, global_step, mnist.train.num_examples / 100,0.99)

# 1.1 Layer 1: fully-connected hidden layer.
L1_units_counts = 500 # number of hidden units
# Inputs: flattened 28x28 MNIST images (784 features per sample).
x = tf.placeholder(dtype=tf.float32,shape=[None,784],name='x')
w1 = tf.Variable(initialize([784,L1_units_counts],0.04))
b1 = tf.Variable(tf.constant(0.1, shape=[L1_units_counts]))
logits1 = tf.matmul(x,w1)+b1
output1 = activation(logits1)

# 1.2 Layer 2: output layer, one logit per digit class.
L2_units_counts = 10
w2 = tf.Variable(initialize([L1_units_counts,L2_units_counts],0.055))
b2 = tf.Variable(tf.constant(0.1, shape=[L2_units_counts]))
logits2 = tf.matmul(output1,w2)+b2

y = tf.placeholder(dtype=tf.float32,shape=[None,L2_units_counts],name="y") # one-hot labels

# 1.3 Loss: softmax cross-entropy plus L2 regularization.
cross_entorpy_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits2,labels=y))

# L2 penalty on the weight matrices only (biases conventionally excluded).
REGULARIZATION_RATE=0.0001 # regularization coefficient
regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)

regularization = regularizer(w1)+regularizer(w2)

total_loss = cross_entorpy_loss+regularization # regularized training objective

# Gradient-descent optimizer.
# Fix: pass global_step so minimize() increments it on every step; the
# original omitted it, leaving the counter at 0 and the exponential_decay
# learning-rate schedule permanently stuck at its initial value (0.8).
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
    total_loss, global_step=global_step)

# 1.4 Evaluation metric: fraction of samples whose arg-max prediction
# matches the arg-max of the one-hot label.
# tf.argmax replaces the deprecated tf.arg_max alias.
correct_prediction = tf.equal(tf.argmax(logits2,1),tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

# 2. Train the model.
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

for step in range(50000):
    batch_x,batch_y = mnist.train.next_batch(100)

    # One optimization step; fetch both losses for monitoring.
    _, cross_entorpy_value,total_loss_value = sess.run(
        [train_step,cross_entorpy_loss,total_loss],
        feed_dict={x:batch_x,y:batch_y})

    # Report progress every 1000 steps.
    if (step+1)%1000 == 0:
        print("*"*20)
        print('step: %d, cross entropy: %f, total loss: %f'%
              (step+1,cross_entorpy_value,total_loss_value))
        # Accuracy on the current training batch (fixes the "trian" typo
        # in both the variable name and the printed message).
        train_accuracy = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
        print("train accuracy: {}".format(train_accuracy))

        # Accuracy on the held-out validation set.
        validation_accuracy = sess.run(accuracy,
                                       feed_dict={x:mnist.validation.images,
                                                  y:mnist.validation.labels
                                                  })
        print("validation accuracy: {}".format(validation_accuracy))

# Final accuracy on the test set, reported once after training.
test_accuracy = sess.run(accuracy,
                         feed_dict={
                             x:mnist.test.images,
                             y:mnist.test.labels
                         })
print("test accuracy: {}".format(test_accuracy))

