#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2019/2/11 15:26
# @Author  : Seven
# @File    : mnist98.py
# @Software: PyCharm
# function :
# 1.导入环境
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from matplotlib import pyplot as plt

# Emit INFO-level TensorFlow log messages during training.
tf.logging.set_verbosity(tf.logging.INFO)
# Load the MNIST dataset into ./data.  Without one_hot=True the labels are
# plain integer class ids, which matches the sparse softmax loss used below.
mnist = input_data.read_data_sets("data")

# Build the network graph.
# Inputs: flattened 28x28 images (784 floats), integer class labels, and a
# feed-able learning rate so it can be supplied per training step.
x = tf.placeholder("float", [None, 784])
y = tf.placeholder("int64", [None])
learning_rate = tf.placeholder("float")


def initialize(shape, stddev=0.01):
    """Return a truncated-normal tensor for initializing a weight/bias variable.

    Args:
        shape: list/tuple of ints — shape of the tensor to create.
        stddev: standard deviation of the truncated normal distribution.

    Returns:
        A tf.Tensor of the given shape, sampled from a truncated normal.
    """
    # Bug fix: the original body hard-coded stddev=0.01, silently ignoring
    # the caller-supplied `stddev` argument.
    return tf.truncated_normal(shape, stddev=stddev)


# --- Fully connected network: 784 -> 256 -> 256 -> 10 ---
L1_units_count = 256

# Hidden layer 1: affine transform followed by ReLU.
W_1 = tf.Variable(initialize([784, L1_units_count]))
b_1 = tf.Variable(initialize([L1_units_count]))
logits_1 = tf.matmul(x, W_1) + b_1
output_1 = tf.nn.relu(logits_1)

L2_units_count = 256

# Hidden layer 2: affine transform followed by ReLU.
W_2 = tf.Variable(initialize([L1_units_count, L2_units_count]))
b_2 = tf.Variable(initialize([L2_units_count]))
logits_2 = tf.matmul(output_1, W_2) + b_2
output_2 = tf.nn.relu(logits_2)

# Output layer: 10 raw class scores (no activation — softmax is applied
# inside the loss op and separately for prediction).
L3_units_count = 10
W_3 = tf.Variable(initialize([L2_units_count, L3_units_count]))
b_3 = tf.Variable(initialize([L3_units_count]))
logits_3 = tf.matmul(output_2, W_3) + b_3

logits = logits_3

# Cross-entropy over integer labels (sparse variant applies softmax to the
# raw logits internally), averaged over the batch.
cross_entropy_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y))
# L2 weight-decay on the three weight matrices (biases excluded).
l2_loss = tf.nn.l2_loss(W_1) + tf.nn.l2_loss(W_2) + tf.nn.l2_loss(W_3)
# 7e-5 is the regularization coefficient.
total_loss = cross_entropy_loss + 7e-5*l2_loss

# Plain SGD; the learning rate is fed at run time via the placeholder above.
optimizer = tf.train.GradientDescentOptimizer(
    learning_rate=learning_rate).minimize(total_loss)

# Accuracy: fraction of batch samples whose argmax class matches the label.
# (tf.argmax returns int64, matching the int64 label placeholder.)
pred = tf.nn.softmax(logits)
correct_pred = tf.equal(tf.argmax(pred, 1), y)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

batch_size = 100
# NOTE(review): name is a typo for "training_step"; it is referenced in the
# training loop below, so renaming must update both sites together.
trainig_step = 6000

# Saver for periodic checkpoints written during training.
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Define the validation and test feed dicts.
    validate_data = {
        x: mnist.validation.images,
        y: mnist.validation.labels,
    }
    test_data = {x: mnist.test.images, y: mnist.test.labels}

    for i in range(trainig_step):
        # One SGD step on a fresh mini-batch with a fixed learning rate of 0.5.
        xs, ys = mnist.train.next_batch(batch_size)
        _, loss = sess.run(
            [optimizer, total_loss],
            feed_dict={
                x: xs,
                y: ys,
                learning_rate: 0.5
            })

        # Every 100 steps, print the training loss and validation accuracy
        # and write a checkpoint.  (Step 0 is deliberately skipped, and the
        # final partial interval after the last multiple of 100 is never
        # reported.)
        if i > 0 and i % 100 == 0:
            validate_accuracy = sess.run(accuracy, feed_dict=validate_data)
            print(
                "after %d training steps, the loss is %g, the validation accuracy is %g"
                % (i, loss, validate_accuracy))
            saver.save(sess, 'model/model.ckpt', global_step=i)

    print("the training is finish!")
    # Final accuracy on the held-out test set.
    acc = sess.run(accuracy, feed_dict=test_data)
    print("the test accuarcy is:", acc)
