#两层全连接神经网络
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Network-size hyperparameters.
L1_num = 256       # width of hidden layer 1
L2_num = 128       # width of hidden layer 2
input_size = 784   # flattened 28x28 MNIST image
class_num = 10     # digit classes 0-9

# Download (if needed) and load MNIST with one-hot encoded labels.
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Name scope so the inputs group together in the TensorBoard graph view.
with tf.name_scope('input'):
    # x: batch of flattened images, shape (batch, 784).
    x = tf.placeholder(tf.float32, [None, input_size],name='x-input')
    # y: one-hot labels, shape (batch, 10).
    y = tf.placeholder(tf.float32, [None, class_num],name='y-input')

# Network params
# Name scope for TensorBoard grouping of the trainable variables.
with tf.name_scope('layer'):
    stddev = 0.1  # std-dev for the Gaussian weight initializer
    with tf.name_scope('weight'):
        # Weight matrices: 784 -> 256 -> 128 -> 10.
        ws = {
            'w1': tf.Variable(tf.random.normal([input_size, L1_num], stddev=stddev), name='w1'),
            'w2': tf.Variable(tf.random.normal([L1_num, L2_num], stddev=stddev), name='w2'),
            'out': tf.Variable(tf.random.normal([L2_num, class_num], stddev=stddev), name='out1')
        }
    with tf.name_scope('bias'):
        # Bias vectors, one per layer (default stddev=1.0 normal init).
        bs = {
            'b1': tf.Variable(tf.random.normal([L1_num]), name='b1'),
            'b2': tf.Variable(tf.random.normal([L2_num]), name='b2'),
            'out': tf.Variable(tf.random.normal([class_num]), name='out2')
        }
print('===== Network Params is ok =====')

def forward(x, ws, bs):
    """Forward pass of the two-hidden-layer MLP.

    Args:
        x: input batch tensor, shape (batch, 784).
        ws: dict of weight variables ('w1', 'w2', 'out').
        bs: dict of bias variables ('b1', 'b2', 'out').

    Returns:
        Raw (pre-softmax) logits tensor, shape (batch, 10).
    """
    hidden1 = tf.nn.relu(tf.matmul(x, ws['w1']) + bs['b1'])
    hidden2 = tf.nn.relu(tf.matmul(hidden1, ws['w2']) + bs['b2'])
    # Record the layer-2 parameter distributions for TensorBoard.
    tf.summary.histogram("h-w", ws['w2'])
    tf.summary.histogram("h-b", bs['b2'])
    with tf.name_scope('wx_add_b'):
        logits = tf.add(tf.matmul(hidden2, ws['out']), bs['out'])
    return logits
# NOTE(review): despite the scope name, `pred` holds raw logits — the
# softmax itself is applied inside the loss op below.
with tf.name_scope('softmax'):
    pred = forward(x,ws,bs)
with tf.name_scope('loss'):
    # Mean softmax cross-entropy over the batch (expects logits, not probs).
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred))
    tf.summary.histogram("h-loss", loss)  # log loss for TensorBoard
    tf.summary.scalar("s-loss", loss)
with tf.name_scope('train_op'):
    train_op = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss)
with tf.name_scope('corr'):
    # Per-example correctness: predicted class vs. one-hot label argmax.
    corr = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
with tf.name_scope('accr'):
    # Accuracy = fraction of correct predictions in the fed batch.
    accr = tf.reduce_mean(tf.cast(corr, tf.float32))
    tf.summary.histogram("h-accr", accr)  # log accuracy for TensorBoard
    tf.summary.scalar("s-accr", accr)


# Op that initializes all variables; must run before training.
init = tf.global_variables_initializer()
print('===== function is ok =====')

# training

with tf.Session() as sess:
    sess.run(init)
    step_n = 101      # number of training epochs
    batch_size = 100  # examples per mini-batch
    display_step = 10  # evaluate/log every N epochs
    # Merge every summary op defined above into a single fetchable op.
    merged = tf.summary.merge_all()
    # Directory where TensorBoard event files are written.
    writer = tf.summary.FileWriter('logs/', sess.graph)
    for step in range(step_n):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Fetch the train op and the batch loss in ONE sess.run
            # (the original ran two separate forward passes per batch;
            # the loss is now the pre-update value for this batch).
            _, batch_cost = sess.run([train_op, loss],
                                     feed_dict={x: batch_x, y: batch_y})
            avg_cost += batch_cost
        # BUG FIX: average over the number of batches, not the batch size —
        # the original divided by batch_size (100) instead of total_batch,
        # inflating the reported epoch loss by total_batch/batch_size.
        avg_cost /= total_batch
        if step % display_step == 0:
            test_feed = {x: mnist.test.images, y: mnist.test.labels}
            test_accr = sess.run(accr, feed_dict=test_feed)
            print(step, 'acc:', test_accr, 'loss:', avg_cost)
            summary = sess.run(merged, feed_dict=test_feed)
            writer.add_summary(summary, step)
    print('end')