
# -*- coding:utf-8 -*-

# @Time    : 2018-11-12 23:22

# @Author  : Swing



import argparse
import sys

from tensorflow.examples.tutorials.mnist import input_data

import tensorflow as tf

# FLAGS = None



# Load the MNIST dataset (downloaded/cached under data/) with one-hot labels.
data_dir = 'data/'
mnist = input_data.read_data_sets(data_dir, one_hot=True)

# Mini-batch configuration: number of full batches per epoch.
batch_size = 100
n_batch = mnist.train.num_examples // batch_size

# Initial learning rate (the training loop below multiplies it by 0.9 each epoch).
lr = 0.9

# Model input: flattened 28x28 grayscale images.
x = tf.placeholder(tf.float32, [None, 784], name='x')

# --- Network definition ---

# Hidden layer: 784 -> 200, ReLU activation. Small positive bias init and
# truncated-normal weights (stddev 0.1).
w1 = tf.Variable(tf.truncated_normal([784, 200], stddev=0.1), name='w1')
b1 = tf.Variable(tf.zeros([200]) + 0.1, name='b1')
l1 = tf.nn.relu(tf.matmul(x, w1) + b1)

# Output layer: 200 -> 10 raw logits (softmax is applied inside the loss op,
# not here). A commented-out second hidden layer was removed as dead code.
w = tf.Variable(tf.truncated_normal([200, 10], stddev=0.1), name='w')
b = tf.Variable(tf.zeros([10]) + 0.1, name='b')
yp = tf.matmul(l1, w) + b  # predicted logits

# Ground-truth labels, one-hot encoded.
yt = tf.placeholder(tf.float32, [None, 10], name='yt')


cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=yt, logits=yp)
) + tf.contrib.layers.l2_regularizer(0.0001)(w)

# + tf.contrib.layers.l2_regularizer(0.00001)(w)

# 生成训练步骤
train_step = tf.train.GradientDescentOptimizer(lr).minimize(cross_entropy)

# --- Training ---

# Build the evaluation ops ONCE, outside the epoch loop. The original created
# new graph nodes every epoch, growing the graph (and slowing each iteration)
# for the lifetime of the process.
correct_prediction = tf.equal(tf.argmax(yp, 1), tf.argmax(yt, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Context manager ensures the session is closed on exit (the original leaked it).
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for epoch in range(101):
        for batch in range(n_batch):
            # Use the configured batch_size (the original hard-coded 100).
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, yt: batch_ys})

        # Evaluate on the full test set after each epoch.
        test_acc = sess.run(
            accuracy,
            feed_dict={x: mnist.test.images, yt: mnist.test.labels},
        )
        print('epoch: ', epoch, 'lr: ', lr, 'accuracy: ', test_acc)

        # NOTE(review): this only decays the Python variable that gets printed;
        # the GradientDescentOptimizer captured lr=0.9 at graph-build time, so
        # the effective learning rate never changes during training.
        lr = lr * 0.9
