# coding: utf-8

# 毫无疑问，这个模型是一个非常简陋，性能也不理想的模型。目前只能达到92%左右的准确率。
# 接下来，希望大家利用现有的知识，将这个模型优化至98%以上的准确率。
# Hint：
# - 多隐层
# - 激活函数
# - 正则化
# - 初始化
# - 摸索一下各个超参数
#   - 隐层神经元数量
#   - 学习率
#   - 正则化惩罚因子
#   - 最好每隔几个step就对loss、accuracy等等进行一次输出，这样才能有根据地进行调整

"""A very simple MNIST classifier.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/beginners
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import sys

from tensorflow.examples.tutorials.mnist import input_data

import tensorflow as tf

FLAGS = None

# Import data: the input_data API reads the MNIST files from data_dir,
# downloading them into that directory first if they are not present.
data_dir = '/tmp/tensorflow/mnist/input_data'
mnist = input_data.read_data_sets(data_dir, one_hot=True)

# Model inputs: flattened 28x28 grayscale images and one-hot labels
# over the 10 digit classes. The leading None dimension is the batch size.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Single linear layer. Zero initialization is acceptable here because with
# no hidden layer there is no neuron symmetry to break.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Unnormalized per-class scores.
logits = tf.matmul(x, W) + b

# BUG FIX: the 10 digit classes are mutually exclusive, so the correct loss
# is softmax cross-entropy. The original sigmoid cross-entropy treats each
# class as an independent binary problem, which mismatches the one-hot
# labels and the argmax prediction used below.
cross_entropy = tf.losses.softmax_cross_entropy(onehot_labels=y, logits=logits)

# Plain gradient descent on the mean cross-entropy; 0.3 is the learning rate.
train_step = tf.train.GradientDescentOptimizer(0.3).minimize(cross_entropy)

# Accuracy: fraction of examples whose highest-scoring class matches the
# label. argmax over logits equals argmax over softmax probabilities, so no
# explicit activation is needed for prediction.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Train, periodically reporting loss and batch accuracy so hyper-parameters
# can be tuned with feedback (see the hints at the top of the file).
# The session is closed in `finally` so the resources are always released.
sess = tf.Session()
sess.run(tf.global_variables_initializer())
try:
    for step in range(3000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        if step % 500 == 0:
            # Fetch loss/accuracy only on logging steps; otherwise run just
            # the training op to avoid evaluating unused graph nodes.
            _, loss_val, acc_val = sess.run(
                [train_step, cross_entropy, accuracy],
                feed_dict={x: batch_xs, y: batch_ys})
            print('step %d: loss=%.4f batch accuracy=%.4f'
                  % (step, loss_val, acc_val))
        else:
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})

    # Evaluate the trained model on the held-out test set.
    print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                        y: mnist.test.labels}))
finally:
    sess.close()


