# coding: utf-8

# This is undeniably a very crude model with unsatisfactory performance —
# it only reaches about 92% accuracy. The exercise is to apply what you
# have learned so far to optimize it to above 98% accuracy.
# Hints:
# - multiple hidden layers
# - activation functions
# - regularization
# - weight initialization
# - explore the hyperparameters:
#   - number of neurons per hidden layer
#   - learning rate
#   - regularization penalty factor
#   - print loss/accuracy every few steps so tuning is guided by evidence

"""A very simple MNIST classifier.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/beginners
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

FLAGS = None

# Import data, input_date api 会读取文件，如果不存在，会从网上下载然后写入到临时目录
data_dir = '/tmp/tensorflow/mnist/input_data'
mnist = input_data.read_data_sets(data_dir, one_hot=True)

INPUT_SIZE = 784
OUTPUT_SIZE = 10


def getBestAccuracy(HIDDENS=[], learning_rate=0.3, scale=0.1):
    # Create the model
    x = tf.placeholder(tf.float32, [None, INPUT_SIZE])
    y = tf.placeholder(tf.float32, [None, OUTPUT_SIZE])

    hidden_ws = []
    _input_size = INPUT_SIZE
    _tmp_sigmoid = None
    _input_data = x

    for index in range(len(HIDDENS)):
        hide_size = HIDDENS[index]

        hidden_w1 = tf.Variable(tf.random_normal([_input_size, hide_size], stddev=0.1))
        hidden_b1 = tf.Variable(tf.zeros([hide_size]))

        # 激活，最后一个到输出层的不要激活，走自动激活
        hidden_1 = tf.matmul(_input_data, hidden_w1) + hidden_b1
        if len(HIDDENS) != (index + 1):
            # layer_1 = tf.nn.relu(tf.add(tf.matmul(_X, _weights['w1']), _biases['b1']))  # 隐层
            hidden_1 = tf.nn.relu(hidden_1)
            # hidden_1 = tf.nn.sigmoid(hidden_1)

        hidden_ws.append(hidden_w1)
        _input_data = hidden_1
        _tmp_sigmoid = hidden_1
        _input_size = hide_size

    W = tf.Variable(tf.zeros([_input_size, OUTPUT_SIZE]))
    b = tf.Variable(tf.zeros([OUTPUT_SIZE]))

    # logits
    logits = tf.matmul(_input_data, W) + b

    # 激活函数
    output = tf.nn.sigmoid(logits)

    # 交叉熵
    # cross_entropy = tf.losses.sigmoid_cross_entropy(multi_class_labels=y, logits=logits)
    # TODO reg
    for w in hidden_ws:
        tf.add_to_collection(tf.GraphKeys.WEIGHTS, w)
    tf.add_to_collection(tf.GraphKeys.WEIGHTS, W)

    regularizer = tf.contrib.layers.l2_regularizer(scale=scale)
    reg_term = tf.contrib.layers.apply_regularization(regularizer)
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits)) + reg_term

    # 梯度下降
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
    # print(train_step)

    # 初始化
    sess = tf.Session()
    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    # Train
    best = 0
    for index in range(80):
        for _ in range(500):
            batch_xs, batch_ys = mnist.train.next_batch(100)
            sess.run([cross_entropy, train_step], feed_dict={x: batch_xs, y: batch_ys})

        # Test trained model
        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        accuracy_value = sess.run(accuracy, feed_dict={x: mnist.test.images,
                                                       y: mnist.test.labels})
        # print(index, accuracy_value)

        if (accuracy_value > best):
            best = accuracy_value

        if best > 0.98:
            break

    print(HIDDENS, learning_rate, best)
    return best


# 0.9509
# getBestAccuracy(H1_SIZE=100,H2_SIZE=20,learning_rate=0.3)
# getBestAccuracy(H1_SIZE=50,H2_SIZE=20,learning_rate=0.3)
# getBestAccuracy(H1_SIZE=30,H2_SIZE=15,learning_rate=0.3)
# getBestAccuracy(H1_SIZE=20,H2_SIZE=15,learning_rate=0.3)
# getBestAccuracy(HIDDENS=[100, 20 , 15], learning_rate=0.05, scale=1.0/50000)
# getBestAccuracy(HIDDENS=[100, 20 , 15], learning_rate=0.1, scale=1.0/50000)
# getBestAccuracy(HIDDENS=[100, 20 , 15], learning_rate=0.3, scale=1.0/50000)
# getBestAccuracy(HIDDENS=[100, 20 , 15], learning_rate=0.5, scale=1.0/50000)
# getBestAccuracy(HIDDENS=[100, 20 , 15], learning_rate=0.8, scale=1.0/50000)
# [100, 20, 15] 0.05 0.9719
# [100, 20, 15] 0.1 0.9767
# [100, 20, 15] 0.3 0.9803
# [100, 20, 15] 0.5 0.979
# [100, 20, 15] 0.8 0.9703

# getBestAccuracy(HIDDENS=[100, 20 , 15], learning_rate=0.3, scale=1.0/5000)
# getBestAccuracy(HIDDENS=[100, 20 , 15], learning_rate=0.3, scale=1.0/10000)
# getBestAccuracy(HIDDENS=[100, 20 , 15], learning_rate=0.3, scale=1.0/50000)
# getBestAccuracy(HIDDENS=[100, 20 , 15], learning_rate=0.3, scale=1.0/100000)
# [100, 20, 15] 0.3 0.9809
# [100, 20, 15] 0.3 0.98
# [100, 20, 15] 0.3 0.9787
# [100, 20, 15] 0.3 0.9787

# getBestAccuracy(HIDDENS=[100, 20 , 15], learning_rate=0.3, scale=1.0/500)
# getBestAccuracy(HIDDENS=[100, 20 , 15], learning_rate=0.3, scale=1.0/1000)
# getBestAccuracy(HIDDENS=[100, 20 , 15], learning_rate=0.3, scale=1.0/5000)
# [100, 20, 15] 0.3 0.9772
# [100, 20, 15] 0.3 0.981
# [100, 20, 15] 0.3 0.9788

# getBestAccuracy(HIDDENS=[100, 15], learning_rate=0.3, scale=1.0/1000)
# getBestAccuracy(HIDDENS=[100, 20], learning_rate=0.3, scale=1.0/1000)
# getBestAccuracy(HIDDENS=[100, 30], learning_rate=0.3, scale=1.0/1000)
# [100, 15] 0.3 0.9805
# [100, 20] 0.3 0.9809
# [100, 30] 0.3 0.981
# getBestAccuracy(HIDDENS=[100, 80], learning_rate=0.3, scale=1.0/1000)
# getBestAccuracy(HIDDENS=[100, 20], learning_rate=0.3, scale=1.0/1000)
# getBestAccuracy(HIDDENS=[100, 15], learning_rate=0.3, scale=1.0/1000)
# getBestAccuracy(HIDDENS=[100, 30, 15], learning_rate=0.3, scale=1.0/1000)
# [100, 30] 0.3 0.9817
# [100, 20] 0.3 0.9809
# [100, 15] 0.3 0.98

# getBestAccuracy(HIDDENS=[150, 30], learning_rate=0.3, scale=1.0/1000)
getBestAccuracy(HIDDENS=[100, 30], learning_rate=0.3, scale=1.0/1000)
# getBestAccuracy(HIDDENS=[50, 30], learning_rate=0.3, scale=1.0/1000)