# coding: utf-8

# No doubt this is a very crude model with unsatisfying performance — it only
# reaches about 92% accuracy. The exercise: use what you have learned to
# optimize this model to 98%+ accuracy.
# Hints:
# - multiple hidden layers
# - activation functions
# - regularization
# - weight initialization
# - explore the hyperparameters:
#   - number of hidden-layer neurons
#   - learning rate
#   - regularization penalty factor
#   - ideally log loss/accuracy every few steps, so tuning is informed by data

"""A very simple MNIST classifier.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/beginners
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

FLAGS = None


def getBestAccuracy(HIDDENS=None, learning_rate=0.3, scale=0.1, INPUT_SIZE=784, OUTPUT_SIZE=10, mnist=None):
    """Build, train and evaluate a small MLP MNIST classifier.

    Architecture: INPUT_SIZE -> HIDDENS[0] -> ... -> HIDDENS[-1] -> OUTPUT_SIZE,
    with ReLU on every hidden layer, a linear output layer (softmax is applied
    inside the loss), softmax cross-entropy plus L2 regularization on all
    weight matrices, trained with plain SGD.

    Args:
        HIDDENS: list of hidden-layer sizes; None/empty means plain logistic
            regression (no hidden layers).
        learning_rate: SGD learning rate.
        scale: L2 regularization penalty factor.
        INPUT_SIZE: number of input features (784 = 28x28 MNIST pixels).
        OUTPUT_SIZE: number of classes.
        mnist: dataset object exposing train.next_batch(n) and
            test.images / test.labels (as returned by input_data.read_data_sets).

    Returns:
        The best test-set accuracy observed across up to 80 training epochs
        (training stops early once accuracy exceeds 0.98).
    """
    if HIDDENS is None:  # avoid a shared mutable default argument
        HIDDENS = []

    # Start from a clean graph. Without this, repeated calls (as in the
    # hyperparameter search in __main__) keep piling ops onto one default
    # graph, leaking memory and slowing every subsequent call.
    tf.reset_default_graph()

    # Placeholders for a batch of images and their one-hot labels.
    x = tf.placeholder(tf.float32, [None, INPUT_SIZE])
    y = tf.placeholder(tf.float32, [None, OUTPUT_SIZE])

    # Hidden layers: small random init for weights, zeros for biases.
    # ReLU on every hidden layer — the output layer stays linear because
    # softmax_cross_entropy_with_logits applies its own softmax. (The original
    # skipped the activation on the last hidden layer, stacking two linear
    # layers back to back, which wastes that layer's capacity.)
    hidden_ws = []
    layer = x
    layer_size = INPUT_SIZE
    for hide_size in HIDDENS:
        w = tf.Variable(tf.random_normal([layer_size, hide_size], stddev=0.1))
        b_hidden = tf.Variable(tf.zeros([hide_size]))
        layer = tf.nn.relu(tf.matmul(layer, w) + b_hidden)
        hidden_ws.append(w)
        layer_size = hide_size

    # Output layer producing unnormalized class scores (logits).
    W = tf.Variable(tf.zeros([layer_size, OUTPUT_SIZE]))
    b = tf.Variable(tf.zeros([OUTPUT_SIZE]))
    logits = tf.matmul(layer, W) + b

    # L2-regularize every weight matrix (biases excluded, as is conventional)
    # to limit overfitting; strength is controlled by `scale`.
    for w in hidden_ws:
        tf.add_to_collection(tf.GraphKeys.WEIGHTS, w)
    tf.add_to_collection(tf.GraphKeys.WEIGHTS, W)
    regularizer = tf.contrib.layers.l2_regularizer(scale=scale)
    reg_term = tf.contrib.layers.apply_regularization(regularizer)
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits)) + reg_term

    # Plain gradient descent on the regularized loss.
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)

    # Build evaluation ops ONCE, outside the training loop — the original
    # re-created them every epoch, growing the graph with each iteration.
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    best = 0.0
    # `with` ensures the session (and its resources) is always released.
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for _epoch in range(80):
            # One "epoch": 500 mini-batches of 100 examples.
            for _ in range(500):
                batch_xs, batch_ys = mnist.train.next_batch(100)
                sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})

            # Evaluate on the full test set after each epoch.
            accuracy_value = sess.run(accuracy, feed_dict={x: mnist.test.images,
                                                           y: mnist.test.labels})
            if accuracy_value > best:
                best = accuracy_value
            if best > 0.98:  # early stop once the target accuracy is reached
                break

    print(HIDDENS, learning_rate, scale, best)
    return best


def _grid_search(label, candidates, evaluate, best_pair):
    """Evaluate each candidate with `evaluate`, keeping (candidate, accuracy)
    for the best one; prints whenever the best improves. `best_pair` seeds the
    accuracy threshold to beat."""
    for cand in candidates:
        acc = evaluate(cand)
        if acc > best_pair[1]:
            print("best %s accuracy" % label, cand, acc)
            best_pair = (cand, acc)
    return best_pair


if __name__ == '__main__':
    # read_data_sets loads MNIST from data_dir, downloading the files first
    # if they are not already present.
    data_dir = '/tmp/tensorflow/mnist/input_data'
    mnist = input_data.read_data_sets(data_dir, one_hot=True)

    # Greedy coordinate-wise hyperparameter search: tune one knob at a time,
    # carrying the best value of each earlier knob forward.

    # 1) Learning rate, with fixed architecture and regularization.
    best_lr_pair = _grid_search(
        "lr",
        np.linspace(0.1, 0.9, 5),
        lambda lr: getBestAccuracy(HIDDENS=[150, 30], learning_rate=lr,
                                   scale=1.0 / 5000, mnist=mnist),
        (0, 0))

    # 2) L2 penalty factor, using the best learning rate found above.
    best_scale_pair = _grid_search(
        "scale",
        np.linspace(1.0 / 1000, 1.0 / 50000, 5),
        lambda sc: getBestAccuracy(HIDDENS=[150, 30], learning_rate=best_lr_pair[0],
                                   scale=sc, mnist=mnist),
        (1.0 / 5000, best_lr_pair[1]))

    # 3) Second hidden layer size, with the first layer fixed at 150.
    hiddens = [[150, 50], [150, 30], [150, 15]]
    best_hidden_pair = _grid_search(
        "hidden",
        hiddens,
        lambda hd: getBestAccuracy(HIDDENS=hd, learning_rate=best_lr_pair[0],
                                   scale=best_scale_pair[0], mnist=mnist),
        (hiddens[0], best_scale_pair[1]))

    # 4) First hidden layer size, keeping the best second layer found in (3).
    second = best_hidden_pair[0][1]
    best_hidden_pair = _grid_search(
        "hidden",
        [[300, second], [150, second], [100, second]],
        lambda hd: getBestAccuracy(HIDDENS=hd, learning_rate=best_lr_pair[0],
                                   scale=best_scale_pair[0], mnist=mnist),
        best_hidden_pair)

    print("best params are : ", best_lr_pair, best_scale_pair, best_hidden_pair)