#!/usr/bin/env python
# -*- coding:utf-8 -*-

"""
@version: ??
@author: happiness
@license: Apache Licence 
@contact: happiness_ws@163.com
@software: PyCharm
@file: nn_for_mnist.py
@time: 2017/11/22 11:03
Recognize the MNIST handwritten-digit dataset with a feed-forward neural network.

"""

import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data


class NeuralNet:
    """One-hidden-layer fully-connected classifier for MNIST (TensorFlow 1.x).

    Builds the graph, trains with SGD under an exponentially decayed learning
    rate, maintains an exponential moving average of the trainable variables,
    and prints validation accuracy every 1000 steps plus final test accuracy.
    """

    def __init__(self, input_tensor, input_size, hidden_size, output_size,
                 learning_rate, mini_batch, trailing_steps):
        # input_tensor: MNIST dataset object exposing .train/.validation/.test
        # splits with .images/.labels and train.next_batch() — TODO confirm
        # against the caller.
        self.input_tensor = input_tensor
        self.input_size = input_size          # number of input features (e.g. 28*28)
        self.hidden_size = hidden_size        # hidden-layer width
        self.output_size = output_size        # number of classes
        self.learning_rate = learning_rate    # initial learning rate (decayed below)
        self.mini_batch = mini_batch          # training batch size
        self.trailing_steps = trailing_steps  # total number of training steps

    def trail(self, is_trail=True):
        """Build the graph, train the network, and evaluate on the test set.

        Args:
            is_trail: when True, input dropout (keep_prob=0.5) is applied
                during training steps. Evaluation always uses keep_prob=1.0.
        """
        x = tf.placeholder(tf.float32, shape=[None, self.input_size], name="input-x")
        label = tf.placeholder(tf.float32, shape=[None, self.output_size], name="output-y")

        # BUG FIX: the original rebound `x = tf.nn.dropout(x, 0.5)` and then
        # used that same tensor as the feed_dict key, so feeding overrode the
        # dropout op entirely (dropout never ran) — and had it run, it would
        # also have perturbed validation/test inputs. A keep_prob placeholder
        # with default 1.0 lets one graph serve both modes correctly.
        keep_prob = tf.placeholder_with_default(1.0, shape=(), name="keep-prob")
        net_in = tf.nn.dropout(x, keep_prob=keep_prob)

        def get_weight_bias(input_size, out_size, lambda_loss=0.001,
                            weight_name="weight", bias_name="bias"):
            # Create one layer's parameters and register the weight's L2
            # penalty in the "losses" collection (summed into the total loss).
            # BUG FIX: `name=` was previously passed to tf.truncated_normal
            # (naming only the initializer op); it belongs on tf.Variable.
            weight = tf.Variable(
                tf.truncated_normal(shape=[input_size, out_size], stddev=0.1,
                                    dtype=tf.float32),
                name=weight_name)
            bias = tf.Variable(tf.constant(0.1, shape=[out_size]), name=bias_name)
            tf.add_to_collection("losses",
                                 tf.contrib.layers.l2_regularizer(lambda_loss)(weight))
            return weight, bias

        weight_hidden, bias_hidden = get_weight_bias(
            self.input_size, self.hidden_size,
            weight_name="weight-hidden", bias_name="bias_hidden")
        weight_output, bias_output = get_weight_bias(
            self.hidden_size, self.output_size,
            weight_name="weight_output", bias_name="bias_output")

        def inference(in_data, weight1, bias1, weight2, bias2):
            # One ReLU hidden layer; returns raw logits (softmax is folded
            # into the loss op below).
            layer1 = tf.nn.relu(tf.matmul(in_data, weight1) + bias1)
            return tf.matmul(layer1, weight2) + bias2

        y_ = inference(net_in, weight_hidden, bias_hidden, weight_output, bias_output)
        # Labels arrive one-hot; argmax converts them to the class indices the
        # sparse cross-entropy op expects.
        loss_cross = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=tf.argmax(label, 1), logits=y_)

        global_step = tf.Variable(0, trainable=False)

        step_learning_rate = tf.train.exponential_decay(
            self.learning_rate, global_step=global_step, decay_steps=100,
            decay_rate=0.95, staircase=False, name="step_learning_rate")

        loss_mean = tf.reduce_mean(loss_cross)
        tf.add_to_collection("losses", loss_mean)
        # Total loss = mean cross-entropy + all registered L2 penalties.
        loss = tf.add_n(tf.get_collection("losses"))

        # Shadow every trainable variable with an exponential moving average.
        var_average = tf.train.ExponentialMovingAverage(0.999, global_step)
        var_average_op = var_average.apply(tf.trainable_variables())

        trail_step_op = tf.train.GradientDescentOptimizer(step_learning_rate).minimize(
            loss=loss, global_step=global_step)

        # Single op that runs the SGD step and the average update together.
        with tf.control_dependencies([var_average_op, trail_step_op]):
            trail_op = tf.no_op(name="trail")

        prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(label, 1))
        acc = tf.reduce_mean(tf.cast(prediction, dtype=tf.float32))

        config = tf.ConfigProto(allow_soft_placement=True)
        # BUG FIX: a separate tf.GPUOptions object was built but never attached
        # to `config`, so the memory fraction was silently ignored; set it on
        # config.gpu_options directly.
        config.gpu_options.per_process_gpu_memory_fraction = 0.7
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            tf.global_variables_initializer().run()
            validate_feed = {x: self.input_tensor.validation.images,
                             label: self.input_tensor.validation.labels}
            test_feed = {x: self.input_tensor.test.images,
                         label: self.input_tensor.test.labels}
            # Dropout is active only for training batches (and only if asked).
            train_keep_prob = 0.5 if is_trail else 1.0
            for i in range(self.trailing_steps):
                if i % 1000 == 0:
                    print("after %d, acc : %g" % (i, sess.run(acc, feed_dict=validate_feed)))

                xs, ys = self.input_tensor.train.next_batch(self.mini_batch)
                sess.run(trail_op,
                         feed_dict={x: xs, label: ys, keep_prob: train_keep_prob})

            print("finish acc : %g" % (sess.run(acc, feed_dict=test_feed)))


if __name__ == '__main__':
    # Download/load MNIST into the working directory with one-hot labels.
    mnist_data = input_data.read_data_sets("./", one_hot=True)

    # 784 inputs -> 500 hidden units -> 10 classes;
    # learning rate 0.1, batch size 100, 10000 training steps.
    network = NeuralNet(mnist_data, 28 * 28, 500, 10, 0.1, 100, 10000)
    network.trail()
