#!/usr/bin/env python
# -*- coding:utf-8 -*-

"""
@version: ??
@author: happiness
@license: Apache Licence 
@contact: happiness_ws@163.com
@software: PyCharm
@file: nn_in_tensorflow.py
@time: 2017/11/20 14:51
@des: implement a feed-forward neural network with TensorFlow
"""
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data

class NeuralNet():
    """One-hidden-layer fully-connected classifier trained with TensorFlow 1.x.

    Builds a 784 -> hidden -> 10 style network (sizes are configurable),
    trains it with plain gradient descent on mini-batches, and reports
    validation accuracy during training and test accuracy at the end.
    """

    def __init__(self, dataSet, input_size, hidden_size, output_size, mini_batch, learning_rate, training_steps=10000):
        '''
        Store the network's hyper-parameters; no graph is built here.
        :param dataSet: dataset exposing .train/.validation/.test splits
                        (e.g. the object returned by input_data.read_data_sets)
        :param input_size: number of input units
        :param hidden_size: number of hidden units
        :param output_size: number of output units (classes)
        :param mini_batch: mini-batch size per training step
        :param learning_rate: gradient-descent learning rate
        :param training_steps: total number of training iterations
        '''
        self.input_size = input_size
        self.output_size = output_size
        self.hidden_size = hidden_size
        self.learning_rate = learning_rate
        self.mini_batch = mini_batch
        self.dataSet = dataSet
        self.training_steps = training_steps

    def inference(self, input_tensor, weight_hidden, bias_hidden, weight_output, bias_output):
        '''
        Forward pass: one ReLU hidden layer followed by a linear output layer.
        :param input_tensor: input batch, shape [batch, input_size]
        :param weight_hidden: hidden-layer weights, [input_size, hidden_size]
        :param bias_hidden: hidden-layer bias, [hidden_size]
        :param weight_output: output-layer weights, [hidden_size, output_size]
        :param bias_output: output-layer bias, [output_size]
        :return: unnormalized logits, shape [batch, output_size]
        '''
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weight_hidden) + bias_hidden)
        return tf.matmul(layer1, weight_output) + bias_output

    def train(self):
        '''Build the graph, run the training loop, and print accuracies.'''

        # Placeholders for a batch of images and their one-hot labels.
        input_x = tf.placeholder(tf.float32, shape=[None, self.input_size], name="input-x")
        input_y = tf.placeholder(tf.float32, shape=[None, self.output_size], name='input-y')

        # Hidden-layer weights and bias.
        weight_hidden = tf.Variable(
            initial_value=tf.truncated_normal(shape=[self.input_size, self.hidden_size], stddev=0.1, dtype=tf.float32),
            name="weight_hidden")
        bias_hidden = tf.Variable(tf.constant(0.1, shape=[self.hidden_size], dtype=tf.float32), name="bias_hidden")

        # Output-layer weights and bias.  bias_output uses tf.Variable like the
        # other variables (the original tf.get_variable would raise a reuse
        # error if train() were called twice in the same default graph).
        weight_output = tf.Variable(
            initial_value=tf.truncated_normal(shape=[self.hidden_size, self.output_size], stddev=0.1, dtype=tf.float32),
            name="weight_output")
        bias_output = tf.Variable(tf.constant(0.1, shape=[self.output_size], dtype=tf.float32),
                                  name="bias_output")

        logits = self.inference(input_x, weight_hidden, bias_hidden, weight_output,
                                bias_output)

        # Cross-entropy loss; labels arrive one-hot, so argmax recovers the
        # class index expected by the sparse variant.
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=tf.argmax(input_y, 1),
                                                              name="loss")
        loss_mean = tf.reduce_mean(loss, name="loss_ma")

        # Plain SGD on the mean loss.
        train_op = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(loss=loss_mean)

        # Fraction of predictions matching the true class.
        prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(input_y, 1))
        accuracy = tf.reduce_mean(tf.cast(prediction, dtype=tf.float32))

        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            validate_feed = {input_x: self.dataSet.validation.images, input_y: self.dataSet.validation.labels}
            test_feed = {input_x: self.dataSet.test.images, input_y: self.dataSet.test.labels}

            for i in range(self.training_steps):
                # Report validation accuracy every 1000 steps.
                if i % 1000 == 0:
                    validate_acc = sess.run(accuracy, feed_dict=validate_feed)
                    print(
                        "After %d training step(s), validation accuracy using average model is %g " % (i, validate_acc))

                xs, ys = self.dataSet.train.next_batch(self.mini_batch)
                sess.run(train_op, feed_dict={input_x: xs, input_y: ys})

            # Final evaluation on the held-out test set (the original message
            # wrongly said "validation" here).
            test_acc = sess.run(accuracy, feed_dict=test_feed)
            print("After %d training step(s), test accuracy using average model is %g " % (
                self.training_steps, test_acc))


if __name__ == '__main__':
    # Load MNIST with one-hot labels, then train a 784-500-10 network
    # with batch size 100 and learning rate 1e-4.
    mnist = input_data.read_data_sets('./', one_hot=True)
    network = NeuralNet(mnist, 28 * 28, 500, 10, 100, 0.0001)
    network.train()
