#!/usr/bin/env python
# encoding: utf-8

"""
@version: ??
@author: phpergao
@license: Apache License
@contact: happiness@163.com
@software: PyCharm Community Edition
@file: lenet_for_mnist.py
@time: 2017/12/11 10:40
"""

import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
import numpy as np

#   Flattened input size: one 28x28 grayscale MNIST image.
INPUT_SIZE = 28 * 28
#   Output size: number of classes (digits 0-9).
OUTPUT_SIZE = 10

#   Number of examples per mini-batch.
BATCH_SIZE = 1000

#   Total number of training iterations ("trail" is this file's spelling of "train").
TRAILING_STEPS = 6000


class LeNet:
    """LeNet-style convolutional network for MNIST digit classification.

    Architecture: conv(5x5, 6) -> maxpool(2x2) -> conv(5x5, 16) -> maxpool(2x2)
    -> fc(120, ReLU, dropout) -> fc(10 logits).

    Built on the TensorFlow 1.x graph API (tf.variable_scope / tf.Session).
    """

    def __init__(self, mnist):
        # mnist: dataset object from tensorflow.examples.tutorials.mnist,
        # exposing .train / .validation with next_batch() and num_examples.
        self.mnist = mnist

    def inference(self, input_tensor, is_trail, regularizer):
        """Build the forward graph and return the class logits.

        Args:
            input_tensor: float32 tensor of shape (batch, 28, 28, 1).
            is_trail: if True (training mode), apply dropout after fc1.
            regularizer: optional callable (e.g. an L2 regularizer) applied to
                the fully-connected weights; results are accumulated in the
                'loss' graph collection. May be None.

        Returns:
            float32 tensor of shape (batch, 10) with unscaled class logits.
        """
        #   First convolution: 5x5 kernel, 1 input channel -> 6 feature maps.
        with tf.variable_scope('layer_con1'):
            weight_con1 = tf.get_variable("weight_conv1", [5, 5, 1, 6], tf.float32,
                                          initializer=tf.truncated_normal_initializer(stddev=0.1))
            bias_con1 = tf.get_variable("bias_con1", shape=[6], dtype=tf.float32,
                                        initializer=tf.constant_initializer(0.0))
            conv1 = tf.nn.conv2d(input_tensor, weight_con1, strides=[1, 1, 1, 1], padding="SAME")
            conv1_out = tf.nn.relu(tf.nn.bias_add(conv1, bias_con1, name='conv1'), name='conv1_relu')

        #   First pooling: 2x2 max pool halves the spatial size (28x28 -> 14x14).
        with tf.variable_scope('layer_pool1'):
            pool1_out = tf.nn.max_pool(conv1_out, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

        #   Second convolution: 6 -> 16 feature maps.
        with tf.variable_scope('layer_conv2'):
            weight_conv2 = tf.get_variable("weight_conv2", shape=[5, 5, 6, 16], dtype=tf.float32,
                                           initializer=tf.truncated_normal_initializer(stddev=0.1))
            bias_conv2 = tf.get_variable('bias_conv2', shape=[16], dtype=tf.float32,
                                         initializer=tf.constant_initializer(0.0))

            conv2 = tf.nn.conv2d(pool1_out, weight_conv2, strides=[1, 1, 1, 1], padding='SAME')
            conv2_out = tf.nn.relu(tf.nn.bias_add(conv2, bias_conv2, name='conv2'), name='conv2_relu')

        #   Second pooling (14x14 -> 7x7), then flatten for the FC layers.
        with tf.variable_scope('layer_pool2'):
            pool2_out = tf.nn.max_pool(conv2_out, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME',
                                       name="pool2_out")
            shape = pool2_out.get_shape().as_list()

            # Nodes in the flattened feature map: height * width * channels.
            nodes = shape[1] * shape[2] * shape[3]

            reshape = tf.reshape(pool2_out, shape=[shape[0], nodes])

        #   Fully connected layer 1: nodes -> 120, with optional L2 regularization.
        with tf.variable_scope('fc1'):
            weight_fc1 = tf.get_variable('weight_fc1', shape=[nodes, 120], dtype=tf.float32,
                                         initializer=tf.truncated_normal_initializer(stddev=0.1))

            bias_fc1 = tf.get_variable('bias_fc1', shape=[120], dtype=tf.float32,
                                       initializer=tf.constant_initializer(0.1))

            if regularizer is not None:
                tf.add_to_collection('loss', regularizer(weight_fc1))

            fc1_out = tf.nn.relu(tf.matmul(reshape, weight_fc1) + bias_fc1)

            if is_trail:
                # BUG FIX: the dropout result was previously discarded, so
                # dropout never took effect during training.
                fc1_out = tf.nn.dropout(fc1_out, 0.5)

        #   Output layer: 120 -> 10 class logits (no activation; the loss
        #   applies softmax itself).
        with tf.variable_scope('fc2'):
            # BUG FIX: these variables were misleadingly named 'weight_fc1' /
            # 'bias_fc1' inside the fc2 scope.
            weight_fc2 = tf.get_variable('weight_fc2', shape=[120, 10], dtype=tf.float32,
                                         initializer=tf.truncated_normal_initializer(stddev=0.1))

            bias_fc2 = tf.get_variable('bias_fc2', shape=[10], dtype=tf.float32,
                                       initializer=tf.constant_initializer(0.1))
            if regularizer is not None:
                # BUG FIX: previously regularized weight_fc1 a second time
                # instead of weight_fc2.
                tf.add_to_collection('loss', regularizer(weight_fc2))

            logit = tf.matmul(fc1_out, weight_fc2) + bias_fc2

        return logit

    def trail(self):
        """Train for TRAILING_STEPS mini-batches and report validation accuracy.

        Builds the full training graph (exponentially decaying learning rate,
        exponential moving average of the trainable variables, L2-regularized
        softmax cross-entropy loss) and runs it in a fresh session.
        """
        input_x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 28, 28, 1], name='input_x')
        input_y = tf.placeholder(tf.float32, shape=[None, 10], name='input_y')

        #   L2 weight regularization; terms accumulate in the 'loss' collection.
        regularizer = tf.contrib.layers.l2_regularizer(0.0001)

        logit = self.inference(input_x, True, regularizer=regularizer)

        #   Global step counter, incremented by the optimizer; excluded from
        #   training so the moving average does not touch it.
        global_value = tf.Variable(initial_value=0, trainable=False)

        #   Learning rate decays by 0.9 once per epoch.
        exponential_learning_rate = tf.train.exponential_decay(0.1, global_step=global_value,
                                                               decay_steps=self.mnist.train.num_examples / BATCH_SIZE,
                                                               decay_rate=0.9)

        #   Shadow (moving-average) copies of all trainable variables.
        variable_average = tf.train.ExponentialMovingAverage(0.99, global_value)
        variable_average_op = variable_average.apply(tf.trainable_variables())

        #   Cross-entropy between logits and the integer class labels
        #   recovered from the one-hot targets.
        #   BUG FIX: tf.arg_max is deprecated; use tf.argmax (as at the
        #   accuracy computation below).
        loss_cross = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.argmax(input_y, 1), logits=logit)
        tf.add_to_collection('loss', tf.reduce_mean(loss_cross))

        #   Total loss = mean cross-entropy + all regularization terms.
        loss = tf.add_n(tf.get_collection('loss'))

        trail_step = tf.train.GradientDescentOptimizer(exponential_learning_rate).minimize(loss=loss,
                                                                                           global_step=global_value)

        #   Group the gradient step and the moving-average update into one op.
        with tf.control_dependencies([trail_step, variable_average_op]):
            trail_op = tf.no_op()

        #   Fraction of the batch where the predicted class matches the label.
        prediction = tf.equal(tf.argmax(logit, 1), tf.argmax(input_y, 1))
        acc = tf.reduce_mean(tf.cast(prediction, dtype=tf.float32))

        #   BUG FIX: gpu_options was previously constructed but never attached
        #   to the config, so the 0.7 memory fraction was silently ignored.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
        config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            tf.global_variables_initializer().run()

            for i in range(TRAILING_STEPS):
                x_trail, y_trail = self.mnist.train.next_batch(BATCH_SIZE)
                # Flat 784-vector batches must be reshaped to NHWC for conv2d.
                reshaped_x_trail = np.reshape(x_trail, (BATCH_SIZE, 28, 28, 1))
                _, loss_value, step = sess.run([trail_op, loss, global_value],
                                               feed_dict={input_x: reshaped_x_trail, input_y: y_trail})

                if i % 1000 == 0:
                    x_validation, y_validation = self.mnist.validation.next_batch(BATCH_SIZE)
                    reshaped_x_validation = np.reshape(x_validation, (BATCH_SIZE, 28, 28, 1))
                    # BUG FIX: this value is the accuracy, not the loss; the
                    # old code stored it in loss_value and printed "loss".
                    acc_value = sess.run(acc, feed_dict={input_x: reshaped_x_validation, input_y: y_validation})

                    print("After %d training step(s), accuracy on validation batch is %g." % (i, acc_value))


if __name__ == '__main__':
    # Load (downloading if necessary) the MNIST dataset from the current
    # directory with one-hot labels, then train the LeNet model on it.
    dataset = input_data.read_data_sets("./", one_hot=True)
    LeNet(dataset).trail()
