from __future__ import print_function
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
# MNIST handwritten-digit data (10 classes, digits 0-9)
mnist = input_data.read_data_sets('F:/chenjing/tensorflow/MNIST_data/', one_hot=True)

# If the variables are saved, restoring them with a Saver requires the same dtype and shape.
def new_weights(shape):
    """Return a trainable weight Variable of the given shape.

    Initialized from a truncated normal distribution (stddev 0.05, float32).
    """
    initial = tf.truncated_normal(shape, dtype=tf.float32, stddev=0.05)
    return tf.Variable(initial)

def new_biases(length):
    """Return a trainable bias Variable of shape [length], initialized to 0.05."""
    initial = tf.constant(0.05, dtype=tf.float32, shape=[length])
    return tf.Variable(initial)

def new_conv_layer(input,          # output of the previous layer (4-D tensor)
                   weights_shape,  # shape of the conv filter weights
                   biases_length,  # number of biases (= number of output channels)
                   kernel_strides,
                   pooling_strides,
                   use_pooling=True
                   ):
    """Build one convolutional layer: conv2d + bias + ReLU, optionally 2x2 max-pool.

    The input is a 4-D tensor:
        [num_images, image_height, image_width, num_channels]
    The output is likewise 4-D:
        [num_images, image_height, image_width, out_channels]
    where height and width are halved when 2x2 pooling with stride 2 is used.

    Returns a (layer, weights, biases) tuple.
    """
    with tf.name_scope('conv_layer'):
        with tf.name_scope('weights'):
            w = new_weights(weights_shape)
            tf.summary.histogram('conv_layer/weights', w)
        with tf.name_scope('biases'):
            b = new_biases(biases_length)
            tf.summary.histogram('conv_layer/biases', b)

    # Stride positions 0 and 3 must be 1 (batch and channel dimensions);
    # positions 1 and 2 set the vertical/horizontal step.
    # padding='SAME' zero-pads so the conv preserves the spatial size.
    with tf.name_scope('conv2d'):
        layer = tf.nn.relu(
            tf.nn.conv2d(input=input, filter=w,
                         strides=kernel_strides, padding="SAME") + b)

    if use_pooling:
        # 2x2 max-pooling: take the maximum of each 2x2 window, then move
        # by the given stride to the next window.
        with tf.name_scope('pooling'):
            layer = tf.nn.max_pool(layer, ksize=[1, 2, 2, 1],
                                   strides=pooling_strides, padding='SAME')

    return layer, w, b

def flatten_layer(layer):
    """Flatten a 4-D conv output to 2-D for a fully-connected layer.

    Input shape:  [num_images, img_height, img_width, num_channels]
    Output shape: [num_images, img_height * img_width * num_channels]

    Returns (layer_flat, num_features).
    """
    with tf.name_scope('flatten_layer'):
        # num_features = img_height * img_width * num_channels,
        # computed from the tensor's static shape.
        shape = layer.get_shape()
        num_features = shape[1:4].num_elements()

        # -1 leaves the first (batch) dimension unchanged.
        flat = tf.reshape(layer, [-1, num_features])

    return flat, num_features

def add_layer(inputs, in_size, out_size, activation_function=None, keep_prob=1):
    """Build a fully-connected layer and return its output.

    Computes activation(inputs @ W + b) (identity when activation_function is
    None), then applies dropout with the given keep probability (a no-op when
    keep_prob == 1).
    """
    with tf.name_scope('full_layer'):
        with tf.name_scope('weights'):
            Weights = new_weights(shape=[in_size, out_size])
            tf.summary.histogram('full_layer/weights', Weights)
        with tf.name_scope('biases'):
            biases = new_biases(length=out_size)
            tf.summary.histogram('full_layer/biases', biases)
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.matmul(inputs, Weights) + biases

    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)

    with tf.name_scope('outputs_after_dropout'):
        outputs = tf.nn.dropout(outputs, keep_prob)
    tf.summary.histogram('full_layer/outputs', outputs)
    return outputs
    
# Placeholders reserve graph positions for the inputs and outputs, which change
# every batch — TensorFlow 1.x builds the graph first and feeds values later.
img_size = 28                        # image height and width
img_size_flat = img_size * img_size  # length of a flattened image vector
img_shape = (img_size, img_size)     # 2-D image shape

num_channels = 1                     # grayscale input, single channel
num_classes = 10                     # number of target classes

with tf.name_scope('inputs'):
    x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')          # raw input
    y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')  # one-hot labels
x_image = tf.reshape(x, [-1, img_size, img_size, num_channels])  # back to 2-D images
y_true_cls = tf.argmax(y_true, axis=1)  # integer class labels
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability (fed per run)

## conv1 ##
w1_shape = [5, 5, 1, 32]
k1_s = [1, 1, 1, 1]
p1_s = [1, 2, 2, 1]
layer_conv1, weights_conv1, biases1 = new_conv_layer(
    input=x_image, weights_shape=w1_shape, biases_length=32,
    kernel_strides=k1_s, pooling_strides=p1_s, use_pooling=True)

## conv2 ##
w2_shape = [5, 5, 32, 64]
k2_s = [1, 1, 1, 1]
p2_s = [1, 2, 2, 1]
layer_conv2, weights_conv2, biases2 = new_conv_layer(
    input=layer_conv1, weights_shape=w2_shape, biases_length=64,
    kernel_strides=k2_s, pooling_strides=p2_s, use_pooling=True)

# Flatten the second conv layer's 4-D output into a 2-D tensor so it can feed
# the fully-connected layers.
layer_full_inputs, in_size = flatten_layer(layer_conv2)

## fully-connected layer 1 ##
# FIX: the keep_prob placeholder was previously unused — the call passed the
# constant 1, so dropout never fired and the keep_prob: 0.5 training feed had
# no effect. Wire the placeholder through so dropout actually applies.
layer_full1 = add_layer(layer_full_inputs, in_size, 1024, tf.nn.relu, keep_prob)

## fully-connected layer 2 (logits) — no dropout on the output layer ##
layer_full2 = add_layer(layer_full1, 1024, 10, None, 1)

# Predicted class: the logits are a rough score per class; softmax normalizes
# them into a probability distribution.
with tf.name_scope('softmax'):
    y_pred = tf.nn.softmax(layer_full2)
    y_pred_cls = tf.argmax(y_pred, axis=1)

# Cost: softmax cross-entropy computed from the raw logits (not y_pred —
# the op applies its own softmax internally for numerical stability).
with tf.name_scope('cross_entropy'):
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_full2, labels=y_true)
with tf.name_scope('cost'):
    cost = tf.reduce_mean(cross_entropy)
    tf.summary.scalar('cost', cost)

# Optimizer: Adam, an improved variant of gradient descent.
with tf.name_scope('train'):
    train_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)
saver = tf.train.Saver()

# Performance metric: mean of per-example prediction correctness.
with tf.name_scope('accuracy'):
    correct_prediction = tf.equal(y_pred_cls, y_true_cls)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

sess = tf.Session()
merged = tf.summary.merge_all()
# summary writers go in here
train_writer = tf.summary.FileWriter("E:/PyCharm/PyProjects/log/train", sess.graph)
test_writer = tf.summary.FileWriter("E:/PyCharm/PyProjects/log/test", sess.graph)
# direct to the local dir and run this in terminal:
# $ tensorboard --logdir log

sess.run(tf.global_variables_initializer())
for i in range(100):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    # Train with dropout enabled (keep 50% of activations).
    sess.run(train_optimizer,
             feed_dict={x: batch_xs, y_true: batch_ys, keep_prob: 0.5})
    if i % 10 == 0:
        # Summaries must be run explicitly. FIX: now that keep_prob is wired
        # into the graph, every run must feed it; evaluation disables dropout
        # with keep_prob=1.0.
        train_result = sess.run(merged, feed_dict={x: mnist.train.images[:1000],
                                                   y_true: mnist.train.labels[:1000],
                                                   keep_prob: 1.0})
        test_result = sess.run(merged, feed_dict={x: mnist.test.images[:1000],
                                                  y_true: mnist.test.labels[:1000],
                                                  keep_prob: 1.0})
        train_writer.add_summary(train_result, i)
        test_writer.add_summary(test_result, i)
        print(sess.run(accuracy, feed_dict={x: mnist.test.images[:1000],
                                            y_true: mnist.test.labels[:1000],
                                            keep_prob: 1.0}))

# To save the trained parameters:
# save_path = saver.save(sess, "net_Variables/save_net.ckpt")
# Restoring loads only the variables — the graph must be rebuilt identically
# (same dtype/shape), and no variable init is needed before restore:
# saver.restore(sess, "net_Variables/save_net.ckpt")
sess.close()