# MNIST classification with mini-batch training

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# handwritten digits 0 to 9, with labels one-hot encoded
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
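# one_hot=True turns each label into a length-10 indicator vector,
# e.g. the digit 3 becomes [0,0,0,1,0,0,0,0,0,0]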


# add one fully connected layer and return its output
def add_layer(inputs, in_size, out_size, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # biases are initialized to a small positive value rather than 0
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
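
# add_layer could also stack a hidden layer before the output, e.g.
# (hypothetical, not used in this script; 100 hidden units is an arbitrary choice):
#   hidden = add_layer(xs, 784, 100, activation_function=tf.nn.relu)
#   prediction = add_layer(hidden, 100, 10, activation_function=tf.nn.softmax)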

def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})
    # compare the predicted digit (argmax of the softmax output)
    # with the true digit (argmax of the one-hot label)
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})
    return result
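
# note: compute_accuracy builds fresh equal/reduce_mean ops on every call;
# that is fine for this short demo, but a longer-running script would define
# the accuracy ops once, outside the function, so the graph does not keep growing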
    
# define placeholders for inputs to the network
xs = tf.placeholder(tf.float32, [None, 784])  # each image is 28*28 = 784 pixels
ys = tf.placeholder(tf.float32, [None, 10])   # one-hot label over the 10 digits
# None leaves the batch size undefined, so any number of samples can be fed in


# add the output layer
prediction = add_layer(xs, 784, 10, activation_function=tf.nn.softmax)
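# softmax turns the 10 raw scores per image into a probability distribution:
#   softmax(z)_i = exp(z_i) / sum_j exp(z_j)
# so the predicted digit is simply the index with the highest probability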

# the error between prediction and real data: cross-entropy loss
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction), axis=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
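# for one-hot labels this is the standard cross-entropy H(y, p) = -sum_i y_i * log(p_i);
# tf.log(0) yields -inf, so a common safeguard (not applied in the original) is
# to clip the input first, e.g. tf.log(tf.clip_by_value(prediction, 1e-10, 1.0))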

sess = tf.Session()
# initialize_all_variables is deprecated; global_variables_initializer is the current name
sess.run(tf.global_variables_initializer())


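# train with mini-batch gradient descent: each step uses 100 random training
# images instead of the full 55,000-image training set, which makes each
# update much cheaper while still converging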
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})
    if i % 50 == 0:
        # report test-set accuracy every 50 training steps
        print(compute_accuracy(mnist.test.images, mnist.test.labels))

