import tensorflow as tf
from tensorflow.examples.tutorials import mnist
'''
1. input data
2. build the model
	2.1 knowns (placeholders) and learnable parameters (variables)
	2.2 graph structure
	2.3 loss and optimizer
3. evaluate + predict
'''

# ---- Build the model graph ----
# Known inputs: images as NHWC tensors, labels as one-hot vectors
# over the 10 digit classes.
input_x = tf.placeholder(dtype=tf.float32, shape=[None, 28, 28, 1], name='input_x')
input_y = tf.placeholder(dtype=tf.float32, shape=[None, 10])

# Learnable conv-layer parameters: 16 filters of size 3x3 over 1 input
# channel, plus one bias per filter.
F1 = tf.Variable(initial_value=tf.truncated_normal(shape=[3, 3, 1, 16], stddev=0.1))
B1 = tf.Variable(tf.constant([0.1], dtype=tf.float32, shape=[16]))

# Convolution, VALID padding: 28x28x1 ---> 26x26x16, then ReLU.
conv = tf.nn.conv2d(input_x, F1, strides=[1, 1, 1, 1], padding='VALID') + B1
conv = tf.nn.relu(conv)
# Max pooling 3x3, stride 1, VALID: 26x26x16 ---> 24x24x16.
conv = tf.nn.max_pool(conv, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='VALID')

# Flatten the feature map for the fully connected classifier head.
conv = tf.reshape(conv, shape=[-1, 24 * 24 * 16])

# -------- Fully connected classifier --------
# FIX: give W a small stddev (0.1), consistent with F1 — the default
# stddev of 1.0 produces large initial logits and slows training.
W = tf.Variable(tf.truncated_normal(shape=[24 * 24 * 16, 10], stddev=0.1))
B = tf.Variable(tf.constant([0.0], shape=[10]))

fc = tf.matmul(conv, W) + B
# Softmax probabilities for prediction only; the loss below is computed
# from the raw logits (fc), because softmax_cross_entropy_with_logits
# applies softmax internally — feeding it y_pred would double-softmax.
y_pred = tf.nn.softmax(fc)

# BUG FIX: reduce the per-example cross entropy to a scalar mean.
# The original left `loss` as a [batch] vector, so monitoring it printed
# an array and the optimizer implicitly summed (scaling the gradient
# with the batch size).
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=fc, labels=input_y))

opti = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)

# ---- Data ----
# FIX: import the input_data submodule explicitly — the tutorials package
# does not reliably re-export it, so `mnist.input_data` can raise
# AttributeError depending on the TF build.
from tensorflow.examples.tutorials.mnist import input_data
# BUG FIX: one_hot=True is required — input_y expects [batch, 10] one-hot
# labels, while the default returns integer class ids that cannot be fed.
datas = input_data.read_data_sets('MNIST/', one_hot=True)

# ---- Train ----
# Use a context manager so the session is always closed, and the
# non-deprecated global_variables_initializer (initialize_all_variables
# was removed after TF 1.x deprecation).
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(10):
        x_batch, y_batch = datas.train.next_batch(32)
        # BUG FIX: next_batch returns images flattened to [batch, 784];
        # reshape to the NHWC layout [batch, 28, 28, 1] that input_x expects,
        # otherwise the feed_dict raises a shape mismatch.
        x_batch = x_batch.reshape(-1, 28, 28, 1)
        sess.run(opti, feed_dict={input_x: x_batch, input_y: y_batch})
        print('loss:', sess.run(loss, feed_dict={input_x: x_batch, input_y: y_batch}))




