# coding:utf-8
'''
author:wangyi
'''
import tensorflow as tf
from tensorflow.examples.tutorials import mnist
import numpy as np
'''
Outline:
1. input data
2. build the model
   2.1 knowns (placeholders) and learnables (Variables)
   2.2 graph structure
   2.3 loss and optimizer
3. evaluation + prediction
'''

# ---- model definition: feature extractor ----

# Placeholders for the inputs: images in NHWC layout with a single
# channel, and one-hot labels over the 10 digit classes.
input_x = tf.placeholder(dtype=tf.float32, shape=[None, 28, 28, 1], name='input_x')
input_y = tf.placeholder(dtype=tf.float32, shape=[None, 10])

# Learnable parameters of the convolutional layer: sixteen 3x3 filters
# plus a per-filter bias.
F1 = tf.Variable(initial_value=tf.truncated_normal(shape=[3, 3, 1, 16], stddev=0.1))
B1 = tf.Variable(tf.constant([0.1], dtype=tf.float32, shape=[16]))

# Convolution with VALID padding (28x28x1 -> 26x26x16), then ReLU.
conv = tf.nn.relu(tf.nn.conv2d(input_x, F1, strides=[1, 1, 1, 1], padding='VALID') + B1)
# Max pooling, 3x3 window with stride 1, VALID padding (26x26x16 -> 24x24x16).
conv = tf.nn.max_pool(conv, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='VALID')

# -------feature extraction ----------#

# Fully-connected classification head: flatten the 24x24x16 feature map
# and project it onto the 10 digit classes.
# stddev=0.1 keeps initial logits small and matches F1's initialization;
# truncated_normal's default stddev of 1.0 is far too large for a
# 24*24*16 = 9216-dimensional input.
W = tf.Variable(tf.truncated_normal(shape=[24*24*16, 10], stddev=0.1, dtype=tf.float32))
B = tf.Variable(tf.constant([0.0], shape=[10]))

conv = tf.reshape(conv, shape=[-1, 24*24*16])
fc = tf.matmul(conv, W) + B   # raw logits
y_pred = tf.nn.softmax(fc)    # class probabilities, for prediction

# Mean cross-entropy over the batch. Without reduce_mean the loss is a
# per-example vector: printing it dumps a whole array, and minimize()
# implicitly sums it, scaling the effective learning rate by batch size.
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=fc, labels=input_y))

opti = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)

# ---- data + training ----
# Downloads/loads MNIST under 'MNIST/' with one-hot encoded labels.
datas = mnist.input_data.read_data_sets('MNIST/', one_hot=True)

# Context manager guarantees the session (and its resources) is released.
with tf.Session() as sess:
    # initialize_all_variables() is deprecated; use the modern initializer.
    sess.run(tf.global_variables_initializer())
    for i in range(10):
        x_batch, y_batch = datas.train.next_batch(32)
        # Images arrive flattened as (batch, 784); restore NHWC layout.
        x_batch = np.reshape(x_batch, [-1, 28, 28, 1])
        y_batch = np.reshape(y_batch, [-1, 10])
        # Fetch the train op and the loss in a single run() call instead of
        # paying for a second forward pass just to print the loss.
        _, loss_val = sess.run([opti, loss],
                               feed_dict={input_x: x_batch, input_y: y_batch})
        print('loss:', loss_val)



