import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
# Load MNIST (downloaded to MNIST_data/ on first run); labels are one-hot 10-vectors.
mnist = read_data_sets("MNIST_data/",one_hot=True)
batch_size = 128
# Number of full mini-batches per epoch (any remainder examples are dropped).
n_batch = mnist.train.num_examples//batch_size

# Directory where per-epoch checkpoints are written by the training loop below.
saver_dir = 'finetune_model/model/layer2/'
def weight_variable(shape):
    """Return a trainable weight variable of the given shape.

    Values are drawn from a truncated normal distribution (stddev 0.1).
    """
    init = tf.truncated_normal(shape, dtype=tf.float32, stddev=0.1)
    return tf.Variable(init)

def bias_variable(shape):
    """Return a trainable bias variable of the given shape, initialized to 0.1."""
    init = tf.constant(0.1, shape=shape)
    return tf.Variable(init)

def conv(x, w, padding):
    """2-D convolution of x with kernel w, unit strides, given padding mode."""
    unit_strides = [1, 1, 1, 1]
    return tf.nn.conv2d(x, w, strides=unit_strides, padding=padding)

def max_pool(x):
    """2x2 max pooling with stride 2 ('SAME' padding); halves spatial dims."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')

def avg_pool_14(x):
    """14x14 average pooling with stride 14 ('SAME' padding).

    Collapses a 14x14 feature map to 1x1 per channel (global average for
    14x14 inputs).
    """
    window = [1, 14, 14, 1]
    return tf.nn.avg_pool(x, ksize=window, strides=window, padding='SAME')

def inference(x, y, keep_prob):
    """Build the deeply-supervised LeNet-style classification graph.

    Two conv+maxpool stages feed both an auxiliary softmax head (attached
    after conv2, for deep supervision) and the main FC classifier head.
    The training loss is the sum of both heads' cross-entropy losses.

    Args:
        x: float32 tensor of flattened images, shape (batch, 784).
        y: float32 one-hot labels, shape (batch, 10).
        keep_prob: scalar dropout keep probability for the FC layer.

    Returns:
        (loss, y_): the combined scalar training loss (also logged as a
        "loss" summary) and the main head's logits, shape (batch, 10).
    """
    with tf.name_scope("input"):
        image = tf.reshape(x, [-1, 28, 28, 1])

    with tf.name_scope("conv1"):
        weight1 = weight_variable([5, 5, 1, 32])
        b1 = bias_variable([32])
        h_conv1 = tf.nn.relu(conv(image, weight1, 'SAME') + b1, 'relu1')
        pool1 = max_pool(h_conv1)  # -> (batch, 14, 14, 32)

    with tf.name_scope("conv2"):
        weight2 = weight_variable([5, 5, 32, 64])
        b2 = bias_variable([64])
        h_conv2 = tf.nn.relu(conv(pool1, weight2, 'SAME') + b2, 'relu2')
        pool2 = max_pool(h_conv2)  # -> (batch, 7, 7, 64)

    # Auxiliary classifier on the conv2 features (deep supervision head).
    with tf.name_scope("softmax_2"):
        flatten2 = tf.reshape(pool2, [-1, 7 * 7 * 64])
        weight2_1 = weight_variable([7 * 7 * 64, 10])
        b2_1 = bias_variable([10])
        output2 = tf.matmul(flatten2, weight2_1) + b2_1

    with tf.name_scope("loss_2"):
        loss_2 = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=output2))

    with tf.name_scope("fully_connect1"):
        weight3 = weight_variable([7 * 7 * 64, 1024])
        b3 = bias_variable([1024])
        pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
        h_fc1 = tf.nn.relu(tf.matmul(pool2_flat, weight3) + b3)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # Main classifier head.
    with tf.name_scope("fully_connect2"):
        weight4 = weight_variable([1024, 10])
        b4 = bias_variable([10])
        y_ = tf.matmul(h_fc1_drop, weight4) + b4

    with tf.name_scope("total_loss"):
        loss_total = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=y_))
        # Joint objective: auxiliary head loss + main head loss.
        loss = loss_2 + loss_total
        tf.summary.scalar("loss", loss)

    return loss, y_

if __name__ == '__main__':
    # Placeholders: flattened 28x28 images, one-hot labels, dropout keep prob.
    x = tf.placeholder(tf.float32, [None, 784], name='x_input')
    y = tf.placeholder(tf.float32, [None, 10], name='y_input')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    loss,logits = inference(x,y,keep_prob)

    optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss)

    with tf.name_scope('accuracy'):
        # Fraction of predictions whose argmax matches the one-hot label.
        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar("accuracy", accuracy)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        merged = tf.summary.merge_all()
        saver = tf.train.Saver()
        writer = tf.summary.FileWriter('./finetune_model/graphs/layer2', sess.graph)
        for epoch in range(31):
            for _ in range(n_batch):
                batch_xs,batch_ys=mnist.train.next_batch(batch_size)
                # Next mini-batch: images in batch_xs, one-hot labels in batch_ys.
                summary ,_ = sess.run([merged,optimizer],feed_dict={x:batch_xs,y:batch_ys,keep_prob:0.5})
            # Only the last batch's summary of the epoch is logged.
            # NOTE(review): `summary` would be undefined here if n_batch == 0.
            writer.add_summary(summary, epoch)
            saver.save(sess, saver_dir + 'model.ckpt')
            # Evaluate on the full test set with dropout disabled (keep_prob=1.0).
            acc=sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels,keep_prob:1.0})
            print("Iter"+str(epoch)+",testing accuracy"+str(acc))

        writer.close()

