import os

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

from HTfc3 import ht1
from bnlayer import bn_layer
# Rebuild the TF1-style default graph from scratch on every run.
tf.reset_default_graph()
# Load the MNIST dataset (one-hot encoded labels) from a local directory.

mnist = input_data.read_data_sets("D:/Datasets/Mnist/",one_hot=True)
sess=tf.InteractiveSession()


# Placeholders sized to the sample input (784 = 28x28 flattened pixels)
# and output (10 digit classes).
x=tf.placeholder(tf.float32,[None,784])
y_=tf.placeholder(tf.float32,[None,10])
# NOTE(review): this reshape is an identity — x is already [None, 784].
x_img=tf.reshape(x,[-1,784])


# First fully connected layer, built with the project-local ht1 helper.
# NOTE(review): the list arguments presumably describe the input/output tensor
# factorizations ([4,7,7,4] -> 784, [4,8,8,4] -> 1024), the output width, and
# per-factor ranks — confirm against HTfc3.ht1's signature.
[b1,out1]=ht1(x_img,
        [4,7,7,4],
        [4,8,8,4],
        [1024],
        [45,45,45,45,45,45,45],
        [45,45,45,45,45,45,45],
		scope='out1')
# Batch-normalize, then ReLU. The hard-coded True is presumably an is-training
# flag — NOTE(review): verify bn_layer switches to moving statistics at test time.
out1 = bn_layer(out1,True,name='BatchNorm1')
h_fc1=tf.nn.relu(out1)


# Dropout: keep_prob is fed at run time (0.5 while training, 1.0 for evaluation).
keep_prob=tf.placeholder(tf.float32)
h_fc1_drop=tf.nn.dropout(h_fc1,keep_prob)

# Second fully connected layer (same ht1 -> batch norm -> ReLU -> dropout
# pattern as the first; 1024 inputs, 1024 outputs).
[b2,out2]=ht1(h_fc1_drop,
        [4,8,8,4],
        [4,8,8,4],
        [1024],
        [45,45,45,45,45,45,45],
        [45,45,45,45,45,45,45],
		scope='out2')
out2 = bn_layer(out2,True,name='BatchNorm2')
h_fc2=tf.nn.relu(out2)
h_fc2_drop=tf.nn.dropout(h_fc2,keep_prob)

# Third fully connected layer: narrows from 1024 down to 256 units
# ([4,4,4,4] -> 256), again with batch norm, ReLU, and dropout.
[b3,out3]=ht1(h_fc2_drop,
        [4,8,8,4],
        [4,4,4,4],
        [256],
        [45,45,45,45,45,45,45],
        [45,45,45,45,45,45,45],
		scope='out3')
out3 = bn_layer(out3,True,name='BatchNorm3')
h_fc3=tf.nn.relu(out3)
h_fc3_drop=tf.nn.dropout(h_fc3,keep_prob)

 #设置第四个全连接层

[b4,out4]=ht1(h_fc3_drop,
        [4,4,4,4],
        [2,5,1,1],
        [10],
        [45,45,45,45,45,45,45],
        [45,45,45,45,45,45,45],
		scope='yout')
y_out=tf.nn.softmax(out4)
 #w_fc4=tf.Variable(tf.truncated_normal([256,10],stddev=0.1))
 #b_fc4=tf.Variable(tf.constant(0.1,shape=[10]))
 #y_out=tf.nn.softmax(tf.matmul(h_fc3_drop,w_fc4)+b_fc4)


# Cross-entropy loss. The softmax probabilities are clipped away from zero
# before the log so the loss cannot become NaN/-inf when the softmax
# saturates (tf.log(0) = -inf). `axis` replaces the deprecated
# `reduction_indices` argument.
loss=tf.reduce_mean(-tf.reduce_sum(y_*tf.log(tf.clip_by_value(y_out,1e-10,1.0)),axis=1))
# Adam optimizer with a learning rate of 1e-3.
train_step=tf.train.AdamOptimizer(1e-3).minimize(loss)

# Accuracy: fraction of samples whose arg-max prediction matches the
# one-hot label's arg-max.
correct_prediction=tf.equal(tf.argmax(y_out,1),tf.argmax(y_,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

saver = tf.train.Saver()

# Train: initialize all variables, then run mini-batch updates, reporting
# training-batch accuracy every 100 steps (dropout disabled, keep_prob=1.0).
sess.run(tf.global_variables_initializer())
for i in range(2000):
    batch=mnist.train.next_batch(50)
    if i%100==0:
        train_accuracy=accuracy.eval(feed_dict={x:batch[0],y_:batch[1],keep_prob:1.0})
        print ("step %d,train_accuracy= %g"%(i,train_accuracy))
    train_step.run(feed_dict={x:batch[0],y_:batch[1],keep_prob:0.5})

# Ensure the checkpoint directory exists before saving — tf.train.Saver.save
# fails when the parent directory is missing.
os.makedirs('save', exist_ok=True)
saver.save(sess,'save/model.ckpt')

# Final evaluation on the held-out test set (dropout disabled).
print("test accuracy %g"% accuracy.eval(feed_dict={x:mnist.test.images,y_:mnist.test.labels,keep_prob:1.0}))

