# coding:utf-8
'''
网络模型
'''
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
# 载入数据
# Load the MNIST dataset (downloaded to MNIST/data if absent); labels are one-hot.
mnist = input_data.read_data_sets('MNIST/data',one_hot=True)

# Split out train/test images and labels as numpy arrays.
x_train = mnist.train.images
y_train = mnist.train.labels
x_test = mnist.test.images
y_test = mnist.test.labels

# Graph inputs: flattened 28x28 images (784 floats) and one-hot digit labels.
x_input = tf.placeholder('float',[None,784],name='x_input')
y_input = tf.placeholder('float',[None,10],name='y_input')

# 构建权值和偏置项初始化变量
def init_weights_and_bias(shapeW,shapeB):
    """Create one (weight, bias) Variable pair.

    The weight is drawn from a truncated normal (stddev 0.01) with shape
    ``shapeW``; the bias starts at zero with shape ``shapeB``.
    """
    weight = tf.Variable(tf.truncated_normal(shape=shapeW,stddev=0.01),
                         name='weight', dtype='float')
    bias = tf.Variable(np.zeros(shapeB), name='bias', dtype='float')
    return weight, bias

# Hidden layer (784 -> 625) and output layer (625 -> 10) parameters.
w_h, b_h = init_weights_and_bias([784,625],[625])
w_o, b_o = init_weights_and_bias([625,10],[10])
# 构建模型 生成学习输出值
def model(x,w_h,w_o,b_h,b_o):
    """One-hidden-layer MLP: sigmoid hidden layer, linear output logits."""
    hidden = tf.nn.sigmoid(tf.matmul(x, w_h) + b_h)
    logits = tf.matmul(hidden, w_o) + b_o
    return logits
# Logits of the network for the graph inputs.
y_model = model(x_input,w_h,w_o,b_h,b_o)

# Loss: softmax cross-entropy. The digit classes are mutually exclusive
# one-hot labels, so softmax is the correct loss here — the original
# sigmoid_cross_entropy_with_logits treats each of the 10 outputs as an
# independent binary classification problem.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=y_model, labels=y_input))

# Optimizer: plain gradient descent, learning rate 0.5.
train_op = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

# Predicted class = index of the largest logit.
predict_op = tf.argmax(y_model, 1)
# 创建会话
with tf.Session() as sess:
    # Initialize all graph variables.
    tf.global_variables_initializer().run()
    epoch_num = 50    # number of passes over the training set
    batch_size = 128  # examples per gradient step
    batch_nums = int(len(x_train)/batch_size)
    for i in range(epoch_num):
        for j in range(batch_nums):
            start = j*batch_size
            end = min((j+1)*batch_size,len(x_train))
            # BUG FIX: the original computed start/end but then fed the
            # whole training set on every inner step; feed only this
            # mini-batch so the loop actually performs mini-batch SGD.
            sess.run(train_op, feed_dict={x_input: x_train[start:end],
                                          y_input: y_train[start:end]})
        # Per-epoch accuracy on the full train and test sets.
        train_acc = np.mean(np.argmax(y_train,1)==sess.run(predict_op,feed_dict={x_input:x_train}))
        test_acc = np.mean(np.argmax(y_test,1)==sess.run(predict_op,feed_dict={x_input:x_test}))
        # BUG FIX: np.cast is a lookup table of cast functions
        # (np.cast[float](x)), not a callable — np.cast(x, float) raises
        # TypeError. A plain float() is all %f formatting needs.
        print('epoch:%d train_acc:%f test_acc:%f'%(i+1,float(train_acc),float(test_acc)))
