# coding:utf-8
'''
Train a logistic-regression model, using the MNIST dataset as an example.
'''
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
# Load the MNIST dataset (downloaded to MNIST_data/ on first run);
# one_hot=True encodes each label as a 10-dim one-hot vector.
mnist = input_data.read_data_sets('MNIST_data/',one_hot=True)
# Split out training and test images/labels
x_train,y_train,x_test,y_test=mnist.train.images,mnist.train.labels,mnist.test.images,mnist.test.labels

# Placeholders: x is a batch of flattened 28x28 images, y the one-hot labels
x=tf.placeholder('float',name='x',shape=[None,784])
y=tf.placeholder('float',name='y',shape=[None,10])

# Build the weight and bias variables
def createWandB(shapew,shapeb):
    """Create the trainable weight and bias variables.

    Args:
        shapew: shape of the weight matrix, e.g. [784, 10].
        shapeb: shape of the bias vector, e.g. [10].

    Returns:
        (weights, bias): weights initialized from a truncated normal
        (stddev 0.01), bias initialized to zeros.
    """
    weights = tf.Variable(
        tf.truncated_normal(shape=shapew, stddev=0.01, dtype='float'),
        name='weights')
    bias = tf.Variable(np.zeros(shapeb), dtype='float', name='bias')
    return weights, bias

# One weight matrix and bias: 784 input pixels -> 10 classes
w,b=createWandB([784,10],[10])
# Build the model output (logits)
def model(w,x,b):
    """Linear logistic-regression logits: x @ w + b.

    Args:
        w: weight matrix, shape [784, 10] (see createWandB call above).
        x: input batch placeholder, shape [None, 784].
        b: bias vector, shape [10].

    Returns:
        Unnormalized class scores (logits), shape [None, 10].
    """
    return tf.matmul(x,w)+b

# Logits for the current batch
y_model=model(w,x,b)
# Loss: softmax cross-entropy is the correct loss for mutually-exclusive
# one-hot classes. (The original used sigmoid_cross_entropy_with_logits,
# which treats each of the 10 outputs as an independent binary label —
# wrong for single-label MNIST classification.)
cross_entropy=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_model,labels=y))
# Optimizer: plain gradient descent, learning rate 0.05
train_op=tf.train.GradientDescentOptimizer(0.05).minimize(cross_entropy)
# Predicted class = argmax over the 10 logits
predict_op=tf.argmax(y_model,1)
# Create the session and run training
with tf.Session() as sess:
    # Initialize all variables
    tf.global_variables_initializer().run()
    batch_size = 128
    # Ceil-divide so the final partial batch is trained on as well.
    # (The original floor-divided, silently dropping up to batch_size-1
    # samples per epoch; the min() guard on `end` below shows full
    # coverage was intended.)
    batch_nums = (len(x_train) + batch_size - 1) // batch_size
    for epoch in range(100):
        for j in range(batch_nums):
            start = j * batch_size
            end = min(start + batch_size, len(x_train))
            # One gradient-descent step on this mini-batch
            sess.run(train_op, feed_dict={x: x_train[start:end], y: y_train[start:end]})
        # Evaluate once per epoch. Run predict_op a single time — the
        # original ran the same inference twice per epoch.
        preds = sess.run(predict_op, feed_dict={x: x_test})
        accuracy = np.mean(np.argmax(y_test, 1) == preds)
        print(epoch + 1, preds, accuracy)
        print('______________________________  ')


