from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import random
import os as os
import numpy as np
import matplotlib.pyplot as plt

# Logistic regression on MNIST (TF1 graph-mode tutorial).
# Download (if necessary) and load the dataset with one-hot labels.
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# test_imgs / test_labels are fed to the evaluation op in the session below.
train_imgs, train_labels = mnist.train.images, mnist.train.labels
test_imgs, test_labels = mnist.test.images, mnist.test.labels

# Sanity-print dataset dimensions and one sample label.
for arr in (train_imgs, train_labels, test_imgs, test_labels):
    print(arr.shape)
print(train_labels[1])

# --- Model graph: single-layer softmax (multinomial logistic regression) ---

# Inputs: flattened 28x28 grayscale images and one-hot labels over 10 classes.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Parameters: one weight column and one bias per class. Zero init is fine
# for a single linear layer (no hidden-layer symmetry to break).
w = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Predicted class probabilities.
actv = tf.nn.softmax(tf.matmul(x, w) + b)

# Cross-entropy loss. (Re: the original "?why tf.log" question — with one-hot
# y, -sum(y * log(actv)) picks out -log(probability of the true class), i.e.
# the negative log-likelihood.)
# FIX 1: clip actv away from 0 so tf.log never yields -inf/NaN when the
#        softmax output underflows.
# FIX 2: `reduction_indices` is deprecated; use `axis`.
loss = tf.reduce_mean(
    -tf.reduce_sum(y * tf.log(tf.clip_by_value(actv, 1e-10, 1.0)), axis=1))
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

# Accuracy: fraction of samples whose argmax prediction matches the label.
pred = tf.equal(tf.argmax(actv, 1), tf.argmax(y, 1))
accr = tf.reduce_mean(tf.cast(pred, tf.float32))
init = tf.global_variables_initializer()


with tf.Session() as sess:
    sess.run(init)

    step_n = 101       # number of training epochs
    batch_size = 100   # minibatch size
    display_step = 10  # print metrics every this many epochs

    # Minibatches per epoch is loop-invariant — hoisted out of the epoch loop.
    total_batch = int(mnist.train.num_examples / batch_size)

    for step in range(step_n):
        avg_cost = 0.
        for _ in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Fetch the loss in the same run as the update op: one forward
            # pass per minibatch instead of two separate sess.run calls.
            _, batch_loss = sess.run([train_op, loss],
                                     feed_dict={x: batch_x, y: batch_y})
            avg_cost += batch_loss
        # BUG FIX: average over the number of minibatches, not batch_size.
        # The original divided by 100 instead of ~550, inflating the
        # reported loss by ~5.5x.
        avg_cost /= total_batch

        if step % display_step == 0:
            test_accr = sess.run(accr, feed_dict={x: test_imgs, y: test_labels})
            print(step, 'acc:', test_accr, 'loss:', avg_cost)
    print('end')

