# Lab 10 MNIST and softmax
import torch
from torch.autograd import Variable
import torchvision.datasets as dsets   #pytorch自带的数据集
import torchvision.transforms as transforms #pytorch对数据的处理
import random


torch.manual_seed(777)  # fix the RNG seed so runs are reproducible

# Training hyper-parameters
learning_rate = 0.001   # Adam step size
training_epochs = 15    # number of full passes over the training set
batch_size = 100        # samples per mini-batch

# MNIST dataset (files are downloaded into `root` on the first run)
mnist_train = dsets.MNIST(root='MNIST_data/',  # where the data is stored / downloaded
                          train=True,  # True -> the 60k training split
                          transform=transforms.ToTensor(), # uint8 image [0,255] -> float tensor scaled to [0,1]
                          download=True)   # fetch the files if they are missing

mnist_test = dsets.MNIST(root='MNIST_data/',
                         train=False,  # False -> the 10k test split
                         transform=transforms.ToTensor(),
                         download=True)

# DataLoader: yields shuffled mini-batches from the training set
data_loader = torch.utils.data.DataLoader(dataset=mnist_train,  # dataset to draw from
                                          batch_size=batch_size, # mini-batch size
                                          shuffle=True,  # reshuffle at every epoch
                                          )

# Model: a single affine layer 784 -> 10 (softmax regression).
# It outputs raw logits; softmax is folded into the loss below.
model = torch.nn.Linear(784, 10, bias=True) # bias=True adds the bias term b

# Cost function and optimizer
criterion = torch.nn.CrossEntropyLoss() # LogSoftmax + NLLLoss; expects integer class labels   # Softmax is internally computed.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Train the model: each epoch is one full pass of mini-batch gradient
# descent over the training set.
for epoch in range(training_epochs):

    avg_cost = 0.0  # running mean of the batch losses for this epoch
    total_batch = len(mnist_train) // batch_size  # mini-batches per epoch

    for batch_xs, batch_ys in data_loader:  # one gradient step per mini-batch
        # Flatten each 1x28x28 image into a 784-dim row vector.
        # (Variable wrappers are deprecated since PyTorch 0.4; tensors
        # carry autograd state directly.)
        X = batch_xs.view(-1, 28 * 28)
        Y = batch_ys  # integer class labels; CrossEntropyLoss does not want one-hot

        optimizer.zero_grad()            # clear gradients from the previous step
        hypothesis = model(X)            # forward pass -> logits
        cost = criterion(hypothesis, Y)  # softmax cross-entropy
        cost.backward()                  # backprop: compute dL/dw, dL/db
        optimizer.step()                 # Adam parameter update

        # .item() extracts a plain Python float, so the running average
        # does not keep every batch's autograd graph alive in memory.
        avg_cost += cost.item() / total_batch

    print("[Epoch: {:>4}] cost = {:>.9}".format(epoch + 1, avg_cost))

print('Learning Finished!')

# Test the model and check accuracy on the held-out test set.
# NOTE: mnist_test.data holds raw uint8 pixels in [0, 255], while training
# inputs went through ToTensor(), which scales to [0.0, 1.0]. Divide by 255
# so evaluation sees the same input distribution the model was trained on.
# (.data/.targets replace the deprecated .test_data/.test_labels aliases.)
with torch.no_grad():  # inference only — no autograd graph needed
    X_test = mnist_test.data.view(-1, 28 * 28).float() / 255.0
    Y_test = mnist_test.targets

    # Accuracy = fraction of samples whose argmax logit matches the label.
    prediction = model(X_test)
    correct_prediction = (torch.max(prediction, 1)[1] == Y_test)
    accuracy = correct_prediction.float().mean()
    print('Accuracy:', accuracy)

# Pick one random test sample and predict its label.
r = random.randint(0, len(mnist_test) - 1)
with torch.no_grad():  # inference only — no gradients required
    # Apply the same /255 scaling as the training pipeline's ToTensor().
    X_single_data = mnist_test.data[r:r + 1].view(-1, 28 * 28).float() / 255.0
    Y_single_data = mnist_test.targets[r:r + 1]

    print("Label: ", Y_single_data)  # ground-truth class index
    single_prediction = model(X_single_data)
    print("Prediction: ", torch.max(single_prediction, 1)[1])  # predicted class index
