import torch
from torch import nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
import pandas

#下载MNIST数据集
# Download the MNIST training split
train_data = dsets.MNIST(root='./mnist/', train=True,
                         transform=transforms.ToTensor(), download=True)
# Convert the raw uint8 images to float tensors scaled to [0, 1].
# FIX: .data / .targets replace the deprecated .train_data / .train_labels
# attributes (removed in current torchvision releases).
train_x = train_data.data.type(torch.FloatTensor) / 255.
# Labels as a numpy int array
train_y = train_data.targets.numpy()

# Test split (already downloaded alongside the training split)
test_data = dsets.MNIST(root='./mnist/', train=False, transform=transforms.ToTensor())
test_x = test_data.data.type(torch.FloatTensor) / 255.
test_y = test_data.targets.numpy()

EPOCH = 3         # number of training epochs
BATCH_SIZE = 1    # samples per batch
LR = 0.01         # learning rate
# NOTE(review): this DataLoader is never used below — the training loop
# iterates train_x directly, one sample at a time.
train_x1 = DataLoader(train_x, batch_size=BATCH_SIZE, drop_last=True)

#初始化神经网络
class Classifier(nn.Module):
    """Two-layer sigmoid MLP: 784 (flattened 28x28 image) -> 200 -> 10."""

    def __init__(self):
        super().__init__()
        # Layer construction order is kept stable so seeded parameter
        # initialisation is reproducible.
        self.model = nn.Sequential(
            nn.Linear(784, 200),
            nn.Sigmoid(),
            nn.Linear(200, 10),
            nn.Sigmoid())
        self.loss_function = nn.MSELoss()
        self.optimiser = torch.optim.SGD(self.parameters(), lr=0.01)
        self.counter = 0    # number of train() calls performed so far
        self.progress = []  # loss value sampled once every 10 steps

    def forward(self, inputs):
        """Run the network on a flattened input tensor."""
        return self.model(inputs)

    def train(self, inputs, targets):
        """Perform one SGD step on a single (input, target) pair.

        NOTE(review): this shadows nn.Module.train(mode); the script's
        callers depend on this signature, so the name is kept.
        """
        predictions = self.forward(inputs)
        step_loss = self.loss_function(predictions, targets)

        # Bookkeeping: sample the loss every 10 steps, log every 10000.
        self.counter += 1
        if self.counter % 10 == 0:
            self.progress.append(step_loss.item())
        if self.counter % 10000 == 0:
            print("counter = ", self.counter)

        # Standard backprop: clear gradients, backpropagate, update weights.
        self.optimiser.zero_grad()
        step_loss.backward()
        self.optimiser.step()

    def plot_progress(self):
        """Plot the sampled loss history."""
        frame = pandas.DataFrame(self.progress, columns=['loss'])
        frame.plot(ylim=(0, 1.0), figsize=(16, 8), alpha=0.1,
                   marker='.', grid=True, yticks=(0, 0.25, 0.5))
        plt.title('loss', fontsize=25)
        

#训练数据：训练集
# Train the classifier on the training set, one sample at a time.
net = Classifier()
for epoch in range(EPOCH):
    for step, sample in enumerate(train_x):
        sample = sample.view(784)  # flatten 28x28 image to a 784 vector
        # One-hot encode the integer label so the target matches the
        # network's 10-way output shape (10,). The original sliced a
        # length-1 window of train_y, producing a (1, 10) target.
        target = torch.nn.functional.one_hot(
            torch.tensor(int(train_y[step])), num_classes=10).float()
        # BUG FIX: original passed y.float (the bound method object, not
        # its result) — .float() must be called to get a float tensor.
        net.train(sample, target)

    # Evaluate accuracy on the test set after each epoch.
    score = []
    for i, image_data_tensor in enumerate(test_x):
        # Flatten the 28x28 image to a 784 tensor
        image_data_tensor = image_data_tensor.view(784)
        # Forward pass; detach so no gradients are tracked
        answer = net.forward(image_data_tensor).detach().numpy()
        # Predicted class = index of the largest output
        score.append(answer.argmax())
    # Fraction of predictions that match the true labels
    accuracy = float((score == test_y).astype(int).sum()) / float(test_y.size)
    print('accuracy:', accuracy)
net.plot_progress()
plt.show()
