import numpy as np
import torch
from torchvision.datasets import mnist
from torch import nn
from torch.autograd import Variable



def data_tf(x):
    """Convert a raw MNIST image to a flat, normalized float tensor.

    Scales pixel values from [0, 255] down to [-1, 1] and flattens the
    image into a 1-D vector (784 values for a 28x28 digit).
    """
    arr = np.array(x, dtype='float32') / 255
    arr = (arr - 0.5) / 0.5
    return torch.from_numpy(arr.reshape((-1,)))

# Download MNIST on first run and apply the tensor transform defined above.
train_set = mnist.MNIST('./data', train=True, transform=data_tf, download=True)
test_set = mnist.MNIST('./data', train=False, transform=data_tf, download=True)

from torch.utils.data import DataLoader

# Shuffle only the training stream; evaluation order does not matter.
train_data = DataLoader(train_set, batch_size=64, shuffle=True)
test_data = DataLoader(test_set, batch_size=128, shuffle=False)


class nnet(nn.Module):
    """Four-hidden-layer fully connected classifier for flattened MNIST digits.

    Architecture: input -> hidden1 -> hidden2 -> hidden3 -> output, with a
    ReLU after each hidden layer. The output is raw logits, suitable for
    nn.CrossEntropyLoss.

    Args:
        input_num:   size of the flattened input (784 for 28x28 MNIST).
        hidden_num1: width of the first hidden layer.
        hidden_num2: width of the second hidden layer.
        hidden_num3: width of the third hidden layer.
        output_num:  number of classes (10 for MNIST).
    """

    def __init__(self, input_num, hidden_num1, hidden_num2, hidden_num3, output_num):
        super(nnet, self).__init__()
        self.layer1 = nn.Linear(input_num, hidden_num1)
        # ReLU is stateless, so a single instance is safely reused
        # after every hidden layer.
        self.layer2 = nn.ReLU()
        self.layer3 = nn.Linear(hidden_num1, hidden_num2)
        self.layer4 = nn.Linear(hidden_num2, hidden_num3)
        self.layer5 = nn.Linear(hidden_num3, output_num)

    def forward(self, x):
        """Return class logits of shape (batch, output_num) for input x."""
        x = self.layer2(self.layer1(x))
        x = self.layer2(self.layer3(x))
        x = self.layer2(self.layer4(x))
        return self.layer5(x)

    # BUG FIX: the forward pass was originally named 'fellow' (a typo for
    # 'forward'), which broke nn.Module's __call__ protocol — net1(x) would
    # raise. Defining forward() above restores that; this alias keeps the
    # existing net1.fellow(x) call sites working.
    fellow = forward
    
net1 = nnet(784, 400, 200, 100, 10)
criterion = nn.CrossEntropyLoss()
optim = torch.optim.SGD(net1.parameters(), 0.1)

# Per-epoch history: train loss/accuracy and eval loss/accuracy.
losses = []
access = []
eval_losses = []
eval_access = []

for e in range(20):
    # ---- training pass ----
    train_loss = 0
    train_acc = 0
    net1.train()
    for im, label in train_data:
        # NOTE: the deprecated Variable() wrappers were removed — tensors
        # carry autograd state directly in modern PyTorch.
        # forward
        out = net1.fellow(im)
        loss = criterion(out, label)
        # backward
        optim.zero_grad()
        loss.backward()
        optim.step()
        # accumulate the loss as a plain Python float (was loss.data,
        # which kept tensors alive unnecessarily)
        train_loss += loss.item()
        # batch classification accuracy
        _, pred = out.max(1)
        num_correct = (pred == label).sum().item()
        train_acc += num_correct / im.shape[0]
    losses.append(train_loss / len(train_data))
    access.append(train_acc / len(train_data))

    # ---- evaluation pass: measure only, never update parameters ----
    eval_acc = 0
    eval_loss = 0
    net1.eval()
    # BUG FIX: the original eval loop called optim.zero_grad(),
    # loss.backward() and optim.step() here, i.e. it TRAINED on the test
    # set. Evaluation must not touch the optimizer; no_grad() also skips
    # graph construction for speed/memory.
    with torch.no_grad():
        for im, label in test_data:
            out = net1.fellow(im)
            loss = criterion(out, label)
            eval_loss += loss.item()
            _, pred = out.max(1)
            num_correct = (pred == label).sum().item()
            eval_acc += num_correct / im.shape[0]
    eval_losses.append(eval_loss / len(test_data))
    # BUG FIX: originally appended eval_loss here instead of eval_acc,
    # so the recorded eval-accuracy curve was actually the loss curve.
    eval_access.append(eval_acc / len(test_data))

    print('epoch: {}, Train Loss: {:.6f}, Train Acc: {:.6f}, Eval Loss: {:.6f}, Eval Acc: {:.6f}'
          .format(e, train_loss / len(train_data), train_acc / len(train_data), 
                     eval_loss / len(test_data), eval_acc / len(test_data)))        
# Save model parameters
# torch.save(net1.state_dict(),'moudule_net.pth')




