import os

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.datasets as dsets
from torch.autograd import Variable

from CNN import ConvNet
from DATASET import datasetting
from tool import rightness



num_epochs=20
learning_rate=0.001

#读取数据loader
train_loader, val_loader, test_loader=datasetting()

#网络
cnn=ConvNet()
cnn=cnn.cuda()
criterion=nn.CrossEntropyLoss()
optimizer=optim.SGD(cnn.parameters(),lr=learning_rate,momentum=0.9)

records=[]
weights=[]

for epoch in range(num_epochs):
    train_rights = []
    for batch_idx, (data, target) in enumerate(train_loader):

        data, target = Variable(data), Variable(target)#len(data)=64
        data = data.cuda()
        target = target.cuda()

        cnn.train()
        output = cnn(data)

        #1.算loss-》cpu
        #2.清空梯度
        #3.loss.backward
        #4.一步优化
        loss = criterion(output, target)
        loss = loss.cpu()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        right = rightness(output, target)
        train_rights.append(right)

        if batch_idx % 500 == 0:
            cnn.eval()
            val_rights = []
            for (data, target) in val_loader:
                data, target = Variable(data), Variable(target)
                data = data.cuda()
                target = target.cuda()
                output = cnn(data)
                right = rightness(output, target)
                val_rights.append(right)

            train_r = (sum(tup[0] for tup in train_rights), sum(tup[1] for tup in train_rights))
            val_r = (sum(tup[0] for tup in val_rights), sum(tup[1] for tup in val_rights))
            print('周期:{}[训练组{}/{}({:.0f}%)]\t,损失:{:.6f}\t,训练准确率:{:.2f}%\t,校验准确率:{:.2f}%'.format
                  (epoch,
                   batch_idx * len(data),
                   len(train_loader.dataset),
                   100. * batch_idx / len(train_loader),
                   loss.data,
                   100. * train_r[0] /train_r[1],
                   100. * val_r[0] / val_r[1]))

            records.append((100 - 100. * train_r[0] / train_r[1], 100 - 100. * val_r[0] / val_r[1]))
            weights.append([cnn.conv1.weight.data.clone(), cnn.conv1.bias.data.clone(), cnn.conv2.weight.data.clone(),
                            cnn.conv2.bias.data.clone()])


torch.save(cnn,'checkpoint/minst_conv_checkpoint')



plt.figure(figsize=(10, 7))
plt.plot(records)
plt.xlabel('Steps')
plt.ylabel('Error rate')

