import torch
from torch import nn, optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
from ESResnet import ResNet18
import torchvision
import torchvision.transforms as transforms

# Hyperparameter settings
EPOCH = 20   # total number of passes over the training set
pre_epoch = 0  # number of epochs already completed (resume offset)
batch_size = 64      # mini-batch size for both train and test loaders
LR = 0.001  # initial learning rate for SGD

# ---- dataset, model and optimizer setup ----
# NOTE(review): the mean/std below are the standard CIFAR-10 statistics —
# presumably reused as-is for these spectrogram images; verify they fit the data.
transform = transforms.Compose([
    # transforms.RandomCrop(32, padding=4),  # pad by 4, then random-crop back to 32x32 (disabled)
    transforms.RandomHorizontalFlip(),  # flip each image with probability 0.5
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),  # per-channel R,G,B mean/std
])

# ImageFolder infers class labels from the sub-directory names.
train_dataset = datasets.ImageFolder(r'D:\数据集\Spec\ESC-50\train', transform=transform)
test_dataset = datasets.ImageFolder(r'D:\数据集\Spec\ESC-50\test', transform=transform)

# Shuffle only the training split; keep test order deterministic.
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)

net = ResNet18()

criterion = nn.CrossEntropyLoss()  # classification loss over raw logits
optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9, weight_decay=0.01)

# ---- training & evaluation loop ----
net.train()
for epoch in range(pre_epoch, EPOCH):
    print('\nEpoch: %d' % (epoch + 1))
    net.train()  # re-enable train mode (net.eval() is set at the end of each epoch)
    sum_loss = 0.0
    acc = 0.0          # running count of correct predictions this epoch
    num_batches = 0    # counted explicitly so the summary never uses stale loop vars
    for i, data in enumerate(train_loader, 0):
        inputs, labels = data
        # inputs, labels = inputs.to(device), labels.to(device)

        # forward pass
        outputs = net(inputs)
        loss = criterion(outputs, labels)

        # accumulate epoch statistics
        sum_loss += loss.item()
        num_batches += 1
        _, predicted = torch.max(outputs, 1)
        # fix: accumulate correct-count directly instead of clobbering a
        # per-batch `correct` variable (removed leftover debug print)
        acc += (predicted == labels).sum().item()

        # backward pass & parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # fix: guard against an empty loader so the summary cannot divide by zero
    # or reference undefined loop variables
    if num_batches > 0:
        print('[epoch:%d, iter:%d] Loss: %.03f | Acc: %.3f%% '
              % (epoch + 1, num_batches + epoch * num_batches,
                 sum_loss / num_batches, 100. * acc / len(train_dataset)))

    # ---- evaluation on the held-out set ----
    net.eval()
    eval_loss = 0.0
    eval_acc = 0
    # fix: disable autograd during evaluation — the original tracked gradients
    # through the whole test pass, wasting memory and compute
    with torch.no_grad():
        for data in test_loader:
            img, label = data
            out = net(img)
            loss = criterion(out, label)
            # loss is the batch mean; re-weight by batch size so the final
            # division by the dataset length yields the true per-sample mean
            eval_loss += loss.item() * label.size(0)
            _, pred = torch.max(out, 1)
            eval_acc += (pred == label).sum().item()
    print('Test Loss:{:.6f}, Acc: {:.6f}'.format(eval_loss / (len(test_dataset)), eval_acc * 1.0 / (len(test_dataset))))

# persist only the learned parameters (state_dict), not the full module object
torch.save(obj=net.state_dict(), f="Res0913.pth")



