import  torch
from torch import nn, optim
from torch.nn import functional as F
import torchvision
from    torch.utils.data import DataLoader
from    torchvision import datasets
from    torchvision import transforms
from ResNet import ResNet18

# ---------------------------------------------------------------------------
# Data pipeline: CIFAR-100 train/test loaders and compute device.
# ---------------------------------------------------------------------------
batchsz = 64  # mini-batch size shared by the train and test loaders

# CIFAR-100 images are already 32x32, so Resize is currently a no-op; it is
# kept so a different input resolution can be dropped in later.
_transform = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
])

cifar_train = datasets.CIFAR100('cifar100', True, transform=_transform, download=True)
cifar_train = DataLoader(cifar_train, batch_size=batchsz, shuffle=True)

cifar_test = datasets.CIFAR100('cifar100', False, transform=_transform, download=True)
# Accuracy is order-independent, so there is no reason to shuffle the test set.
cifar_test = DataLoader(cifar_test, batch_size=batchsz, shuffle=False)

# Sanity-check one batch from each loader before training starts.
sample = next(iter(cifar_train))
sample1 = next(iter(cifar_test))
print('train:', sample[0].shape, sample[1].shape, 'test:', sample1[0].shape, sample1[1].shape)

# Fall back to the CPU when no CUDA device is available instead of crashing.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

def main():
    """Train ResNet-18 on CIFAR-100 for 100 epochs, printing loss and
    test accuracy, then save the final weights.

    Relies on the module-level ``cifar_train``/``cifar_test`` loaders and
    ``device``. Weights are written to ``checkpoint/resnet18_weight.pth``.
    """
    import os

    model = ResNet18().to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    # CrossEntropyLoss consumes raw logits; no softmax is applied in main.
    criteon = nn.CrossEntropyLoss().to(device)

    for epoch in range(100):
        # Training mode: BatchNorm uses batch statistics, gradients enabled.
        model.train()
        for step, (x, y) in enumerate(cifar_train):
            x, y = x.to(device), y.to(device)
            logits = model(x)          # [b, 100]
            loss = criteon(logits, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % 50 == 0:
                print('epoch:', epoch, 'step:', step, 'loss:', loss.item())

        # Evaluation: switch BatchNorm to running statistics and disable
        # autograd so no graph is built for the forward passes.
        model.eval()
        totalacc = 0
        with torch.no_grad():
            for x, y in cifar_test:
                x, y = x.to(device), y.to(device)
                # [b, 100] logits -> [b] predicted class indices
                logits = model(x)
                pred = logits.argmax(dim=1)
                totalacc += pred.eq(y).sum().float().item()

        total_num = len(cifar_test.dataset)
        acc = totalacc / total_num
        print('epoch:', epoch, 'accuracy:', acc)

    # Make sure the target directory exists before saving the checkpoint.
    os.makedirs('checkpoint', exist_ok=True)
    torch.save(model.state_dict(), 'checkpoint/resnet18_weight.pth')

# Run training only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
