#!/usr/bin/env python
# coding: utf-8

# In[2]:

# import PyTorch and the modules needed for training
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader

# import other libraries
import numpy as np


# In[3]:

# import pretrained model architectures and data utilities from torchvision
from torchvision.models import squeezenet1_1
from torchvision.models import resnet50
from torchvision.models import resnet18
from torchvision.models import mobilenet_v2
from torchvision import transforms
from torchvision.datasets import ImageFolder


# In[4]:

class_num = 5
classes = ['Ak', 'Ala_Idris', 'Buzgulu', 'Dimnit', 'Nazli']


# In[5]:

# load MobileNetV2 with ImageNet weights
# (newer torchvision versions replace pretrained=True with the weights= argument)
model = mobilenet_v2(pretrained=True)


# In[9]:

print(model)


# In[7]:

# data augmentation and normalization with ImageNet statistics
transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])


# In[8]:

# note: the same augmenting transform is reused for the test and validation sets here;
# a deterministic resize/center-crop transform is usually preferred for evaluation
training_set = ImageFolder('../data/train', transform=transform)
test_set = ImageFolder('../data/test', transform=transform)
val_set = ImageFolder('../data/val', transform=transform)


# In[47]:

batch_size = 8
epochs = 5
lr = 1e-5

loss_fn = nn.CrossEntropyLoss()


# In[48]:

train_loader = DataLoader(training_set, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_set, batch_size=batch_size)
val_loader = DataLoader(val_set, batch_size=batch_size)


# In[49]:

# replace the final classifier layer so the output matches the number of grape classes;
# the optimizer is created afterwards so the new layer's parameters are included in it
model.classifier[1] = nn.Linear(in_features=1280, out_features=class_num)
optimizer = optim.Adam(model.parameters(), lr=lr)


# In[52]:

epochs = 1


# In[55]:

# train the model
for epoch in range(epochs):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = model(data)
        # debug output: predicted class index vs. ground truth for this batch
        print("Out: ", [a.argmax().item() for a in output])
        print("Target: ", target)
        loss = loss_fn(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 10 == 0:
            print('Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()
            ))

    # evaluate the model on the test set
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            output = model(data)
            test_loss += loss_fn(output, target).item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)
    ))


# In[51]:

# export the fine-tuned model as TorchScript
model_scripted = torch.jit.script(model)
model_scripted.save('../models/mobilenet.pt')


# In[ ]:
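
# In[ ]:

# A minimal inference sketch (not part of the original notebook): it loads the exported
# TorchScript model and classifies a single image. The image path 'sample.jpg' and the
# deterministic resize/center-crop evaluation transform below are illustrative assumptions.
from PIL import Image

eval_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# load the scripted model and switch to evaluation mode
loaded = torch.jit.load('../models/mobilenet.pt')
loaded.eval()

img = Image.open('sample.jpg').convert('RGB')   # hypothetical example image
batch = eval_transform(img).unsqueeze(0)        # add a batch dimension
with torch.no_grad():
    logits = loaded(batch)
    probs = F.softmax(logits, dim=1)

pred_idx = probs.argmax(dim=1).item()
print('Predicted class: {} ({:.2%})'.format(classes[pred_idx], probs[0, pred_idx].item()))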