|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import torch |
|
import torch.nn as nn |
|
import torch.nn.functional as F |
|
import torch.optim as optim |
|
from torch.utils.data import DataLoader |
|
|
|
|
|
import numpy as np |
|
|
|
|
|
|
|
|
|
|
|
|
|
import torch.library |
|
from torchvision.models import squeezenet1_1 |
|
from torchvision.models import resnet50 |
|
from torchvision.models import resnet18 |
|
from torchvision.models import mobilenet_v2 |
|
from torchvision import transforms |
|
from torchvision.datasets import ImageFolder |
|
from torch.utils.data import DataLoader |
|
|
|
|
|
|
|
|
|
|
|
# Grape-leaf variety labels; the final classifier layer is sized to match.
classes = ['Ak', 'Ala_Idris', 'Buzgulu', 'Dimnit', 'Nazli']
class_num = len(classes)  # == 5; derived so the two can never drift apart
|
|
|
|
|
|
|
|
|
|
|
# Load MobileNetV2 with ImageNet-pretrained weights.
# The `weights=` API replaces the deprecated `pretrained=True` flag
# (deprecated in torchvision 0.13, removed in later releases); the string
# form resolves to MobileNet_V2_Weights.IMAGENET1K_V1 without an extra import.
model = mobilenet_v2(weights="IMAGENET1K_V1")

# Print the architecture so the classifier head to be replaced is visible.
print(model)
|
|
|
|
|
|
|
|
|
|
|
# Training-time preprocessing: random crop/flip augmentation, plus the
# ImageNet normalization statistics the pretrained backbone expects.
transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

# Evaluation preprocessing: deterministic resize + center crop only.
# The original applied the random augmentations to the test/val sets too,
# which makes evaluation metrics noisy and non-reproducible.
eval_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

training_set = ImageFolder('../data/train', transform=transform)
test_set = ImageFolder('../data/test', transform=eval_transform)
val_set = ImageFolder('../data/val', transform=eval_transform)
|
|
|
|
|
|
|
|
|
|
|
# Hyperparameters.
batch_size = 8
# NOTE: the original assigned epochs = 5 here and silently overrode it to 1
# further down; the effective value is kept in a single assignment.
epochs = 1
lr = 1e-5

# Replace the 1000-way ImageNet head with one sized for our classes BEFORE
# constructing the optimizer, so the fresh layer's parameters are registered
# with it. (The original built Adam from model.parameters() first and swapped
# the classifier afterwards, leaving the new head's weights un-optimized.)
model.classifier[1] = nn.Linear(in_features=1280, out_features=class_num)

loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=lr)

# Shuffle only the training data; evaluation order is irrelevant.
train_loader = DataLoader(training_set, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_set, batch_size=batch_size)
val_loader = DataLoader(val_set, batch_size=batch_size)
|
|
|
|
|
|
|
|
|
|
|
|
|
# Fine-tuning loop: standard forward/backward/step over the training set.
for epoch in range(epochs):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = model(data)
        loss = loss_fn(output, target)
        loss.backward()
        optimizer.step()
        # Log progress every 10th batch. The per-batch prediction dumps the
        # original printed ("Out:"/"Target:" on every iteration) were leftover
        # debug output and have been removed.
        if batch_idx % 10 == 0:
            print('Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()
            ))
|
|
|
|
|
# Evaluate on the held-out test set: accumulate loss and top-1 accuracy.
model.eval()
test_loss = 0.0
correct = 0
with torch.no_grad():
    for data, target in test_loader:
        output = model(data)
        # CrossEntropyLoss defaults to a per-batch MEAN, so weight each batch
        # by its size; dividing by the dataset size below then yields a true
        # per-sample average. (The original summed the batch means directly
        # and divided by the dataset size, underestimating the loss.)
        test_loss += loss_fn(output, target).item() * data.size(0)
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()

test_loss /= len(test_loader.dataset)
print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
    test_loss, correct, len(test_loader.dataset),
    100. * correct / len(test_loader.dataset)
))
|
|
|
|
|
|
|
|
|
|
|
# Serialize the fine-tuned network to TorchScript so it can be loaded without
# the Python class definitions (e.g. from C++ or a serving process).
# NOTE(review): assumes ../models exists — save() will raise otherwise.
model_scripted = torch.jit.script(model)
model_scripted.save('../models/mobilenet.pt')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|