import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import transforms, datasets, models
from torchvision.utils import make_grid

import os
import time
import math

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from PIL import ImageFile

from model import ConvolutionalNet

# Allow PIL to load truncated image files instead of raising an error
ImageFile.LOAD_TRUNCATED_IMAGES = True

# Training augmentation; normalization uses the standard ImageNet statistics
train_transforms = transforms.Compose([
    transforms.RandomRotation(10),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# No augmentation at test time, only tensor conversion and normalization
test_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# NOTE: the images are assumed to already share a fixed size; if they do not,
# add transforms.Resize / transforms.CenterCrop so batches can be stacked.
train_dataset = datasets.ImageFolder(root='./data/train', transform=train_transforms)
test_dataset = datasets.ImageFolder(root='./data/test', transform=test_transform)

torch.manual_seed(42)
train_loader = DataLoader(train_dataset, batch_size=10, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=10, shuffle=False)

class_names = train_dataset.classes

# Grab a single batch from the training loader for inspection
for images, labels in train_loader:
    break

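# Optional preview of that batch with make_grid (a sketch, not part of the original
# training flow); pixel values are still ImageNet-normalized, so colours look washed out.
im = make_grid(images, nrow=5)
plt.figure(figsize=(12, 5))
plt.imshow(im.permute(1, 2, 0).clamp(0, 1))
plt.title(' '.join(class_names[i] for i in labels.tolist()))
plt.show()
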
torch.manual_seed(101)
model = ConvolutionalNet()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

start_time = time.time()
epochs = 5

# Cap the number of batches used per epoch (batch_size=10, so at most
# 8,000 training images and 3,000 test images are seen each epoch)
max_trn_batch = 800
max_tst_batch = 300

train_losses = []
test_losses = []
train_correct = []
test_correct = []

for epoch in range(epochs):
    trn_corr = 0
    tst_corr = 0

    # Training loop
    for b, (X_train, y_train) in enumerate(train_loader):
        # Stop once the per-epoch training batch cap is reached
        if b == max_trn_batch:
            break

        y_pred = model(X_train)
        loss = criterion(y_pred, y_train)

        if b % 200 == 0:
            print(f"Epoch: {epoch+1}/{epochs}\tBatch: {b+1}\tLoss: {loss.item()}")

        # Tally correct predictions for this batch
        predicted = torch.max(y_pred, 1)[1]
        batch_corr = (predicted == y_train).sum()
        trn_corr += batch_corr

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # Record per-epoch training loss (last batch) and correct count
    train_losses.append(loss.item())
    train_correct.append(trn_corr)

    # Evaluation on the test set (no gradient tracking needed)
    with torch.no_grad():
        for b, (X_test, y_test) in enumerate(test_loader):
            # Stop once the per-epoch test batch cap is reached
            if b == max_tst_batch:
                break

            try:
                y_pred = model(X_test)
            except Exception as e:
                print(f"Error testing images: {e}")
                continue
            loss = criterion(y_pred, y_test)

            predicted = torch.max(y_pred, 1)[1]
            batch_corr = (predicted == y_test).sum()
            tst_corr += batch_corr

    # Record per-epoch test loss (last batch) and correct count
    test_losses.append(loss.item())
    test_correct.append(tst_corr)

end_time = time.time()
total_time = end_time - start_time
print(f"Time taken: {math.floor(total_time / 60)} minutes {math.floor(total_time % 60)} seconds")

# Save the trained weights
torch.save(model.state_dict(), 'model.pt')

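# The saved weights can later be restored into a fresh network (a usage sketch,
# assuming the same ConvolutionalNet definition is importable):
#   model = ConvolutionalNet()
#   model.load_state_dict(torch.load('model.pt'))
#   model.eval()
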
# Per-epoch loss curves
plt.plot(train_losses, label='train loss')
plt.plot(test_losses, label='test loss')
plt.legend()
plt.show()

# Per-epoch accuracy as a percentage (8,000 training / 3,000 test images per epoch)
plt.plot([t/80 for t in train_correct], label='train accuracy')
plt.plot([t/30 for t in test_correct], label='test accuracy')
plt.legend()
plt.show()

# Final test accuracy over the 3,000 evaluated test images
print(f'Accuracy: {100*test_correct[-1].item()/3000:.2f}%')