# cifar10_model / main.py
# Model, parameters and utils for evaluating.
# %matplotlib inline
# optuna-dashboard sqlite:///db.sqlite3
# https://github.com/optuna/optuna-dashboard
import optuna  # Used for hyperparameter tuning.
import gc
import torch
from torch.utils import data
import torchvision.datasets as datasets
from torchvision import transforms
from torch import nn
import random
import my_utils as mu
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_trans = transforms.Compose([
    # Data augmentation to increase the effective diversity of the training set and improve generalization.
# transforms.FiveCrop(size=(32,32)), # might remove this.
transforms.RandomPerspective(distortion_scale=0.6, p=0.4),
transforms.GaussianBlur(kernel_size=(5, 11), sigma=(0.1, 0.2)),
transforms.RandomRotation(degrees=(-8, 8)),
transforms.ToTensor(),
# transforms.Normalize((0.49139968, 0.48215827 ,0.44653124), (0.24703233, 0.24348505, 0.26158768))
])
test_trans = transforms.Compose([
    # Test-time preprocessing: only convert to tensor (Normalize is left commented out, matching training).
transforms.ToTensor(),
# transforms.Normalize((0.49139968, 0.48215827, 0.44653124), (0.24703233, 0.24348505, 0.26158768))
])
# Required for data to be in Tensor form rather than PIL Image.
trans = [transforms.ToTensor()]
trans = transforms.Compose(trans)
cifar_trainset = datasets.CIFAR10(root='./data', train=True, download=True, transform=train_trans)
cifar_testset = datasets.CIFAR10(root='./data', train=False, download=True, transform=test_trans)
# 60,000 32x32 color images in 10 different classes
# batch_size = 15
# data_iter = data.DataLoader(cifar_trainset, batch_size, shuffle=True)
# test_iter = data.DataLoader(cifar_testset, batch_size, shuffle=True)
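# The Normalize values commented out in the transform pipelines above are the commonly
# cited CIFAR-10 per-channel statistics. A hedged sketch (an addition for illustration,
# never called by this script) of how they can be recomputed from the raw,
# ToTensor-only training images:
def compute_cifar10_channel_stats(batch_size=1024):
    plain_set = datasets.CIFAR10(root='./data', train=True, download=True, transform=trans)
    loader = data.DataLoader(plain_set, batch_size=batch_size, shuffle=False)
    total, mean_sum, sq_sum = 0, torch.zeros(3), torch.zeros(3)
    for X, _ in loader:
        n = X.shape[0]
        mean_sum += X.mean(dim=[0, 2, 3]) * n       # per-channel mean of this batch, weighted by batch size
        sq_sum += (X ** 2).mean(dim=[0, 2, 3]) * n  # per-channel mean of squares
        total += n
    mean = mean_sum / total
    std = (sq_sum / total - mean ** 2).sqrt()       # population std from E[X^2] - E[X]^2
    return mean, std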
print("Read dataset and create dataloaders - 5%")
def SpatialAveragePool(X):
return torch.mean(X, dim=[2,3])
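# Quick illustrative check of the pooling contract (a sketch, not part of training):
# a (batch, channels, H, W) activation reduces to a per-channel (batch, channels) vector.
assert SpatialAveragePool(torch.zeros(2, 8, 30, 30)).shape == (2, 8)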
def init_weights(m):
if type(m) == nn.Linear or type(m) == nn.Conv2d:
torch.nn.init.xavier_uniform_(m.weight)
loss = nn.CrossEntropyLoss()
class MakiNet(nn.Module):
    def __init__(self, conv_arch, num_classes, dropout_rate=0.0001):  # conv_arch: list of [num_conv, in_channels, out_channels] per block
super(MakiNet, self).__init__()
self.out_classes = num_classes
self.conv_arch = conv_arch
        k = 0
for i, (num_conv, in_channels, out_channels) in enumerate(conv_arch):
self.add_module(f"maki_block{i}", MakiBlock(num_conv, in_channels, out_channels, dropout_rate))
# input_channels = out_channels
# k = out_channels * (32-(2*len(conv_arch))) * (32-(2*len(conv_arch)))
            k = out_channels  # channel count of the final block = classifier input size after spatial average pooling
        print(str(k) + " features into the classifier head")
self.classifier = nn.Sequential(
nn.Flatten(),
nn.Linear(k, 75),
nn.Dropout(p=dropout_rate),
nn.ReLU(),
nn.Linear(75, 25),
nn.Dropout(p=dropout_rate),
nn.ReLU(),
nn.Linear(25, num_classes)
)
def forward(self, x):
out = x
# print(f"number of blocks: {len(self.conv_arch)}")
for i in range(len(self.conv_arch)):
out = self._modules[f"maki_block{i}"](out)
out = SpatialAveragePool(out)
out = self.classifier(out)
return out
class MakiBlock(nn.Module):
def __init__(self, num_conv, input_channels, output_channels, dropout_rate):
super(MakiBlock, self).__init__()
self.num_convs = num_conv
self.linear = nn.Linear(input_channels, num_conv, bias=False)
self.relu = nn.ReLU()
#self.max = nn.MaxPool2d(kernel_size=3, padding=1, stride=1)
#self.avg = nn.AvgPool2d(kernel_size=3, padding=1, stride=1)
self.dropout = nn.Dropout(p=dropout_rate)
for i in range(num_conv):
# add convolution layer
self.add_module(f"conv{i}", nn.Conv2d(input_channels, output_channels, kernel_size=5, padding=1, stride=1))
# add batch norm layer
self.add_module(f"batch_norm{i}", nn.BatchNorm2d(output_channels))
def forward(self, x):
        # Gating path: spatially average the input, then map it to one scalar weight per conv branch.
x = x.to(device)
# Initial MLP part.
avg_out = SpatialAveragePool(x)
avg_out = avg_out.to(device)
lin_out = self.linear(avg_out)
        lin_out = self.dropout(lin_out)
a = self.relu(lin_out) # a vector.
total_output = []
for j in range(self.num_convs):
out = self._modules[f"conv{j}"](x)
out = self._modules[f"batch_norm{j}"](out)
out = self.dropout(out)
out = self.relu(out)
# out = self.max(out) # removing it, as shown ineffective.
            s = a[:, j].unsqueeze(-1).unsqueeze(-1).unsqueeze(-1) * out  # scale branch j by its gate weight, broadcast over (C, H, W)
total_output.append(s)
total_output = torch.stack(total_output, dim=0)
out = torch.sum(total_output, dim=0)
        return out
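# A minimal, hedged sanity check of the architecture (the small conv_arch here is an
# illustrative assumption, not one of the trained configurations). It is defined but
# never called by the training script.
def _shape_sanity_check():
    demo_arch = [[3, 3, 16], [3, 16, 32]]  # [num_conv, in_channels, out_channels] per block
    demo_net = MakiNet(demo_arch, num_classes=10).to(device)
    demo_out = demo_net(torch.randn(4, 3, 32, 32, device=device))
    print(demo_out.shape)  # expected: torch.Size([4, 10])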
print("Create the model - 40%")
# def train_model(net, train_iter, test_iter, num_epochs, lr, wd=1e-9, device=device, param_dict=None):
def train_model(trial, study, device=device):
gc.collect()
torch.cuda.empty_cache()
gpu_memory = torch.cuda.memory_allocated(device='cuda:0')
print(f"GPU memory allocated: {gpu_memory}")
# To be completed.
try:
        batch_size = trial.suggest_int("batch_size", 32, 256)
train_iter = data.DataLoader(cifar_trainset, batch_size, shuffle=True)
test_iter = data.DataLoader(cifar_testset, batch_size, shuffle=True)
        # dropout_rate = trial.suggest_float("dropout_rate", 1e-5, 1e-1, log=True)
        dropout_rate = 0.15
        # number_of_layers = trial.suggest_int("number_of_layers", 3, 8)
        # number_of_channels = trial.suggest_int("number_of_channels", 50, 200)
        # number_of_layers = 4
        # number_of_channels = 10
        # num_conv = trial.suggest_int("num_conv", 3, 5)  # 3, try 12 next after this.
        num_conv = 3
#model_arch = [
# [num_conv, 3, number_of_channels], # num_conv, in_channels, out_channels
#]
#for i in range(number_of_layers):
# model_arch.append([num_conv, number_of_channels, number_of_channels])
model_arch = [
[3, 3, 120], # num_conv, in_channels, out_channels
[3, 120, 100],
[3, 100, 80],
]
net = MakiNet(model_arch, 10, dropout_rate=dropout_rate)
net.to(device)
#state_dict = torch.load(f"optuna_coursework_multi_arch_hyper_maki_net_4_10_0.713.params")
#net.load_state_dict(state_dict)
net.apply(init_weights)
        lr = trial.suggest_float("lr", 1e-5, 9e-1, log=True)
        # wd = trial.suggest_float("wd", 1e-9, 1e-1, log=True)
        wd = 0
optimizer = torch.optim.SGD(net.parameters(), lr=lr, weight_decay=wd)
loss = nn.CrossEntropyLoss()
timer = mu.Timer()
        num_epochs = trial.suggest_int("num_epochs", 20, 50)  # 10, 40
metric = mu.Accumulator(3) # train_loss, train_acc, num_examples
train_loss = 0
train_acc = 0
for epoch in range(num_epochs):
print(f"Epoch: {epoch}/ {num_epochs}")
for i, (X, y) in enumerate(train_iter):
timer.start()
net.train()
optimizer.zero_grad()
X, y = X.to(device), y.to(device)
y_hat = net(X)
l = loss(y_hat, y)
l.backward()
optimizer.step()
with torch.no_grad():
metric.add(l*X.shape[0], mu.accuracy(y_hat, y), X.shape[0])
timer.stop()
train_loss, train_acc = metric[0]/metric[2], metric[1]/metric[2]
if (i+1) % 50 == 0:
print(f'batch {i+1}, train loss {train_loss:.3f}, train acc {train_acc:.3f}')
test_acc = mu.evaluate_accuracy_gpu(net, test_iter)
print(f'Test Accuracy for epoch {epoch+1} is {test_acc:.3f}')
            if epoch == 5 and test_acc <= 0.1:
                # Prune the trial if test accuracy is still at chance level (<= 0.1) after 6 epochs, to save time.
                raise optuna.exceptions.TrialPruned()
test_acc = mu.evaluate_accuracy_gpu(net, test_iter)
test_acc_delta = test_acc - train_acc
if test_acc_delta < -0.25:
# overfitting of more than 25%, prune.
raise optuna.exceptions.TrialPruned()
try:
if test_acc > study.best_trials[0].values[0]:
torch.save(net.state_dict(), f"attempt6{test_acc:.3f}.params")
except IndexError:
print("No best trial yet.")
torch.save(net.state_dict(), f"attempt6{test_acc:.3f}.params")
except Exception as e:
print("Exception occurred")
return optuna.exceptions.OptunaError(f"Exception occurred during training. {e}")
return test_acc # , train_loss, test_acc_delta
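# Hedged helper for the "evaluating" part mentioned in the file header: rebuild the
# architecture used in train_model and score a saved checkpoint on the test set.
# The checkpoint path is hypothetical (e.g. one of the attempt6*.params files written
# above); this helper is not called by the training loop.
def evaluate_checkpoint(path, batch_size=128):
    model_arch = [
        [3, 3, 120],   # num_conv, in_channels, out_channels
        [3, 120, 100],
        [3, 100, 80],
    ]
    net = MakiNet(model_arch, 10, dropout_rate=0.15)
    net.load_state_dict(torch.load(path, map_location=device))
    net.to(device)
    test_iter = data.DataLoader(cifar_testset, batch_size, shuffle=False)
    return mu.evaluate_accuracy_gpu(net, test_iter)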
if __name__ == "__main__":
# model_arch = [
# [6, 3, 12], # num_conv, in_channels, out_channels max 73% no aug
# [4, 12, 15],
# [4, 15, 12],
# [5, 12, 9],
# ]
#
# model_arch2 = [
# [3, 3, 5], # num_conv, in_channels, out_channels
# [3, 5, 6],
# [3, 6, 3],
# # [2, 6, 3]
# ]
#study = optuna.create_study(study_name="attempt6", storage="sqlite:///db.sqlite3", directions=["maximize"]) # maximise the test accuracy.
study = optuna.load_study(study_name="attempt6", storage="sqlite:///db.sqlite3")
# print(f"Study Attributes: {study.user_attrs}")
#print(f"{study.best_trials[0].values[0]} - {study.best_trials[0].params}")
study.optimize(lambda trial: train_model(trial, study, device), n_trials=1000) # 2 hours.
    # All trial runs up to run 25 use the simple transform pipeline, without data augmentation.
# optuna-dashboard sqlite:///db.sqlite3
# test_acc, train_loss, test_acc_delta