import torch
from torch import nn
import pytorch_lightning as pl
import torchmetrics
from torch.optim.lr_scheduler import OneCycleLR
from torchmetrics.functional import accuracy


class ResBlock(nn.Module):
    """Two 3x3 conv-BN-ReLU layers; the caller adds the skip connection."""

    def __init__(self, in_channel, out_channel):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_channel, in_channel, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(in_channel),
            nn.ReLU(),
            nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(out_channel),
            nn.ReLU(),
        )

    def forward(self, x):
        return self.conv(x)
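
# Illustrative shape check: ResBlock(c, c) maps (N, c, H, W) -> (N, c, H, W),
# so callers can add the input back elementwise:
#   blk = ResBlock(128, 128)
#   y = torch.randn(2, 128, 16, 16)
#   out = y + blk(y)   # same shape as y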


class ResNet18(pl.LightningModule):
    """ResNet-style network for 32x32 inputs (e.g. CIFAR-10)."""

    def __init__(self, train_loader_len, criterion, num_classes=10, lr=0.001, max_lr=1.45e-03):
        super().__init__()
        self.save_hyperparameters(ignore=['criterion'])
        self.criterion = criterion
        self.train_loader_len = train_loader_len
        # Module metric (the steps below use the functional accuracy instead).
        self.accuracy = torchmetrics.Accuracy(task="multiclass", num_classes=self.hparams.num_classes)
        # Stem: 3 -> 64 channels at full resolution.
        self.prep_layer = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
        )
        # Each layer doubles the channels and halves the resolution.
        self.layer_one = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False),
            nn.MaxPool2d(2, 2),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        self.res_block1 = ResBlock(128, 128)
        self.layer_two = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=False),
            nn.MaxPool2d(2, 2),
            nn.BatchNorm2d(256),
            nn.ReLU(),
        )
        self.layer_three = nn.Sequential(
            nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=False),
            nn.MaxPool2d(2, 2),
            nn.BatchNorm2d(512),
            nn.ReLU(),
        )
        self.res_block2 = ResBlock(512, 512)
        self.max_pool = nn.MaxPool2d(4, 4)
        self.fc = nn.Linear(512, num_classes, bias=False)

    def forward(self, x):
        x = self.prep_layer(x)      # (N, 3, 32, 32) -> (N, 64, 32, 32)
        x = self.layer_one(x)       # -> (N, 128, 16, 16)
        x = x + self.res_block1(x)  # residual add, shape unchanged
        x = self.layer_two(x)       # -> (N, 256, 8, 8)
        x = self.layer_three(x)     # -> (N, 512, 4, 4)
        x = x + self.res_block2(x)  # residual add, shape unchanged
        x = self.max_pool(x)        # -> (N, 512, 1, 1)
        x = x.view(x.size(0), -1)   # flatten to (N, 512)
        return self.fc(x)           # logits, (N, num_classes)

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams.lr, weight_decay=1e-4)
        # The default max_lr (1.45e-03) acts as a sentinel: with it, train at
        # the fixed base lr and skip the OneCycle schedule.
        if self.hparams.max_lr == 1.45e-03:
            return optimizer
        scheduler = OneCycleLR(
            optimizer,
            max_lr=self.hparams.max_lr,
            epochs=self.trainer.max_epochs,
            steps_per_epoch=self.train_loader_len,
            pct_start=5 / self.trainer.max_epochs,  # ~5 warm-up epochs
            div_factor=100,
            three_phase=False,
        )
        # OneCycleLR is sized in optimizer steps, so it must be stepped every
        # batch, not once per epoch (Lightning's default for bare schedulers).
        return {
            "optimizer": optimizer,
            "lr_scheduler": {"scheduler": scheduler, "interval": "step"},
        }

    def training_step(self, train_batch, batch_idx):
        data, target = train_batch
        y_pred = self(data)
        loss = self.criterion(y_pred, target)
        # argmax over the class dimension; squeeze() is avoided because it
        # would also drop the batch dimension when batch_size == 1.
        pred = torch.argmax(y_pred, dim=1)
        acc = accuracy(pred, target, task="multiclass", num_classes=self.hparams.num_classes)
        self.log('train_loss', loss, prog_bar=True, on_step=False, on_epoch=True)
        self.log('train_acc', acc, prog_bar=True, on_step=False, on_epoch=True)
        return loss

    def validation_step(self, batch, batch_idx):
        return self.evaluate(batch, 'val')

    def test_step(self, batch, batch_idx):
        return self.evaluate(batch, 'test')

    def evaluate(self, batch, stage=None):
        data, target = batch
        y_pred = self(data)
        # Keep the loss as a tensor so self.log can aggregate it across
        # batches and devices.
        loss = self.criterion(y_pred, target)
        pred = torch.argmax(y_pred, dim=1)
        acc = accuracy(pred, target, task="multiclass", num_classes=self.hparams.num_classes)
        if stage:
            self.log(f"{stage}_loss", loss, prog_bar=True, on_step=False, on_epoch=True)
            self.log(f"{stage}_acc", acc, prog_bar=True, on_step=False, on_epoch=True)
        return pred, target
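

if __name__ == "__main__":
    # Minimal smoke-test sketch. Random CIFAR-10-shaped tensors stand in for
    # a real datamodule, and fast_dev_run executes a single train/val batch
    # just to verify the wiring; swap in real loaders for actual training.
    from torch.utils.data import DataLoader, TensorDataset

    data = torch.randn(64, 3, 32, 32)
    labels = torch.randint(0, 10, (64,))
    loader = DataLoader(TensorDataset(data, labels), batch_size=32)

    model = ResNet18(
        train_loader_len=len(loader),
        criterion=nn.CrossEntropyLoss(),
        num_classes=10,
    )
    trainer = pl.Trainer(max_epochs=1, fast_dev_run=True)
    trainer.fit(model, train_dataloaders=loader, val_dataloaders=loader)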