bhimrazy committed
Commit f626705
1 Parent(s): 30df46a

Updates models

Files changed (1): src/model.py +14 -6
src/model.py CHANGED
@@ -1,7 +1,7 @@
 import lightning as L
 import torch
 from torch import nn
-from torchmetrics.functional import accuracy
+from torchmetrics.functional import accuracy, cohen_kappa
 from torchvision import models
 
 
@@ -19,15 +19,15 @@ class DRModel(L.LightningModule):
         self.model = models.densenet169(weights=models.DenseNet169_Weights.DEFAULT)
         # self.model = models.vit_b_16(weights=models.ViT_B_16_Weights.DEFAULT)
         # freeze the feature extractor
-        for param in self.model.parameters():
-            param.requires_grad = False
+        # for param in self.model.parameters():
+        #     param.requires_grad = False
         # Change the output layer to have the number of classes
         in_features = self.model.classifier.in_features
         # in_features = 768
         self.model.classifier = nn.Sequential(
             nn.Linear(in_features, in_features // 2),
             nn.ReLU(),
-            nn.Dropout(0.1),
+            nn.Dropout(0.5),
             nn.Linear(in_features // 2, num_classes),
         )
 
@@ -50,14 +50,22 @@ class DRModel(L.LightningModule):
         loss = self.criterion(logits, y)
         preds = torch.argmax(logits, dim=1)
         acc = accuracy(preds, y, task="multiclass", num_classes=self.num_classes)
+        kappa = cohen_kappa(preds, y, task="multiclass", num_classes=self.num_classes, weights="quadratic")
         self.log("val_loss", loss, prog_bar=True)
         self.log("val_acc", acc, prog_bar=True)
+        self.log("val_kappa", kappa, prog_bar=True)
 
     def configure_optimizers(self):
-        optimizer = torch.optim.Adam(
+        # optimizer = torch.optim.Adam(
+        #     self.parameters(), lr=self.learning_rate, weight_decay=1e-4
+        # )
+        # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode="min", factor=0.1, patience=3, verbose=True)
+
+        optimizer = torch.optim.AdamW(
             self.parameters(), lr=self.learning_rate, weight_decay=1e-4
         )
-        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
+        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=20)
+        # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode="min", factor=0.1, patience=3, verbose=True)
         return {
             "optimizer": optimizer,
             "lr_scheduler": {