afiliot committed
Commit: 5818ee8
Parent: 5c64879

Update scripts/trainer.py
Files changed (1): scripts/trainer.py (+9, -23)
scripts/trainer.py CHANGED

@@ -185,6 +185,8 @@ class TorchTrainer(BaseTrainer):
         The weight decay for the optimizer.
     device : str = "cpu"
         The device to use for training and evaluation.
+    num_workers: int = 8
+        Number of workers.
     optimizer: Callable = Adam
         The optimizer class to use.
     train_step: Callable = slide_level_train_step
@@ -205,6 +207,7 @@ class TorchTrainer(BaseTrainer):
         learning_rate: float = 1.0e-3,
         weight_decay: float = 0.0,
         device: str = "cpu",
+        num_workers: int = 8,
         optimizer: Callable = Adam,
         train_step: Callable = slide_level_train_step,
         val_step: Callable = slide_level_val_step,
@@ -226,6 +229,7 @@ class TorchTrainer(BaseTrainer):
 
         self.collator = collator
         self.device = device
+        self.num_workers = num_workers
 
         self.train_losses: List[float]
         self.val_losses: List[float]
@@ -260,6 +264,7 @@ class TorchTrainer(BaseTrainer):
             pin_memory=True,
             collate_fn=self.collator,
             drop_last=True,
+            num_workers=self.num_workers,
         )
         val_dataloader = DataLoader(
             dataset=val_set,
@@ -268,6 +273,7 @@ class TorchTrainer(BaseTrainer):
             pin_memory=True,
             collate_fn=self.collator,
             drop_last=False,
+            num_workers=self.num_workers,
         )
 
         # Prepare modules.
@@ -355,6 +361,7 @@ class TorchTrainer(BaseTrainer):
             pin_memory=True,
             collate_fn=self.collator,
             drop_last=False,
+            num_workers=self.num_workers,
         )
 
         # Prepare modules.
@@ -401,6 +408,7 @@ class TorchTrainer(BaseTrainer):
             pin_memory=True,
             collate_fn=self.collator,
             drop_last=False,
+            num_workers=self.num_workers,
         )
 
         # Prepare modules
@@ -415,26 +423,4 @@ class TorchTrainer(BaseTrainer):
             device=self.device,
         )
 
-        return test_epoch_labels, test_epoch_logits
-
-    def compute_metrics(
-        self, labels: np.array, logits: np.array
-    ) -> Dict[str, float]:
-        """Compute metrics using the provided labels and logits.
-
-        Parameters
-        ----------
-        labels: np.ndarray
-            The ground truth labels.
-        logits: np.ndarray
-            The predicted logits.
-
-        Returns:
-            Dict[str, float]
-            A dictionary containing the computed metrics.
-        """
-        test_metrics = {
-            k: metric(labels, logits) for k, metric in self.metrics.items()
-        }
-        return test_metrics
-
+        return test_epoch_labels, test_epoch_logits
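
Note (not part of the commit): a minimal sketch of the pattern this change adds, i.e. forwarding a configurable num_workers to torch.utils.data.DataLoader next to the pin_memory and drop_last arguments already used in scripts/trainer.py. The dataset, batch size, and the absence of a custom collator are illustrative assumptions, not taken from the repository.

import torch
from torch.utils.data import DataLoader, TensorDataset

# Illustrative stand-in for the trainer's dataset (assumption); the real
# trainer also passes collate_fn=self.collator.
train_set = TensorDataset(torch.randn(64, 16), torch.randint(0, 2, (64,)))
num_workers = 8  # default value of the new TorchTrainer argument

train_dataloader = DataLoader(
    dataset=train_set,
    batch_size=8,
    pin_memory=True,
    drop_last=True,
    num_workers=num_workers,  # the kwarg this commit adds to each DataLoader
)

if __name__ == "__main__":
    # Worker processes are spawned lazily once iteration starts.
    features, labels = next(iter(train_dataloader))
    print(features.shape, labels.shape)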
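
With compute_metrics removed, the trainer now returns test_epoch_labels and test_epoch_logits and leaves metric computation to the caller. Below is a hedged sketch of that computation as a standalone helper, mirroring the deleted method; the accuracy metric and the thresholding of logits at zero are assumptions for illustration only.

from typing import Callable, Dict

import numpy as np


def compute_metrics(
    labels: np.ndarray,
    logits: np.ndarray,
    metrics: Dict[str, Callable[[np.ndarray, np.ndarray], float]],
) -> Dict[str, float]:
    """Apply each metric callable to the ground-truth labels and predicted logits."""
    return {k: metric(labels, logits) for k, metric in metrics.items()}


if __name__ == "__main__":
    # Illustrative binary accuracy; thresholding logits at 0 is an assumption.
    def accuracy(y_true: np.ndarray, logits: np.ndarray) -> float:
        return float(((logits > 0).astype(float).ravel() == y_true.ravel()).mean())

    labels = np.array([0.0, 1.0, 1.0, 0.0])
    logits = np.array([-1.2, 0.8, 2.1, -0.3])
    print(compute_metrics(labels, logits, {"accuracy": accuracy}))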