# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from torch.optim import lr_scheduler


class nnUNetTrainerV2_SGD_ReduceOnPlateau(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that trains with SGD and reduces the learning rate
    on a loss plateau instead of following the default polynomial LR schedule."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
    def initialize_optimizer_and_scheduler(self):
        # SGD with Nesterov momentum (as in nnUNetTrainerV2), but paired with a
        # ReduceLROnPlateau scheduler instead of the usual polynomial decay
        self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
                                         momentum=0.99, nesterov=True)
        self.lr_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.2,
                                                           patience=self.lr_scheduler_patience,
                                                           verbose=True, threshold=self.lr_scheduler_eps,
                                                           threshold_mode="abs")
    def maybe_update_lr(self, epoch=None):
        # maybe update learning rate
        if self.lr_scheduler is not None:
            assert isinstance(self.lr_scheduler, (lr_scheduler.ReduceLROnPlateau, lr_scheduler._LRScheduler))

            if isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
                # ReduceLROnPlateau is stepped with the moving-average training loss
                # (train_loss_MA), which should be more robust than a single-epoch value
                if self.epoch > 0:  # otherwise self.train_loss_MA is None
                    self.lr_scheduler.step(self.train_loss_MA)
            else:
                self.lr_scheduler.step(self.epoch + 1)
        self.print_to_log_file("lr is now (scheduler) %s" % str(self.optimizer.param_groups[0]['lr']))
    def on_epoch_end(self):
        # bypass nnUNetTrainerV2's epoch-end override and fall back to the base
        # nnUNetTrainer logic, which is compatible with the plateau-based scheduler
        return nnUNetTrainer.on_epoch_end(self)
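
# Usage sketch (not part of the original file; task name and fold below are
# placeholders): in nnU-Net v1, a custom trainer like this one is typically
# selected by its class name via the training CLI, e.g.
#
#   nnUNet_train 3d_fullres nnUNetTrainerV2_SGD_ReduceOnPlateau TaskXXX_MYTASK 0
#
# where TaskXXX_MYTASK identifies the dataset/task and 0 is the cross-validation fold.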