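"""Training entry point for GWM unsupervised motion segmentation.

Builds a MaskFormer-style model via mask_former_trainer.Trainer, trains it with an
optical-flow reconstruction loss, and periodically evaluates unsupervised IoU on
the validation set.
"""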
import determinism  # noqa

determinism.i_do_nothing_but_dont_remove_me_otherwise_things_break()  # noqa

import argparse
import bisect
import copy
import os
import sys
import time
from argparse import ArgumentParser

import torch
import wandb
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.engine import PeriodicCheckpointer
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

import config
import losses
import utils
from eval_utils import eval_unsupmf, get_unsup_image_viz, get_vis_header
from mask_former_trainer import setup, Trainer

logger = utils.log.getLogger('gwm')
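
# Helper to toggle requires_grad for all parameters of a module (set=True unfreezes).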
def freeze(module, set=False):
    for param in module.parameters():
        param.requires_grad = set

def main(args):
    cfg = setup(args)
    logger.info(f"Called as {' '.join(sys.argv)}")
    logger.info(f'Output dir {cfg.OUTPUT_DIR}')

    random_state = utils.random_state.PytorchRNGState(seed=cfg.SEED).to(torch.device(cfg.MODEL.DEVICE))
    random_state.seed_everything()
    utils.log.checkpoint_code(cfg.OUTPUT_DIR)

    if not cfg.SKIP_TB:
        writer = SummaryWriter(log_dir=cfg.OUTPUT_DIR)
    else:
        writer = None
    # initialize model
    model = Trainer.build_model(cfg)
    optimizer = Trainer.build_optimizer(cfg, model)
    scheduler = Trainer.build_lr_scheduler(cfg, optimizer)
    logger.info(f'Optimiser is {type(optimizer)}')

    checkpointer = DetectionCheckpointer(model,
                                         save_dir=os.path.join(cfg.OUTPUT_DIR, 'checkpoints'),
                                         random_state=random_state,
                                         optimizer=optimizer,
                                         scheduler=scheduler)
    periodic_checkpointer = PeriodicCheckpointer(checkpointer=checkpointer,
                                                 period=cfg.SOLVER.CHECKPOINT_PERIOD,
                                                 max_iter=cfg.SOLVER.MAX_ITER,
                                                 max_to_keep=None if cfg.FLAGS.KEEP_ALL else 5,
                                                 file_prefix='checkpoint')
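    # resume_or_load restores the full training state (model, optimizer, scheduler,
    # RNG) when --resume_path is given; otherwise cfg.MODEL.WEIGHTS only initialises
    # the model and training starts from iteration 0.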
    checkpoint = checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume_path is not None)
    iteration = 0 if args.resume_path is None else checkpoint['iteration']
    train_loader, val_loader = config.loaders(cfg)

    # overfit single batch for debug
    # sample = next(iter(loader))
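    # Each criterion entry appears to follow (loss module, weight multiplier,
    # schedule fn); the reconstruction loss uses a constant schedule here.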
    criterions = {
        'reconstruction': (losses.ReconstructionLoss(cfg, model), cfg.GWM.LOSS_MULT.REC, lambda x: 1)}
    criterion = losses.CriterionDict(criterions)
    if args.eval_only:
        if len(val_loader.dataset) == 0:
            logger.error("Validation dataset: empty")
            sys.exit(0)
        model.eval()
        iou = eval_unsupmf(cfg=cfg, val_loader=val_loader, model=model, criterion=criterion, writer=writer,
                           writer_iteration=iteration)
        logger.info(f"Results: iteration: {iteration} IOU = {iou}")
        return
    if len(train_loader.dataset) == 0:
        logger.error("Training dataset: empty")
        sys.exit(0)
    logger.info(
        f'Start of training: dataset {cfg.GWM.DATASET},'
        f' train {len(train_loader.dataset)}, val {len(val_loader.dataset)},'
        f' device {model.device}, keys {cfg.GWM.SAMPLE_KEYS}, '
        f'multiple flows {cfg.GWM.USE_MULT_FLOW}')

    iou_best = 0
    timestart = time.time()
    dilate_kernel = torch.ones((2, 2), device=model.device)
    total_iter = cfg.TOTAL_ITER if cfg.TOTAL_ITER else cfg.SOLVER.MAX_ITER  # early stop

    with torch.autograd.set_detect_anomaly(cfg.DEBUG), \
            tqdm(initial=iteration, total=total_iter, disable=utils.environment.is_slurm()) as pbar:
        while iteration < total_iter:
            for sample in train_loader:
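                # Progressive-unfreezing schedule: cfg.FLAGS.UNFREEZE_AT is read as
                # (stage, milestone) pairs, and bisect selects the stage whose
                # milestone has been reached at the current iteration.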
                if cfg.MODEL.META_ARCHITECTURE != 'UNET' and cfg.FLAGS.UNFREEZE_AT:
                    if hasattr(model.backbone, 'frozen_stages'):
                        assert cfg.MODEL.BACKBONE.FREEZE_AT == -1, "MODEL initial parameters forced frozen"
                        stages = [s for s, m in cfg.FLAGS.UNFREEZE_AT]
                        milest = [m for s, m in cfg.FLAGS.UNFREEZE_AT]
                        pos = bisect.bisect_right(milest, iteration) - 1
                        if pos >= 0:
                            curr_setting = model.backbone.frozen_stages
                            if curr_setting != stages[pos]:
                                logger.info(f"Updating backbone freezing stages from {curr_setting} to {stages[pos]}")
                                model.backbone.frozen_stages = stages[pos]
                                model.train()
                    else:
                        assert cfg.MODEL.BACKBONE.FREEZE_AT == -1, "MODEL initial parameters forced frozen"
                        stages = [s for s, m in cfg.FLAGS.UNFREEZE_AT]
                        milest = [m for s, m in cfg.FLAGS.UNFREEZE_AT]
                        pos = bisect.bisect_right(milest, iteration) - 1
                        freeze(model, set=False)
                        freeze(model.sem_seg_head.predictor, set=True)
                        if pos >= 0:
                            stage = stages[pos]
                            if stage <= 2:
                                freeze(model.sem_seg_head, set=True)
                            if stage <= 1:
                                freeze(model.backbone, set=True)
                        model.train()
                else:
                    logger.debug_once(f'Unfreezing disabled schedule: {cfg.FLAGS.UNFREEZE_AT}')
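                # Flatten the nested batch into a flat list of per-frame sample dicts
                # and stack the target optical flow, clipped to [-20, 20].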
                sample = [e for s in sample for e in s]
                flow_key = 'flow'
                raw_sem_seg = False
                if cfg.GWM.FLOW_RES is not None:
                    flow_key = 'flow_big'
                    raw_sem_seg = cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME == 'MegaBigPixelDecoder'

                flow = torch.stack([x[flow_key].to(model.device) for x in sample]).clip(-20, 20)
                logger.debug_once(f'flow shape: {flow.shape}')
                preds = model.forward_base(sample, keys=cfg.GWM.SAMPLE_KEYS, get_eval=True, raw_sem_seg=raw_sem_seg)
                masks_raw = torch.stack([x['sem_seg'] for x in preds], 0)
                logger.debug_once(f'mask shape: {masks_raw.shape}')
                masks_softmaxed_list = [torch.softmax(masks_raw, dim=1)]
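                # One loss per predicted mask set; with USE_MULT_FLOW the criterion is
                # also evaluated on a second flow field and the two are averaged.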
                total_losses = []
                log_dicts = []
                for mask_idx, masks_softmaxed in enumerate(masks_softmaxed_list):
                    loss, log_dict = criterion(sample, flow, masks_softmaxed, iteration)
                    if cfg.GWM.USE_MULT_FLOW:
                        flow2 = torch.stack([x[flow_key + '_2'].to(model.device) for x in sample]).clip(-20, 20)
                        other_loss, other_log_dict = criterion(sample, flow2, masks_softmaxed, iteration)
                        loss = loss / 2 + other_loss / 2
                        for k, v in other_log_dict.items():
                            log_dict[k] = log_dict[k] / 2 + v / 2
                    total_losses.append(loss)
                    log_dicts.append(log_dict)
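                # GWM.LOSS_MULT.HEIR_W appears to hold per-prediction weights; with a
                # single prediction the weighted combination reduces to the plain loss.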
                loss_ws = cfg.GWM.LOSS_MULT.HEIR_W
                total_w = float(sum(loss_ws[:len(total_losses)]))
                log_dict = {}
                if len(total_losses) == 1:
                    log_dict = log_dicts[0]
                    loss = total_losses[0]
                else:
                    loss = 0
                    for i, (tl, w, ld) in enumerate(zip(total_losses, loss_ws, log_dicts)):
                        for k, v in ld.items():
                            log_dict[f'{k}_{i}'] = v * w / total_w
                        loss += tl * w / total_w
                train_log_dict = {f'train/{k}': v for k, v in log_dict.items()}
                del log_dict
                train_log_dict['train/learning_rate'] = optimizer.param_groups[-1]['lr']
                train_log_dict['train/loss_total'] = loss.item()

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                scheduler.step()

                pbar.set_postfix(loss=loss.item())
                pbar.update()
                # Sanity check for RNG state
                if (iteration + 1) % 1000 == 0 or iteration + 1 in {1, 50}:
                    logger.info(
                        f'Iteration {iteration + 1}. RNG outputs {utils.random_state.get_randstate_magic_numbers(model.device)}')

                if cfg.DEBUG or (iteration + 1) % 100 == 0:
                    logger.info(
                        f'Iteration: {iteration + 1}, time: {time.time() - timestart:.01f}s, loss: {loss.item():.02f}.')

                    for k, v in train_log_dict.items():
                        if writer:
                            writer.add_scalar(k, v, iteration + 1)

                    if cfg.WANDB.ENABLE:
                        wandb.log(train_log_dict, step=iteration + 1)
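                # Periodic visualisation and unsupervised IoU evaluation; the best
                # model by validation IoU is saved separately from the periodic
                # checkpoints.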
                if (iteration + 1) % cfg.LOG_FREQ == 0 or (iteration + 1) in [1, 50, 500]:
                    model.eval()
                    if writer:
                        flow = torch.stack([x['flow'].to(model.device) for x in sample]).clip(-20, 20)
                        image_viz, header_text = get_unsup_image_viz(model, cfg, sample, criterion)
                        header = get_vis_header(image_viz.size(2), flow.size(3), header_text)
                        image_viz = torch.cat([header, image_viz], dim=1)
                        writer.add_image('train/images', image_viz, iteration + 1)
                    if cfg.WANDB.ENABLE and (iteration + 1) % 2500 == 0:
                        image_viz = get_unsup_image_viz(model, cfg, sample)
                        wandb.log({'train/viz': wandb.Image(image_viz.float())}, step=iteration + 1)

                    if iou := eval_unsupmf(cfg=cfg, val_loader=val_loader, model=model, criterion=criterion,
                                           writer=writer, writer_iteration=iteration + 1, use_wandb=cfg.WANDB.ENABLE):
                        if cfg.SOLVER.CHECKPOINT_PERIOD and iou > iou_best:
                            iou_best = iou
                            if not args.wandb_sweep_mode:
                                checkpointer.save(name='checkpoint_best', iteration=iteration + 1, loss=loss,
                                                  iou=iou_best)
                            logger.info(f'New best IoU {iou_best:.02f} after iteration {iteration + 1}')
                            if cfg.WANDB.ENABLE:
                                wandb.log({'eval/IoU_best': iou_best}, step=iteration + 1)
                            if writer:
                                writer.add_scalar('eval/IoU_best', iou_best, iteration + 1)
                    model.train()

                periodic_checkpointer.step(iteration=iteration + 1, loss=loss)
                iteration += 1
                timestart = time.time()

def get_argparse_args():
    parser = ArgumentParser()
    parser.add_argument('--resume_path', type=str, default=None)
    parser.add_argument('--use_wandb', dest='wandb_sweep_mode', action='store_true')  # for sweep
    parser.add_argument('--config-file', type=str,
                        default='configs/maskformer/maskformer_R50_bs16_160k_dino.yaml')
    parser.add_argument('--eval_only', action='store_true')
    parser.add_argument(
        "opts",
        help="Modify config options by adding 'KEY VALUE' pairs at the end of the command. "
             "See config references at "
             "https://detectron2.readthedocs.io/modules/config.html#config-references",
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser
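
# Illustrative invocation (the dataset override is an example value, not a default):
#   python main.py --config-file configs/maskformer/maskformer_R50_bs16_160k_dino.yaml GWM.DATASET DAVIS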
if __name__ == "__main__":
    args = get_argparse_args().parse_args()
    if args.resume_path:
        # Recover the run's saved config: resume_path points inside
        # <output_dir>/checkpoints/, so config.yaml lives two levels up.
        args.config_file = "/".join(args.resume_path.split('/')[:-2]) + '/config.yaml'
        print(args.config_file)
    main(args)