#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.

"""
Point supervision Training Script.

This script is a simplified version of the training script in detectron2/tools.
"""

import os

import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, build_detection_train_loader
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator, DatasetEvaluators, verify_results
from detectron2.projects.point_rend import add_pointrend_config
from detectron2.utils.logger import setup_logger

from point_sup import PointSupDatasetMapper, add_point_sup_config


class Trainer(DefaultTrainer):
    """
    We use the "DefaultTrainer" which contains pre-defined default logic for
    a standard training workflow. It may not work for you, especially if you
    are working on a new research project. In that case you can write your
    own training loop; you can use "tools/plain_train_net.py" as an example.
    """

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given dataset.
        This uses the special metadata "evaluator_type" associated with each builtin dataset.
        For your own dataset, you can simply create an evaluator manually in your
        script and do not have to worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        if evaluator_type == "coco":
            evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
        if len(evaluator_list) == 0:
            raise NotImplementedError(
                "no Evaluator for the dataset {} with the type {}".format(
                    dataset_name, evaluator_type
                )
            )
        elif len(evaluator_list) == 1:
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)

    @classmethod
    def build_train_loader(cls, cfg):
        # Use the point-supervision mapper only when enabled in the config;
        # otherwise fall back to detectron2's default dataset mapper.
        if cfg.INPUT.POINT_SUP:
            mapper = PointSupDatasetMapper(cfg, is_train=True)
        else:
            mapper = None
        return build_detection_train_loader(cfg, mapper=mapper)


def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    add_pointrend_config(cfg)
    add_point_sup_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    # Set up a logger for the "point_sup" module
    setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="point_sup")
    return cfg


def main(args):
    cfg = setup(args)

    if args.eval_only:
        model = Trainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        res = Trainer.test(cfg, model)
        if cfg.TEST.AUG.ENABLED:
            # Note: DefaultTrainer does not define test_with_TTA; this branch
            # assumes a subclass provides it (see the Trainer in detectron2's
            # tools/train_net.py for an example implementation).
            res.update(Trainer.test_with_TTA(cfg, model))
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    """
    If you'd like to do anything fancier than the standard training logic,
    consider writing your own training loop (see plain_train_net.py) or
    subclassing the trainer.
    """
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()


if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
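
# ---------------------------------------------------------------------------
# Usage sketch. All flags below come from detectron2's default_argument_parser;
# the config file name is an assumption for illustration only — substitute a
# real point-supervision config from your project.
#
#   # Single-machine training on 8 GPUs:
#   python train_net.py \
#       --config-file configs/your_point_sup_config.yaml \
#       --num-gpus 8
#
#   # Resume an interrupted run from the last checkpoint in OUTPUT_DIR:
#   python train_net.py \
#       --config-file configs/your_point_sup_config.yaml \
#       --num-gpus 8 --resume
#
#   # Evaluation only, loading trained weights (path is a placeholder):
#   python train_net.py \
#       --config-file configs/your_point_sup_config.yaml \
#       --eval-only MODEL.WEIGHTS /path/to/model_checkpoint.pth
#
# Trailing KEY VALUE pairs (e.g. MODEL.WEIGHTS above) are consumed by
# cfg.merge_from_list(args.opts) and override values from the config file.
# ---------------------------------------------------------------------------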