checkpoint_config = dict(interval=1)
# yapf: disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf: enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
total_epochs = 12
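# NOTE: the runtime keys above (checkpoint_config, log_config, lr_config,
# total_epochs, workflow) follow the legacy MMDetection 2.x convention and
# describe the standard "1x" schedule: SGD with lr=0.02, linear warmup over
# the first 500 iterations starting at 0.001x of the base lr, and a 10x lr
# drop at epochs 8 and 11, for 12 epochs in total. A rough MMDetection 3.x /
# MMEngine equivalent of the same schedule is sketched below for reference;
# it is commented out so it does not override anything, val_interval is an
# assumption (the original file does not set an evaluation interval), and this
# runner-level train_cfg is distinct from model.train_cfg further down.
# train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
# optim_wrapper = dict(
#     type='OptimWrapper',
#     optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001),
#     clip_grad=None)
# param_scheduler = [
#     dict(type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
#          end=500),
#     dict(type='MultiStepLR', begin=0, end=12, by_epoch=True,
#          milestones=[8, 11], gamma=0.1)
# ]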
model = dict(
    type='FasterRCNN',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=32),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=False,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)
        # soft-nms is also supported for rcnn testing
        # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
    ))
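# A minimal sketch (not part of the original config) of instantiating the
# detector defined above with MMDetection 3.x. It is commented out so it does
# not run during config parsing, and the file name
# 'faster-rcnn_r50_fpn_1x_coco.py' is an assumption for illustration only.
# from mmengine.config import Config
# from mmdet.registry import MODELS
# from mmdet.utils import register_all_modules
#
# register_all_modules()  # register mmdet modules under the default scope
# cfg = Config.fromfile('faster-rcnn_r50_fpn_1x_coco.py')
# detector = MODELS.build(cfg.model)  # FasterRCNN: ResNet-50 + FPN + RPN + RoI head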
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
backend_args = None
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have ground-truth annotations, remove this transform
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_train2017.json',
        data_prefix=dict(img='train2017/'),
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=train_pipeline,
        backend_args=backend_args))
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_val2017.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/instances_val2017.json',
    metric='bbox',
    format_only=False,
    backend_args=backend_args)
test_evaluator = val_evaluator
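# Example usage (a sketch, not part of the original config), kept as comments
# so the file still parses as a config. It assumes this file is saved as
# faster-rcnn_r50_fpn_1x_coco.py and that COCO is laid out under data/coco/ as
# referenced above; the checkpoint path below is a placeholder.
#
# Training (single GPU):
#   python tools/train.py faster-rcnn_r50_fpn_1x_coco.py
#
# Inference on a single image:
#   from mmdet.apis import init_detector, inference_detector
#   model = init_detector('faster-rcnn_r50_fpn_1x_coco.py',
#                         'work_dirs/faster-rcnn_r50_fpn_1x_coco/epoch_12.pth',
#                         device='cuda:0')
#   result = inference_detector(model, 'demo.jpg')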