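# Flattened MMDetection (mmdet 3.x) config: RTMDet-Ins instance segmentation fine-tuned to
# detect "TextRegion" instances in handwritten pages (Riksarkivet police records and
# ICDAR-2019 layout data).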
default_scope = "mmdet"
default_hooks = dict(
timer=dict(type="IterTimerHook"),
logger=dict(type="LoggerHook", interval=100),
param_scheduler=dict(type="ParamSchedulerHook"),
checkpoint=dict(type="CheckpointHook", interval=1, max_keep_ckpts=5, save_best="auto"),
sampler_seed=dict(type="DistSamplerSeedHook"),
visualization=dict(type="DetVisualizationHook"),
)
env_cfg = dict(cudnn_benchmark=False, mp_cfg=dict(mp_start_method="fork", opencv_num_threads=0), dist_cfg=dict(backend="nccl"))
vis_backends = [dict(type="LocalVisBackend")]
visualizer = dict(type="DetLocalVisualizer", vis_backends=[dict(type="LocalVisBackend")], name="visualizer", save_dir="./")
log_processor = dict(type="LogProcessor", window_size=50, by_epoch=True)
log_level = "INFO"
load_from = "/home/erik/Riksarkivet/Projects/HTR_Pipeline/models/checkpoints/rtmdet_regions_6/epoch_11.pth"
resume = True
train_cfg = dict(type="EpochBasedTrainLoop", max_epochs=12, val_interval=12, dynamic_intervals=[(10, 1)])
val_cfg = dict(type="ValLoop")
test_cfg = dict(
type="TestLoop",
pipeline=[
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="Resize", scale=(640, 640), keep_ratio=True),
dict(type="Pad", size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type="PackDetInputs", meta_keys=("img_id", "img_path", "ori_shape", "img_shape", "scale_factor")),
],
)
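# Optimization: 1000-iteration linear warmup followed by cosine annealing over epochs 6-12;
# AdamW with weight decay disabled for norm layers and biases.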
param_scheduler = [
dict(type="LinearLR", start_factor=1e-05, by_epoch=False, begin=0, end=1000),
dict(type="CosineAnnealingLR", eta_min=1.25e-05, begin=6, end=12, T_max=6, by_epoch=True, convert_to_iter_based=True),
]
optim_wrapper = dict(
type="OptimWrapper",
optimizer=dict(type="AdamW", lr=0.00025, weight_decay=0.05),
paramwise_cfg=dict(norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True),
)
auto_scale_lr = dict(enable=False, base_batch_size=16)
dataset_type = "CocoDataset"
data_root = "data/coco/"
file_client_args = dict(backend="disk")
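# Stage-1 training pipeline: cached Mosaic, large-scale jitter (0.1-2.0x RandomResize),
# random crop, HSV jitter, random flip, padding to 640x640 and cached MixUp.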
train_pipeline = [
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="LoadAnnotations", with_bbox=True, with_mask=True, poly2mask=False),
dict(type="CachedMosaic", img_scale=(640, 640), pad_val=114.0),
dict(type="RandomResize", scale=(1280, 1280), ratio_range=(0.1, 2.0), keep_ratio=True),
dict(type="RandomCrop", crop_size=(640, 640), recompute_bbox=True, allow_negative_crop=True),
dict(type="YOLOXHSVRandomAug"),
dict(type="RandomFlip", prob=0.5),
dict(type="Pad", size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type="CachedMixUp", img_scale=(640, 640), ratio_range=(1.0, 1.0), max_cached_images=20, pad_val=(114, 114, 114)),
dict(type="FilterAnnotations", min_gt_bbox_wh=(1, 1)),
dict(type="PackDetInputs"),
]
test_pipeline = [
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="Resize", scale=(640, 640), keep_ratio=True),
dict(type="Pad", size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type="PackDetInputs", meta_keys=("img_id", "img_path", "ori_shape", "img_shape", "scale_factor")),
]
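# Optional test-time augmentation: multi-scale inference at 320/640/960 combined with
# horizontal flip, results merged with NMS (IoU 0.6).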
tta_model = dict(type="DetTTAModel", tta_cfg=dict(nms=dict(type="nms", iou_threshold=0.6), max_per_img=100))
img_scales = [(640, 640), (320, 320), (960, 960)]
tta_pipeline = [
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(
type="TestTimeAug",
transforms=[
[
{"type": "Resize", "scale": (640, 640), "keep_ratio": True},
{"type": "Resize", "scale": (320, 320), "keep_ratio": True},
{"type": "Resize", "scale": (960, 960), "keep_ratio": True},
],
[{"type": "RandomFlip", "prob": 1.0}, {"type": "RandomFlip", "prob": 0.0}],
[{"type": "Pad", "size": (960, 960), "pad_val": {"img": (114, 114, 114)}}],
[
{
"type": "PackDetInputs",
"meta_keys": ("img_id", "img_path", "ori_shape", "img_shape", "scale_factor", "flip", "flip_direction"),
}
],
],
),
]
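# RTMDet-Ins model: CSPNeXt-P5 backbone (deepen 0.67 / widen 0.75), CSPNeXtPAFPN neck and a
# separated-BN instance-segmentation head with QualityFocal, GIoU and Dice losses.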
model = dict(
type="RTMDet",
data_preprocessor=dict(
type="DetDataPreprocessor", mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], bgr_to_rgb=False, batch_augments=None
),
backbone=dict(
type="CSPNeXt",
arch="P5",
expand_ratio=0.5,
deepen_factor=0.67,
widen_factor=0.75,
channel_attention=True,
norm_cfg=dict(type="SyncBN"),
act_cfg=dict(type="SiLU", inplace=True),
),
neck=dict(
type="CSPNeXtPAFPN",
in_channels=[192, 384, 768],
out_channels=192,
num_csp_blocks=2,
expand_ratio=0.5,
norm_cfg=dict(type="SyncBN"),
act_cfg=dict(type="SiLU", inplace=True),
),
bbox_head=dict(
type="RTMDetInsSepBNHead",
num_classes=80,
in_channels=192,
stacked_convs=2,
share_conv=True,
pred_kernel_size=1,
feat_channels=192,
act_cfg=dict(type="SiLU", inplace=True),
norm_cfg=dict(type="SyncBN", requires_grad=True),
anchor_generator=dict(type="MlvlPointGenerator", offset=0, strides=[8, 16, 32]),
bbox_coder=dict(type="DistancePointBBoxCoder"),
loss_cls=dict(type="QualityFocalLoss", use_sigmoid=True, beta=2.0, loss_weight=1.0),
loss_bbox=dict(type="GIoULoss", loss_weight=2.0),
loss_mask=dict(type="DiceLoss", loss_weight=2.0, eps=5e-06, reduction="mean"),
),
train_cfg=dict(assigner=dict(type="DynamicSoftLabelAssigner", topk=13), allowed_border=-1, pos_weight=-1, debug=False),
test_cfg=dict(nms_pre=200, min_bbox_size=0, score_thr=0.4, nms=dict(type="nms", iou_threshold=0.6), max_per_img=50, mask_thr_binary=0.5),
)
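# Stage-2 training pipeline (enabled by the PipelineSwitchHook below): Mosaic and MixUp are
# dropped in favour of plain resize/crop augmentation for the final epochs.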
train_pipeline_stage2 = [
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="LoadAnnotations", with_bbox=True, with_mask=True, poly2mask=False),
dict(type="RandomResize", scale=(640, 640), ratio_range=(0.1, 2.0), keep_ratio=True),
dict(type="RandomCrop", crop_size=(640, 640), recompute_bbox=True, allow_negative_crop=True),
dict(type="FilterAnnotations", min_gt_bbox_wh=(1, 1)),
dict(type="YOLOXHSVRandomAug"),
dict(type="RandomFlip", prob=0.5),
dict(type="Pad", size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type="PackDetInputs"),
]
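# Training data: ConcatDataset over the police_records and ICDAR-2019 COCO-format region
# datasets, both using the single "TextRegion" class.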
train_dataloader = dict(
batch_size=2,
num_workers=1,
batch_sampler=None,
pin_memory=True,
persistent_workers=True,
sampler=dict(type="DefaultSampler", shuffle=True),
dataset=dict(
type="ConcatDataset",
datasets=[
dict(
type="CocoDataset",
metainfo=dict(classes="TextRegion", palette=[(220, 20, 60)]),
data_prefix=dict(img="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/police_records/"),
ann_file="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/police_records/gt_files/coco_regions2.json",
pipeline=[
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="LoadAnnotations", with_bbox=True, with_mask=True, poly2mask=False),
dict(type="CachedMosaic", img_scale=(640, 640), pad_val=114.0),
dict(type="RandomResize", scale=(1280, 1280), ratio_range=(0.1, 2.0), keep_ratio=True),
dict(type="RandomCrop", crop_size=(640, 640), recompute_bbox=True, allow_negative_crop=True),
dict(type="YOLOXHSVRandomAug"),
dict(type="RandomFlip", prob=0.5),
dict(type="Pad", size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type="CachedMixUp", img_scale=(640, 640), ratio_range=(1.0, 1.0), max_cached_images=20, pad_val=(114, 114, 114)),
dict(type="FilterAnnotations", min_gt_bbox_wh=(1, 1)),
dict(type="PackDetInputs"),
],
),
dict(
type="CocoDataset",
metainfo=dict(classes="TextRegion", palette=[(220, 20, 60)]),
data_prefix=dict(img="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/ICDAR-2019/clean/"),
ann_file="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/ICDAR-2019/clean/gt_files/coco_regions2.json",
pipeline=[
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="LoadAnnotations", with_bbox=True, with_mask=True, poly2mask=False),
dict(type="CachedMosaic", img_scale=(640, 640), pad_val=114.0),
dict(type="RandomResize", scale=(1280, 1280), ratio_range=(0.1, 2.0), keep_ratio=True),
dict(type="RandomCrop", crop_size=(640, 640), recompute_bbox=True, allow_negative_crop=True),
dict(type="YOLOXHSVRandomAug"),
dict(type="RandomFlip", prob=0.5),
dict(type="Pad", size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type="CachedMixUp", img_scale=(640, 640), ratio_range=(1.0, 1.0), max_cached_images=20, pad_val=(114, 114, 114)),
dict(type="FilterAnnotations", min_gt_bbox_wh=(1, 1)),
dict(type="PackDetInputs"),
],
),
],
),
)
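# Validation and test dataloaders: single-image batches, deterministic order, test-mode datasets.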
val_dataloader = dict(
batch_size=1,
num_workers=10,
dataset=dict(
pipeline=[
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="Resize", scale=(640, 640), keep_ratio=True),
dict(type="Pad", size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type="PackDetInputs", meta_keys=("img_id", "img_path", "ori_shape", "img_shape", "scale_factor")),
],
type="CocoDataset",
metainfo=dict(classes="TextRegion", palette=[(220, 20, 60)]),
data_prefix=dict(img="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/ICDAR-2019/clean/"),
ann_file="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/police_records/gt_files/coco_regions2.json",
test_mode=True,
),
persistent_workers=True,
drop_last=False,
sampler=dict(type="DefaultSampler", shuffle=False),
)
test_dataloader = dict(
batch_size=1,
num_workers=10,
dataset=dict(
pipeline=[
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="Resize", scale=(640, 640), keep_ratio=True),
dict(type="Pad", size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type="PackDetInputs", meta_keys=("img_id", "img_path", "ori_shape", "img_shape", "scale_factor")),
],
type="CocoDataset",
metainfo=dict(classes="TextRegion", palette=[(220, 20, 60)]),
data_prefix=dict(img="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/ICDAR-2019/clean/"),
ann_file="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/police_records/gt_files/coco_regions2.json",
test_mode=True,
),
persistent_workers=True,
drop_last=False,
sampler=dict(type="DefaultSampler", shuffle=False),
)
max_epochs = 12
stage2_num_epochs = 2
base_lr = 0.00025
interval = 12
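# COCO-style evaluation of both boxes ("bbox") and instance masks ("segm") against the
# ICDAR-2019 region annotations.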
val_evaluator = dict(
proposal_nums=(100, 1, 10),
metric=["bbox", "segm"],
type="CocoMetric",
ann_file="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/ICDAR-2019/clean/gt_files/coco_regions2.json",
)
test_evaluator = dict(
proposal_nums=(100, 1, 10),
metric=["bbox", "segm"],
type="CocoMetric",
ann_file="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/ICDAR-2019/clean/gt_files/coco_regions2.json",
)
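# EMA of the model weights plus the augmentation switch at epoch 10, so the last
# stage2_num_epochs epochs train with the lighter stage-2 pipeline.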
custom_hooks = [
dict(type="EMAHook", ema_type="ExpMomentumEMA", momentum=0.0002, update_buffers=True, priority=49),
dict(
type="PipelineSwitchHook",
switch_epoch=10,
switch_pipeline=[
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="LoadAnnotations", with_bbox=True, with_mask=True, poly2mask=False),
dict(type="RandomResize", scale=(640, 640), ratio_range=(0.1, 2.0), keep_ratio=True),
dict(type="RandomCrop", crop_size=(640, 640), recompute_bbox=True, allow_negative_crop=True),
dict(type="FilterAnnotations", min_gt_bbox_wh=(1, 1)),
dict(type="YOLOXHSVRandomAug"),
dict(type="RandomFlip", prob=0.5),
dict(type="Pad", size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type="PackDetInputs"),
],
),
]
work_dir = "/home/erik/Riksarkivet/Projects/HTR_Pipeline/models/checkpoints/rtmdet_regions_6"
train_batch_size_per_gpu = 2
val_batch_size_per_gpu = 1
train_num_workers = 1
num_classes = 1
metainfo = dict(classes="TextRegion", palette=[(220, 20, 60)])
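# Reusable per-dataset definitions; train_list and test_list below bundle them (the
# dataloaders above inline equivalent copies).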
icdar_2019 = dict(
type="CocoDataset",
metainfo=dict(classes="TextRegion", palette=[(220, 20, 60)]),
data_prefix=dict(img="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/ICDAR-2019/clean/"),
ann_file="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/ICDAR-2019/clean/gt_files/coco_regions2.json",
pipeline=[
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="LoadAnnotations", with_bbox=True, with_mask=True, poly2mask=False),
dict(type="CachedMosaic", img_scale=(640, 640), pad_val=114.0),
dict(type="RandomResize", scale=(1280, 1280), ratio_range=(0.1, 2.0), keep_ratio=True),
dict(type="RandomCrop", crop_size=(640, 640), recompute_bbox=True, allow_negative_crop=True),
dict(type="YOLOXHSVRandomAug"),
dict(type="RandomFlip", prob=0.5),
dict(type="Pad", size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type="CachedMixUp", img_scale=(640, 640), ratio_range=(1.0, 1.0), max_cached_images=20, pad_val=(114, 114, 114)),
dict(type="FilterAnnotations", min_gt_bbox_wh=(1, 1)),
dict(type="PackDetInputs"),
],
)
icdar_2019_test = dict(
type="CocoDataset",
metainfo=dict(classes="TextRegion", palette=[(220, 20, 60)]),
data_prefix=dict(img="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/ICDAR-2019/clean/"),
ann_file="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/ICDAR-2019/clean/gt_files/coco_regions2.json",
test_mode=True,
pipeline=[
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="Resize", scale=(640, 640), keep_ratio=True),
dict(type="Pad", size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type="PackDetInputs", meta_keys=("img_id", "img_path", "ori_shape", "img_shape", "scale_factor")),
],
)
police_records = dict(
type="CocoDataset",
metainfo=dict(classes="TextRegion", palette=[(220, 20, 60)]),
data_prefix=dict(img="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/police_records/"),
ann_file="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/police_records/gt_files/coco_regions2.json",
pipeline=[
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="LoadAnnotations", with_bbox=True, with_mask=True, poly2mask=False),
dict(type="CachedMosaic", img_scale=(640, 640), pad_val=114.0),
dict(type="RandomResize", scale=(1280, 1280), ratio_range=(0.1, 2.0), keep_ratio=True),
dict(type="RandomCrop", crop_size=(640, 640), recompute_bbox=True, allow_negative_crop=True),
dict(type="YOLOXHSVRandomAug"),
dict(type="RandomFlip", prob=0.5),
dict(type="Pad", size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type="CachedMixUp", img_scale=(640, 640), ratio_range=(1.0, 1.0), max_cached_images=20, pad_val=(114, 114, 114)),
dict(type="FilterAnnotations", min_gt_bbox_wh=(1, 1)),
dict(type="PackDetInputs"),
],
)
train_list = [
dict(
type="CocoDataset",
metainfo=dict(classes="TextRegion", palette=[(220, 20, 60)]),
data_prefix=dict(img="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/police_records/"),
ann_file="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/police_records/gt_files/coco_regions2.json",
pipeline=[
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="LoadAnnotations", with_bbox=True, with_mask=True, poly2mask=False),
dict(type="CachedMosaic", img_scale=(640, 640), pad_val=114.0),
dict(type="RandomResize", scale=(1280, 1280), ratio_range=(0.1, 2.0), keep_ratio=True),
dict(type="RandomCrop", crop_size=(640, 640), recompute_bbox=True, allow_negative_crop=True),
dict(type="YOLOXHSVRandomAug"),
dict(type="RandomFlip", prob=0.5),
dict(type="Pad", size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type="CachedMixUp", img_scale=(640, 640), ratio_range=(1.0, 1.0), max_cached_images=20, pad_val=(114, 114, 114)),
dict(type="FilterAnnotations", min_gt_bbox_wh=(1, 1)),
dict(type="PackDetInputs"),
],
),
dict(
type="CocoDataset",
metainfo=dict(classes="TextRegion", palette=[(220, 20, 60)]),
data_prefix=dict(img="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/ICDAR-2019/clean/"),
ann_file="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/ICDAR-2019/clean/gt_files/coco_regions2.json",
pipeline=[
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="LoadAnnotations", with_bbox=True, with_mask=True, poly2mask=False),
dict(type="CachedMosaic", img_scale=(640, 640), pad_val=114.0),
dict(type="RandomResize", scale=(1280, 1280), ratio_range=(0.1, 2.0), keep_ratio=True),
dict(type="RandomCrop", crop_size=(640, 640), recompute_bbox=True, allow_negative_crop=True),
dict(type="YOLOXHSVRandomAug"),
dict(type="RandomFlip", prob=0.5),
dict(type="Pad", size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type="CachedMixUp", img_scale=(640, 640), ratio_range=(1.0, 1.0), max_cached_images=20, pad_val=(114, 114, 114)),
dict(type="FilterAnnotations", min_gt_bbox_wh=(1, 1)),
dict(type="PackDetInputs"),
],
),
]
test_list = [
dict(
type="CocoDataset",
metainfo=dict(classes="TextRegion", palette=[(220, 20, 60)]),
data_prefix=dict(img="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/ICDAR-2019/clean/"),
ann_file="/media/erik/Elements/Riksarkivet/data/datasets/htr/segmentation/ICDAR-2019/clean/gt_files/coco_regions2.json",
test_mode=True,
pipeline=[
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="Resize", scale=(640, 640), keep_ratio=True),
dict(type="Pad", size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type="PackDetInputs", meta_keys=("img_id", "img_path", "ori_shape", "img_shape", "scale_factor")),
],
)
]
pipeline = [
dict(type="LoadImageFromFile", file_client_args=dict(backend="disk")),
dict(type="Resize", scale=(640, 640), keep_ratio=True),
dict(type="Pad", size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type="PackDetInputs", meta_keys=("img_id", "img_path", "ori_shape", "img_shape", "scale_factor")),
]
launcher = "pytorch"