aaronb committed on
Commit
5fcbf51
1 Parent(s): 9a5c7fa
Files changed (34)
  1. cityscapes/deeplabv3plus_r101_multistep/20230304_140046.log +0 -0
  2. cityscapes/deeplabv3plus_r101_multistep/20230304_140046.log.json +0 -0
  3. cityscapes/deeplabv3plus_r101_multistep/best_mIoU_iter_144000.pth +3 -0
  4. cityscapes/deeplabv3plus_r101_multistep/deeplabv3plus_r101-d8_aspp_head_unet_fc_multi_step_ade_pretrained_freeze_embed_160k_cityscapes20_finetune.py +191 -0
  5. cityscapes/deeplabv3plus_r101_multistep/iter_160000.pth +3 -0
  6. cityscapes/deeplabv3plus_r101_multistep/latest.pth +1 -0
  7. cityscapes/deeplabv3plus_r101_singlestep/20230303_203832.log +1095 -0
  8. cityscapes/deeplabv3plus_r101_singlestep/20230303_203832.log.json +2 -0
  9. cityscapes/deeplabv3plus_r101_singlestep/20230303_203945.log +0 -0
  10. cityscapes/deeplabv3plus_r101_singlestep/20230303_203945.log.json +0 -0
  11. cityscapes/deeplabv3plus_r101_singlestep/best_mIoU_iter_64000.pth +3 -0
  12. cityscapes/deeplabv3plus_r101_singlestep/deeplabv3plus_r101-d8_aspp_head_unet_fc_small_single_step_cityscapes_pretrained_freeze_embed_80k_cityscapes20.py +180 -0
  13. cityscapes/deeplabv3plus_r101_singlestep/iter_80000.pth +3 -0
  14. cityscapes/deeplabv3plus_r101_singlestep/latest.pth +1 -0
  15. cityscapes/deeplabv3plus_r50_multistep/20230303_205044.log +0 -0
  16. cityscapes/deeplabv3plus_r50_multistep/20230303_205044.log.json +0 -0
  17. cityscapes/deeplabv3plus_r50_multistep/best_mIoU_iter_96000.pth +3 -0
  18. cityscapes/deeplabv3plus_r50_multistep/deeplabv3plus_r50-d8_aspp_head_unet_fc_multi_step_ade_pretrained_freeze_embed_160k_cityscapes20_finetune.py +191 -0
  19. cityscapes/deeplabv3plus_r50_multistep/iter_160000.pth +3 -0
  20. cityscapes/deeplabv3plus_r50_multistep/latest.pth +1 -0
  21. cityscapes/deeplabv3plus_r50_singlestep/20230303_152127.log +0 -0
  22. cityscapes/deeplabv3plus_r50_singlestep/20230303_152127.log.json +0 -0
  23. cityscapes/deeplabv3plus_r50_singlestep/best_mIoU_iter_72000.pth +3 -0
  24. cityscapes/deeplabv3plus_r50_singlestep/deeplabv3plus_r50-d8_aspp_head_unet_fc_single_step_ade_pretrained_freeze_embed_80k_cityscapes20.py +180 -0
  25. cityscapes/deeplabv3plus_r50_singlestep/iter_80000.pth +3 -0
  26. cityscapes/deeplabv3plus_r50_singlestep/latest.pth +1 -0
  27. cityscapes/segformer_b0_multistep/best_mIoU_iter_144000.pth +3 -0
  28. cityscapes/segformer_b0_multistep/segformer_mit_b0_segformer_head_unet_fc_single_step_ade_pretrained_freeze_embed_80k_cityscapes20_finetune_cfg.py +68 -0
  29. cityscapes/segformer_b2_multistep/20230302_232152.log +0 -0
  30. cityscapes/segformer_b2_multistep/20230302_232152.log.json +0 -0
  31. cityscapes/segformer_b2_multistep/best_mIoU_iter_128000.pth +3 -0
  32. cityscapes/segformer_b2_multistep/iter_160000.pth +3 -0
  33. cityscapes/segformer_b2_multistep/latest.pth +1 -0
  34. cityscapes/segformer_b2_multistep/segformer_mit_b2_segformer_head_unet_fc_small_multi_step_ade_pretrained_freeze_embed_160k_cityscapes20_finetune_ema.py +195 -0
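
Each model directory in this commit pairs an mmsegmentation config (*.py) with its training logs and checkpoints (*.pth, stored via Git LFS). As a hedged sketch only (not part of the commit), a committed config could be inspected with the MMCV 1.x / MMSegmentation 0.x APIs whose versions are recorded in the logs below; the paths are examples taken from the file list, and building the model additionally requires the repo's custom classes (e.g. EncoderDecoderDiffusion) to be importable and registered.

# Hedged sketch (not part of the commit): assumes mmcv 1.x and mmsegmentation 0.x,
# matching the versions recorded in the training logs (MMCV 1.7.1, MMSegmentation 0.30.0).
# Checkpoint files are Git LFS pointers until fetched with `git lfs pull`.
from mmcv import Config
from mmseg.apis import init_segmentor

cfg_path = (
    'cityscapes/deeplabv3plus_r101_multistep/'
    'deeplabv3plus_r101-d8_aspp_head_unet_fc_multi_step_ade_pretrained_'
    'freeze_embed_160k_cityscapes20_finetune.py'
)
ckpt_path = 'cityscapes/deeplabv3plus_r101_multistep/best_mIoU_iter_144000.pth'

cfg = Config.fromfile(cfg_path)              # parse the committed Python config
print(cfg.model.type, cfg.runner.max_iters)  # EncoderDecoderDiffusion 160000

# Building the segmentor needs the repo's custom modules (EncoderDecoderDiffusion,
# DepthwiseSeparableASPPHeadUnetFCHeadMultiStep, ...) registered with mmseg:
# model = init_segmentor(cfg, ckpt_path, device='cuda:0')
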
cityscapes/deeplabv3plus_r101_multistep/20230304_140046.log ADDED
The diff for this file is too large to render. See raw diff
 
cityscapes/deeplabv3plus_r101_multistep/20230304_140046.log.json ADDED
The diff for this file is too large to render. See raw diff
 
cityscapes/deeplabv3plus_r101_multistep/best_mIoU_iter_144000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:695aa44d075038d156a319c51ab19d43c938622c6f9da85317c00a16a1544a54
+ size 690272551
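
The .pth entries in this commit are Git LFS pointer files (a version line, an oid line, and a size line) rather than the binary weights themselves. A minimal standard-library sketch, using a hypothetical helper name, that parses such a pointer:

# Hypothetical helper (not part of the commit): parse a Git LFS pointer file,
# such as the three-line entry shown above, into its fields.
def read_lfs_pointer(path):
    fields = {}
    with open(path, 'r') as f:
        for line in f:
            key, _, value = line.strip().partition(' ')
            if key:
                fields[key] = value
    return fields

# Illustrative usage:
# ptr = read_lfs_pointer('cityscapes/deeplabv3plus_r101_multistep/best_mIoU_iter_144000.pth')
# ptr['oid']  == 'sha256:695aa44d075038d156a319c51ab19d43c938622c6f9da85317c00a16a1544a54'
# ptr['size'] == '690272551'
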
cityscapes/deeplabv3plus_r101_multistep/deeplabv3plus_r101-d8_aspp_head_unet_fc_multi_step_ade_pretrained_freeze_embed_160k_cityscapes20_finetune.py ADDED
@@ -0,0 +1,191 @@
+ norm_cfg = dict(type='SyncBN', requires_grad=True)
+ model = dict(
+ type='EncoderDecoderDiffusion',
+ pretrained=
+ 'work_dirs2/deeplabv3plus_r101-d8_aspp_head_unet_fc_small_single_step_cityscapes_pretrained_freeze_embed_80k_cityscapes20/latest.pth',
+ backbone=dict(
+ type='ResNetV1cCustomInitWeights',
+ depth=101,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 2, 4),
+ strides=(1, 2, 1, 1),
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=dict(
+ type='DepthwiseSeparableASPPHeadUnetFCHeadMultiStep',
+ pretrained=
+ 'work_dirs2/deeplabv3plus_r101-d8_aspp_head_unet_fc_small_single_step_cityscapes_pretrained_freeze_embed_80k_cityscapes20/latest.pth',
+ dim=128,
+ out_dim=256,
+ unet_channels=528,
+ dim_mults=[1, 1, 1],
+ cat_embedding_dim=16,
+ ignore_index=0,
+ diffusion_timesteps=100,
+ collect_timesteps=[0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 99],
+ in_channels=2048,
+ in_index=3,
+ channels=512,
+ dilations=(1, 12, 24, 36),
+ c1_in_channels=256,
+ c1_channels=48,
+ dropout_ratio=0.1,
+ num_classes=20,
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=None,
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'),
+ freeze_parameters=['backbone', 'decode_head'])
+ dataset_type = 'Cityscapes20Dataset'
+ data_root = 'data/cityscapes/'
+ img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+ crop_size = (512, 1024)
+ train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotationsCityscapes20'),
+ dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
+ dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PhotoMetricDistortion'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=0),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_semantic_seg'])
+ ]
+ test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(2048, 1024),
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+ ])
+ ]
+ data = dict(
+ samples_per_gpu=2,
+ workers_per_gpu=2,
+ train=dict(
+ type='Cityscapes20Dataset',
+ data_root='data/cityscapes/',
+ img_dir='leftImg8bit/train',
+ ann_dir='gtFine/train',
+ pipeline=[
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotationsCityscapes20'),
+ dict(
+ type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
+ dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PhotoMetricDistortion'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=0),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_semantic_seg'])
+ ]),
+ val=dict(
+ type='Cityscapes20Dataset',
+ data_root='data/cityscapes/',
+ img_dir='leftImg8bit/val',
+ ann_dir='gtFine/val',
+ pipeline=[
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(2048, 1024),
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+ ])
+ ]),
+ test=dict(
+ type='Cityscapes20Dataset',
+ data_root='data/cityscapes/',
+ img_dir='leftImg8bit/val',
+ ann_dir='gtFine/val',
+ pipeline=[
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(2048, 1024),
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+ ])
+ ]))
+ log_config = dict(
+ interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)])
+ dist_params = dict(backend='nccl')
+ log_level = 'INFO'
+ load_from = None
+ resume_from = None
+ workflow = [('train', 1)]
+ cudnn_benchmark = True
+ optimizer = dict(
+ type='AdamW', lr=0.00015, betas=[0.9, 0.96], weight_decay=0.045)
+ optimizer_config = dict()
+ lr_config = dict(
+ policy='step',
+ warmup='linear',
+ warmup_iters=1000,
+ warmup_ratio=1e-06,
+ step=20000,
+ gamma=0.5,
+ min_lr=1e-06,
+ by_epoch=False)
+ runner = dict(type='IterBasedRunner', max_iters=160000)
+ checkpoint_config = dict(by_epoch=False, interval=16000, max_keep_ckpts=1)
+ evaluation = dict(
+ interval=16000, metric='mIoU', pre_eval=True, save_best='mIoU')
+ checkpoint = 'work_dirs2/deeplabv3plus_r101-d8_aspp_head_unet_fc_small_single_step_cityscapes_pretrained_freeze_embed_80k_cityscapes20/latest.pth'
+ custom_hooks = [
+ dict(
+ type='ConstantMomentumEMAHook',
+ momentum=0.01,
+ interval=25,
+ eval_interval=16000,
+ auto_resume=True,
+ priority=49)
+ ]
+ work_dir = './work_dirs2/deeplabv3plus_r101-d8_aspp_head_unet_fc_multi_step_ade_pretrained_freeze_embed_160k_cityscapes20_finetune'
+ gpu_ids = range(0, 8)
+ auto_resume = True
cityscapes/deeplabv3plus_r101_multistep/iter_160000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac1700a8e4904d3d95f57a108dddee1697a45ace21e88c4e6b4c30eba084f7c8
+ size 690272551
cityscapes/deeplabv3plus_r101_multistep/latest.pth ADDED
@@ -0,0 +1 @@
+ iter_160000.pth
cityscapes/deeplabv3plus_r101_singlestep/20230303_203832.log ADDED
@@ -0,0 +1,1095 @@
+ 2023-03-03 20:38:32,836 - mmseg - INFO - Multi-processing start method is `None`
+ 2023-03-03 20:38:32,858 - mmseg - INFO - OpenCV num_threads is `128
+ 2023-03-03 20:38:32,858 - mmseg - INFO - OMP num threads is 1
+ 2023-03-03 20:38:32,925 - mmseg - INFO - Environment info:
+ ------------------------------------------------------------
+ sys.platform: linux
+ Python: 3.7.16 (default, Jan 17 2023, 22:20:44) [GCC 11.2.0]
+ CUDA available: True
+ GPU 0,1,2,3,4,5,6,7: NVIDIA A100-SXM4-80GB
+ CUDA_HOME: /mnt/petrelfs/laizeqiang/miniconda3/envs/torch
+ NVCC: Cuda compilation tools, release 11.6, V11.6.124
+ GCC: gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-44)
+ PyTorch: 1.13.1
+ PyTorch compiling details: PyTorch built with:
+ - GCC 9.3
+ - C++ Version: 201402
+ - Intel(R) oneAPI Math Kernel Library Version 2021.4-Product Build 20210904 for Intel(R) 64 architecture applications
+ - Intel(R) MKL-DNN v2.6.0 (Git Hash 52b5f107dd9cf10910aaa19cb47f3abf9b349815)
+ - OpenMP 201511 (a.k.a. OpenMP 4.5)
+ - LAPACK is enabled (usually provided by MKL)
+ - NNPACK is enabled
+ - CPU capability usage: AVX2
+ - CUDA Runtime 11.6
+ - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_37,code=compute_37
+ - CuDNN 8.3.2 (built against CUDA 11.5)
+ - Magma 2.6.1
+ - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.6, CUDNN_VERSION=8.3.2, CXX_COMPILER=/opt/rh/devtoolset-9/root/usr/bin/c++, CXX_FLAGS= -fabi-version=11 -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wunused-local-typedefs -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.13.1, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF,
+
+ TorchVision: 0.14.1
+ OpenCV: 4.7.0
+ MMCV: 1.7.1
+ MMCV Compiler: GCC 9.3
+ MMCV CUDA Compiler: 11.6
+ MMSegmentation: 0.30.0+c844fc6
+ ------------------------------------------------------------
+
+ 2023-03-03 20:38:32,925 - mmseg - INFO - Distributed training: True
+ 2023-03-03 20:38:33,574 - mmseg - INFO - Config:
+ norm_cfg = dict(type='SyncBN', requires_grad=True)
+ model = dict(
+ type='EncoderDecoderFreeze',
+ pretrained=
+ 'pretrained/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth',
+ backbone=dict(
+ type='ResNetV1cCustomInitWeights',
+ depth=101,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 2, 4),
+ strides=(1, 2, 1, 1),
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=dict(
+ type='DepthwiseSeparableASPPHeadUnetFCHeadSingleStep',
+ pretrained=
+ 'pretrained/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth',
+ dim=256,
+ out_dim=256,
+ unet_channels=528,
+ dim_mults=[1, 1, 1],
+ cat_embedding_dim=16,
+ ignore_index=0,
+ in_channels=2048,
+ in_index=3,
+ channels=512,
+ dilations=(1, 12, 24, 36),
+ c1_in_channels=256,
+ c1_channels=48,
+ dropout_ratio=0.1,
+ num_classes=20,
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=None,
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'),
+ freeze_parameters=['backbone', 'decode_head'])
+ dataset_type = 'Cityscapes20Dataset'
+ data_root = 'data/cityscapes/'
+ img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+ crop_size = (512, 1024)
+ train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotationsCityscapes20'),
+ dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
+ dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PhotoMetricDistortion'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=0),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_semantic_seg'])
+ ]
+ test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(2048, 1024),
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+ ])
+ ]
+ data = dict(
+ samples_per_gpu=2,
+ workers_per_gpu=2,
+ train=dict(
+ type='Cityscapes20Dataset',
+ data_root='data/cityscapes/',
+ img_dir='leftImg8bit/train',
+ ann_dir='gtFine/train',
+ pipeline=[
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotationsCityscapes20'),
+ dict(
+ type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
+ dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PhotoMetricDistortion'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=0),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_semantic_seg'])
+ ]),
+ val=dict(
+ type='Cityscapes20Dataset',
+ data_root='data/cityscapes/',
+ img_dir='leftImg8bit/val',
+ ann_dir='gtFine/val',
+ pipeline=[
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(2048, 1024),
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+ ])
+ ]),
+ test=dict(
+ type='Cityscapes20Dataset',
+ data_root='data/cityscapes/',
+ img_dir='leftImg8bit/val',
+ ann_dir='gtFine/val',
+ pipeline=[
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(2048, 1024),
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+ ])
+ ]))
+ log_config = dict(
+ interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)])
+ dist_params = dict(backend='nccl')
+ log_level = 'INFO'
+ load_from = None
+ resume_from = None
+ workflow = [('train', 1)]
+ cudnn_benchmark = True
+ optimizer = dict(
+ type='AdamW', lr=0.00015, betas=[0.9, 0.96], weight_decay=0.045)
+ optimizer_config = dict()
+ lr_config = dict(
+ policy='step',
+ warmup='linear',
+ warmup_iters=1000,
+ warmup_ratio=1e-06,
+ step=10000,
+ gamma=0.5,
+ min_lr=1e-06,
+ by_epoch=False)
+ runner = dict(type='IterBasedRunner', max_iters=80000)
+ checkpoint_config = dict(by_epoch=False, interval=8000, max_keep_ckpts=1)
+ evaluation = dict(
+ interval=8000, metric='mIoU', pre_eval=True, save_best='mIoU')
+ checkpoint = 'pretrained/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth'
+ work_dir = './work_dirs2/deeplabv3plus_r101-d8_aspp_head_unet_fc_small_single_step_cityscapes_pretrained_freeze_embed_80k_cityscapes20'
+ gpu_ids = range(0, 8)
+ auto_resume = True
+
+ 2023-03-03 20:38:37,967 - mmseg - INFO - Set random seed to 835892801, deterministic: False
+ 2023-03-03 20:38:39,336 - mmseg - INFO - Parameters in backbone freezed!
+ 2023-03-03 20:38:39,337 - mmseg - INFO - Trainable parameters in DepthwiseSeparableASPPHeadUnetFCHeadSingleStep: ['unet.init_conv.weight', 'unet.init_conv.bias', 'unet.time_mlp.1.weight', 'unet.time_mlp.1.bias', 'unet.time_mlp.3.weight', 'unet.time_mlp.3.bias', 'unet.downs.0.0.mlp.1.weight', 'unet.downs.0.0.mlp.1.bias', 'unet.downs.0.0.block1.proj.weight', 'unet.downs.0.0.block1.proj.bias', 'unet.downs.0.0.block1.norm.weight', 'unet.downs.0.0.block1.norm.bias', 'unet.downs.0.0.block2.proj.weight', 'unet.downs.0.0.block2.proj.bias', 'unet.downs.0.0.block2.norm.weight', 'unet.downs.0.0.block2.norm.bias', 'unet.downs.0.1.mlp.1.weight', 'unet.downs.0.1.mlp.1.bias', 'unet.downs.0.1.block1.proj.weight', 'unet.downs.0.1.block1.proj.bias', 'unet.downs.0.1.block1.norm.weight', 'unet.downs.0.1.block1.norm.bias', 'unet.downs.0.1.block2.proj.weight', 'unet.downs.0.1.block2.proj.bias', 'unet.downs.0.1.block2.norm.weight', 'unet.downs.0.1.block2.norm.bias', 'unet.downs.0.2.fn.fn.to_qkv.weight', 'unet.downs.0.2.fn.fn.to_out.0.weight', 'unet.downs.0.2.fn.fn.to_out.0.bias', 'unet.downs.0.2.fn.fn.to_out.1.g', 'unet.downs.0.2.fn.norm.g', 'unet.downs.0.3.weight', 'unet.downs.0.3.bias', 'unet.downs.1.0.mlp.1.weight', 'unet.downs.1.0.mlp.1.bias', 'unet.downs.1.0.block1.proj.weight', 'unet.downs.1.0.block1.proj.bias', 'unet.downs.1.0.block1.norm.weight', 'unet.downs.1.0.block1.norm.bias', 'unet.downs.1.0.block2.proj.weight', 'unet.downs.1.0.block2.proj.bias', 'unet.downs.1.0.block2.norm.weight', 'unet.downs.1.0.block2.norm.bias', 'unet.downs.1.1.mlp.1.weight', 'unet.downs.1.1.mlp.1.bias', 'unet.downs.1.1.block1.proj.weight', 'unet.downs.1.1.block1.proj.bias', 'unet.downs.1.1.block1.norm.weight', 'unet.downs.1.1.block1.norm.bias', 'unet.downs.1.1.block2.proj.weight', 'unet.downs.1.1.block2.proj.bias', 'unet.downs.1.1.block2.norm.weight', 'unet.downs.1.1.block2.norm.bias', 'unet.downs.1.2.fn.fn.to_qkv.weight', 'unet.downs.1.2.fn.fn.to_out.0.weight', 'unet.downs.1.2.fn.fn.to_out.0.bias', 'unet.downs.1.2.fn.fn.to_out.1.g', 'unet.downs.1.2.fn.norm.g', 'unet.downs.1.3.weight', 'unet.downs.1.3.bias', 'unet.downs.2.0.mlp.1.weight', 'unet.downs.2.0.mlp.1.bias', 'unet.downs.2.0.block1.proj.weight', 'unet.downs.2.0.block1.proj.bias', 'unet.downs.2.0.block1.norm.weight', 'unet.downs.2.0.block1.norm.bias', 'unet.downs.2.0.block2.proj.weight', 'unet.downs.2.0.block2.proj.bias', 'unet.downs.2.0.block2.norm.weight', 'unet.downs.2.0.block2.norm.bias', 'unet.downs.2.1.mlp.1.weight', 'unet.downs.2.1.mlp.1.bias', 'unet.downs.2.1.block1.proj.weight', 'unet.downs.2.1.block1.proj.bias', 'unet.downs.2.1.block1.norm.weight', 'unet.downs.2.1.block1.norm.bias', 'unet.downs.2.1.block2.proj.weight', 'unet.downs.2.1.block2.proj.bias', 'unet.downs.2.1.block2.norm.weight', 'unet.downs.2.1.block2.norm.bias', 'unet.downs.2.2.fn.fn.to_qkv.weight', 'unet.downs.2.2.fn.fn.to_out.0.weight', 'unet.downs.2.2.fn.fn.to_out.0.bias', 'unet.downs.2.2.fn.fn.to_out.1.g', 'unet.downs.2.2.fn.norm.g', 'unet.downs.2.3.weight', 'unet.downs.2.3.bias', 'unet.ups.0.0.mlp.1.weight', 'unet.ups.0.0.mlp.1.bias', 'unet.ups.0.0.block1.proj.weight', 'unet.ups.0.0.block1.proj.bias', 'unet.ups.0.0.block1.norm.weight', 'unet.ups.0.0.block1.norm.bias', 'unet.ups.0.0.block2.proj.weight', 'unet.ups.0.0.block2.proj.bias', 'unet.ups.0.0.block2.norm.weight', 'unet.ups.0.0.block2.norm.bias', 'unet.ups.0.0.res_conv.weight', 'unet.ups.0.0.res_conv.bias', 'unet.ups.0.1.mlp.1.weight', 'unet.ups.0.1.mlp.1.bias', 'unet.ups.0.1.block1.proj.weight', 'unet.ups.0.1.block1.proj.bias', 
'unet.ups.0.1.block1.norm.weight', 'unet.ups.0.1.block1.norm.bias', 'unet.ups.0.1.block2.proj.weight', 'unet.ups.0.1.block2.proj.bias', 'unet.ups.0.1.block2.norm.weight', 'unet.ups.0.1.block2.norm.bias', 'unet.ups.0.1.res_conv.weight', 'unet.ups.0.1.res_conv.bias', 'unet.ups.0.2.fn.fn.to_qkv.weight', 'unet.ups.0.2.fn.fn.to_out.0.weight', 'unet.ups.0.2.fn.fn.to_out.0.bias', 'unet.ups.0.2.fn.fn.to_out.1.g', 'unet.ups.0.2.fn.norm.g', 'unet.ups.0.3.1.weight', 'unet.ups.0.3.1.bias', 'unet.ups.1.0.mlp.1.weight', 'unet.ups.1.0.mlp.1.bias', 'unet.ups.1.0.block1.proj.weight', 'unet.ups.1.0.block1.proj.bias', 'unet.ups.1.0.block1.norm.weight', 'unet.ups.1.0.block1.norm.bias', 'unet.ups.1.0.block2.proj.weight', 'unet.ups.1.0.block2.proj.bias', 'unet.ups.1.0.block2.norm.weight', 'unet.ups.1.0.block2.norm.bias', 'unet.ups.1.0.res_conv.weight', 'unet.ups.1.0.res_conv.bias', 'unet.ups.1.1.mlp.1.weight', 'unet.ups.1.1.mlp.1.bias', 'unet.ups.1.1.block1.proj.weight', 'unet.ups.1.1.block1.proj.bias', 'unet.ups.1.1.block1.norm.weight', 'unet.ups.1.1.block1.norm.bias', 'unet.ups.1.1.block2.proj.weight', 'unet.ups.1.1.block2.proj.bias', 'unet.ups.1.1.block2.norm.weight', 'unet.ups.1.1.block2.norm.bias', 'unet.ups.1.1.res_conv.weight', 'unet.ups.1.1.res_conv.bias', 'unet.ups.1.2.fn.fn.to_qkv.weight', 'unet.ups.1.2.fn.fn.to_out.0.weight', 'unet.ups.1.2.fn.fn.to_out.0.bias', 'unet.ups.1.2.fn.fn.to_out.1.g', 'unet.ups.1.2.fn.norm.g', 'unet.ups.1.3.1.weight', 'unet.ups.1.3.1.bias', 'unet.ups.2.0.mlp.1.weight', 'unet.ups.2.0.mlp.1.bias', 'unet.ups.2.0.block1.proj.weight', 'unet.ups.2.0.block1.proj.bias', 'unet.ups.2.0.block1.norm.weight', 'unet.ups.2.0.block1.norm.bias', 'unet.ups.2.0.block2.proj.weight', 'unet.ups.2.0.block2.proj.bias', 'unet.ups.2.0.block2.norm.weight', 'unet.ups.2.0.block2.norm.bias', 'unet.ups.2.0.res_conv.weight', 'unet.ups.2.0.res_conv.bias', 'unet.ups.2.1.mlp.1.weight', 'unet.ups.2.1.mlp.1.bias', 'unet.ups.2.1.block1.proj.weight', 'unet.ups.2.1.block1.proj.bias', 'unet.ups.2.1.block1.norm.weight', 'unet.ups.2.1.block1.norm.bias', 'unet.ups.2.1.block2.proj.weight', 'unet.ups.2.1.block2.proj.bias', 'unet.ups.2.1.block2.norm.weight', 'unet.ups.2.1.block2.norm.bias', 'unet.ups.2.1.res_conv.weight', 'unet.ups.2.1.res_conv.bias', 'unet.ups.2.2.fn.fn.to_qkv.weight', 'unet.ups.2.2.fn.fn.to_out.0.weight', 'unet.ups.2.2.fn.fn.to_out.0.bias', 'unet.ups.2.2.fn.fn.to_out.1.g', 'unet.ups.2.2.fn.norm.g', 'unet.ups.2.3.weight', 'unet.ups.2.3.bias', 'unet.mid_block1.mlp.1.weight', 'unet.mid_block1.mlp.1.bias', 'unet.mid_block1.block1.proj.weight', 'unet.mid_block1.block1.proj.bias', 'unet.mid_block1.block1.norm.weight', 'unet.mid_block1.block1.norm.bias', 'unet.mid_block1.block2.proj.weight', 'unet.mid_block1.block2.proj.bias', 'unet.mid_block1.block2.norm.weight', 'unet.mid_block1.block2.norm.bias', 'unet.mid_attn.fn.fn.to_qkv.weight', 'unet.mid_attn.fn.fn.to_out.weight', 'unet.mid_attn.fn.fn.to_out.bias', 'unet.mid_attn.fn.norm.g', 'unet.mid_block2.mlp.1.weight', 'unet.mid_block2.mlp.1.bias', 'unet.mid_block2.block1.proj.weight', 'unet.mid_block2.block1.proj.bias', 'unet.mid_block2.block1.norm.weight', 'unet.mid_block2.block1.norm.bias', 'unet.mid_block2.block2.proj.weight', 'unet.mid_block2.block2.proj.bias', 'unet.mid_block2.block2.norm.weight', 'unet.mid_block2.block2.norm.bias', 'unet.final_res_block.mlp.1.weight', 'unet.final_res_block.mlp.1.bias', 'unet.final_res_block.block1.proj.weight', 'unet.final_res_block.block1.proj.bias', 'unet.final_res_block.block1.norm.weight', 
'unet.final_res_block.block1.norm.bias', 'unet.final_res_block.block2.proj.weight', 'unet.final_res_block.block2.proj.bias', 'unet.final_res_block.block2.norm.weight', 'unet.final_res_block.block2.norm.bias', 'unet.final_res_block.res_conv.weight', 'unet.final_res_block.res_conv.bias', 'unet.final_conv.weight', 'unet.final_conv.bias', 'conv_seg_new.weight', 'conv_seg_new.bias']
+ 2023-03-03 20:38:39,337 - mmseg - INFO - Parameters in decode_head freezed!
+ 2023-03-03 20:38:39,389 - mmseg - INFO - load checkpoint from local path: pretrained/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth
+ 2023-03-03 20:38:39,920 - mmseg - WARNING - The model and loaded state dict do not match exactly
+
+ unexpected key in source state_dict: decode_head.conv_seg.weight, decode_head.conv_seg.bias, decode_head.image_pool.1.conv.weight, decode_head.image_pool.1.bn.weight, decode_head.image_pool.1.bn.bias, decode_head.image_pool.1.bn.running_mean, decode_head.image_pool.1.bn.running_var, decode_head.image_pool.1.bn.num_batches_tracked, decode_head.aspp_modules.0.conv.weight, decode_head.aspp_modules.0.bn.weight, decode_head.aspp_modules.0.bn.bias, decode_head.aspp_modules.0.bn.running_mean, decode_head.aspp_modules.0.bn.running_var, decode_head.aspp_modules.0.bn.num_batches_tracked, decode_head.aspp_modules.1.depthwise_conv.conv.weight, decode_head.aspp_modules.1.depthwise_conv.bn.weight, decode_head.aspp_modules.1.depthwise_conv.bn.bias, decode_head.aspp_modules.1.depthwise_conv.bn.running_mean, decode_head.aspp_modules.1.depthwise_conv.bn.running_var, decode_head.aspp_modules.1.depthwise_conv.bn.num_batches_tracked, decode_head.aspp_modules.1.pointwise_conv.conv.weight, decode_head.aspp_modules.1.pointwise_conv.bn.weight, decode_head.aspp_modules.1.pointwise_conv.bn.bias, decode_head.aspp_modules.1.pointwise_conv.bn.running_mean, decode_head.aspp_modules.1.pointwise_conv.bn.running_var, decode_head.aspp_modules.1.pointwise_conv.bn.num_batches_tracked, decode_head.aspp_modules.2.depthwise_conv.conv.weight, decode_head.aspp_modules.2.depthwise_conv.bn.weight, decode_head.aspp_modules.2.depthwise_conv.bn.bias, decode_head.aspp_modules.2.depthwise_conv.bn.running_mean, decode_head.aspp_modules.2.depthwise_conv.bn.running_var, decode_head.aspp_modules.2.depthwise_conv.bn.num_batches_tracked, decode_head.aspp_modules.2.pointwise_conv.conv.weight, decode_head.aspp_modules.2.pointwise_conv.bn.weight, decode_head.aspp_modules.2.pointwise_conv.bn.bias, decode_head.aspp_modules.2.pointwise_conv.bn.running_mean, decode_head.aspp_modules.2.pointwise_conv.bn.running_var, decode_head.aspp_modules.2.pointwise_conv.bn.num_batches_tracked, decode_head.aspp_modules.3.depthwise_conv.conv.weight, decode_head.aspp_modules.3.depthwise_conv.bn.weight, decode_head.aspp_modules.3.depthwise_conv.bn.bias, decode_head.aspp_modules.3.depthwise_conv.bn.running_mean, decode_head.aspp_modules.3.depthwise_conv.bn.running_var, decode_head.aspp_modules.3.depthwise_conv.bn.num_batches_tracked, decode_head.aspp_modules.3.pointwise_conv.conv.weight, decode_head.aspp_modules.3.pointwise_conv.bn.weight, decode_head.aspp_modules.3.pointwise_conv.bn.bias, decode_head.aspp_modules.3.pointwise_conv.bn.running_mean, decode_head.aspp_modules.3.pointwise_conv.bn.running_var, decode_head.aspp_modules.3.pointwise_conv.bn.num_batches_tracked, decode_head.bottleneck.conv.weight, decode_head.bottleneck.bn.weight, decode_head.bottleneck.bn.bias, decode_head.bottleneck.bn.running_mean, decode_head.bottleneck.bn.running_var, decode_head.bottleneck.bn.num_batches_tracked, decode_head.c1_bottleneck.conv.weight, decode_head.c1_bottleneck.bn.weight, decode_head.c1_bottleneck.bn.bias, decode_head.c1_bottleneck.bn.running_mean, decode_head.c1_bottleneck.bn.running_var, decode_head.c1_bottleneck.bn.num_batches_tracked, decode_head.sep_bottleneck.0.depthwise_conv.conv.weight, decode_head.sep_bottleneck.0.depthwise_conv.bn.weight, decode_head.sep_bottleneck.0.depthwise_conv.bn.bias, decode_head.sep_bottleneck.0.depthwise_conv.bn.running_mean, decode_head.sep_bottleneck.0.depthwise_conv.bn.running_var, decode_head.sep_bottleneck.0.depthwise_conv.bn.num_batches_tracked, decode_head.sep_bottleneck.0.pointwise_conv.conv.weight, 
decode_head.sep_bottleneck.0.pointwise_conv.bn.weight, decode_head.sep_bottleneck.0.pointwise_conv.bn.bias, decode_head.sep_bottleneck.0.pointwise_conv.bn.running_mean, decode_head.sep_bottleneck.0.pointwise_conv.bn.running_var, decode_head.sep_bottleneck.0.pointwise_conv.bn.num_batches_tracked, decode_head.sep_bottleneck.1.depthwise_conv.conv.weight, decode_head.sep_bottleneck.1.depthwise_conv.bn.weight, decode_head.sep_bottleneck.1.depthwise_conv.bn.bias, decode_head.sep_bottleneck.1.depthwise_conv.bn.running_mean, decode_head.sep_bottleneck.1.depthwise_conv.bn.running_var, decode_head.sep_bottleneck.1.depthwise_conv.bn.num_batches_tracked, decode_head.sep_bottleneck.1.pointwise_conv.conv.weight, decode_head.sep_bottleneck.1.pointwise_conv.bn.weight, decode_head.sep_bottleneck.1.pointwise_conv.bn.bias, decode_head.sep_bottleneck.1.pointwise_conv.bn.running_mean, decode_head.sep_bottleneck.1.pointwise_conv.bn.running_var, decode_head.sep_bottleneck.1.pointwise_conv.bn.num_batches_tracked, auxiliary_head.conv_seg.weight, auxiliary_head.conv_seg.bias, auxiliary_head.convs.0.conv.weight, auxiliary_head.convs.0.bn.weight, auxiliary_head.convs.0.bn.bias, auxiliary_head.convs.0.bn.running_mean, auxiliary_head.convs.0.bn.running_var, auxiliary_head.convs.0.bn.num_batches_tracked
+
+ 2023-03-03 20:38:39,948 - mmseg - INFO - load checkpoint from local path: pretrained/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth
+ 2023-03-03 20:38:40,463 - mmseg - WARNING - The model and loaded state dict do not match exactly
+
+ unexpected key in source state_dict: backbone.stem.0.weight, backbone.stem.1.weight, backbone.stem.1.bias, backbone.stem.1.running_mean, backbone.stem.1.running_var, backbone.stem.1.num_batches_tracked, backbone.stem.3.weight, backbone.stem.4.weight, backbone.stem.4.bias, backbone.stem.4.running_mean, backbone.stem.4.running_var, backbone.stem.4.num_batches_tracked, backbone.stem.6.weight, backbone.stem.7.weight, backbone.stem.7.bias, backbone.stem.7.running_mean, backbone.stem.7.running_var, backbone.stem.7.num_batches_tracked, backbone.layer1.0.conv1.weight, backbone.layer1.0.bn1.weight, backbone.layer1.0.bn1.bias, backbone.layer1.0.bn1.running_mean, backbone.layer1.0.bn1.running_var, backbone.layer1.0.bn1.num_batches_tracked, backbone.layer1.0.conv2.weight, backbone.layer1.0.bn2.weight, backbone.layer1.0.bn2.bias, backbone.layer1.0.bn2.running_mean, backbone.layer1.0.bn2.running_var, backbone.layer1.0.bn2.num_batches_tracked, backbone.layer1.0.conv3.weight, backbone.layer1.0.bn3.weight, backbone.layer1.0.bn3.bias, backbone.layer1.0.bn3.running_mean, backbone.layer1.0.bn3.running_var, backbone.layer1.0.bn3.num_batches_tracked, backbone.layer1.0.downsample.0.weight, backbone.layer1.0.downsample.1.weight, backbone.layer1.0.downsample.1.bias, backbone.layer1.0.downsample.1.running_mean, backbone.layer1.0.downsample.1.running_var, backbone.layer1.0.downsample.1.num_batches_tracked, backbone.layer1.1.conv1.weight, backbone.layer1.1.bn1.weight, backbone.layer1.1.bn1.bias, backbone.layer1.1.bn1.running_mean, backbone.layer1.1.bn1.running_var, backbone.layer1.1.bn1.num_batches_tracked, backbone.layer1.1.conv2.weight, backbone.layer1.1.bn2.weight, backbone.layer1.1.bn2.bias, backbone.layer1.1.bn2.running_mean, backbone.layer1.1.bn2.running_var, backbone.layer1.1.bn2.num_batches_tracked, backbone.layer1.1.conv3.weight, backbone.layer1.1.bn3.weight, backbone.layer1.1.bn3.bias, backbone.layer1.1.bn3.running_mean, backbone.layer1.1.bn3.running_var, backbone.layer1.1.bn3.num_batches_tracked, backbone.layer1.2.conv1.weight, backbone.layer1.2.bn1.weight, backbone.layer1.2.bn1.bias, backbone.layer1.2.bn1.running_mean, backbone.layer1.2.bn1.running_var, backbone.layer1.2.bn1.num_batches_tracked, backbone.layer1.2.conv2.weight, backbone.layer1.2.bn2.weight, backbone.layer1.2.bn2.bias, backbone.layer1.2.bn2.running_mean, backbone.layer1.2.bn2.running_var, backbone.layer1.2.bn2.num_batches_tracked, backbone.layer1.2.conv3.weight, backbone.layer1.2.bn3.weight, backbone.layer1.2.bn3.bias, backbone.layer1.2.bn3.running_mean, backbone.layer1.2.bn3.running_var, backbone.layer1.2.bn3.num_batches_tracked, backbone.layer2.0.conv1.weight, backbone.layer2.0.bn1.weight, backbone.layer2.0.bn1.bias, backbone.layer2.0.bn1.running_mean, backbone.layer2.0.bn1.running_var, backbone.layer2.0.bn1.num_batches_tracked, backbone.layer2.0.conv2.weight, backbone.layer2.0.bn2.weight, backbone.layer2.0.bn2.bias, backbone.layer2.0.bn2.running_mean, backbone.layer2.0.bn2.running_var, backbone.layer2.0.bn2.num_batches_tracked, backbone.layer2.0.conv3.weight, backbone.layer2.0.bn3.weight, backbone.layer2.0.bn3.bias, backbone.layer2.0.bn3.running_mean, backbone.layer2.0.bn3.running_var, backbone.layer2.0.bn3.num_batches_tracked, backbone.layer2.0.downsample.0.weight, backbone.layer2.0.downsample.1.weight, backbone.layer2.0.downsample.1.bias, backbone.layer2.0.downsample.1.running_mean, backbone.layer2.0.downsample.1.running_var, backbone.layer2.0.downsample.1.num_batches_tracked, backbone.layer2.1.conv1.weight, 
backbone.layer2.1.bn1.weight, backbone.layer2.1.bn1.bias, backbone.layer2.1.bn1.running_mean, backbone.layer2.1.bn1.running_var, backbone.layer2.1.bn1.num_batches_tracked, backbone.layer2.1.conv2.weight, backbone.layer2.1.bn2.weight, backbone.layer2.1.bn2.bias, backbone.layer2.1.bn2.running_mean, backbone.layer2.1.bn2.running_var, backbone.layer2.1.bn2.num_batches_tracked, backbone.layer2.1.conv3.weight, backbone.layer2.1.bn3.weight, backbone.layer2.1.bn3.bias, backbone.layer2.1.bn3.running_mean, backbone.layer2.1.bn3.running_var, backbone.layer2.1.bn3.num_batches_tracked, backbone.layer2.2.conv1.weight, backbone.layer2.2.bn1.weight, backbone.layer2.2.bn1.bias, backbone.layer2.2.bn1.running_mean, backbone.layer2.2.bn1.running_var, backbone.layer2.2.bn1.num_batches_tracked, backbone.layer2.2.conv2.weight, backbone.layer2.2.bn2.weight, backbone.layer2.2.bn2.bias, backbone.layer2.2.bn2.running_mean, backbone.layer2.2.bn2.running_var, backbone.layer2.2.bn2.num_batches_tracked, backbone.layer2.2.conv3.weight, backbone.layer2.2.bn3.weight, backbone.layer2.2.bn3.bias, backbone.layer2.2.bn3.running_mean, backbone.layer2.2.bn3.running_var, backbone.layer2.2.bn3.num_batches_tracked, backbone.layer2.3.conv1.weight, backbone.layer2.3.bn1.weight, backbone.layer2.3.bn1.bias, backbone.layer2.3.bn1.running_mean, backbone.layer2.3.bn1.running_var, backbone.layer2.3.bn1.num_batches_tracked, backbone.layer2.3.conv2.weight, backbone.layer2.3.bn2.weight, backbone.layer2.3.bn2.bias, backbone.layer2.3.bn2.running_mean, backbone.layer2.3.bn2.running_var, backbone.layer2.3.bn2.num_batches_tracked, backbone.layer2.3.conv3.weight, backbone.layer2.3.bn3.weight, backbone.layer2.3.bn3.bias, backbone.layer2.3.bn3.running_mean, backbone.layer2.3.bn3.running_var, backbone.layer2.3.bn3.num_batches_tracked, backbone.layer3.0.conv1.weight, backbone.layer3.0.bn1.weight, backbone.layer3.0.bn1.bias, backbone.layer3.0.bn1.running_mean, backbone.layer3.0.bn1.running_var, backbone.layer3.0.bn1.num_batches_tracked, backbone.layer3.0.conv2.weight, backbone.layer3.0.bn2.weight, backbone.layer3.0.bn2.bias, backbone.layer3.0.bn2.running_mean, backbone.layer3.0.bn2.running_var, backbone.layer3.0.bn2.num_batches_tracked, backbone.layer3.0.conv3.weight, backbone.layer3.0.bn3.weight, backbone.layer3.0.bn3.bias, backbone.layer3.0.bn3.running_mean, backbone.layer3.0.bn3.running_var, backbone.layer3.0.bn3.num_batches_tracked, backbone.layer3.0.downsample.0.weight, backbone.layer3.0.downsample.1.weight, backbone.layer3.0.downsample.1.bias, backbone.layer3.0.downsample.1.running_mean, backbone.layer3.0.downsample.1.running_var, backbone.layer3.0.downsample.1.num_batches_tracked, backbone.layer3.1.conv1.weight, backbone.layer3.1.bn1.weight, backbone.layer3.1.bn1.bias, backbone.layer3.1.bn1.running_mean, backbone.layer3.1.bn1.running_var, backbone.layer3.1.bn1.num_batches_tracked, backbone.layer3.1.conv2.weight, backbone.layer3.1.bn2.weight, backbone.layer3.1.bn2.bias, backbone.layer3.1.bn2.running_mean, backbone.layer3.1.bn2.running_var, backbone.layer3.1.bn2.num_batches_tracked, backbone.layer3.1.conv3.weight, backbone.layer3.1.bn3.weight, backbone.layer3.1.bn3.bias, backbone.layer3.1.bn3.running_mean, backbone.layer3.1.bn3.running_var, backbone.layer3.1.bn3.num_batches_tracked, backbone.layer3.2.conv1.weight, backbone.layer3.2.bn1.weight, backbone.layer3.2.bn1.bias, backbone.layer3.2.bn1.running_mean, backbone.layer3.2.bn1.running_var, backbone.layer3.2.bn1.num_batches_tracked, backbone.layer3.2.conv2.weight, backbone.layer3.2.bn2.weight, 
backbone.layer3.2.bn2.bias, backbone.layer3.2.bn2.running_mean, backbone.layer3.2.bn2.running_var, backbone.layer3.2.bn2.num_batches_tracked, backbone.layer3.2.conv3.weight, backbone.layer3.2.bn3.weight, backbone.layer3.2.bn3.bias, backbone.layer3.2.bn3.running_mean, backbone.layer3.2.bn3.running_var, backbone.layer3.2.bn3.num_batches_tracked, backbone.layer3.3.conv1.weight, backbone.layer3.3.bn1.weight, backbone.layer3.3.bn1.bias, backbone.layer3.3.bn1.running_mean, backbone.layer3.3.bn1.running_var, backbone.layer3.3.bn1.num_batches_tracked, backbone.layer3.3.conv2.weight, backbone.layer3.3.bn2.weight, backbone.layer3.3.bn2.bias, backbone.layer3.3.bn2.running_mean, backbone.layer3.3.bn2.running_var, backbone.layer3.3.bn2.num_batches_tracked, backbone.layer3.3.conv3.weight, backbone.layer3.3.bn3.weight, backbone.layer3.3.bn3.bias, backbone.layer3.3.bn3.running_mean, backbone.layer3.3.bn3.running_var, backbone.layer3.3.bn3.num_batches_tracked, backbone.layer3.4.conv1.weight, backbone.layer3.4.bn1.weight, backbone.layer3.4.bn1.bias, backbone.layer3.4.bn1.running_mean, backbone.layer3.4.bn1.running_var, backbone.layer3.4.bn1.num_batches_tracked, backbone.layer3.4.conv2.weight, backbone.layer3.4.bn2.weight, backbone.layer3.4.bn2.bias, backbone.layer3.4.bn2.running_mean, backbone.layer3.4.bn2.running_var, backbone.layer3.4.bn2.num_batches_tracked, backbone.layer3.4.conv3.weight, backbone.layer3.4.bn3.weight, backbone.layer3.4.bn3.bias, backbone.layer3.4.bn3.running_mean, backbone.layer3.4.bn3.running_var, backbone.layer3.4.bn3.num_batches_tracked, backbone.layer3.5.conv1.weight, backbone.layer3.5.bn1.weight, backbone.layer3.5.bn1.bias, backbone.layer3.5.bn1.running_mean, backbone.layer3.5.bn1.running_var, backbone.layer3.5.bn1.num_batches_tracked, backbone.layer3.5.conv2.weight, backbone.layer3.5.bn2.weight, backbone.layer3.5.bn2.bias, backbone.layer3.5.bn2.running_mean, backbone.layer3.5.bn2.running_var, backbone.layer3.5.bn2.num_batches_tracked, backbone.layer3.5.conv3.weight, backbone.layer3.5.bn3.weight, backbone.layer3.5.bn3.bias, backbone.layer3.5.bn3.running_mean, backbone.layer3.5.bn3.running_var, backbone.layer3.5.bn3.num_batches_tracked, backbone.layer3.6.conv1.weight, backbone.layer3.6.bn1.weight, backbone.layer3.6.bn1.bias, backbone.layer3.6.bn1.running_mean, backbone.layer3.6.bn1.running_var, backbone.layer3.6.bn1.num_batches_tracked, backbone.layer3.6.conv2.weight, backbone.layer3.6.bn2.weight, backbone.layer3.6.bn2.bias, backbone.layer3.6.bn2.running_mean, backbone.layer3.6.bn2.running_var, backbone.layer3.6.bn2.num_batches_tracked, backbone.layer3.6.conv3.weight, backbone.layer3.6.bn3.weight, backbone.layer3.6.bn3.bias, backbone.layer3.6.bn3.running_mean, backbone.layer3.6.bn3.running_var, backbone.layer3.6.bn3.num_batches_tracked, backbone.layer3.7.conv1.weight, backbone.layer3.7.bn1.weight, backbone.layer3.7.bn1.bias, backbone.layer3.7.bn1.running_mean, backbone.layer3.7.bn1.running_var, backbone.layer3.7.bn1.num_batches_tracked, backbone.layer3.7.conv2.weight, backbone.layer3.7.bn2.weight, backbone.layer3.7.bn2.bias, backbone.layer3.7.bn2.running_mean, backbone.layer3.7.bn2.running_var, backbone.layer3.7.bn2.num_batches_tracked, backbone.layer3.7.conv3.weight, backbone.layer3.7.bn3.weight, backbone.layer3.7.bn3.bias, backbone.layer3.7.bn3.running_mean, backbone.layer3.7.bn3.running_var, backbone.layer3.7.bn3.num_batches_tracked, backbone.layer3.8.conv1.weight, backbone.layer3.8.bn1.weight, backbone.layer3.8.bn1.bias, backbone.layer3.8.bn1.running_mean, 
backbone.layer3.8.bn1.running_var, backbone.layer3.8.bn1.num_batches_tracked, backbone.layer3.8.conv2.weight, backbone.layer3.8.bn2.weight, backbone.layer3.8.bn2.bias, backbone.layer3.8.bn2.running_mean, backbone.layer3.8.bn2.running_var, backbone.layer3.8.bn2.num_batches_tracked, backbone.layer3.8.conv3.weight, backbone.layer3.8.bn3.weight, backbone.layer3.8.bn3.bias, backbone.layer3.8.bn3.running_mean, backbone.layer3.8.bn3.running_var, backbone.layer3.8.bn3.num_batches_tracked, backbone.layer3.9.conv1.weight, backbone.layer3.9.bn1.weight, backbone.layer3.9.bn1.bias, backbone.layer3.9.bn1.running_mean, backbone.layer3.9.bn1.running_var, backbone.layer3.9.bn1.num_batches_tracked, backbone.layer3.9.conv2.weight, backbone.layer3.9.bn2.weight, backbone.layer3.9.bn2.bias, backbone.layer3.9.bn2.running_mean, backbone.layer3.9.bn2.running_var, backbone.layer3.9.bn2.num_batches_tracked, backbone.layer3.9.conv3.weight, backbone.layer3.9.bn3.weight, backbone.layer3.9.bn3.bias, backbone.layer3.9.bn3.running_mean, backbone.layer3.9.bn3.running_var, backbone.layer3.9.bn3.num_batches_tracked, backbone.layer3.10.conv1.weight, backbone.layer3.10.bn1.weight, backbone.layer3.10.bn1.bias, backbone.layer3.10.bn1.running_mean, backbone.layer3.10.bn1.running_var, backbone.layer3.10.bn1.num_batches_tracked, backbone.layer3.10.conv2.weight, backbone.layer3.10.bn2.weight, backbone.layer3.10.bn2.bias, backbone.layer3.10.bn2.running_mean, backbone.layer3.10.bn2.running_var, backbone.layer3.10.bn2.num_batches_tracked, backbone.layer3.10.conv3.weight, backbone.layer3.10.bn3.weight, backbone.layer3.10.bn3.bias, backbone.layer3.10.bn3.running_mean, backbone.layer3.10.bn3.running_var, backbone.layer3.10.bn3.num_batches_tracked, backbone.layer3.11.conv1.weight, backbone.layer3.11.bn1.weight, backbone.layer3.11.bn1.bias, backbone.layer3.11.bn1.running_mean, backbone.layer3.11.bn1.running_var, backbone.layer3.11.bn1.num_batches_tracked, backbone.layer3.11.conv2.weight, backbone.layer3.11.bn2.weight, backbone.layer3.11.bn2.bias, backbone.layer3.11.bn2.running_mean, backbone.layer3.11.bn2.running_var, backbone.layer3.11.bn2.num_batches_tracked, backbone.layer3.11.conv3.weight, backbone.layer3.11.bn3.weight, backbone.layer3.11.bn3.bias, backbone.layer3.11.bn3.running_mean, backbone.layer3.11.bn3.running_var, backbone.layer3.11.bn3.num_batches_tracked, backbone.layer3.12.conv1.weight, backbone.layer3.12.bn1.weight, backbone.layer3.12.bn1.bias, backbone.layer3.12.bn1.running_mean, backbone.layer3.12.bn1.running_var, backbone.layer3.12.bn1.num_batches_tracked, backbone.layer3.12.conv2.weight, backbone.layer3.12.bn2.weight, backbone.layer3.12.bn2.bias, backbone.layer3.12.bn2.running_mean, backbone.layer3.12.bn2.running_var, backbone.layer3.12.bn2.num_batches_tracked, backbone.layer3.12.conv3.weight, backbone.layer3.12.bn3.weight, backbone.layer3.12.bn3.bias, backbone.layer3.12.bn3.running_mean, backbone.layer3.12.bn3.running_var, backbone.layer3.12.bn3.num_batches_tracked, backbone.layer3.13.conv1.weight, backbone.layer3.13.bn1.weight, backbone.layer3.13.bn1.bias, backbone.layer3.13.bn1.running_mean, backbone.layer3.13.bn1.running_var, backbone.layer3.13.bn1.num_batches_tracked, backbone.layer3.13.conv2.weight, backbone.layer3.13.bn2.weight, backbone.layer3.13.bn2.bias, backbone.layer3.13.bn2.running_mean, backbone.layer3.13.bn2.running_var, backbone.layer3.13.bn2.num_batches_tracked, backbone.layer3.13.conv3.weight, backbone.layer3.13.bn3.weight, backbone.layer3.13.bn3.bias, backbone.layer3.13.bn3.running_mean, 
backbone.layer3.13.bn3.running_var, backbone.layer3.13.bn3.num_batches_tracked, backbone.layer3.14.conv1.weight, backbone.layer3.14.bn1.weight, backbone.layer3.14.bn1.bias, backbone.layer3.14.bn1.running_mean, backbone.layer3.14.bn1.running_var, backbone.layer3.14.bn1.num_batches_tracked, backbone.layer3.14.conv2.weight, backbone.layer3.14.bn2.weight, backbone.layer3.14.bn2.bias, backbone.layer3.14.bn2.running_mean, backbone.layer3.14.bn2.running_var, backbone.layer3.14.bn2.num_batches_tracked, backbone.layer3.14.conv3.weight, backbone.layer3.14.bn3.weight, backbone.layer3.14.bn3.bias, backbone.layer3.14.bn3.running_mean, backbone.layer3.14.bn3.running_var, backbone.layer3.14.bn3.num_batches_tracked, backbone.layer3.15.conv1.weight, backbone.layer3.15.bn1.weight, backbone.layer3.15.bn1.bias, backbone.layer3.15.bn1.running_mean, backbone.layer3.15.bn1.running_var, backbone.layer3.15.bn1.num_batches_tracked, backbone.layer3.15.conv2.weight, backbone.layer3.15.bn2.weight, backbone.layer3.15.bn2.bias, backbone.layer3.15.bn2.running_mean, backbone.layer3.15.bn2.running_var, backbone.layer3.15.bn2.num_batches_tracked, backbone.layer3.15.conv3.weight, backbone.layer3.15.bn3.weight, backbone.layer3.15.bn3.bias, backbone.layer3.15.bn3.running_mean, backbone.layer3.15.bn3.running_var, backbone.layer3.15.bn3.num_batches_tracked, backbone.layer3.16.conv1.weight, backbone.layer3.16.bn1.weight, backbone.layer3.16.bn1.bias, backbone.layer3.16.bn1.running_mean, backbone.layer3.16.bn1.running_var, backbone.layer3.16.bn1.num_batches_tracked, backbone.layer3.16.conv2.weight, backbone.layer3.16.bn2.weight, backbone.layer3.16.bn2.bias, backbone.layer3.16.bn2.running_mean, backbone.layer3.16.bn2.running_var, backbone.layer3.16.bn2.num_batches_tracked, backbone.layer3.16.conv3.weight, backbone.layer3.16.bn3.weight, backbone.layer3.16.bn3.bias, backbone.layer3.16.bn3.running_mean, backbone.layer3.16.bn3.running_var, backbone.layer3.16.bn3.num_batches_tracked, backbone.layer3.17.conv1.weight, backbone.layer3.17.bn1.weight, backbone.layer3.17.bn1.bias, backbone.layer3.17.bn1.running_mean, backbone.layer3.17.bn1.running_var, backbone.layer3.17.bn1.num_batches_tracked, backbone.layer3.17.conv2.weight, backbone.layer3.17.bn2.weight, backbone.layer3.17.bn2.bias, backbone.layer3.17.bn2.running_mean, backbone.layer3.17.bn2.running_var, backbone.layer3.17.bn2.num_batches_tracked, backbone.layer3.17.conv3.weight, backbone.layer3.17.bn3.weight, backbone.layer3.17.bn3.bias, backbone.layer3.17.bn3.running_mean, backbone.layer3.17.bn3.running_var, backbone.layer3.17.bn3.num_batches_tracked, backbone.layer3.18.conv1.weight, backbone.layer3.18.bn1.weight, backbone.layer3.18.bn1.bias, backbone.layer3.18.bn1.running_mean, backbone.layer3.18.bn1.running_var, backbone.layer3.18.bn1.num_batches_tracked, backbone.layer3.18.conv2.weight, backbone.layer3.18.bn2.weight, backbone.layer3.18.bn2.bias, backbone.layer3.18.bn2.running_mean, backbone.layer3.18.bn2.running_var, backbone.layer3.18.bn2.num_batches_tracked, backbone.layer3.18.conv3.weight, backbone.layer3.18.bn3.weight, backbone.layer3.18.bn3.bias, backbone.layer3.18.bn3.running_mean, backbone.layer3.18.bn3.running_var, backbone.layer3.18.bn3.num_batches_tracked, backbone.layer3.19.conv1.weight, backbone.layer3.19.bn1.weight, backbone.layer3.19.bn1.bias, backbone.layer3.19.bn1.running_mean, backbone.layer3.19.bn1.running_var, backbone.layer3.19.bn1.num_batches_tracked, backbone.layer3.19.conv2.weight, backbone.layer3.19.bn2.weight, backbone.layer3.19.bn2.bias, 
backbone.layer3.19.bn2.running_mean, backbone.layer3.19.bn2.running_var, backbone.layer3.19.bn2.num_batches_tracked, backbone.layer3.19.conv3.weight, backbone.layer3.19.bn3.weight, backbone.layer3.19.bn3.bias, backbone.layer3.19.bn3.running_mean, backbone.layer3.19.bn3.running_var, backbone.layer3.19.bn3.num_batches_tracked, backbone.layer3.20.conv1.weight, backbone.layer3.20.bn1.weight, backbone.layer3.20.bn1.bias, backbone.layer3.20.bn1.running_mean, backbone.layer3.20.bn1.running_var, backbone.layer3.20.bn1.num_batches_tracked, backbone.layer3.20.conv2.weight, backbone.layer3.20.bn2.weight, backbone.layer3.20.bn2.bias, backbone.layer3.20.bn2.running_mean, backbone.layer3.20.bn2.running_var, backbone.layer3.20.bn2.num_batches_tracked, backbone.layer3.20.conv3.weight, backbone.layer3.20.bn3.weight, backbone.layer3.20.bn3.bias, backbone.layer3.20.bn3.running_mean, backbone.layer3.20.bn3.running_var, backbone.layer3.20.bn3.num_batches_tracked, backbone.layer3.21.conv1.weight, backbone.layer3.21.bn1.weight, backbone.layer3.21.bn1.bias, backbone.layer3.21.bn1.running_mean, backbone.layer3.21.bn1.running_var, backbone.layer3.21.bn1.num_batches_tracked, backbone.layer3.21.conv2.weight, backbone.layer3.21.bn2.weight, backbone.layer3.21.bn2.bias, backbone.layer3.21.bn2.running_mean, backbone.layer3.21.bn2.running_var, backbone.layer3.21.bn2.num_batches_tracked, backbone.layer3.21.conv3.weight, backbone.layer3.21.bn3.weight, backbone.layer3.21.bn3.bias, backbone.layer3.21.bn3.running_mean, backbone.layer3.21.bn3.running_var, backbone.layer3.21.bn3.num_batches_tracked, backbone.layer3.22.conv1.weight, backbone.layer3.22.bn1.weight, backbone.layer3.22.bn1.bias, backbone.layer3.22.bn1.running_mean, backbone.layer3.22.bn1.running_var, backbone.layer3.22.bn1.num_batches_tracked, backbone.layer3.22.conv2.weight, backbone.layer3.22.bn2.weight, backbone.layer3.22.bn2.bias, backbone.layer3.22.bn2.running_mean, backbone.layer3.22.bn2.running_var, backbone.layer3.22.bn2.num_batches_tracked, backbone.layer3.22.conv3.weight, backbone.layer3.22.bn3.weight, backbone.layer3.22.bn3.bias, backbone.layer3.22.bn3.running_mean, backbone.layer3.22.bn3.running_var, backbone.layer3.22.bn3.num_batches_tracked, backbone.layer4.0.conv1.weight, backbone.layer4.0.bn1.weight, backbone.layer4.0.bn1.bias, backbone.layer4.0.bn1.running_mean, backbone.layer4.0.bn1.running_var, backbone.layer4.0.bn1.num_batches_tracked, backbone.layer4.0.conv2.weight, backbone.layer4.0.bn2.weight, backbone.layer4.0.bn2.bias, backbone.layer4.0.bn2.running_mean, backbone.layer4.0.bn2.running_var, backbone.layer4.0.bn2.num_batches_tracked, backbone.layer4.0.conv3.weight, backbone.layer4.0.bn3.weight, backbone.layer4.0.bn3.bias, backbone.layer4.0.bn3.running_mean, backbone.layer4.0.bn3.running_var, backbone.layer4.0.bn3.num_batches_tracked, backbone.layer4.0.downsample.0.weight, backbone.layer4.0.downsample.1.weight, backbone.layer4.0.downsample.1.bias, backbone.layer4.0.downsample.1.running_mean, backbone.layer4.0.downsample.1.running_var, backbone.layer4.0.downsample.1.num_batches_tracked, backbone.layer4.1.conv1.weight, backbone.layer4.1.bn1.weight, backbone.layer4.1.bn1.bias, backbone.layer4.1.bn1.running_mean, backbone.layer4.1.bn1.running_var, backbone.layer4.1.bn1.num_batches_tracked, backbone.layer4.1.conv2.weight, backbone.layer4.1.bn2.weight, backbone.layer4.1.bn2.bias, backbone.layer4.1.bn2.running_mean, backbone.layer4.1.bn2.running_var, backbone.layer4.1.bn2.num_batches_tracked, backbone.layer4.1.conv3.weight, backbone.layer4.1.bn3.weight, 
backbone.layer4.1.bn3.bias, backbone.layer4.1.bn3.running_mean, backbone.layer4.1.bn3.running_var, backbone.layer4.1.bn3.num_batches_tracked, backbone.layer4.2.conv1.weight, backbone.layer4.2.bn1.weight, backbone.layer4.2.bn1.bias, backbone.layer4.2.bn1.running_mean, backbone.layer4.2.bn1.running_var, backbone.layer4.2.bn1.num_batches_tracked, backbone.layer4.2.conv2.weight, backbone.layer4.2.bn2.weight, backbone.layer4.2.bn2.bias, backbone.layer4.2.bn2.running_mean, backbone.layer4.2.bn2.running_var, backbone.layer4.2.bn2.num_batches_tracked, backbone.layer4.2.conv3.weight, backbone.layer4.2.bn3.weight, backbone.layer4.2.bn3.bias, backbone.layer4.2.bn3.running_mean, backbone.layer4.2.bn3.running_var, backbone.layer4.2.bn3.num_batches_tracked, auxiliary_head.conv_seg.weight, auxiliary_head.conv_seg.bias, auxiliary_head.convs.0.conv.weight, auxiliary_head.convs.0.bn.weight, auxiliary_head.convs.0.bn.bias, auxiliary_head.convs.0.bn.running_mean, auxiliary_head.convs.0.bn.running_var, auxiliary_head.convs.0.bn.num_batches_tracked
233
+
234
+ missing keys in source state_dict: unet.init_conv.weight, unet.init_conv.bias, unet.time_mlp.1.weight, unet.time_mlp.1.bias, unet.time_mlp.3.weight, unet.time_mlp.3.bias, unet.downs.0.0.mlp.1.weight, unet.downs.0.0.mlp.1.bias, unet.downs.0.0.block1.proj.weight, unet.downs.0.0.block1.proj.bias, unet.downs.0.0.block1.norm.weight, unet.downs.0.0.block1.norm.bias, unet.downs.0.0.block2.proj.weight, unet.downs.0.0.block2.proj.bias, unet.downs.0.0.block2.norm.weight, unet.downs.0.0.block2.norm.bias, unet.downs.0.1.mlp.1.weight, unet.downs.0.1.mlp.1.bias, unet.downs.0.1.block1.proj.weight, unet.downs.0.1.block1.proj.bias, unet.downs.0.1.block1.norm.weight, unet.downs.0.1.block1.norm.bias, unet.downs.0.1.block2.proj.weight, unet.downs.0.1.block2.proj.bias, unet.downs.0.1.block2.norm.weight, unet.downs.0.1.block2.norm.bias, unet.downs.0.2.fn.fn.to_qkv.weight, unet.downs.0.2.fn.fn.to_out.0.weight, unet.downs.0.2.fn.fn.to_out.0.bias, unet.downs.0.2.fn.fn.to_out.1.g, unet.downs.0.2.fn.norm.g, unet.downs.0.3.weight, unet.downs.0.3.bias, unet.downs.1.0.mlp.1.weight, unet.downs.1.0.mlp.1.bias, unet.downs.1.0.block1.proj.weight, unet.downs.1.0.block1.proj.bias, unet.downs.1.0.block1.norm.weight, unet.downs.1.0.block1.norm.bias, unet.downs.1.0.block2.proj.weight, unet.downs.1.0.block2.proj.bias, unet.downs.1.0.block2.norm.weight, unet.downs.1.0.block2.norm.bias, unet.downs.1.1.mlp.1.weight, unet.downs.1.1.mlp.1.bias, unet.downs.1.1.block1.proj.weight, unet.downs.1.1.block1.proj.bias, unet.downs.1.1.block1.norm.weight, unet.downs.1.1.block1.norm.bias, unet.downs.1.1.block2.proj.weight, unet.downs.1.1.block2.proj.bias, unet.downs.1.1.block2.norm.weight, unet.downs.1.1.block2.norm.bias, unet.downs.1.2.fn.fn.to_qkv.weight, unet.downs.1.2.fn.fn.to_out.0.weight, unet.downs.1.2.fn.fn.to_out.0.bias, unet.downs.1.2.fn.fn.to_out.1.g, unet.downs.1.2.fn.norm.g, unet.downs.1.3.weight, unet.downs.1.3.bias, unet.downs.2.0.mlp.1.weight, unet.downs.2.0.mlp.1.bias, unet.downs.2.0.block1.proj.weight, unet.downs.2.0.block1.proj.bias, unet.downs.2.0.block1.norm.weight, unet.downs.2.0.block1.norm.bias, unet.downs.2.0.block2.proj.weight, unet.downs.2.0.block2.proj.bias, unet.downs.2.0.block2.norm.weight, unet.downs.2.0.block2.norm.bias, unet.downs.2.1.mlp.1.weight, unet.downs.2.1.mlp.1.bias, unet.downs.2.1.block1.proj.weight, unet.downs.2.1.block1.proj.bias, unet.downs.2.1.block1.norm.weight, unet.downs.2.1.block1.norm.bias, unet.downs.2.1.block2.proj.weight, unet.downs.2.1.block2.proj.bias, unet.downs.2.1.block2.norm.weight, unet.downs.2.1.block2.norm.bias, unet.downs.2.2.fn.fn.to_qkv.weight, unet.downs.2.2.fn.fn.to_out.0.weight, unet.downs.2.2.fn.fn.to_out.0.bias, unet.downs.2.2.fn.fn.to_out.1.g, unet.downs.2.2.fn.norm.g, unet.downs.2.3.weight, unet.downs.2.3.bias, unet.ups.0.0.mlp.1.weight, unet.ups.0.0.mlp.1.bias, unet.ups.0.0.block1.proj.weight, unet.ups.0.0.block1.proj.bias, unet.ups.0.0.block1.norm.weight, unet.ups.0.0.block1.norm.bias, unet.ups.0.0.block2.proj.weight, unet.ups.0.0.block2.proj.bias, unet.ups.0.0.block2.norm.weight, unet.ups.0.0.block2.norm.bias, unet.ups.0.0.res_conv.weight, unet.ups.0.0.res_conv.bias, unet.ups.0.1.mlp.1.weight, unet.ups.0.1.mlp.1.bias, unet.ups.0.1.block1.proj.weight, unet.ups.0.1.block1.proj.bias, unet.ups.0.1.block1.norm.weight, unet.ups.0.1.block1.norm.bias, unet.ups.0.1.block2.proj.weight, unet.ups.0.1.block2.proj.bias, unet.ups.0.1.block2.norm.weight, unet.ups.0.1.block2.norm.bias, unet.ups.0.1.res_conv.weight, unet.ups.0.1.res_conv.bias, unet.ups.0.2.fn.fn.to_qkv.weight, 
unet.ups.0.2.fn.fn.to_out.0.weight, unet.ups.0.2.fn.fn.to_out.0.bias, unet.ups.0.2.fn.fn.to_out.1.g, unet.ups.0.2.fn.norm.g, unet.ups.0.3.1.weight, unet.ups.0.3.1.bias, unet.ups.1.0.mlp.1.weight, unet.ups.1.0.mlp.1.bias, unet.ups.1.0.block1.proj.weight, unet.ups.1.0.block1.proj.bias, unet.ups.1.0.block1.norm.weight, unet.ups.1.0.block1.norm.bias, unet.ups.1.0.block2.proj.weight, unet.ups.1.0.block2.proj.bias, unet.ups.1.0.block2.norm.weight, unet.ups.1.0.block2.norm.bias, unet.ups.1.0.res_conv.weight, unet.ups.1.0.res_conv.bias, unet.ups.1.1.mlp.1.weight, unet.ups.1.1.mlp.1.bias, unet.ups.1.1.block1.proj.weight, unet.ups.1.1.block1.proj.bias, unet.ups.1.1.block1.norm.weight, unet.ups.1.1.block1.norm.bias, unet.ups.1.1.block2.proj.weight, unet.ups.1.1.block2.proj.bias, unet.ups.1.1.block2.norm.weight, unet.ups.1.1.block2.norm.bias, unet.ups.1.1.res_conv.weight, unet.ups.1.1.res_conv.bias, unet.ups.1.2.fn.fn.to_qkv.weight, unet.ups.1.2.fn.fn.to_out.0.weight, unet.ups.1.2.fn.fn.to_out.0.bias, unet.ups.1.2.fn.fn.to_out.1.g, unet.ups.1.2.fn.norm.g, unet.ups.1.3.1.weight, unet.ups.1.3.1.bias, unet.ups.2.0.mlp.1.weight, unet.ups.2.0.mlp.1.bias, unet.ups.2.0.block1.proj.weight, unet.ups.2.0.block1.proj.bias, unet.ups.2.0.block1.norm.weight, unet.ups.2.0.block1.norm.bias, unet.ups.2.0.block2.proj.weight, unet.ups.2.0.block2.proj.bias, unet.ups.2.0.block2.norm.weight, unet.ups.2.0.block2.norm.bias, unet.ups.2.0.res_conv.weight, unet.ups.2.0.res_conv.bias, unet.ups.2.1.mlp.1.weight, unet.ups.2.1.mlp.1.bias, unet.ups.2.1.block1.proj.weight, unet.ups.2.1.block1.proj.bias, unet.ups.2.1.block1.norm.weight, unet.ups.2.1.block1.norm.bias, unet.ups.2.1.block2.proj.weight, unet.ups.2.1.block2.proj.bias, unet.ups.2.1.block2.norm.weight, unet.ups.2.1.block2.norm.bias, unet.ups.2.1.res_conv.weight, unet.ups.2.1.res_conv.bias, unet.ups.2.2.fn.fn.to_qkv.weight, unet.ups.2.2.fn.fn.to_out.0.weight, unet.ups.2.2.fn.fn.to_out.0.bias, unet.ups.2.2.fn.fn.to_out.1.g, unet.ups.2.2.fn.norm.g, unet.ups.2.3.weight, unet.ups.2.3.bias, unet.mid_block1.mlp.1.weight, unet.mid_block1.mlp.1.bias, unet.mid_block1.block1.proj.weight, unet.mid_block1.block1.proj.bias, unet.mid_block1.block1.norm.weight, unet.mid_block1.block1.norm.bias, unet.mid_block1.block2.proj.weight, unet.mid_block1.block2.proj.bias, unet.mid_block1.block2.norm.weight, unet.mid_block1.block2.norm.bias, unet.mid_attn.fn.fn.to_qkv.weight, unet.mid_attn.fn.fn.to_out.weight, unet.mid_attn.fn.fn.to_out.bias, unet.mid_attn.fn.norm.g, unet.mid_block2.mlp.1.weight, unet.mid_block2.mlp.1.bias, unet.mid_block2.block1.proj.weight, unet.mid_block2.block1.proj.bias, unet.mid_block2.block1.norm.weight, unet.mid_block2.block1.norm.bias, unet.mid_block2.block2.proj.weight, unet.mid_block2.block2.proj.bias, unet.mid_block2.block2.norm.weight, unet.mid_block2.block2.norm.bias, unet.final_res_block.mlp.1.weight, unet.final_res_block.mlp.1.bias, unet.final_res_block.block1.proj.weight, unet.final_res_block.block1.proj.bias, unet.final_res_block.block1.norm.weight, unet.final_res_block.block1.norm.bias, unet.final_res_block.block2.proj.weight, unet.final_res_block.block2.proj.bias, unet.final_res_block.block2.norm.weight, unet.final_res_block.block2.norm.bias, unet.final_res_block.res_conv.weight, unet.final_res_block.res_conv.bias, unet.final_conv.weight, unet.final_conv.bias, conv_seg_new.weight, conv_seg_new.bias, embed.weight
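The two key reports above are the expected outcome of initializing this model from a plain DeepLabV3+ checkpoint: keys that exist in the checkpoint but are not used by the module being initialized are listed as unexpected, while parameters that are new in this model (the UNet denoising head under unet.*, plus conv_seg_new and embed) are listed as missing and keep their fresh initialization. A minimal PyTorch sketch of how such a report is produced; the toy modules below are stand-ins for the real backbone and head, and only load_state_dict(strict=False) is assumed:

import torch.nn as nn

# Toy illustration (not the real model): ckpt_net plays the role of the pretrained
# DeepLabV3+ checkpoint, new_net the model defined here, which drops one module
# and adds new parameters the checkpoint cannot provide.
ckpt_net = nn.ModuleDict({"backbone": nn.Linear(4, 4), "auxiliary_head": nn.Linear(4, 2)})
new_net = nn.ModuleDict({"backbone": nn.Linear(4, 4), "unet": nn.Linear(4, 4), "embed": nn.Embedding(20, 16)})

report = new_net.load_state_dict(ckpt_net.state_dict(), strict=False)
print("missing keys:", report.missing_keys)        # unet.*, embed.* -> trained from scratch
print("unexpected keys:", report.unexpected_keys)  # auxiliary_head.* -> ignored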
235
+
236
+ 2023-03-03 20:38:40,512 - mmseg - INFO - EncoderDecoderFreeze(
237
+ (backbone): ResNetV1cCustomInitWeights(
238
+ (stem): Sequential(
239
+ (0): Conv2d(3, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
240
+ (1): SyncBatchNorm(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
241
+ (2): ReLU(inplace=True)
242
+ (3): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
243
+ (4): SyncBatchNorm(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
244
+ (5): ReLU(inplace=True)
245
+ (6): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
246
+ (7): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
247
+ (8): ReLU(inplace=True)
248
+ )
249
+ (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
250
+ (layer1): ResLayer(
251
+ (0): Bottleneck(
252
+ (conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
253
+ (bn1): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
254
+ (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
255
+ (bn2): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
256
+ (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
257
+ (bn3): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
258
+ (relu): ReLU(inplace=True)
259
+ (downsample): Sequential(
260
+ (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
261
+ (1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
262
+ )
263
+ )
264
+ (1): Bottleneck(
265
+ (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
266
+ (bn1): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
267
+ (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
268
+ (bn2): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
269
+ (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
270
+ (bn3): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
271
+ (relu): ReLU(inplace=True)
272
+ )
273
+ (2): Bottleneck(
274
+ (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
275
+ (bn1): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
276
+ (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
277
+ (bn2): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
278
+ (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
279
+ (bn3): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
280
+ (relu): ReLU(inplace=True)
281
+ )
282
+ )
283
+ (layer2): ResLayer(
284
+ (0): Bottleneck(
285
+ (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
286
+ (bn1): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
287
+ (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
288
+ (bn2): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
289
+ (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
290
+ (bn3): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
291
+ (relu): ReLU(inplace=True)
292
+ (downsample): Sequential(
293
+ (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
294
+ (1): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
295
+ )
296
+ )
297
+ (1): Bottleneck(
298
+ (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
299
+ (bn1): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
300
+ (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
301
+ (bn2): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
302
+ (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
303
+ (bn3): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
304
+ (relu): ReLU(inplace=True)
305
+ )
306
+ (2): Bottleneck(
307
+ (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
308
+ (bn1): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
309
+ (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
310
+ (bn2): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
311
+ (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
312
+ (bn3): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
313
+ (relu): ReLU(inplace=True)
314
+ )
315
+ (3): Bottleneck(
316
+ (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
317
+ (bn1): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
318
+ (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
319
+ (bn2): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
320
+ (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
321
+ (bn3): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
322
+ (relu): ReLU(inplace=True)
323
+ )
324
+ )
325
+ (layer3): ResLayer(
326
+ (0): Bottleneck(
327
+ (conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
328
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
329
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
330
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
331
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
332
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
333
+ (relu): ReLU(inplace=True)
334
+ (downsample): Sequential(
335
+ (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
336
+ (1): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
337
+ )
338
+ )
339
+ (1): Bottleneck(
340
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
341
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
342
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
343
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
344
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
345
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
346
+ (relu): ReLU(inplace=True)
347
+ )
348
+ (2): Bottleneck(
349
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
350
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
351
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
352
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
353
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
354
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
355
+ (relu): ReLU(inplace=True)
356
+ )
357
+ (3): Bottleneck(
358
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
359
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
360
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
361
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
362
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
363
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
364
+ (relu): ReLU(inplace=True)
365
+ )
366
+ (4): Bottleneck(
367
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
368
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
369
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
370
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
371
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
372
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
373
+ (relu): ReLU(inplace=True)
374
+ )
375
+ (5): Bottleneck(
376
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
377
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
378
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
379
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
380
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
381
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
382
+ (relu): ReLU(inplace=True)
383
+ )
384
+ (6): Bottleneck(
385
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
386
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
387
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
388
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
389
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
390
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
391
+ (relu): ReLU(inplace=True)
392
+ )
393
+ (7): Bottleneck(
394
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
395
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
396
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
397
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
398
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
399
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
400
+ (relu): ReLU(inplace=True)
401
+ )
402
+ (8): Bottleneck(
403
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
404
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
405
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
406
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
407
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
408
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
409
+ (relu): ReLU(inplace=True)
410
+ )
411
+ (9): Bottleneck(
412
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
413
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
414
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
415
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
416
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
417
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
418
+ (relu): ReLU(inplace=True)
419
+ )
420
+ (10): Bottleneck(
421
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
422
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
423
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
424
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
425
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
426
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
427
+ (relu): ReLU(inplace=True)
428
+ )
429
+ (11): Bottleneck(
430
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
431
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
432
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
433
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
434
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
435
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
436
+ (relu): ReLU(inplace=True)
437
+ )
438
+ (12): Bottleneck(
439
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
440
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
441
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
442
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
443
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
444
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
445
+ (relu): ReLU(inplace=True)
446
+ )
447
+ (13): Bottleneck(
448
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
449
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
450
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
451
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
452
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
453
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
454
+ (relu): ReLU(inplace=True)
455
+ )
456
+ (14): Bottleneck(
457
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
458
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
459
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
460
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
461
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
462
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
463
+ (relu): ReLU(inplace=True)
464
+ )
465
+ (15): Bottleneck(
466
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
467
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
468
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
469
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
470
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
471
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
472
+ (relu): ReLU(inplace=True)
473
+ )
474
+ (16): Bottleneck(
475
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
476
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
477
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
478
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
479
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
480
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
481
+ (relu): ReLU(inplace=True)
482
+ )
483
+ (17): Bottleneck(
484
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
485
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
486
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
487
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
488
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
489
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
490
+ (relu): ReLU(inplace=True)
491
+ )
492
+ (18): Bottleneck(
493
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
494
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
495
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
496
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
497
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
498
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
499
+ (relu): ReLU(inplace=True)
500
+ )
501
+ (19): Bottleneck(
502
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
503
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
504
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
505
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
506
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
507
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
508
+ (relu): ReLU(inplace=True)
509
+ )
510
+ (20): Bottleneck(
511
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
512
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
513
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
514
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
515
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
516
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
517
+ (relu): ReLU(inplace=True)
518
+ )
519
+ (21): Bottleneck(
520
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
521
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
522
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
523
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
524
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
525
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
526
+ (relu): ReLU(inplace=True)
527
+ )
528
+ (22): Bottleneck(
529
+ (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
530
+ (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
531
+ (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
532
+ (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
533
+ (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
534
+ (bn3): SyncBatchNorm(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
535
+ (relu): ReLU(inplace=True)
536
+ )
537
+ )
538
+ (layer4): ResLayer(
539
+ (0): Bottleneck(
540
+ (conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
541
+ (bn1): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
542
+ (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
543
+ (bn2): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
544
+ (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
545
+ (bn3): SyncBatchNorm(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
546
+ (relu): ReLU(inplace=True)
547
+ (downsample): Sequential(
548
+ (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
549
+ (1): SyncBatchNorm(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
550
+ )
551
+ )
552
+ (1): Bottleneck(
553
+ (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
554
+ (bn1): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
555
+ (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(4, 4), dilation=(4, 4), bias=False)
556
+ (bn2): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
557
+ (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
558
+ (bn3): SyncBatchNorm(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
559
+ (relu): ReLU(inplace=True)
560
+ )
561
+ (2): Bottleneck(
562
+ (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
563
+ (bn1): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
564
+ (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(4, 4), dilation=(4, 4), bias=False)
565
+ (bn2): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
566
+ (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
567
+ (bn3): SyncBatchNorm(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
568
+ (relu): ReLU(inplace=True)
569
+ )
570
+ )
571
+ )
572
+ init_cfg={'type': 'Pretrained', 'checkpoint': 'pretrained/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth'}
573
+ (decode_head): DepthwiseSeparableASPPHeadUnetFCHeadSingleStep(
574
+ input_transform=None, ignore_index=0, align_corners=False
575
+ (loss_decode): CrossEntropyLoss(avg_non_ignore=False)
576
+ (conv_seg): None
577
+ (dropout): Dropout2d(p=0.1, inplace=False)
578
+ (image_pool): Sequential(
579
+ (0): AdaptiveAvgPool2d(output_size=1)
580
+ (1): ConvModule(
581
+ (conv): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
582
+ (bn): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
583
+ (activate): ReLU(inplace=True)
584
+ )
585
+ )
586
+ (aspp_modules): DepthwiseSeparableASPPModule(
587
+ (0): ConvModule(
588
+ (conv): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
589
+ (bn): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
590
+ (activate): ReLU(inplace=True)
591
+ )
592
+ (1): DepthwiseSeparableConvModule(
593
+ (depthwise_conv): ConvModule(
594
+ (conv): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(12, 12), dilation=(12, 12), groups=2048, bias=False)
595
+ (bn): SyncBatchNorm(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
596
+ (activate): ReLU(inplace=True)
597
+ )
598
+ (pointwise_conv): ConvModule(
599
+ (conv): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
600
+ (bn): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
601
+ (activate): ReLU(inplace=True)
602
+ )
603
+ )
604
+ (2): DepthwiseSeparableConvModule(
605
+ (depthwise_conv): ConvModule(
606
+ (conv): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(24, 24), dilation=(24, 24), groups=2048, bias=False)
607
+ (bn): SyncBatchNorm(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
608
+ (activate): ReLU(inplace=True)
609
+ )
610
+ (pointwise_conv): ConvModule(
611
+ (conv): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
612
+ (bn): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
613
+ (activate): ReLU(inplace=True)
614
+ )
615
+ )
616
+ (3): DepthwiseSeparableConvModule(
617
+ (depthwise_conv): ConvModule(
618
+ (conv): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(36, 36), dilation=(36, 36), groups=2048, bias=False)
619
+ (bn): SyncBatchNorm(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
620
+ (activate): ReLU(inplace=True)
621
+ )
622
+ (pointwise_conv): ConvModule(
623
+ (conv): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
624
+ (bn): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
625
+ (activate): ReLU(inplace=True)
626
+ )
627
+ )
628
+ )
629
+ (bottleneck): ConvModule(
630
+ (conv): Conv2d(2560, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
631
+ (bn): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
632
+ (activate): ReLU(inplace=True)
633
+ )
634
+ (c1_bottleneck): ConvModule(
635
+ (conv): Conv2d(256, 48, kernel_size=(1, 1), stride=(1, 1), bias=False)
636
+ (bn): SyncBatchNorm(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
637
+ (activate): ReLU(inplace=True)
638
+ )
639
+ (sep_bottleneck): Sequential(
640
+ (0): DepthwiseSeparableConvModule(
641
+ (depthwise_conv): ConvModule(
642
+ (conv): Conv2d(560, 560, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=560, bias=False)
643
+ (bn): SyncBatchNorm(560, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
644
+ (activate): ReLU(inplace=True)
645
+ )
646
+ (pointwise_conv): ConvModule(
647
+ (conv): Conv2d(560, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
648
+ (bn): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
649
+ (activate): ReLU(inplace=True)
650
+ )
651
+ )
652
+ (1): DepthwiseSeparableConvModule(
653
+ (depthwise_conv): ConvModule(
654
+ (conv): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=512, bias=False)
655
+ (bn): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
656
+ (activate): ReLU(inplace=True)
657
+ )
658
+ (pointwise_conv): ConvModule(
659
+ (conv): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
660
+ (bn): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
661
+ (activate): ReLU(inplace=True)
662
+ )
663
+ )
664
+ )
665
+ (unet): Unet(
666
+ (init_conv): Conv2d(528, 256, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3))
667
+ (time_mlp): Sequential(
668
+ (0): SinusoidalPosEmb()
669
+ (1): Linear(in_features=256, out_features=1024, bias=True)
670
+ (2): GELU(approximate='none')
671
+ (3): Linear(in_features=1024, out_features=1024, bias=True)
672
+ )
673
+ (downs): ModuleList(
674
+ (0): ModuleList(
675
+ (0): ResnetBlock(
676
+ (mlp): Sequential(
677
+ (0): SiLU()
678
+ (1): Linear(in_features=1024, out_features=512, bias=True)
679
+ )
680
+ (block1): Block(
681
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
682
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
683
+ (act): SiLU()
684
+ )
685
+ (block2): Block(
686
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
687
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
688
+ (act): SiLU()
689
+ )
690
+ (res_conv): Identity()
691
+ )
692
+ (1): ResnetBlock(
693
+ (mlp): Sequential(
694
+ (0): SiLU()
695
+ (1): Linear(in_features=1024, out_features=512, bias=True)
696
+ )
697
+ (block1): Block(
698
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
699
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
700
+ (act): SiLU()
701
+ )
702
+ (block2): Block(
703
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
704
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
705
+ (act): SiLU()
706
+ )
707
+ (res_conv): Identity()
708
+ )
709
+ (2): Residual(
710
+ (fn): PreNorm(
711
+ (fn): LinearAttention(
712
+ (to_qkv): Conv2d(256, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
713
+ (to_out): Sequential(
714
+ (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1))
715
+ (1): LayerNorm()
716
+ )
717
+ )
718
+ (norm): LayerNorm()
719
+ )
720
+ )
721
+ (3): Conv2d(256, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
722
+ )
723
+ (1): ModuleList(
724
+ (0): ResnetBlock(
725
+ (mlp): Sequential(
726
+ (0): SiLU()
727
+ (1): Linear(in_features=1024, out_features=512, bias=True)
728
+ )
729
+ (block1): Block(
730
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
731
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
732
+ (act): SiLU()
733
+ )
734
+ (block2): Block(
735
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
736
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
737
+ (act): SiLU()
738
+ )
739
+ (res_conv): Identity()
740
+ )
741
+ (1): ResnetBlock(
742
+ (mlp): Sequential(
743
+ (0): SiLU()
744
+ (1): Linear(in_features=1024, out_features=512, bias=True)
745
+ )
746
+ (block1): Block(
747
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
748
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
749
+ (act): SiLU()
750
+ )
751
+ (block2): Block(
752
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
753
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
754
+ (act): SiLU()
755
+ )
756
+ (res_conv): Identity()
757
+ )
758
+ (2): Residual(
759
+ (fn): PreNorm(
760
+ (fn): LinearAttention(
761
+ (to_qkv): Conv2d(256, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
762
+ (to_out): Sequential(
763
+ (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1))
764
+ (1): LayerNorm()
765
+ )
766
+ )
767
+ (norm): LayerNorm()
768
+ )
769
+ )
770
+ (3): Conv2d(256, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
771
+ )
772
+ (2): ModuleList(
773
+ (0): ResnetBlock(
774
+ (mlp): Sequential(
775
+ (0): SiLU()
776
+ (1): Linear(in_features=1024, out_features=512, bias=True)
777
+ )
778
+ (block1): Block(
779
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
780
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
781
+ (act): SiLU()
782
+ )
783
+ (block2): Block(
784
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
785
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
786
+ (act): SiLU()
787
+ )
788
+ (res_conv): Identity()
789
+ )
790
+ (1): ResnetBlock(
791
+ (mlp): Sequential(
792
+ (0): SiLU()
793
+ (1): Linear(in_features=1024, out_features=512, bias=True)
794
+ )
795
+ (block1): Block(
796
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
797
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
798
+ (act): SiLU()
799
+ )
800
+ (block2): Block(
801
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
802
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
803
+ (act): SiLU()
804
+ )
805
+ (res_conv): Identity()
806
+ )
807
+ (2): Residual(
808
+ (fn): PreNorm(
809
+ (fn): LinearAttention(
810
+ (to_qkv): Conv2d(256, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
811
+ (to_out): Sequential(
812
+ (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1))
813
+ (1): LayerNorm()
814
+ )
815
+ )
816
+ (norm): LayerNorm()
817
+ )
818
+ )
819
+ (3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
820
+ )
821
+ )
822
+ (ups): ModuleList(
823
+ (0): ModuleList(
824
+ (0): ResnetBlock(
825
+ (mlp): Sequential(
826
+ (0): SiLU()
827
+ (1): Linear(in_features=1024, out_features=512, bias=True)
828
+ )
829
+ (block1): Block(
830
+ (proj): WeightStandardizedConv2d(512, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
831
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
832
+ (act): SiLU()
833
+ )
834
+ (block2): Block(
835
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
836
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
837
+ (act): SiLU()
838
+ )
839
+ (res_conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1))
840
+ )
841
+ (1): ResnetBlock(
842
+ (mlp): Sequential(
843
+ (0): SiLU()
844
+ (1): Linear(in_features=1024, out_features=512, bias=True)
845
+ )
846
+ (block1): Block(
847
+ (proj): WeightStandardizedConv2d(512, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
848
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
849
+ (act): SiLU()
850
+ )
851
+ (block2): Block(
852
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
853
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
854
+ (act): SiLU()
855
+ )
856
+ (res_conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1))
857
+ )
858
+ (2): Residual(
859
+ (fn): PreNorm(
860
+ (fn): LinearAttention(
861
+ (to_qkv): Conv2d(256, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
862
+ (to_out): Sequential(
863
+ (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1))
864
+ (1): LayerNorm()
865
+ )
866
+ )
867
+ (norm): LayerNorm()
868
+ )
869
+ )
870
+ (3): Sequential(
871
+ (0): Upsample(scale_factor=2.0, mode=nearest)
872
+ (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
873
+ )
874
+ )
875
+ (1): ModuleList(
876
+ (0): ResnetBlock(
877
+ (mlp): Sequential(
878
+ (0): SiLU()
879
+ (1): Linear(in_features=1024, out_features=512, bias=True)
880
+ )
881
+ (block1): Block(
882
+ (proj): WeightStandardizedConv2d(512, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
883
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
884
+ (act): SiLU()
885
+ )
886
+ (block2): Block(
887
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
888
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
889
+ (act): SiLU()
890
+ )
891
+ (res_conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1))
892
+ )
893
+ (1): ResnetBlock(
894
+ (mlp): Sequential(
895
+ (0): SiLU()
896
+ (1): Linear(in_features=1024, out_features=512, bias=True)
897
+ )
898
+ (block1): Block(
899
+ (proj): WeightStandardizedConv2d(512, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
900
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
901
+ (act): SiLU()
902
+ )
903
+ (block2): Block(
904
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
905
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
906
+ (act): SiLU()
907
+ )
908
+ (res_conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1))
909
+ )
910
+ (2): Residual(
911
+ (fn): PreNorm(
912
+ (fn): LinearAttention(
913
+ (to_qkv): Conv2d(256, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
914
+ (to_out): Sequential(
915
+ (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1))
916
+ (1): LayerNorm()
917
+ )
918
+ )
919
+ (norm): LayerNorm()
920
+ )
921
+ )
922
+ (3): Sequential(
923
+ (0): Upsample(scale_factor=2.0, mode=nearest)
924
+ (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
925
+ )
926
+ )
927
+ (2): ModuleList(
928
+ (0): ResnetBlock(
929
+ (mlp): Sequential(
930
+ (0): SiLU()
931
+ (1): Linear(in_features=1024, out_features=512, bias=True)
932
+ )
933
+ (block1): Block(
934
+ (proj): WeightStandardizedConv2d(512, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
935
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
936
+ (act): SiLU()
937
+ )
938
+ (block2): Block(
939
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
940
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
941
+ (act): SiLU()
942
+ )
943
+ (res_conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1))
944
+ )
945
+ (1): ResnetBlock(
946
+ (mlp): Sequential(
947
+ (0): SiLU()
948
+ (1): Linear(in_features=1024, out_features=512, bias=True)
949
+ )
950
+ (block1): Block(
951
+ (proj): WeightStandardizedConv2d(512, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
952
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
953
+ (act): SiLU()
954
+ )
955
+ (block2): Block(
956
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
957
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
958
+ (act): SiLU()
959
+ )
960
+ (res_conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1))
961
+ )
962
+ (2): Residual(
963
+ (fn): PreNorm(
964
+ (fn): LinearAttention(
965
+ (to_qkv): Conv2d(256, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
966
+ (to_out): Sequential(
967
+ (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1))
968
+ (1): LayerNorm()
969
+ )
970
+ )
971
+ (norm): LayerNorm()
972
+ )
973
+ )
974
+ (3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
975
+ )
976
+ )
977
+ (mid_block1): ResnetBlock(
978
+ (mlp): Sequential(
979
+ (0): SiLU()
980
+ (1): Linear(in_features=1024, out_features=512, bias=True)
981
+ )
982
+ (block1): Block(
983
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
984
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
985
+ (act): SiLU()
986
+ )
987
+ (block2): Block(
988
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
989
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
990
+ (act): SiLU()
991
+ )
992
+ (res_conv): Identity()
993
+ )
994
+ (mid_attn): Residual(
995
+ (fn): PreNorm(
996
+ (fn): Attention(
997
+ (to_qkv): Conv2d(256, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
998
+ (to_out): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1))
999
+ )
1000
+ (norm): LayerNorm()
1001
+ )
1002
+ )
1003
+ (mid_block2): ResnetBlock(
1004
+ (mlp): Sequential(
1005
+ (0): SiLU()
1006
+ (1): Linear(in_features=1024, out_features=512, bias=True)
1007
+ )
1008
+ (block1): Block(
1009
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
1010
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
1011
+ (act): SiLU()
1012
+ )
1013
+ (block2): Block(
1014
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
1015
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
1016
+ (act): SiLU()
1017
+ )
1018
+ (res_conv): Identity()
1019
+ )
1020
+ (final_res_block): ResnetBlock(
1021
+ (mlp): Sequential(
1022
+ (0): SiLU()
1023
+ (1): Linear(in_features=1024, out_features=512, bias=True)
1024
+ )
1025
+ (block1): Block(
1026
+ (proj): WeightStandardizedConv2d(512, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
1027
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
1028
+ (act): SiLU()
1029
+ )
1030
+ (block2): Block(
1031
+ (proj): WeightStandardizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
1032
+ (norm): GroupNorm(8, 256, eps=1e-05, affine=True)
1033
+ (act): SiLU()
1034
+ )
1035
+ (res_conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1))
1036
+ )
1037
+ (final_conv): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))
1038
+ )
1039
+ (conv_seg_new): Conv2d(256, 20, kernel_size=(1, 1), stride=(1, 1))
1040
+ (embed): Embedding(20, 16)
1041
+ )
1042
+ init_cfg={'type': 'Pretrained', 'checkpoint': 'pretrained/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth'}
1043
+ )
1044
+ 2023-03-03 20:38:40,578 - mmseg - INFO - Loaded 2975 images
1045
+ 2023-03-03 20:38:41,636 - mmseg - INFO - Loaded 500 images
1046
+ 2023-03-03 20:38:41,642 - mmseg - INFO - Start running, host: laizeqiang@SH-IDC1-10-140-37-151, work_dir: /mnt/petrelfs/laizeqiang/mmseg-baseline/work_dirs2/deeplabv3plus_r101-d8_aspp_head_unet_fc_small_single_step_cityscapes_pretrained_freeze_embed_80k_cityscapes20
1047
+ 2023-03-03 20:38:41,642 - mmseg - INFO - Hooks will be executed in the following order:
1048
+ before_run:
1049
+ (VERY_HIGH ) StepLrUpdaterHook
1050
+ (NORMAL ) CheckpointHook
1051
+ (LOW ) DistEvalHookMultiSteps
1052
+ (VERY_LOW ) TextLoggerHook
1053
+ --------------------
1054
+ before_train_epoch:
1055
+ (VERY_HIGH ) StepLrUpdaterHook
1056
+ (LOW ) IterTimerHook
1057
+ (LOW ) DistEvalHookMultiSteps
1058
+ (VERY_LOW ) TextLoggerHook
1059
+ --------------------
1060
+ before_train_iter:
1061
+ (VERY_HIGH ) StepLrUpdaterHook
1062
+ (LOW ) IterTimerHook
1063
+ (LOW ) DistEvalHookMultiSteps
1064
+ --------------------
1065
+ after_train_iter:
1066
+ (ABOVE_NORMAL) OptimizerHook
1067
+ (NORMAL ) CheckpointHook
1068
+ (LOW ) IterTimerHook
1069
+ (LOW ) DistEvalHookMultiSteps
1070
+ (VERY_LOW ) TextLoggerHook
1071
+ --------------------
1072
+ after_train_epoch:
1073
+ (NORMAL ) CheckpointHook
1074
+ (LOW ) DistEvalHookMultiSteps
1075
+ (VERY_LOW ) TextLoggerHook
1076
+ --------------------
1077
+ before_val_epoch:
1078
+ (LOW ) IterTimerHook
1079
+ (VERY_LOW ) TextLoggerHook
1080
+ --------------------
1081
+ before_val_iter:
1082
+ (LOW ) IterTimerHook
1083
+ --------------------
1084
+ after_val_iter:
1085
+ (LOW ) IterTimerHook
1086
+ --------------------
1087
+ after_val_epoch:
1088
+ (VERY_LOW ) TextLoggerHook
1089
+ --------------------
1090
+ after_run:
1091
+ (VERY_LOW ) TextLoggerHook
1092
+ --------------------
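Within each stage above, hooks run in ascending order of their priority value (smaller runs earlier). A small sketch reproducing the after_train_iter ordering; the numeric levels are assumed from the MMCV 1.x Priority enum:

# Assumed MMCV 1.x priority levels (mmcv.runner.Priority); smaller value runs earlier.
PRIORITY = {"VERY_HIGH": 10, "ABOVE_NORMAL": 40, "NORMAL": 50, "LOW": 70, "VERY_LOW": 90}

after_train_iter = [
    ("CheckpointHook", "NORMAL"),
    ("TextLoggerHook", "VERY_LOW"),
    ("OptimizerHook", "ABOVE_NORMAL"),
    ("IterTimerHook", "LOW"),
    ("DistEvalHookMultiSteps", "LOW"),
]
for name, level in sorted(after_train_iter, key=lambda h: PRIORITY[h[1]]):
    print(f"({level:<13}) {name}")   # matches the order listed in the log above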
1093
+ 2023-03-03 20:38:41,642 - mmseg - INFO - workflow: [('train', 1)], max: 80000 iters
1094
+ 2023-03-03 20:38:41,642 - mmseg - INFO - Checkpoints will be saved to /mnt/petrelfs/laizeqiang/mmseg-baseline/work_dirs2/deeplabv3plus_r101-d8_aspp_head_unet_fc_small_single_step_cityscapes_pretrained_freeze_embed_80k_cityscapes20 by HardDiskBackend.
1095
+ 2023-03-03 20:39:26,258 - mmseg - INFO - Iter [50/80000] lr: 7.350e-06, eta: 14:40:40, time: 0.661, data_time: 0.014, memory: 67605, decode.loss_ce: 1.8576, decode.acc_seg: 63.8241, loss: 1.8576
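As a quick consistency check, the reported eta is essentially (max_iters - iter) multiplied by the time per iteration; plugging in the numbers from the line above:

# Values taken from the log line above: 0.661 s/iter, 50 of 80000 iters done.
time_per_iter = 0.661
remaining_iters = 80000 - 50
eta = int(remaining_iters * time_per_iter)      # ~52847 s
h, rem = divmod(eta, 3600)
m, s = divmod(rem, 60)
print(f"estimated eta: {h}:{m:02d}:{s:02d}")    # ~14:40:46, in line with the logged 14:40:40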
cityscapes/deeplabv3plus_r101_singlestep/20230303_203832.log.json ADDED
@@ -0,0 +1,2 @@
 
1
+ {"env_info": "sys.platform: linux\nPython: 3.7.16 (default, Jan 17 2023, 22:20:44) [GCC 11.2.0]\nCUDA available: True\nGPU 0,1,2,3,4,5,6,7: NVIDIA A100-SXM4-80GB\nCUDA_HOME: /mnt/petrelfs/laizeqiang/miniconda3/envs/torch\nNVCC: Cuda compilation tools, release 11.6, V11.6.124\nGCC: gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-44)\nPyTorch: 1.13.1\nPyTorch compiling details: PyTorch built with:\n - GCC 9.3\n - C++ Version: 201402\n - Intel(R) oneAPI Math Kernel Library Version 2021.4-Product Build 20210904 for Intel(R) 64 architecture applications\n - Intel(R) MKL-DNN v2.6.0 (Git Hash 52b5f107dd9cf10910aaa19cb47f3abf9b349815)\n - OpenMP 201511 (a.k.a. OpenMP 4.5)\n - LAPACK is enabled (usually provided by MKL)\n - NNPACK is enabled\n - CPU capability usage: AVX2\n - CUDA Runtime 11.6\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_37,code=compute_37\n - CuDNN 8.3.2 (built against CUDA 11.5)\n - Magma 2.6.1\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.6, CUDNN_VERSION=8.3.2, CXX_COMPILER=/opt/rh/devtoolset-9/root/usr/bin/c++, CXX_FLAGS= -fabi-version=11 -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wunused-local-typedefs -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.13.1, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, \n\nTorchVision: 0.14.1\nOpenCV: 4.7.0\nMMCV: 1.7.1\nMMCV Compiler: GCC 9.3\nMMCV CUDA Compiler: 11.6\nMMSegmentation: 0.30.0+c844fc6", "seed": 835892801, "exp_name": "deeplabv3plus_r101-d8_aspp_head_unet_fc_small_single_step_cityscapes_pretrained_freeze_embed_80k_cityscapes20.py", "mmseg_version": "0.30.0+c844fc6", "config": "norm_cfg = dict(type='SyncBN', requires_grad=True)\nmodel = dict(\n type='EncoderDecoderFreeze',\n pretrained=\n 'pretrained/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth',\n backbone=dict(\n type='ResNetV1cCustomInitWeights',\n depth=101,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n dilations=(1, 1, 2, 4),\n strides=(1, 2, 1, 1),\n norm_cfg=dict(type='SyncBN', requires_grad=True),\n norm_eval=False,\n style='pytorch',\n contract_dilation=True,\n pretrained=\n 'pretrained/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth'\n ),\n decode_head=dict(\n type='DepthwiseSeparableASPPHeadUnetFCHeadSingleStep',\n pretrained=\n 
'pretrained/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth',\n dim=256,\n out_dim=256,\n unet_channels=528,\n dim_mults=[1, 1, 1],\n cat_embedding_dim=16,\n ignore_index=0,\n in_channels=2048,\n in_index=3,\n channels=512,\n dilations=(1, 12, 24, 36),\n c1_in_channels=256,\n c1_channels=48,\n dropout_ratio=0.1,\n num_classes=20,\n norm_cfg=dict(type='SyncBN', requires_grad=True),\n align_corners=False,\n loss_decode=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),\n auxiliary_head=None,\n train_cfg=dict(),\n test_cfg=dict(mode='whole'),\n freeze_parameters=['backbone', 'decode_head'])\ndataset_type = 'Cityscapes20Dataset'\ndata_root = 'data/cityscapes/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ncrop_size = (512, 1024)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotationsCityscapes20'),\n dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=0),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_semantic_seg'])\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n]\ndata = dict(\n samples_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type='Cityscapes20Dataset',\n data_root='data/cityscapes/',\n img_dir='leftImg8bit/train',\n ann_dir='gtFine/train',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotationsCityscapes20'),\n dict(\n type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=0),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_semantic_seg'])\n ]),\n val=dict(\n type='Cityscapes20Dataset',\n data_root='data/cityscapes/',\n img_dir='leftImg8bit/val',\n ann_dir='gtFine/val',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ]),\n test=dict(\n type='Cityscapes20Dataset',\n data_root='data/cityscapes/',\n img_dir='leftImg8bit/val',\n ann_dir='gtFine/val',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 
57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ]))\nlog_config = dict(\n interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)])\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from = None\nresume_from = None\nworkflow = [('train', 1)]\ncudnn_benchmark = True\noptimizer = dict(\n type='AdamW', lr=0.00015, betas=[0.9, 0.96], weight_decay=0.045)\noptimizer_config = dict()\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=1000,\n warmup_ratio=1e-06,\n step=10000,\n gamma=0.5,\n min_lr=1e-06,\n by_epoch=False)\nrunner = dict(type='IterBasedRunner', max_iters=80000)\ncheckpoint_config = dict(by_epoch=False, interval=8000, max_keep_ckpts=1)\nevaluation = dict(\n interval=8000, metric='mIoU', pre_eval=True, save_best='mIoU')\ncheckpoint = 'pretrained/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth'\nwork_dir = './work_dirs2/deeplabv3plus_r101-d8_aspp_head_unet_fc_small_single_step_cityscapes_pretrained_freeze_embed_80k_cityscapes20'\ngpu_ids = range(0, 8)\nauto_resume = True\ndevice = 'cuda'\nseed = 835892801\n", "CLASSES": ["background", "road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle"], "PALETTE": [[0, 0, 0], [128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], [0, 0, 230], [119, 11, 32]], "hook_msgs": {}}
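The learning rate of 7.350e-06 logged at iter 50 in the text log follows from the linear warmup in the lr_config above (base lr 1.5e-4, warmup_iters=1000, warmup_ratio=1e-06). A short check; the scaling formula is assumed from the MMCV 1.x LrUpdaterHook:

# Assumed MMCV 1.x linear warmup: lr = base_lr * (1 - (1 - t / warmup_iters) * (1 - warmup_ratio))
base_lr, warmup_iters, warmup_ratio = 1.5e-4, 1000, 1e-06
for t in (49, 50):
    k = (1 - t / warmup_iters) * (1 - warmup_ratio)
    print(t, base_lr * (1 - k))   # ~7.35e-06 at t=49, close to the logged 7.350e-06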
+ {"mode": "train", "epoch": 1, "iter": 50, "lr": 1e-05, "memory": 67605, "data_time": 0.01446, "decode.loss_ce": 1.8576, "decode.acc_seg": 63.82413, "loss": 1.8576, "time": 0.66091}
cityscapes/deeplabv3plus_r101_singlestep/20230303_203945.log ADDED
The diff for this file is too large to render. See raw diff
 
cityscapes/deeplabv3plus_r101_singlestep/20230303_203945.log.json ADDED
The diff for this file is too large to render. See raw diff
 
cityscapes/deeplabv3plus_r101_singlestep/best_mIoU_iter_64000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94a167fa82fdd9b5886f3ecfb64b061bb096c43d88edbe7eca35a6f0947bed8d
+ size 770176920
cityscapes/deeplabv3plus_r101_singlestep/deeplabv3plus_r101-d8_aspp_head_unet_fc_small_single_step_cityscapes_pretrained_freeze_embed_80k_cityscapes20.py ADDED
@@ -0,0 +1,180 @@
+ norm_cfg = dict(type='SyncBN', requires_grad=True)
+ model = dict(
+     type='EncoderDecoderFreeze',
+     pretrained=
+     'pretrained/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth',
+     backbone=dict(
+         type='ResNetV1cCustomInitWeights',
+         depth=101,
+         num_stages=4,
+         out_indices=(0, 1, 2, 3),
+         dilations=(1, 1, 2, 4),
+         strides=(1, 2, 1, 1),
+         norm_cfg=dict(type='SyncBN', requires_grad=True),
+         norm_eval=False,
+         style='pytorch',
+         contract_dilation=True),
+     decode_head=dict(
+         type='DepthwiseSeparableASPPHeadUnetFCHeadSingleStep',
+         pretrained=
+         'pretrained/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth',
+         dim=256,
+         out_dim=256,
+         unet_channels=528,
+         dim_mults=[1, 1, 1],
+         cat_embedding_dim=16,
+         ignore_index=0,
+         in_channels=2048,
+         in_index=3,
+         channels=512,
+         dilations=(1, 12, 24, 36),
+         c1_in_channels=256,
+         c1_channels=48,
+         dropout_ratio=0.1,
+         num_classes=20,
+         norm_cfg=dict(type='SyncBN', requires_grad=True),
+         align_corners=False,
+         loss_decode=dict(
+             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+     auxiliary_head=None,
+     train_cfg=dict(),
+     test_cfg=dict(mode='whole'),
+     freeze_parameters=['backbone', 'decode_head'])
+ dataset_type = 'Cityscapes20Dataset'
+ data_root = 'data/cityscapes/'
+ img_norm_cfg = dict(
+     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+ crop_size = (512, 1024)
+ train_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(type='LoadAnnotationsCityscapes20'),
+     dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
+     dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75),
+     dict(type='RandomFlip', prob=0.5),
+     dict(type='PhotoMetricDistortion'),
+     dict(
+         type='Normalize',
+         mean=[123.675, 116.28, 103.53],
+         std=[58.395, 57.12, 57.375],
+         to_rgb=True),
+     dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=0),
+     dict(type='DefaultFormatBundle'),
+     dict(type='Collect', keys=['img', 'gt_semantic_seg'])
+ ]
+ test_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(
+         type='MultiScaleFlipAug',
+         img_scale=(2048, 1024),
+         flip=False,
+         transforms=[
+             dict(type='Resize', keep_ratio=True),
+             dict(type='RandomFlip'),
+             dict(
+                 type='Normalize',
+                 mean=[123.675, 116.28, 103.53],
+                 std=[58.395, 57.12, 57.375],
+                 to_rgb=True),
+             dict(type='ImageToTensor', keys=['img']),
+             dict(type='Collect', keys=['img'])
+         ])
+ ]
+ data = dict(
+     samples_per_gpu=2,
+     workers_per_gpu=2,
+     train=dict(
+         type='Cityscapes20Dataset',
+         data_root='data/cityscapes/',
+         img_dir='leftImg8bit/train',
+         ann_dir='gtFine/train',
+         pipeline=[
+             dict(type='LoadImageFromFile'),
+             dict(type='LoadAnnotationsCityscapes20'),
+             dict(
+                 type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
+             dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75),
+             dict(type='RandomFlip', prob=0.5),
+             dict(type='PhotoMetricDistortion'),
+             dict(
+                 type='Normalize',
+                 mean=[123.675, 116.28, 103.53],
+                 std=[58.395, 57.12, 57.375],
+                 to_rgb=True),
+             dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=0),
+             dict(type='DefaultFormatBundle'),
+             dict(type='Collect', keys=['img', 'gt_semantic_seg'])
+         ]),
+     val=dict(
+         type='Cityscapes20Dataset',
+         data_root='data/cityscapes/',
+         img_dir='leftImg8bit/val',
+         ann_dir='gtFine/val',
+         pipeline=[
+             dict(type='LoadImageFromFile'),
+             dict(
+                 type='MultiScaleFlipAug',
+                 img_scale=(2048, 1024),
+                 flip=False,
+                 transforms=[
+                     dict(type='Resize', keep_ratio=True),
+                     dict(type='RandomFlip'),
+                     dict(
+                         type='Normalize',
+                         mean=[123.675, 116.28, 103.53],
+                         std=[58.395, 57.12, 57.375],
+                         to_rgb=True),
+                     dict(type='ImageToTensor', keys=['img']),
+                     dict(type='Collect', keys=['img'])
+                 ])
+         ]),
+     test=dict(
+         type='Cityscapes20Dataset',
+         data_root='data/cityscapes/',
+         img_dir='leftImg8bit/val',
+         ann_dir='gtFine/val',
+         pipeline=[
+             dict(type='LoadImageFromFile'),
+             dict(
+                 type='MultiScaleFlipAug',
+                 img_scale=(2048, 1024),
+                 flip=False,
+                 transforms=[
+                     dict(type='Resize', keep_ratio=True),
+                     dict(type='RandomFlip'),
+                     dict(
+                         type='Normalize',
+                         mean=[123.675, 116.28, 103.53],
+                         std=[58.395, 57.12, 57.375],
+                         to_rgb=True),
+                     dict(type='ImageToTensor', keys=['img']),
+                     dict(type='Collect', keys=['img'])
+                 ])
+         ]))
+ log_config = dict(
+     interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)])
+ dist_params = dict(backend='nccl')
+ log_level = 'INFO'
+ load_from = None
+ resume_from = None
+ workflow = [('train', 1)]
+ cudnn_benchmark = True
+ optimizer = dict(
+     type='AdamW', lr=0.00015, betas=[0.9, 0.96], weight_decay=0.045)
+ optimizer_config = dict()
+ lr_config = dict(
+     policy='step',
+     warmup='linear',
+     warmup_iters=1000,
+     warmup_ratio=1e-06,
+     step=10000,
+     gamma=0.5,
+     min_lr=1e-06,
+     by_epoch=False)
+ runner = dict(type='IterBasedRunner', max_iters=80000)
+ checkpoint_config = dict(by_epoch=False, interval=8000, max_keep_ckpts=1)
+ evaluation = dict(
+     interval=8000, metric='mIoU', pre_eval=True, save_best='mIoU')
+ checkpoint = 'pretrained/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth'
+ work_dir = './work_dirs2/deeplabv3plus_r101-d8_aspp_head_unet_fc_small_single_step_cityscapes_pretrained_freeze_embed_80k_cityscapes20'
+ gpu_ids = range(0, 8)
+ auto_resume = True
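Usage note (added for orientation; not part of the committed files): the configs in this repository follow the MMSegmentation 0.x format, so — assuming the companion codebase that defines the custom components referenced above (EncoderDecoderFreeze, DepthwiseSeparableASPPHeadUnetFCHeadSingleStep, Cityscapes20Dataset, etc.) is installed and imported so its registries are populated — a config/checkpoint pair can typically be loaded for inference as in the hedged sketch below; the demo image path is a placeholder.

# Minimal inference sketch (MMSegmentation 0.x API); assumes the custom
# modules used by the config have already been imported/registered.
from mmseg.apis import inference_segmentor, init_segmentor

config_file = (
    'cityscapes/deeplabv3plus_r101_singlestep/'
    'deeplabv3plus_r101-d8_aspp_head_unet_fc_small_single_step_cityscapes_pretrained_freeze_embed_80k_cityscapes20.py')
checkpoint_file = 'cityscapes/deeplabv3plus_r101_singlestep/best_mIoU_iter_64000.pth'

model = init_segmentor(config_file, checkpoint_file, device='cuda:0')  # build model and load weights
result = inference_segmentor(model, 'demo.png')  # placeholder image path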
cityscapes/deeplabv3plus_r101_singlestep/iter_80000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4943fa91e507e7d1b5e8bd0e3fbff5589b6b8def3dc7f5aa7e9082c82810aaca
+ size 770176920
cityscapes/deeplabv3plus_r101_singlestep/latest.pth ADDED
@@ -0,0 +1 @@
+ iter_80000.pth
cityscapes/deeplabv3plus_r50_multistep/20230303_205044.log ADDED
The diff for this file is too large to render. See raw diff
 
cityscapes/deeplabv3plus_r50_multistep/20230303_205044.log.json ADDED
The diff for this file is too large to render. See raw diff
 
cityscapes/deeplabv3plus_r50_multistep/best_mIoU_iter_96000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2f5b869e270b4b53794616b43c02c479d797d005f43af09e3b0c112e86ea77d
+ size 537720523
cityscapes/deeplabv3plus_r50_multistep/deeplabv3plus_r50-d8_aspp_head_unet_fc_multi_step_ade_pretrained_freeze_embed_160k_cityscapes20_finetune.py ADDED
@@ -0,0 +1,191 @@
+ norm_cfg = dict(type='SyncBN', requires_grad=True)
+ model = dict(
+     type='EncoderDecoderDiffusion',
+     pretrained=
+     'work_dirs2/deeplabv3plus_r50-d8_aspp_head_unet_fc_single_step_ade_pretrained_freeze_embed_80k_cityscapes20/best_mIoU_iter_72000.pth',
+     backbone=dict(
+         type='ResNetV1cCustomInitWeights',
+         depth=50,
+         num_stages=4,
+         out_indices=(0, 1, 2, 3),
+         dilations=(1, 1, 2, 4),
+         strides=(1, 2, 1, 1),
+         norm_cfg=dict(type='SyncBN', requires_grad=True),
+         norm_eval=False,
+         style='pytorch',
+         contract_dilation=True),
+     decode_head=dict(
+         type='DepthwiseSeparableASPPHeadUnetFCHeadMultiStep',
+         pretrained=
+         'work_dirs2/deeplabv3plus_r50-d8_aspp_head_unet_fc_single_step_ade_pretrained_freeze_embed_80k_cityscapes20/best_mIoU_iter_72000.pth',
+         dim=128,
+         out_dim=256,
+         unet_channels=528,
+         dim_mults=[1, 1, 1],
+         cat_embedding_dim=16,
+         ignore_index=0,
+         diffusion_timesteps=100,
+         collect_timesteps=[0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 99],
+         in_channels=2048,
+         in_index=3,
+         channels=512,
+         dilations=(1, 12, 24, 36),
+         c1_in_channels=256,
+         c1_channels=48,
+         dropout_ratio=0.1,
+         num_classes=20,
+         norm_cfg=dict(type='SyncBN', requires_grad=True),
+         align_corners=False,
+         loss_decode=dict(
+             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+     auxiliary_head=None,
+     train_cfg=dict(),
+     test_cfg=dict(mode='whole'),
+     freeze_parameters=['backbone', 'decode_head'])
+ dataset_type = 'Cityscapes20Dataset'
+ data_root = 'data/cityscapes/'
+ img_norm_cfg = dict(
+     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+ crop_size = (512, 1024)
+ train_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(type='LoadAnnotationsCityscapes20'),
+     dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
+     dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75),
+     dict(type='RandomFlip', prob=0.5),
+     dict(type='PhotoMetricDistortion'),
+     dict(
+         type='Normalize',
+         mean=[123.675, 116.28, 103.53],
+         std=[58.395, 57.12, 57.375],
+         to_rgb=True),
+     dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=0),
+     dict(type='DefaultFormatBundle'),
+     dict(type='Collect', keys=['img', 'gt_semantic_seg'])
+ ]
+ test_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(
+         type='MultiScaleFlipAug',
+         img_scale=(2048, 1024),
+         flip=False,
+         transforms=[
+             dict(type='Resize', keep_ratio=True),
+             dict(type='RandomFlip'),
+             dict(
+                 type='Normalize',
+                 mean=[123.675, 116.28, 103.53],
+                 std=[58.395, 57.12, 57.375],
+                 to_rgb=True),
+             dict(type='ImageToTensor', keys=['img']),
+             dict(type='Collect', keys=['img'])
+         ])
+ ]
+ data = dict(
+     samples_per_gpu=2,
+     workers_per_gpu=2,
+     train=dict(
+         type='Cityscapes20Dataset',
+         data_root='data/cityscapes/',
+         img_dir='leftImg8bit/train',
+         ann_dir='gtFine/train',
+         pipeline=[
+             dict(type='LoadImageFromFile'),
+             dict(type='LoadAnnotationsCityscapes20'),
+             dict(
+                 type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
+             dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75),
+             dict(type='RandomFlip', prob=0.5),
+             dict(type='PhotoMetricDistortion'),
+             dict(
+                 type='Normalize',
+                 mean=[123.675, 116.28, 103.53],
+                 std=[58.395, 57.12, 57.375],
+                 to_rgb=True),
+             dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=0),
+             dict(type='DefaultFormatBundle'),
+             dict(type='Collect', keys=['img', 'gt_semantic_seg'])
+         ]),
+     val=dict(
+         type='Cityscapes20Dataset',
+         data_root='data/cityscapes/',
+         img_dir='leftImg8bit/val',
+         ann_dir='gtFine/val',
+         pipeline=[
+             dict(type='LoadImageFromFile'),
+             dict(
+                 type='MultiScaleFlipAug',
+                 img_scale=(2048, 1024),
+                 flip=False,
+                 transforms=[
+                     dict(type='Resize', keep_ratio=True),
+                     dict(type='RandomFlip'),
+                     dict(
+                         type='Normalize',
+                         mean=[123.675, 116.28, 103.53],
+                         std=[58.395, 57.12, 57.375],
+                         to_rgb=True),
+                     dict(type='ImageToTensor', keys=['img']),
+                     dict(type='Collect', keys=['img'])
+                 ])
+         ]),
+     test=dict(
+         type='Cityscapes20Dataset',
+         data_root='data/cityscapes/',
+         img_dir='leftImg8bit/val',
+         ann_dir='gtFine/val',
+         pipeline=[
+             dict(type='LoadImageFromFile'),
+             dict(
+                 type='MultiScaleFlipAug',
+                 img_scale=(2048, 1024),
+                 flip=False,
+                 transforms=[
+                     dict(type='Resize', keep_ratio=True),
+                     dict(type='RandomFlip'),
+                     dict(
+                         type='Normalize',
+                         mean=[123.675, 116.28, 103.53],
+                         std=[58.395, 57.12, 57.375],
+                         to_rgb=True),
+                     dict(type='ImageToTensor', keys=['img']),
+                     dict(type='Collect', keys=['img'])
+                 ])
+         ]))
+ log_config = dict(
+     interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)])
+ dist_params = dict(backend='nccl')
+ log_level = 'INFO'
+ load_from = None
+ resume_from = None
+ workflow = [('train', 1)]
+ cudnn_benchmark = True
+ optimizer = dict(
+     type='AdamW', lr=0.00015, betas=[0.9, 0.96], weight_decay=0.045)
+ optimizer_config = dict()
+ lr_config = dict(
+     policy='step',
+     warmup='linear',
+     warmup_iters=1000,
+     warmup_ratio=1e-06,
+     step=20000,
+     gamma=0.5,
+     min_lr=1e-06,
+     by_epoch=False)
+ runner = dict(type='IterBasedRunner', max_iters=160000)
+ checkpoint_config = dict(by_epoch=False, interval=16000, max_keep_ckpts=1)
+ evaluation = dict(
+     interval=16000, metric='mIoU', pre_eval=True, save_best='mIoU')
+ checkpoint = 'work_dirs2/deeplabv3plus_r50-d8_aspp_head_unet_fc_single_step_ade_pretrained_freeze_embed_80k_cityscapes20/best_mIoU_iter_72000.pth'
+ custom_hooks = [
+     dict(
+         type='ConstantMomentumEMAHook',
+         momentum=0.01,
+         interval=25,
+         eval_interval=16000,
+         auto_resume=True,
+         priority=49)
+ ]
+ work_dir = './work_dirs2/deeplabv3plus_r50-d8_aspp_head_unet_fc_multi_step_ade_pretrained_freeze_embed_160k_cityscapes20_finetune'
+ gpu_ids = range(0, 8)
+ auto_resume = True
cityscapes/deeplabv3plus_r50_multistep/iter_160000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2068deabba26a54a4425374b2188984ce6ac4c2149ebae9281ab1537d5581c5
+ size 537720523
cityscapes/deeplabv3plus_r50_multistep/latest.pth ADDED
@@ -0,0 +1 @@
+ iter_160000.pth
cityscapes/deeplabv3plus_r50_singlestep/20230303_152127.log ADDED
The diff for this file is too large to render. See raw diff
 
cityscapes/deeplabv3plus_r50_singlestep/20230303_152127.log.json ADDED
The diff for this file is too large to render. See raw diff
 
cityscapes/deeplabv3plus_r50_singlestep/best_mIoU_iter_72000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:656a1fbb967cba4bc26173252f63e3742f066481dc3a5c40cf23a4f5710b03d0
+ size 320658122
cityscapes/deeplabv3plus_r50_singlestep/deeplabv3plus_r50-d8_aspp_head_unet_fc_single_step_ade_pretrained_freeze_embed_80k_cityscapes20.py ADDED
@@ -0,0 +1,180 @@
+ norm_cfg = dict(type='SyncBN', requires_grad=True)
+ model = dict(
+     type='EncoderDecoderFreeze',
+     pretrained=
+     'pretrained/deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049-f9fb496d.pth',
+     backbone=dict(
+         type='ResNetV1cCustomInitWeights',
+         depth=50,
+         num_stages=4,
+         out_indices=(0, 1, 2, 3),
+         dilations=(1, 1, 2, 4),
+         strides=(1, 2, 1, 1),
+         norm_cfg=dict(type='SyncBN', requires_grad=True),
+         norm_eval=False,
+         style='pytorch',
+         contract_dilation=True),
+     decode_head=dict(
+         type='DepthwiseSeparableASPPHeadUnetFCHeadSingleStep',
+         pretrained=
+         'pretrained/deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049-f9fb496d.pth',
+         dim=128,
+         out_dim=256,
+         unet_channels=528,
+         dim_mults=[1, 1, 1],
+         cat_embedding_dim=16,
+         ignore_index=0,
+         in_channels=2048,
+         in_index=3,
+         channels=512,
+         dilations=(1, 12, 24, 36),
+         c1_in_channels=256,
+         c1_channels=48,
+         dropout_ratio=0.1,
+         num_classes=20,
+         norm_cfg=dict(type='SyncBN', requires_grad=True),
+         align_corners=False,
+         loss_decode=dict(
+             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+     auxiliary_head=None,
+     train_cfg=dict(),
+     test_cfg=dict(mode='whole'),
+     freeze_parameters=['backbone', 'decode_head'])
+ dataset_type = 'Cityscapes20Dataset'
+ data_root = 'data/cityscapes/'
+ img_norm_cfg = dict(
+     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+ crop_size = (512, 1024)
+ train_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(type='LoadAnnotationsCityscapes20'),
+     dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
+     dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75),
+     dict(type='RandomFlip', prob=0.5),
+     dict(type='PhotoMetricDistortion'),
+     dict(
+         type='Normalize',
+         mean=[123.675, 116.28, 103.53],
+         std=[58.395, 57.12, 57.375],
+         to_rgb=True),
+     dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=0),
+     dict(type='DefaultFormatBundle'),
+     dict(type='Collect', keys=['img', 'gt_semantic_seg'])
+ ]
+ test_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(
+         type='MultiScaleFlipAug',
+         img_scale=(2048, 1024),
+         flip=False,
+         transforms=[
+             dict(type='Resize', keep_ratio=True),
+             dict(type='RandomFlip'),
+             dict(
+                 type='Normalize',
+                 mean=[123.675, 116.28, 103.53],
+                 std=[58.395, 57.12, 57.375],
+                 to_rgb=True),
+             dict(type='ImageToTensor', keys=['img']),
+             dict(type='Collect', keys=['img'])
+         ])
+ ]
+ data = dict(
+     samples_per_gpu=2,
+     workers_per_gpu=2,
+     train=dict(
+         type='Cityscapes20Dataset',
+         data_root='data/cityscapes/',
+         img_dir='leftImg8bit/train',
+         ann_dir='gtFine/train',
+         pipeline=[
+             dict(type='LoadImageFromFile'),
+             dict(type='LoadAnnotationsCityscapes20'),
+             dict(
+                 type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
+             dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75),
+             dict(type='RandomFlip', prob=0.5),
+             dict(type='PhotoMetricDistortion'),
+             dict(
+                 type='Normalize',
+                 mean=[123.675, 116.28, 103.53],
+                 std=[58.395, 57.12, 57.375],
+                 to_rgb=True),
+             dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=0),
+             dict(type='DefaultFormatBundle'),
+             dict(type='Collect', keys=['img', 'gt_semantic_seg'])
+         ]),
+     val=dict(
+         type='Cityscapes20Dataset',
+         data_root='data/cityscapes/',
+         img_dir='leftImg8bit/val',
+         ann_dir='gtFine/val',
+         pipeline=[
+             dict(type='LoadImageFromFile'),
+             dict(
+                 type='MultiScaleFlipAug',
+                 img_scale=(2048, 1024),
+                 flip=False,
+                 transforms=[
+                     dict(type='Resize', keep_ratio=True),
+                     dict(type='RandomFlip'),
+                     dict(
+                         type='Normalize',
+                         mean=[123.675, 116.28, 103.53],
+                         std=[58.395, 57.12, 57.375],
+                         to_rgb=True),
+                     dict(type='ImageToTensor', keys=['img']),
+                     dict(type='Collect', keys=['img'])
+                 ])
+         ]),
+     test=dict(
+         type='Cityscapes20Dataset',
+         data_root='data/cityscapes/',
+         img_dir='leftImg8bit/val',
+         ann_dir='gtFine/val',
+         pipeline=[
+             dict(type='LoadImageFromFile'),
+             dict(
+                 type='MultiScaleFlipAug',
+                 img_scale=(2048, 1024),
+                 flip=False,
+                 transforms=[
+                     dict(type='Resize', keep_ratio=True),
+                     dict(type='RandomFlip'),
+                     dict(
+                         type='Normalize',
+                         mean=[123.675, 116.28, 103.53],
+                         std=[58.395, 57.12, 57.375],
+                         to_rgb=True),
+                     dict(type='ImageToTensor', keys=['img']),
+                     dict(type='Collect', keys=['img'])
+                 ])
+         ]))
+ log_config = dict(
+     interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)])
+ dist_params = dict(backend='nccl')
+ log_level = 'INFO'
+ load_from = None
+ resume_from = None
+ workflow = [('train', 1)]
+ cudnn_benchmark = True
+ optimizer = dict(
+     type='AdamW', lr=0.00015, betas=[0.9, 0.96], weight_decay=0.045)
+ optimizer_config = dict()
+ lr_config = dict(
+     policy='step',
+     warmup='linear',
+     warmup_iters=1000,
+     warmup_ratio=1e-06,
+     step=10000,
+     gamma=0.5,
+     min_lr=1e-06,
+     by_epoch=False)
+ runner = dict(type='IterBasedRunner', max_iters=80000)
+ checkpoint_config = dict(by_epoch=False, interval=8000, max_keep_ckpts=1)
+ evaluation = dict(
+     interval=8000, metric='mIoU', pre_eval=True, save_best='mIoU')
+ checkpoint = 'pretrained/deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049-f9fb496d.pth'
+ work_dir = './work_dirs2/deeplabv3plus_r50-d8_aspp_head_unet_fc_single_step_ade_pretrained_freeze_embed_80k_cityscapes20'
+ gpu_ids = range(0, 8)
+ auto_resume = True
cityscapes/deeplabv3plus_r50_singlestep/iter_80000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e66df1694c48dc327a6e88e46e35b8a71fc3a704f3ed6bd505732c67a32993f
+ size 320658122
cityscapes/deeplabv3plus_r50_singlestep/latest.pth ADDED
@@ -0,0 +1 @@
+ iter_80000.pth
cityscapes/segformer_b0_multistep/best_mIoU_iter_144000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81934dd4754a76e7987d1a88515322bfe16f01050a943e2fc4d28565dedf32af
+ size 211300107
cityscapes/segformer_b0_multistep/segformer_mit_b0_segformer_head_unet_fc_single_step_ade_pretrained_freeze_embed_80k_cityscapes20_finetune_cfg.py ADDED
@@ -0,0 +1,68 @@
+ _base_ = [
+     '../_base_/models/segformer_mit-b2_segformer_head_unet_fc.py',
+     '../_base_/datasets/cityscapes20_1024x1024.py',
+     '../_base_/default_runtime.py',
+     '../_base_/schedules/schedule_160k.py'
+
+ ]
+ norm_cfg = dict(type='SyncBN', requires_grad=True)
+ checkpoint = 'work_dirs/segformer_mit_b0_segformer_head_unet_fc_single_step_ade_pretrained_freeze_embed_80k_cityscapes20/best_mIoU_iter_48000.pth'
+ # model settings
+ model = dict(
+     type='EncoderDecoderDiffusion',
+     freeze_parameters=['backbone', 'decode_head'],
+     pretrained=checkpoint,
+     backbone=dict(
+         type='MixVisionTransformerCustomInitWeights',
+         embed_dims=32,
+         num_layers=[2, 2, 2, 2],
+         num_heads=[1, 2, 5, 8],
+     ),
+     decode_head=dict(
+         _delete_=True,
+         type='SegformerHeadUnetFCHeadMultiStep',
+         # unet params
+         pretrained=checkpoint,
+         dim=128,
+         out_dim=256,
+         unet_channels=272,
+         dim_mults=[1, 1, 1],
+         cat_embedding_dim=16,
+         diffusion_timesteps=20,
+         # collect_timesteps=[19,18,17,16,15,10,5,0],
+         collect_timesteps=[i for i in range(20)],
+         guidance_scale=1,
+         # decode head params
+         in_channels=[32, 64, 160, 256],
+         in_index=[0, 1, 2, 3],
+         channels=256,
+         dropout_ratio=0.1,
+         num_classes=20,
+         norm_cfg=norm_cfg,
+         align_corners=False,
+         ignore_index=0,  # ignore background
+         loss_decode=dict(
+             type='CrossEntropyLoss',
+             use_sigmoid=False,
+             loss_weight=1.0)
+     )
+ )
+
+ optimizer = dict(_delete_=True, type='AdamW', lr=1.5e-4, betas=[0.9, 0.96], weight_decay=0.045)
+ lr_config = dict(_delete_=True, policy='step',
+                  warmup='linear',
+                  warmup_iters=1000,
+                  warmup_ratio=1e-6,
+                  step=20000, gamma=0.5, min_lr=1.0e-6, by_epoch=False)
+
+
+ custom_hooks = [dict(
+     type='ConstantMomentumEMAHook',
+     momentum=0.01,
+     interval=25,
+     eval_interval=16000,
+     auto_resume=True,
+     priority=49)
+ ]
+
+ # evaluation = dict(interval=100, metric='mIoU', pre_eval=True, save_best='mIoU')
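Side note (an observation plus a hedged sketch, not content from the checkpoint itself): unlike the other configs in this commit, which are fully expanded dumps, the SegFormer-B0 file above is an unexpanded `_base_`-style config. MMCV resolves the `_base_` list and the `_delete_=True` markers at load time, so the merged result can be inspected as below, assuming the referenced `../_base_/` files from the training codebase are present relative to the config.

# Sketch: resolve the _base_ inheritance and print the merged config (mmcv 1.x Config API).
from mmcv import Config

cfg = Config.fromfile(
    'cityscapes/segformer_b0_multistep/'
    'segformer_mit_b0_segformer_head_unet_fc_single_step_ade_pretrained_freeze_embed_80k_cityscapes20_finetune_cfg.py')
print(cfg.pretty_text)  # fully merged config, with _delete_ keys applied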
cityscapes/segformer_b2_multistep/20230302_232152.log ADDED
The diff for this file is too large to render. See raw diff
 
cityscapes/segformer_b2_multistep/20230302_232152.log.json ADDED
The diff for this file is too large to render. See raw diff
 
cityscapes/segformer_b2_multistep/best_mIoU_iter_128000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f387874491ad3136831359eb3b45cefc276f16d545a536b69f66a06a41e339ec
+ size 851427951
cityscapes/segformer_b2_multistep/iter_160000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d592e34c78163224116bc7af5bd7f1bdcb67b072e6b9e88655d0ecf97768107d
+ size 851427951
cityscapes/segformer_b2_multistep/latest.pth ADDED
@@ -0,0 +1 @@
+ iter_160000.pth
cityscapes/segformer_b2_multistep/segformer_mit_b2_segformer_head_unet_fc_small_multi_step_ade_pretrained_freeze_embed_160k_cityscapes20_finetune_ema.py ADDED
@@ -0,0 +1,195 @@
+ norm_cfg = dict(type='SyncBN', requires_grad=True)
+ checkpoint = 'work_dirs/segformer_mit_b2_segformer_head_unet_fc_small_single_step_ade_pretrained_freeze_embed_80k_cityscapes20/best_mIoU_iter_64000.pth'
+ model = dict(
+     type='EncoderDecoderDiffusion',
+     freeze_parameters=['backbone', 'decode_head'],
+     pretrained=
+     'work_dirs/segformer_mit_b2_segformer_head_unet_fc_small_single_step_ade_pretrained_freeze_embed_80k_cityscapes20/best_mIoU_iter_64000.pth',
+     backbone=dict(
+         type='MixVisionTransformerCustomInitWeights',
+         in_channels=3,
+         embed_dims=64,
+         num_stages=4,
+         num_layers=[3, 4, 6, 3],
+         num_heads=[1, 2, 5, 8],
+         patch_sizes=[7, 3, 3, 3],
+         sr_ratios=[8, 4, 2, 1],
+         out_indices=(0, 1, 2, 3),
+         mlp_ratio=4,
+         qkv_bias=True,
+         drop_rate=0.0,
+         attn_drop_rate=0.0,
+         drop_path_rate=0.1),
+     decode_head=dict(
+         type='SegformerHeadUnetFCHeadMultiStep',
+         pretrained=
+         'work_dirs/segformer_mit_b2_segformer_head_unet_fc_small_single_step_ade_pretrained_freeze_embed_80k_cityscapes20/best_mIoU_iter_64000.pth',
+         dim=256,
+         out_dim=256,
+         unet_channels=272,
+         dim_mults=[1, 1, 1],
+         cat_embedding_dim=16,
+         diffusion_timesteps=20,
+         collect_timesteps=[
+             0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+             19
+         ],
+         in_channels=[64, 128, 320, 512],
+         in_index=[0, 1, 2, 3],
+         channels=256,
+         dropout_ratio=0.1,
+         num_classes=20,
+         norm_cfg=dict(type='SyncBN', requires_grad=True),
+         align_corners=False,
+         ignore_index=0,
+         loss_decode=dict(
+             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+     train_cfg=dict(),
+     test_cfg=dict(mode='whole'))
+ dataset_type = 'Cityscapes20Dataset'
+ data_root = 'data/cityscapes/'
+ img_norm_cfg = dict(
+     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+ crop_size = (1024, 1024)
+ train_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(type='LoadAnnotationsCityscapes20'),
+     dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
+     dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75),
+     dict(type='RandomFlip', prob=0.5),
+     dict(type='PhotoMetricDistortion'),
+     dict(
+         type='Normalize',
+         mean=[123.675, 116.28, 103.53],
+         std=[58.395, 57.12, 57.375],
+         to_rgb=True),
+     dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=0),
+     dict(type='DefaultFormatBundle'),
+     dict(type='Collect', keys=['img', 'gt_semantic_seg'])
+ ]
+ test_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(
+         type='MultiScaleFlipAug',
+         img_scale=(2048, 1024),
+         flip=False,
+         transforms=[
+             dict(type='Resize', keep_ratio=True),
+             dict(type='RandomFlip'),
+             dict(
+                 type='Normalize',
+                 mean=[123.675, 116.28, 103.53],
+                 std=[58.395, 57.12, 57.375],
+                 to_rgb=True),
+             dict(type='ImageToTensor', keys=['img']),
+             dict(type='Collect', keys=['img'])
+         ])
+ ]
+ data = dict(
+     samples_per_gpu=2,
+     workers_per_gpu=2,
+     train=dict(
+         type='Cityscapes20Dataset',
+         data_root='data/cityscapes/',
+         img_dir='leftImg8bit/train',
+         ann_dir='gtFine/train',
+         pipeline=[
+             dict(type='LoadImageFromFile'),
+             dict(type='LoadAnnotationsCityscapes20'),
+             dict(
+                 type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
+             dict(
+                 type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75),
+             dict(type='RandomFlip', prob=0.5),
+             dict(type='PhotoMetricDistortion'),
+             dict(
+                 type='Normalize',
+                 mean=[123.675, 116.28, 103.53],
+                 std=[58.395, 57.12, 57.375],
+                 to_rgb=True),
+             dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=0),
+             dict(type='DefaultFormatBundle'),
+             dict(type='Collect', keys=['img', 'gt_semantic_seg'])
+         ]),
+     val=dict(
+         type='Cityscapes20Dataset',
+         data_root='data/cityscapes/',
+         img_dir='leftImg8bit/val',
+         ann_dir='gtFine/val',
+         pipeline=[
+             dict(type='LoadImageFromFile'),
+             dict(
+                 type='MultiScaleFlipAug',
+                 img_scale=(2048, 1024),
+                 flip=False,
+                 transforms=[
+                     dict(type='Resize', keep_ratio=True),
+                     dict(type='RandomFlip'),
+                     dict(
+                         type='Normalize',
+                         mean=[123.675, 116.28, 103.53],
+                         std=[58.395, 57.12, 57.375],
+                         to_rgb=True),
+                     dict(type='ImageToTensor', keys=['img']),
+                     dict(type='Collect', keys=['img'])
+                 ])
+         ]),
+     test=dict(
+         type='Cityscapes20Dataset',
+         data_root='data/cityscapes/',
+         img_dir='leftImg8bit/val',
+         ann_dir='gtFine/val',
+         pipeline=[
+             dict(type='LoadImageFromFile'),
+             dict(
+                 type='MultiScaleFlipAug',
+                 img_scale=(2048, 1024),
+                 flip=False,
+                 transforms=[
+                     dict(type='Resize', keep_ratio=True),
+                     dict(type='RandomFlip'),
+                     dict(
+                         type='Normalize',
+                         mean=[123.675, 116.28, 103.53],
+                         std=[58.395, 57.12, 57.375],
+                         to_rgb=True),
+                     dict(type='ImageToTensor', keys=['img']),
+                     dict(type='Collect', keys=['img'])
+                 ])
+         ]))
+ log_config = dict(
+     interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)])
+ dist_params = dict(backend='nccl')
+ log_level = 'INFO'
+ load_from = None
+ resume_from = None
+ workflow = [('train', 1)]
+ cudnn_benchmark = True
+ optimizer = dict(
+     type='AdamW', lr=0.00015, betas=[0.9, 0.96], weight_decay=0.045)
+ optimizer_config = dict()
+ lr_config = dict(
+     policy='step',
+     warmup='linear',
+     warmup_iters=1000,
+     warmup_ratio=1e-06,
+     step=20000,
+     gamma=0.5,
+     min_lr=1e-06,
+     by_epoch=False)
+ runner = dict(type='IterBasedRunner', max_iters=160000)
+ checkpoint_config = dict(by_epoch=False, interval=16000, max_keep_ckpts=1)
+ evaluation = dict(
+     interval=16000, metric='mIoU', pre_eval=True, save_best='mIoU')
+ custom_hooks = [
+     dict(
+         type='ConstantMomentumEMAHook',
+         momentum=0.01,
+         interval=25,
+         eval_interval=16000,
+         auto_resume=True,
+         priority=49)
+ ]
+ work_dir = './work_dirs/segformer_mit_b2_segformer_head_unet_fc_small_multi_step_ade_pretrained_freeze_embed_160k_cityscapes20_finetune_ema'
+ gpu_ids = range(0, 8)
+ auto_resume = True