limingcv committed on
Commit
e3962e4
1 Parent(s): 41f0deb

Upload with huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +2 -0
  2. finetune/.DS_Store +0 -0
  3. finetune/finetune_cbnetv2_swin-L_bs128_1x_coco_lr8e-4_wd2.5e-3/20230131_101829.log +0 -0
  4. finetune/finetune_cbnetv2_swin-L_bs128_1x_coco_lr8e-4_wd2.5e-3/20230131_101829.log.json +0 -0
  5. finetune/finetune_cbnetv2_swin-L_bs128_1x_coco_lr8e-4_wd2.5e-3/best_bbox_mAP_epoch_12.pth +3 -0
  6. finetune/finetune_cbnetv2_swin-L_bs128_1x_coco_lr8e-4_wd2.5e-3/cbnetv2_swin-L_8x2_1x_coco.py +422 -0
  7. finetune/finetune_detr_100e_coco_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0_lr-mul-0.1/20221110_102520.log +0 -0
  8. finetune/finetune_detr_100e_coco_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0_lr-mul-0.1/20221110_102520.log.json +0 -0
  9. finetune/finetune_detr_100e_coco_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0_lr-mul-0.1/best_bbox_mAP_epoch_100.pth +3 -0
  10. finetune/finetune_detr_100e_coco_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0_lr-mul-0.1/detr_r50_8x2_100e_coco.py +280 -0
  11. finetune/finetune_detr_100e_voc0712/20221109_194054.log +0 -0
  12. finetune/finetune_detr_100e_voc0712/20221109_194054.log.json +0 -0
  13. finetune/finetune_detr_100e_voc0712/best_mAP_epoch_93.pth +3 -0
  14. finetune/finetune_detr_100e_voc0712/detr_mstrain_100e_voc0712.py +218 -0
  15. finetune/finetune_detr_150e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0/20221105_015109.log +3 -0
  16. finetune/finetune_detr_150e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0/20221105_015109.log.json +3 -0
  17. finetune/finetune_detr_150e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0/best_bbox_mAP_epoch_139.pth +3 -0
  18. finetune/finetune_detr_150e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0/detr_r50_8x2_150e_coco.py +280 -0
  19. finetune/finetune_detr_150e_coco_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0_swav_lr-mul-0.4/20221107_215710.log +0 -0
  20. finetune/finetune_detr_150e_coco_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0_swav_lr-mul-0.4/20221107_215710.log.json +0 -0
  21. finetune/finetune_detr_150e_coco_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0_swav_lr-mul-0.4/best_bbox_mAP_epoch_143.pth +3 -0
  22. finetune/finetune_detr_150e_coco_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0_swav_lr-mul-0.4/detr_r50_8x2_150e_coco.py +280 -0
  23. finetune/finetune_detr_50e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0/20221104_003558.log +0 -0
  24. finetune/finetune_detr_50e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0/20221104_003558.log.json +0 -0
  25. finetune/finetune_detr_50e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0/best_bbox_mAP_epoch_50.pth +3 -0
  26. finetune/finetune_detr_50e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0/detr_r50_8x2_50e_coco.py +281 -0
  27. finetune/finetune_faster-rcnn_12k_coco/20221022_163550.log +0 -0
  28. finetune/finetune_faster-rcnn_12k_coco/20221022_163550.log.json +242 -0
  29. finetune/finetune_faster-rcnn_12k_coco/best_bbox_mAP_iter_12000.pth +3 -0
  30. finetune/finetune_faster-rcnn_12k_coco/faster_rcnn_fpn_12k_semi-coco.py +248 -0
  31. finetune/finetune_faster-rcnn_1x_coco_lr3e-2_wd5e-5/20221024_212434.log +0 -0
  32. finetune/finetune_faster-rcnn_1x_coco_lr3e-2_wd5e-5/20221024_212434.log.json +0 -0
  33. finetune/finetune_faster-rcnn_1x_coco_lr3e-2_wd5e-5/best_bbox_mAP_epoch_12.pth +3 -0
  34. finetune/finetune_faster-rcnn_1x_coco_lr3e-2_wd5e-5/faster_rcnn_r50_fpn_1x_coco.py +249 -0
  35. finetune/finetune_fcos_12k_voc0712_lr1.5e-2_wd5e-5/20221003_230350.log +1257 -0
  36. finetune/finetune_fcos_12k_voc0712_lr1.5e-2_wd5e-5/20221003_230350.log.json +242 -0
  37. finetune/finetune_fcos_12k_voc0712_lr1.5e-2_wd5e-5/best_mAP_iter_12000.pth +3 -0
  38. finetune/finetune_fcos_12k_voc0712_lr1.5e-2_wd5e-5/fcos_mstrain_12k_voc0712.py +212 -0
  39. finetune/finetune_fcos_1x_coco_lr1.5e-2_wd5e.5/20221104_113744.log +0 -0
  40. finetune/finetune_fcos_1x_coco_lr1.5e-2_wd5e.5/20221104_113744.log.json +0 -0
  41. finetune/finetune_fcos_1x_coco_lr1.5e-2_wd5e.5/best_bbox_mAP_epoch_12.pth +3 -0
  42. finetune/finetune_fcos_1x_coco_lr1.5e-2_wd5e.5/fcos_r50_fpn_1x_coco.py +197 -0
  43. finetune/finetune_mask-rcnn_12k_voc0712_lr3e-2_wd5e-5/20221003_234250.log +0 -0
  44. finetune/finetune_mask-rcnn_12k_voc0712_lr3e-2_wd5e-5/20221003_234250.log.json +242 -0
  45. finetune/finetune_mask-rcnn_12k_voc0712_lr3e-2_wd5e-5/best_mAP_iter_12000.pth +3 -0
  46. finetune/finetune_mask-rcnn_12k_voc0712_lr3e-2_wd5e-5/mask_rcnn_mstrain_12k_voc0712.py +262 -0
  47. finetune/finetune_mask-rcnn_1x_coco_lr3e-2_wd5e-5/20220929_104229.log +0 -0
  48. finetune/finetune_mask-rcnn_1x_coco_lr3e-2_wd5e-5/20220929_104229.log.json +0 -0
  49. finetune/finetune_mask-rcnn_1x_coco_lr3e-2_wd5e-5/best_bbox_mAP_epoch_12.pth +3 -0
  50. finetune/finetune_mask-rcnn_1x_coco_lr3e-2_wd5e-5/mask_rcnn_r50_fpn_1x_coco.py +259 -0
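All of the checkpoints and configs listed above are tracked through Git LFS, so they are most easily fetched programmatically rather than through this diff view. A minimal sketch, assuming a recent huggingface_hub client; the repository id is a placeholder, since it is not shown on this page:

from huggingface_hub import snapshot_download

# Hypothetical repo_id -- substitute the actual model repository.
local_dir = snapshot_download(
    repo_id="limingcv/placeholder-repo",
    allow_patterns=["finetune/finetune_detr_100e_voc0712/*"],  # fetch one run at a time
)
print(local_dir)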
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ finetune/finetune_detr_150e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0/20221105_015109.log filter=lfs diff=lfs merge=lfs -text
+ finetune/finetune_detr_150e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0/20221105_015109.log.json filter=lfs diff=lfs merge=lfs -text
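The two added rules route the new 20221105_015109.log and .log.json files through Git LFS, consistent with the commit message "Upload with huggingface_hub". A hedged sketch of how such an upload is typically done with that library; the repo id and local folder path are placeholders:

from huggingface_hub import HfApi

api = HfApi()
# upload_folder pushes everything under the local folder; the .gitattributes
# rules above decide which files are stored as LFS pointers on the Hub.
api.upload_folder(
    repo_id="limingcv/placeholder-repo",   # hypothetical
    folder_path="work_dirs/finetune",      # hypothetical local path
    path_in_repo="finetune",
)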
finetune/.DS_Store ADDED
Binary file (10.2 kB).
 
finetune/finetune_cbnetv2_swin-L_bs128_1x_coco_lr8e-4_wd2.5e-3/20230131_101829.log ADDED
The diff for this file is too large to render. See raw diff
 
finetune/finetune_cbnetv2_swin-L_bs128_1x_coco_lr8e-4_wd2.5e-3/20230131_101829.log.json ADDED
The diff for this file is too large to render. See raw diff
 
finetune/finetune_cbnetv2_swin-L_bs128_1x_coco_lr8e-4_wd2.5e-3/best_bbox_mAP_epoch_12.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a954f5e1532994ca286c85639742adb4701af91279315ebf9f0ea4c279665d40
+ size 5404262999
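The .pth entries in this commit are Git LFS pointer files: three lines giving the pointer spec version, the SHA-256 of the real payload, and its size in bytes (about 5.4 GB for this CBNetV2 checkpoint). A small, self-contained sketch for verifying a downloaded payload against such a pointer; the file paths are illustrative:

import hashlib

def verify_lfs_pointer(pointer_path: str, payload_path: str) -> bool:
    """Compare a payload's SHA-256 and size with the values in an LFS pointer file."""
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            if line.strip():
                key, value = line.strip().split(" ", 1)
                fields[key] = value
    expected_oid = fields["oid"].split(":", 1)[1]   # strip the 'sha256:' prefix
    expected_size = int(fields["size"])

    digest, size = hashlib.sha256(), 0
    with open(payload_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size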
finetune/finetune_cbnetv2_swin-L_bs128_1x_coco_lr8e-4_wd2.5e-3/cbnetv2_swin-L_8x2_1x_coco.py ADDED
@@ -0,0 +1,422 @@
1
+ model = dict(
2
+ type='CBNetHybridTaskCascade',
3
+ pretrained=
4
+ 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth',
5
+ backbone=dict(
6
+ type='CBSwinTransformer',
7
+ embed_dim=192,
8
+ depths=[2, 2, 18, 2],
9
+ num_heads=[6, 12, 24, 48],
10
+ window_size=7,
11
+ mlp_ratio=4.0,
12
+ qkv_bias=True,
13
+ qk_scale=None,
14
+ drop_rate=0.0,
15
+ attn_drop_rate=0.0,
16
+ drop_path_rate=0.2,
17
+ ape=False,
18
+ patch_norm=True,
19
+ out_indices=(0, 1, 2, 3),
20
+ use_checkpoint=False),
21
+ neck=dict(
22
+ type='CBFPN',
23
+ in_channels=[192, 384, 768, 1536],
24
+ out_channels=256,
25
+ num_outs=5),
26
+ rpn_head=dict(
27
+ type='RPNHead',
28
+ in_channels=256,
29
+ feat_channels=256,
30
+ anchor_generator=dict(
31
+ type='AnchorGenerator',
32
+ scales=[8],
33
+ ratios=[0.5, 1.0, 2.0],
34
+ strides=[4, 8, 16, 32, 64]),
35
+ bbox_coder=dict(
36
+ type='DeltaXYWHBBoxCoder',
37
+ target_means=[0.0, 0.0, 0.0, 0.0],
38
+ target_stds=[1.0, 1.0, 1.0, 1.0]),
39
+ loss_cls=dict(
40
+ type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
41
+ loss_bbox=dict(
42
+ type='SmoothL1Loss', beta=0.1111111111111111, loss_weight=1.0)),
43
+ roi_head=dict(
44
+ type='HybridTaskCascadeRoIHead',
45
+ interleaved=True,
46
+ mask_info_flow=True,
47
+ num_stages=3,
48
+ stage_loss_weights=[1, 0.5, 0.25],
49
+ bbox_roi_extractor=dict(
50
+ type='SingleRoIExtractor',
51
+ roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
52
+ out_channels=256,
53
+ featmap_strides=[4, 8, 16, 32]),
54
+ bbox_head=[
55
+ dict(
56
+ type='Shared4Conv1FCBBoxHead',
57
+ in_channels=256,
58
+ fc_out_channels=1024,
59
+ roi_feat_size=7,
60
+ num_classes=80,
61
+ bbox_coder=dict(
62
+ type='DeltaXYWHBBoxCoder',
63
+ target_means=[0.0, 0.0, 0.0, 0.0],
64
+ target_stds=[0.1, 0.1, 0.2, 0.2]),
65
+ reg_class_agnostic=True,
66
+ loss_cls=dict(
67
+ type='CrossEntropyLoss',
68
+ use_sigmoid=False,
69
+ loss_weight=1.0),
70
+ loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
71
+ loss_weight=1.0)),
72
+ dict(
73
+ type='Shared4Conv1FCBBoxHead',
74
+ in_channels=256,
75
+ fc_out_channels=1024,
76
+ roi_feat_size=7,
77
+ num_classes=80,
78
+ bbox_coder=dict(
79
+ type='DeltaXYWHBBoxCoder',
80
+ target_means=[0.0, 0.0, 0.0, 0.0],
81
+ target_stds=[0.05, 0.05, 0.1, 0.1]),
82
+ reg_class_agnostic=True,
83
+ loss_cls=dict(
84
+ type='CrossEntropyLoss',
85
+ use_sigmoid=False,
86
+ loss_weight=1.0),
87
+ loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
88
+ loss_weight=1.0)),
89
+ dict(
90
+ type='Shared4Conv1FCBBoxHead',
91
+ in_channels=256,
92
+ fc_out_channels=1024,
93
+ roi_feat_size=7,
94
+ num_classes=80,
95
+ bbox_coder=dict(
96
+ type='DeltaXYWHBBoxCoder',
97
+ target_means=[0.0, 0.0, 0.0, 0.0],
98
+ target_stds=[0.033, 0.033, 0.067, 0.067]),
99
+ reg_class_agnostic=True,
100
+ loss_cls=dict(
101
+ type='CrossEntropyLoss',
102
+ use_sigmoid=False,
103
+ loss_weight=1.0),
104
+ loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
105
+ ],
106
+ mask_roi_extractor=dict(
107
+ type='SingleRoIExtractor',
108
+ roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
109
+ out_channels=256,
110
+ featmap_strides=[4, 8, 16, 32]),
111
+ mask_head=[
112
+ dict(
113
+ type='HTCMaskHead',
114
+ with_conv_res=False,
115
+ num_convs=4,
116
+ in_channels=256,
117
+ conv_out_channels=256,
118
+ num_classes=80,
119
+ loss_mask=dict(
120
+ type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
121
+ dict(
122
+ type='HTCMaskHead',
123
+ num_convs=4,
124
+ in_channels=256,
125
+ conv_out_channels=256,
126
+ num_classes=80,
127
+ loss_mask=dict(
128
+ type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
129
+ dict(
130
+ type='HTCMaskHead',
131
+ num_convs=4,
132
+ in_channels=256,
133
+ conv_out_channels=256,
134
+ num_classes=80,
135
+ loss_mask=dict(
136
+ type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
137
+ ]),
138
+ train_cfg=dict(
139
+ rpn=dict(
140
+ assigner=dict(
141
+ type='MaxIoUAssigner',
142
+ pos_iou_thr=0.7,
143
+ neg_iou_thr=0.3,
144
+ min_pos_iou=0.3,
145
+ ignore_iof_thr=-1),
146
+ sampler=dict(
147
+ type='RandomSampler',
148
+ num=256,
149
+ pos_fraction=0.5,
150
+ neg_pos_ub=-1,
151
+ add_gt_as_proposals=False),
152
+ allowed_border=0,
153
+ pos_weight=-1,
154
+ debug=False),
155
+ rpn_proposal=dict(
156
+ nms_pre=2000,
157
+ max_per_img=2000,
158
+ nms=dict(type='nms', iou_threshold=0.7),
159
+ min_bbox_size=0),
160
+ rcnn=[
161
+ dict(
162
+ assigner=dict(
163
+ type='MaxIoUAssigner',
164
+ pos_iou_thr=0.5,
165
+ neg_iou_thr=0.5,
166
+ min_pos_iou=0.5,
167
+ ignore_iof_thr=-1),
168
+ sampler=dict(
169
+ type='RandomSampler',
170
+ num=512,
171
+ pos_fraction=0.25,
172
+ neg_pos_ub=-1,
173
+ add_gt_as_proposals=True),
174
+ mask_size=28,
175
+ pos_weight=-1,
176
+ debug=False),
177
+ dict(
178
+ assigner=dict(
179
+ type='MaxIoUAssigner',
180
+ pos_iou_thr=0.6,
181
+ neg_iou_thr=0.6,
182
+ min_pos_iou=0.6,
183
+ ignore_iof_thr=-1),
184
+ sampler=dict(
185
+ type='RandomSampler',
186
+ num=512,
187
+ pos_fraction=0.25,
188
+ neg_pos_ub=-1,
189
+ add_gt_as_proposals=True),
190
+ mask_size=28,
191
+ pos_weight=-1,
192
+ debug=False),
193
+ dict(
194
+ assigner=dict(
195
+ type='MaxIoUAssigner',
196
+ pos_iou_thr=0.7,
197
+ neg_iou_thr=0.7,
198
+ min_pos_iou=0.7,
199
+ ignore_iof_thr=-1),
200
+ sampler=dict(
201
+ type='RandomSampler',
202
+ num=512,
203
+ pos_fraction=0.25,
204
+ neg_pos_ub=-1,
205
+ add_gt_as_proposals=True),
206
+ mask_size=28,
207
+ pos_weight=-1,
208
+ debug=False)
209
+ ]),
210
+ test_cfg=dict(
211
+ rpn=dict(
212
+ nms_pre=1000,
213
+ max_per_img=1000,
214
+ nms=dict(type='nms', iou_threshold=0.7),
215
+ min_bbox_size=0),
216
+ rcnn=dict(
217
+ score_thr=0.001,
218
+ nms=dict(type='soft_nms', iou_threshold=0.5),
219
+ max_per_img=100,
220
+ mask_thr_binary=0.5)))
221
+ dataset_type = 'CocoDataset'
222
+ data_root = 'data/coco/'
223
+ img_norm_cfg = dict(
224
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
225
+ train_pipeline = [
226
+ dict(type='LoadImageFromFile'),
227
+ dict(
228
+ type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
229
+ dict(
230
+ type='Resize',
231
+ img_scale=[(1600, 400), (1600, 1400)],
232
+ multiscale_mode='range',
233
+ keep_ratio=True),
234
+ dict(type='RandomFlip', flip_ratio=0.5),
235
+ dict(
236
+ type='Normalize',
237
+ mean=[123.675, 116.28, 103.53],
238
+ std=[58.395, 57.12, 57.375],
239
+ to_rgb=True),
240
+ dict(type='Pad', size_divisor=32),
241
+ dict(type='SegRescale', scale_factor=0.125),
242
+ dict(type='DefaultFormatBundle'),
243
+ dict(
244
+ type='Collect',
245
+ keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg'])
246
+ ]
247
+ test_pipeline = [
248
+ dict(type='LoadImageFromFile'),
249
+ dict(
250
+ type='MultiScaleFlipAug',
251
+ img_scale=(1600, 1400),
252
+ flip=False,
253
+ transforms=[
254
+ dict(type='Resize', keep_ratio=True),
255
+ dict(type='RandomFlip'),
256
+ dict(
257
+ type='Normalize',
258
+ mean=[123.675, 116.28, 103.53],
259
+ std=[58.395, 57.12, 57.375],
260
+ to_rgb=True),
261
+ dict(type='Pad', size_divisor=32),
262
+ dict(type='ImageToTensor', keys=['img']),
263
+ dict(type='Collect', keys=['img'])
264
+ ])
265
+ ]
266
+ data = dict(
267
+ samples_per_gpu=2,
268
+ workers_per_gpu=2,
269
+ train=dict(
270
+ type='CocoDataset',
271
+ ann_file='data/coco/annotations/instances_train2017.json',
272
+ img_prefix='data/coco/train2017/',
273
+ pipeline=[
274
+ dict(type='LoadImageFromFile'),
275
+ dict(
276
+ type='LoadAnnotations',
277
+ with_bbox=True,
278
+ with_mask=True,
279
+ with_seg=True),
280
+ dict(
281
+ type='Resize',
282
+ img_scale=[(1600, 400), (1600, 1400)],
283
+ multiscale_mode='range',
284
+ keep_ratio=True),
285
+ dict(type='RandomFlip', flip_ratio=0.5),
286
+ dict(
287
+ type='Normalize',
288
+ mean=[123.675, 116.28, 103.53],
289
+ std=[58.395, 57.12, 57.375],
290
+ to_rgb=True),
291
+ dict(type='Pad', size_divisor=32),
292
+ dict(type='SegRescale', scale_factor=0.125),
293
+ dict(type='DefaultFormatBundle'),
294
+ dict(
295
+ type='Collect',
296
+ keys=[
297
+ 'img', 'gt_bboxes', 'gt_labels', 'gt_masks',
298
+ 'gt_semantic_seg'
299
+ ])
300
+ ],
301
+ seg_prefix='data/coco/stuffthingmaps/train2017/'),
302
+ val=dict(
303
+ type='CocoDataset',
304
+ ann_file='data/coco/annotations/instances_val2017.json',
305
+ img_prefix='data/coco/val2017/',
306
+ pipeline=[
307
+ dict(type='LoadImageFromFile'),
308
+ dict(
309
+ type='MultiScaleFlipAug',
310
+ img_scale=(1600, 1400),
311
+ flip=False,
312
+ transforms=[
313
+ dict(type='Resize', keep_ratio=True),
314
+ dict(type='RandomFlip'),
315
+ dict(
316
+ type='Normalize',
317
+ mean=[123.675, 116.28, 103.53],
318
+ std=[58.395, 57.12, 57.375],
319
+ to_rgb=True),
320
+ dict(type='Pad', size_divisor=32),
321
+ dict(type='ImageToTensor', keys=['img']),
322
+ dict(type='Collect', keys=['img'])
323
+ ])
324
+ ]),
325
+ test=dict(
326
+ type='CocoDataset',
327
+ ann_file='data/coco/annotations/instances_val2017.json',
328
+ img_prefix='data/coco/val2017/',
329
+ pipeline=[
330
+ dict(type='LoadImageFromFile'),
331
+ dict(
332
+ type='MultiScaleFlipAug',
333
+ img_scale=(1600, 1400),
334
+ flip=False,
335
+ transforms=[
336
+ dict(type='Resize', keep_ratio=True),
337
+ dict(type='RandomFlip'),
338
+ dict(
339
+ type='Normalize',
340
+ mean=[123.675, 116.28, 103.53],
341
+ std=[58.395, 57.12, 57.375],
342
+ to_rgb=True),
343
+ dict(type='Pad', size_divisor=32),
344
+ dict(type='ImageToTensor', keys=['img']),
345
+ dict(type='Collect', keys=['img'])
346
+ ])
347
+ ]))
348
+ evaluation = dict(metric=['bbox', 'segm'], save_best='auto', gpu_collect=True)
349
+ optimizer = dict(
350
+ type='AdamW',
351
+ lr=0.0008,
352
+ betas=(0.9, 0.999),
353
+ weight_decay=0.0025,
354
+ paramwise_cfg=dict(
355
+ custom_keys=dict(
356
+ absolute_pos_embed=dict(decay_mult=0.0),
357
+ relative_position_bias_table=dict(decay_mult=0.0),
358
+ norm=dict(decay_mult=0.0))))
359
+ optimizer_config = dict(grad_clip=None)
360
+ lr_config = dict(
361
+ policy='step',
362
+ warmup='linear',
363
+ warmup_iters=500,
364
+ warmup_ratio=0.001,
365
+ step=[8, 11])
366
+ runner = dict(type='EpochBasedRunner', max_epochs=12)
367
+ checkpoint_config = dict(interval=1)
368
+ log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
369
+ custom_hooks = [
370
+ dict(type='NumClassCheckHook'),
371
+ dict(
372
+ type='MMDetWandbHook',
373
+ init_kwargs=dict(project='I2B', group='finetune'),
374
+ interval=50,
375
+ num_eval_images=0,
376
+ log_checkpoint=False)
377
+ ]
378
+ dist_params = dict(backend='nccl')
379
+ log_level = 'INFO'
380
+ load_from = 'work_dirs/selfsup_cbv2_swin-L_1x_coco/final_model.pth'
381
+ resume_from = None
382
+ workflow = [('train', 1)]
383
+ opencv_num_threads = 0
384
+ mp_start_method = 'fork'
385
+ auto_scale_lr = dict(enable=False, base_batch_size=16)
386
+ custom_imports = dict(
387
+ imports=[
388
+ 'mmselfsup.datasets.pipelines',
389
+ 'selfsup.core.hook.momentum_update_hook',
390
+ 'selfsup.datasets.pipelines.selfsup_pipelines',
391
+ 'selfsup.datasets.pipelines.rand_aug',
392
+ 'selfsup.datasets.single_view_coco',
393
+ 'selfsup.datasets.multi_view_coco',
394
+ 'selfsup.models.losses.contrastive_loss',
395
+ 'selfsup.models.dense_heads.fcos_head',
396
+ 'selfsup.models.dense_heads.retina_head',
397
+ 'selfsup.models.dense_heads.detr_head',
398
+ 'selfsup.models.dense_heads.deformable_detr_head',
399
+ 'selfsup.models.roi_heads.bbox_heads.convfc_bbox_head',
400
+ 'selfsup.models.roi_heads.standard_roi_head',
401
+ 'selfsup.models.roi_heads.htc_roi_head',
402
+ 'selfsup.models.roi_heads.cbv2_roi_head',
403
+ 'selfsup.models.necks.cb_fpn', 'selfsup.models.backbones.cbv2',
404
+ 'selfsup.models.backbones.swinv1',
405
+ 'selfsup.models.detectors.selfsup_detector',
406
+ 'selfsup.models.detectors.selfsup_fcos',
407
+ 'selfsup.models.detectors.selfsup_detr',
408
+ 'selfsup.models.detectors.selfsup_deformable_detr',
409
+ 'selfsup.models.detectors.selfsup_retinanet',
410
+ 'selfsup.models.detectors.selfsup_mask_rcnn',
411
+ 'selfsup.models.detectors.selfsup_htc',
412
+ 'selfsup.models.detectors.selfsup_cbv2',
413
+ 'selfsup.models.detectors.cbv2',
414
+ 'selfsup.core.bbox.assigners.hungarian_assigner',
415
+ 'selfsup.core.bbox.assigners.pseudo_hungarian_assigner',
416
+ 'selfsup.core.bbox.match_costs.match_cost'
417
+ ],
418
+ allow_failed_imports=False)
419
+ fp16 = dict(loss_scale='dynamic')
420
+ work_dir = 'work_dirs/finetune_cbnetv2_swin-L_bs128_1x_coco_lr8e-4_wd2.5e-3'
421
+ auto_resume = False
422
+ gpu_ids = range(0, 64)
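The config above matches its directory name: CBNetV2 with a Swin-L backbone in an HTC-style detector, AdamW at lr 8e-4 and weight decay 2.5e-3, a 1x (12-epoch) schedule, and initialisation from a self-supervised checkpoint via load_from. A minimal sketch of consuming such a file with the MMDetection 2.x / MMCV 1.x API; it assumes mmcv-full and mmdet are installed and that the custom selfsup modules named in custom_imports are importable, since registry entries such as CBNetHybridTaskCascade live there:

from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile(
    "finetune/finetune_cbnetv2_swin-L_bs128_1x_coco_lr8e-4_wd2.5e-3/"
    "cbnetv2_swin-L_8x2_1x_coco.py")
# train_cfg/test_cfg are nested inside cfg.model in this config, so passing
# cfg.model alone is enough for mmdet 2.x.
model = build_detector(cfg.model)
print(cfg.optimizer.lr, cfg.runner.max_epochs)  # 0.0008 12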
finetune/finetune_detr_100e_coco_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0_lr-mul-0.1/20221110_102520.log ADDED
The diff for this file is too large to render. See raw diff
 
finetune/finetune_detr_100e_coco_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0_lr-mul-0.1/20221110_102520.log.json ADDED
The diff for this file is too large to render. See raw diff
 
finetune/finetune_detr_100e_coco_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0_lr-mul-0.1/best_bbox_mAP_epoch_100.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e2fa28c9e94cc5fd48d4986d4fe04381f7f0ad0433200ed50a28d6368399577
+ size 499563034
finetune/finetune_detr_100e_coco_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0_lr-mul-0.1/detr_r50_8x2_100e_coco.py ADDED
@@ -0,0 +1,280 @@
1
+ model = dict(
2
+ type='DETR',
3
+ backbone=dict(
4
+ type='ResNet',
5
+ depth=50,
6
+ num_stages=4,
7
+ out_indices=(3, ),
8
+ frozen_stages=-1,
9
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
10
+ norm_eval=False,
11
+ style='pytorch',
12
+ init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
13
+ bbox_head=dict(
14
+ type='DETRHead',
15
+ num_classes=80,
16
+ in_channels=2048,
17
+ transformer=dict(
18
+ type='Transformer',
19
+ encoder=dict(
20
+ type='DetrTransformerEncoder',
21
+ num_layers=6,
22
+ transformerlayers=dict(
23
+ type='BaseTransformerLayer',
24
+ attn_cfgs=[
25
+ dict(
26
+ type='MultiheadAttention',
27
+ embed_dims=256,
28
+ num_heads=8,
29
+ dropout=0.1)
30
+ ],
31
+ feedforward_channels=2048,
32
+ ffn_dropout=0.1,
33
+ operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
34
+ decoder=dict(
35
+ type='DetrTransformerDecoder',
36
+ return_intermediate=True,
37
+ num_layers=6,
38
+ transformerlayers=dict(
39
+ type='DetrTransformerDecoderLayer',
40
+ attn_cfgs=dict(
41
+ type='MultiheadAttention',
42
+ embed_dims=256,
43
+ num_heads=8,
44
+ dropout=0.1),
45
+ feedforward_channels=2048,
46
+ ffn_dropout=0.1,
47
+ operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
48
+ 'ffn', 'norm')))),
49
+ positional_encoding=dict(
50
+ type='SinePositionalEncoding', num_feats=128, normalize=True),
51
+ loss_cls=dict(
52
+ type='CrossEntropyLoss',
53
+ bg_cls_weight=0.1,
54
+ use_sigmoid=False,
55
+ loss_weight=1.0,
56
+ class_weight=1.0),
57
+ loss_bbox=dict(type='L1Loss', loss_weight=5.0),
58
+ loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
59
+ train_cfg=dict(
60
+ assigner=dict(
61
+ type='HungarianAssigner',
62
+ cls_cost=dict(type='ClassificationCost', weight=1.0),
63
+ reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
64
+ iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))),
65
+ test_cfg=dict(max_per_img=100))
66
+ dataset_type = 'CocoDataset'
67
+ data_root = 'data/coco/'
68
+ img_norm_cfg = dict(
69
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
70
+ train_pipeline = [
71
+ dict(type='LoadImageFromFile'),
72
+ dict(type='LoadAnnotations', with_bbox=True),
73
+ dict(type='RandomFlip', flip_ratio=0.5),
74
+ dict(
75
+ type='AutoAugment',
76
+ policies=[[{
77
+ 'type':
78
+ 'Resize',
79
+ 'img_scale': [(480, 1333), (512, 1333), (544, 1333), (576, 1333),
80
+ (608, 1333), (640, 1333), (672, 1333), (704, 1333),
81
+ (736, 1333), (768, 1333), (800, 1333)],
82
+ 'multiscale_mode':
83
+ 'value',
84
+ 'keep_ratio':
85
+ True
86
+ }],
87
+ [{
88
+ 'type': 'Resize',
89
+ 'img_scale': [(400, 1333), (500, 1333), (600, 1333)],
90
+ 'multiscale_mode': 'value',
91
+ 'keep_ratio': True
92
+ }, {
93
+ 'type': 'RandomCrop',
94
+ 'crop_type': 'absolute_range',
95
+ 'crop_size': (384, 600),
96
+ 'allow_negative_crop': True
97
+ }, {
98
+ 'type':
99
+ 'Resize',
100
+ 'img_scale': [(480, 1333), (512, 1333), (544, 1333),
101
+ (576, 1333), (608, 1333), (640, 1333),
102
+ (672, 1333), (704, 1333), (736, 1333),
103
+ (768, 1333), (800, 1333)],
104
+ 'multiscale_mode':
105
+ 'value',
106
+ 'override':
107
+ True,
108
+ 'keep_ratio':
109
+ True
110
+ }]]),
111
+ dict(
112
+ type='Normalize',
113
+ mean=[123.675, 116.28, 103.53],
114
+ std=[58.395, 57.12, 57.375],
115
+ to_rgb=True),
116
+ dict(type='Pad', size_divisor=1),
117
+ dict(type='DefaultFormatBundle'),
118
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
119
+ ]
120
+ test_pipeline = [
121
+ dict(type='LoadImageFromFile'),
122
+ dict(
123
+ type='MultiScaleFlipAug',
124
+ img_scale=(1333, 800),
125
+ flip=False,
126
+ transforms=[
127
+ dict(type='Resize', keep_ratio=True),
128
+ dict(type='RandomFlip'),
129
+ dict(
130
+ type='Normalize',
131
+ mean=[123.675, 116.28, 103.53],
132
+ std=[58.395, 57.12, 57.375],
133
+ to_rgb=True),
134
+ dict(type='Pad', size_divisor=1),
135
+ dict(type='ImageToTensor', keys=['img']),
136
+ dict(type='Collect', keys=['img'])
137
+ ])
138
+ ]
139
+ data = dict(
140
+ samples_per_gpu=2,
141
+ workers_per_gpu=2,
142
+ train=dict(
143
+ type='CocoDataset',
144
+ ann_file='data/coco/annotations/instances_train2017.json',
145
+ img_prefix='data/coco/train2017/',
146
+ pipeline=[
147
+ dict(type='LoadImageFromFile'),
148
+ dict(type='LoadAnnotations', with_bbox=True),
149
+ dict(type='RandomFlip', flip_ratio=0.5),
150
+ dict(
151
+ type='AutoAugment',
152
+ policies=[[{
153
+ 'type':
154
+ 'Resize',
155
+ 'img_scale': [(480, 1333), (512, 1333), (544, 1333),
156
+ (576, 1333), (608, 1333), (640, 1333),
157
+ (672, 1333), (704, 1333), (736, 1333),
158
+ (768, 1333), (800, 1333)],
159
+ 'multiscale_mode':
160
+ 'value',
161
+ 'keep_ratio':
162
+ True
163
+ }],
164
+ [{
165
+ 'type': 'Resize',
166
+ 'img_scale': [(400, 1333), (500, 1333),
167
+ (600, 1333)],
168
+ 'multiscale_mode': 'value',
169
+ 'keep_ratio': True
170
+ }, {
171
+ 'type': 'RandomCrop',
172
+ 'crop_type': 'absolute_range',
173
+ 'crop_size': (384, 600),
174
+ 'allow_negative_crop': True
175
+ }, {
176
+ 'type':
177
+ 'Resize',
178
+ 'img_scale': [(480, 1333), (512, 1333),
179
+ (544, 1333), (576, 1333),
180
+ (608, 1333), (640, 1333),
181
+ (672, 1333), (704, 1333),
182
+ (736, 1333), (768, 1333),
183
+ (800, 1333)],
184
+ 'multiscale_mode':
185
+ 'value',
186
+ 'override':
187
+ True,
188
+ 'keep_ratio':
189
+ True
190
+ }]]),
191
+ dict(
192
+ type='Normalize',
193
+ mean=[123.675, 116.28, 103.53],
194
+ std=[58.395, 57.12, 57.375],
195
+ to_rgb=True),
196
+ dict(type='Pad', size_divisor=1),
197
+ dict(type='DefaultFormatBundle'),
198
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
199
+ ]),
200
+ val=dict(
201
+ type='CocoDataset',
202
+ ann_file='data/coco/annotations/instances_val2017.json',
203
+ img_prefix='data/coco/val2017/',
204
+ pipeline=[
205
+ dict(type='LoadImageFromFile'),
206
+ dict(
207
+ type='MultiScaleFlipAug',
208
+ img_scale=(1333, 800),
209
+ flip=False,
210
+ transforms=[
211
+ dict(type='Resize', keep_ratio=True),
212
+ dict(type='RandomFlip'),
213
+ dict(
214
+ type='Normalize',
215
+ mean=[123.675, 116.28, 103.53],
216
+ std=[58.395, 57.12, 57.375],
217
+ to_rgb=True),
218
+ dict(type='Pad', size_divisor=1),
219
+ dict(type='ImageToTensor', keys=['img']),
220
+ dict(type='Collect', keys=['img'])
221
+ ])
222
+ ]),
223
+ test=dict(
224
+ type='CocoDataset',
225
+ ann_file='data/coco/annotations/instances_val2017.json',
226
+ img_prefix='data/coco/val2017/',
227
+ pipeline=[
228
+ dict(type='LoadImageFromFile'),
229
+ dict(
230
+ type='MultiScaleFlipAug',
231
+ img_scale=(1333, 800),
232
+ flip=False,
233
+ transforms=[
234
+ dict(type='Resize', keep_ratio=True),
235
+ dict(type='RandomFlip'),
236
+ dict(
237
+ type='Normalize',
238
+ mean=[123.675, 116.28, 103.53],
239
+ std=[58.395, 57.12, 57.375],
240
+ to_rgb=True),
241
+ dict(type='Pad', size_divisor=1),
242
+ dict(type='ImageToTensor', keys=['img']),
243
+ dict(type='Collect', keys=['img'])
244
+ ])
245
+ ]))
246
+ evaluation = dict(
247
+ interval=1, metric='bbox', save_best='auto', gpu_collect=True)
248
+ checkpoint_config = dict(interval=1)
249
+ log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
250
+ custom_hooks = [
251
+ dict(type='NumClassCheckHook'),
252
+ dict(
253
+ type='MMDetWandbHook',
254
+ init_kwargs=dict(project='I2B', group='finetune'),
255
+ interval=50,
256
+ num_eval_images=0,
257
+ log_checkpoint=False)
258
+ ]
259
+ dist_params = dict(backend='nccl')
260
+ log_level = 'INFO'
261
+ load_from = 'pretrain/selfsup_detr_clusters-as-classes_add-contrastive-temp0.5-weight1.0/final_model.pth'
262
+ resume_from = None
263
+ workflow = [('train', 1)]
264
+ opencv_num_threads = 0
265
+ mp_start_method = 'fork'
266
+ auto_scale_lr = dict(enable=False, base_batch_size=16)
267
+ custom_imports = None
268
+ norm_cfg = dict(type='SyncBN', requires_grad=True)
269
+ optimizer = dict(
270
+ type='AdamW',
271
+ lr=0.0002,
272
+ weight_decay=0.0001,
273
+ paramwise_cfg=dict(
274
+ custom_keys=dict(backbone=dict(lr_mult=0.1, decay_mult=1.0))))
275
+ optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))
276
+ lr_config = dict(policy='step', step=[80])
277
+ runner = dict(type='EpochBasedRunner', max_epochs=100)
278
+ work_dir = 'work_dirs/finetune_detr_100e_coco_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0_lr-mul-0.1'
279
+ auto_resume = False
280
+ gpu_ids = range(0, 16)
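This run fine-tunes DETR-R50 for 100 epochs; the lr-mul-0.1 suffix refers to paramwise_cfg, which multiplies the backbone's learning rate by 0.1 relative to the base AdamW lr of 2e-4, and gpu_ids = range(0, 16) with samples_per_gpu=2 gives an effective batch size of 32. The arithmetic, spelled out:

base_lr = 2e-4           # optimizer.lr
backbone_lr_mult = 0.1   # paramwise_cfg custom_keys['backbone'].lr_mult
gpus, samples_per_gpu = 16, 2

effective_batch_size = gpus * samples_per_gpu   # 32
backbone_lr = base_lr * backbone_lr_mult        # 2e-05
print(effective_batch_size, backbone_lr)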
finetune/finetune_detr_100e_voc0712/20221109_194054.log ADDED
The diff for this file is too large to render. See raw diff
 
finetune/finetune_detr_100e_voc0712/20221109_194054.log.json ADDED
The diff for this file is too large to render. See raw diff
 
finetune/finetune_detr_100e_voc0712/best_mAP_epoch_93.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab010ac6a87fe81e8b02dc5f4535671d19b9b5f2768a63f05cadd832071c0010
+ size 497551580
finetune/finetune_detr_100e_voc0712/detr_mstrain_100e_voc0712.py ADDED
@@ -0,0 +1,218 @@
1
+ model = dict(
2
+ type='DETR',
3
+ backbone=dict(
4
+ type='ResNet',
5
+ depth=50,
6
+ num_stages=4,
7
+ out_indices=(3, ),
8
+ frozen_stages=1,
9
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
10
+ norm_eval=True,
11
+ style='pytorch',
12
+ init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
13
+ bbox_head=dict(
14
+ type='DETRHead',
15
+ num_classes=20,
16
+ in_channels=2048,
17
+ transformer=dict(
18
+ type='Transformer',
19
+ encoder=dict(
20
+ type='DetrTransformerEncoder',
21
+ num_layers=6,
22
+ transformerlayers=dict(
23
+ type='BaseTransformerLayer',
24
+ attn_cfgs=[
25
+ dict(
26
+ type='MultiheadAttention',
27
+ embed_dims=256,
28
+ num_heads=8,
29
+ dropout=0.1)
30
+ ],
31
+ feedforward_channels=2048,
32
+ ffn_dropout=0.1,
33
+ operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
34
+ decoder=dict(
35
+ type='DetrTransformerDecoder',
36
+ return_intermediate=True,
37
+ num_layers=6,
38
+ transformerlayers=dict(
39
+ type='DetrTransformerDecoderLayer',
40
+ attn_cfgs=dict(
41
+ type='MultiheadAttention',
42
+ embed_dims=256,
43
+ num_heads=8,
44
+ dropout=0.1),
45
+ feedforward_channels=2048,
46
+ ffn_dropout=0.1,
47
+ operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
48
+ 'ffn', 'norm')))),
49
+ positional_encoding=dict(
50
+ type='SinePositionalEncoding', num_feats=128, normalize=True),
51
+ loss_cls=dict(
52
+ type='CrossEntropyLoss',
53
+ bg_cls_weight=0.1,
54
+ use_sigmoid=False,
55
+ loss_weight=1.0,
56
+ class_weight=1.0),
57
+ loss_bbox=dict(type='L1Loss', loss_weight=5.0),
58
+ loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
59
+ train_cfg=dict(
60
+ assigner=dict(
61
+ type='HungarianAssigner',
62
+ cls_cost=dict(type='ClassificationCost', weight=1.0),
63
+ reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
64
+ iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))),
65
+ test_cfg=dict(max_per_img=100))
66
+ dataset_type = 'VOCDataset'
67
+ data_root = 'data/VOCdevkit/'
68
+ img_norm_cfg = dict(
69
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
70
+ train_pipeline = [
71
+ dict(type='LoadImageFromFile'),
72
+ dict(type='LoadAnnotations', with_bbox=True),
73
+ dict(
74
+ type='Resize',
75
+ img_scale=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),
76
+ (1333, 608), (1333, 640), (1333, 672), (1333, 704),
77
+ (1333, 736), (1333, 768), (1333, 800)],
78
+ multiscale_mode='value',
79
+ keep_ratio=True),
80
+ dict(type='RandomFlip', flip_ratio=0.5),
81
+ dict(
82
+ type='Normalize',
83
+ mean=[123.675, 116.28, 103.53],
84
+ std=[58.395, 57.12, 57.375],
85
+ to_rgb=True),
86
+ dict(type='Pad', size_divisor=32),
87
+ dict(type='DefaultFormatBundle'),
88
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
89
+ ]
90
+ test_pipeline = [
91
+ dict(type='LoadImageFromFile'),
92
+ dict(
93
+ type='MultiScaleFlipAug',
94
+ img_scale=(1333, 800),
95
+ flip=False,
96
+ transforms=[
97
+ dict(type='Resize', keep_ratio=True),
98
+ dict(type='RandomFlip'),
99
+ dict(
100
+ type='Normalize',
101
+ mean=[123.675, 116.28, 103.53],
102
+ std=[58.395, 57.12, 57.375],
103
+ to_rgb=True),
104
+ dict(type='Pad', size_divisor=32),
105
+ dict(type='ImageToTensor', keys=['img']),
106
+ dict(type='Collect', keys=['img'])
107
+ ])
108
+ ]
109
+ data = dict(
110
+ samples_per_gpu=2,
111
+ workers_per_gpu=2,
112
+ train=dict(
113
+ type='VOCDataset',
114
+ ann_file=[
115
+ 'data/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt',
116
+ 'data/VOCdevkit/VOC2012/ImageSets/Main/trainval.txt'
117
+ ],
118
+ img_prefix=['data/VOCdevkit/VOC2007/', 'data/VOCdevkit/VOC2012/'],
119
+ pipeline=[
120
+ dict(type='LoadImageFromFile'),
121
+ dict(type='LoadAnnotations', with_bbox=True),
122
+ dict(
123
+ type='Resize',
124
+ img_scale=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),
125
+ (1333, 608), (1333, 640), (1333, 672), (1333, 704),
126
+ (1333, 736), (1333, 768), (1333, 800)],
127
+ multiscale_mode='value',
128
+ keep_ratio=True),
129
+ dict(type='RandomFlip', flip_ratio=0.5),
130
+ dict(
131
+ type='Normalize',
132
+ mean=[123.675, 116.28, 103.53],
133
+ std=[58.395, 57.12, 57.375],
134
+ to_rgb=True),
135
+ dict(type='Pad', size_divisor=32),
136
+ dict(type='DefaultFormatBundle'),
137
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
138
+ ]),
139
+ val=dict(
140
+ type='VOCDataset',
141
+ ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
142
+ img_prefix='data/VOCdevkit/VOC2007/',
143
+ pipeline=[
144
+ dict(type='LoadImageFromFile'),
145
+ dict(
146
+ type='MultiScaleFlipAug',
147
+ img_scale=(1333, 800),
148
+ flip=False,
149
+ transforms=[
150
+ dict(type='Resize', keep_ratio=True),
151
+ dict(type='RandomFlip'),
152
+ dict(
153
+ type='Normalize',
154
+ mean=[123.675, 116.28, 103.53],
155
+ std=[58.395, 57.12, 57.375],
156
+ to_rgb=True),
157
+ dict(type='Pad', size_divisor=32),
158
+ dict(type='ImageToTensor', keys=['img']),
159
+ dict(type='Collect', keys=['img'])
160
+ ])
161
+ ]),
162
+ test=dict(
163
+ type='VOCDataset',
164
+ ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
165
+ img_prefix='data/VOCdevkit/VOC2007/',
166
+ pipeline=[
167
+ dict(type='LoadImageFromFile'),
168
+ dict(
169
+ type='MultiScaleFlipAug',
170
+ img_scale=(1333, 800),
171
+ flip=False,
172
+ transforms=[
173
+ dict(type='Resize', keep_ratio=True),
174
+ dict(type='RandomFlip'),
175
+ dict(
176
+ type='Normalize',
177
+ mean=[123.675, 116.28, 103.53],
178
+ std=[58.395, 57.12, 57.375],
179
+ to_rgb=True),
180
+ dict(type='Pad', size_divisor=32),
181
+ dict(type='ImageToTensor', keys=['img']),
182
+ dict(type='Collect', keys=['img'])
183
+ ])
184
+ ]))
185
+ evaluation = dict(interval=1, metric='mAP', save_best='auto')
186
+ checkpoint_config = dict(interval=1)
187
+ log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
188
+ custom_hooks = [
189
+ dict(type='NumClassCheckHook'),
190
+ dict(
191
+ type='MMDetWandbHook',
192
+ init_kwargs=dict(project='I2B', group='finetune'),
193
+ interval=50,
194
+ num_eval_images=0,
195
+ log_checkpoint=False)
196
+ ]
197
+ dist_params = dict(backend='nccl')
198
+ log_level = 'INFO'
199
+ load_from = 'pretrain/selfsup_detr_clusters-as-classes_add-contrastive-temp0.5-weight1.0/final_model.pth'
200
+ resume_from = None
201
+ workflow = [('train', 1)]
202
+ opencv_num_threads = 0
203
+ mp_start_method = 'fork'
204
+ auto_scale_lr = dict(enable=False, base_batch_size=16)
205
+ custom_imports = None
206
+ norm_cfg = dict(type='SyncBN', requires_grad=True)
207
+ optimizer = dict(
208
+ type='AdamW',
209
+ lr=0.0001,
210
+ weight_decay=0.0001,
211
+ paramwise_cfg=dict(
212
+ custom_keys=dict(backbone=dict(lr_mult=0.1, decay_mult=1.0))))
213
+ optimizer_config = dict(grad_clip=None)
214
+ lr_config = dict(policy='step', step=[70])
215
+ runner = dict(type='EpochBasedRunner', max_epochs=100)
216
+ work_dir = 'work_dirs/finetune_detr_100e_voc0712'
217
+ auto_resume = False
218
+ gpu_ids = range(0, 8)
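The VOC07+12 run freezes the first ResNet stage, trains for 100 epochs at a base AdamW lr of 1e-4, and drops the learning rate once via lr_config = dict(policy='step', step=[70]). A small sketch of the resulting per-epoch schedule, assuming MMCV's default step decay factor of 0.1 and no warmup (none is configured here):

def lr_at_epoch(epoch: int, base_lr: float = 1e-4, steps=(70,), gamma: float = 0.1) -> float:
    """Step policy: multiply by gamma once each milestone epoch has been reached."""
    drops = sum(epoch >= s for s in steps)
    return base_lr * gamma ** drops

print(lr_at_epoch(0), lr_at_epoch(69), lr_at_epoch(70), lr_at_epoch(99))
# 0.0001 0.0001 1e-05 1e-05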
finetune/finetune_detr_150e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0/20221105_015109.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e654c2ce766c7091853cfaae19117141acdbbe546b8de0cd9584e3c6e6e9cede
+ size 12407908
finetune/finetune_detr_150e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0/20221105_015109.log.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c303707df2ddcbec12db613ff873af8bea57a5aef33af4c7684f7bd5cee8f84
+ size 12772012
finetune/finetune_detr_150e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0/best_bbox_mAP_epoch_139.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6a57209f79e6b91db40c7c91c28a52ec3ecaa34ebf1045b514399d64fe32c78
+ size 499563034
finetune/finetune_detr_150e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0/detr_r50_8x2_150e_coco.py ADDED
@@ -0,0 +1,280 @@
1
+ model = dict(
2
+ type='DETR',
3
+ backbone=dict(
4
+ type='ResNet',
5
+ depth=50,
6
+ num_stages=4,
7
+ out_indices=(3, ),
8
+ frozen_stages=-1,
9
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
10
+ norm_eval=False,
11
+ style='pytorch',
12
+ init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
13
+ bbox_head=dict(
14
+ type='DETRHead',
15
+ num_classes=80,
16
+ in_channels=2048,
17
+ transformer=dict(
18
+ type='Transformer',
19
+ encoder=dict(
20
+ type='DetrTransformerEncoder',
21
+ num_layers=6,
22
+ transformerlayers=dict(
23
+ type='BaseTransformerLayer',
24
+ attn_cfgs=[
25
+ dict(
26
+ type='MultiheadAttention',
27
+ embed_dims=256,
28
+ num_heads=8,
29
+ dropout=0.1)
30
+ ],
31
+ feedforward_channels=2048,
32
+ ffn_dropout=0.1,
33
+ operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
34
+ decoder=dict(
35
+ type='DetrTransformerDecoder',
36
+ return_intermediate=True,
37
+ num_layers=6,
38
+ transformerlayers=dict(
39
+ type='DetrTransformerDecoderLayer',
40
+ attn_cfgs=dict(
41
+ type='MultiheadAttention',
42
+ embed_dims=256,
43
+ num_heads=8,
44
+ dropout=0.1),
45
+ feedforward_channels=2048,
46
+ ffn_dropout=0.1,
47
+ operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
48
+ 'ffn', 'norm')))),
49
+ positional_encoding=dict(
50
+ type='SinePositionalEncoding', num_feats=128, normalize=True),
51
+ loss_cls=dict(
52
+ type='CrossEntropyLoss',
53
+ bg_cls_weight=0.1,
54
+ use_sigmoid=False,
55
+ loss_weight=1.0,
56
+ class_weight=1.0),
57
+ loss_bbox=dict(type='L1Loss', loss_weight=5.0),
58
+ loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
59
+ train_cfg=dict(
60
+ assigner=dict(
61
+ type='HungarianAssigner',
62
+ cls_cost=dict(type='ClassificationCost', weight=1.0),
63
+ reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
64
+ iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))),
65
+ test_cfg=dict(max_per_img=100))
66
+ dataset_type = 'CocoDataset'
67
+ data_root = 'data/coco/'
68
+ img_norm_cfg = dict(
69
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
70
+ train_pipeline = [
71
+ dict(type='LoadImageFromFile'),
72
+ dict(type='LoadAnnotations', with_bbox=True),
73
+ dict(type='RandomFlip', flip_ratio=0.5),
74
+ dict(
75
+ type='AutoAugment',
76
+ policies=[[{
77
+ 'type':
78
+ 'Resize',
79
+ 'img_scale': [(480, 1333), (512, 1333), (544, 1333), (576, 1333),
80
+ (608, 1333), (640, 1333), (672, 1333), (704, 1333),
81
+ (736, 1333), (768, 1333), (800, 1333)],
82
+ 'multiscale_mode':
83
+ 'value',
84
+ 'keep_ratio':
85
+ True
86
+ }],
87
+ [{
88
+ 'type': 'Resize',
89
+ 'img_scale': [(400, 1333), (500, 1333), (600, 1333)],
90
+ 'multiscale_mode': 'value',
91
+ 'keep_ratio': True
92
+ }, {
93
+ 'type': 'RandomCrop',
94
+ 'crop_type': 'absolute_range',
95
+ 'crop_size': (384, 600),
96
+ 'allow_negative_crop': True
97
+ }, {
98
+ 'type':
99
+ 'Resize',
100
+ 'img_scale': [(480, 1333), (512, 1333), (544, 1333),
101
+ (576, 1333), (608, 1333), (640, 1333),
102
+ (672, 1333), (704, 1333), (736, 1333),
103
+ (768, 1333), (800, 1333)],
104
+ 'multiscale_mode':
105
+ 'value',
106
+ 'override':
107
+ True,
108
+ 'keep_ratio':
109
+ True
110
+ }]]),
111
+ dict(
112
+ type='Normalize',
113
+ mean=[123.675, 116.28, 103.53],
114
+ std=[58.395, 57.12, 57.375],
115
+ to_rgb=True),
116
+ dict(type='Pad', size_divisor=1),
117
+ dict(type='DefaultFormatBundle'),
118
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
119
+ ]
120
+ test_pipeline = [
121
+ dict(type='LoadImageFromFile'),
122
+ dict(
123
+ type='MultiScaleFlipAug',
124
+ img_scale=(1333, 800),
125
+ flip=False,
126
+ transforms=[
127
+ dict(type='Resize', keep_ratio=True),
128
+ dict(type='RandomFlip'),
129
+ dict(
130
+ type='Normalize',
131
+ mean=[123.675, 116.28, 103.53],
132
+ std=[58.395, 57.12, 57.375],
133
+ to_rgb=True),
134
+ dict(type='Pad', size_divisor=1),
135
+ dict(type='ImageToTensor', keys=['img']),
136
+ dict(type='Collect', keys=['img'])
137
+ ])
138
+ ]
139
+ data = dict(
140
+ samples_per_gpu=2,
141
+ workers_per_gpu=2,
142
+ train=dict(
143
+ type='CocoDataset',
144
+ ann_file='data/coco/annotations/instances_train2017.json',
145
+ img_prefix='data/coco/train2017/',
146
+ pipeline=[
147
+ dict(type='LoadImageFromFile'),
148
+ dict(type='LoadAnnotations', with_bbox=True),
149
+ dict(type='RandomFlip', flip_ratio=0.5),
150
+ dict(
151
+ type='AutoAugment',
152
+ policies=[[{
153
+ 'type':
154
+ 'Resize',
155
+ 'img_scale': [(480, 1333), (512, 1333), (544, 1333),
156
+ (576, 1333), (608, 1333), (640, 1333),
157
+ (672, 1333), (704, 1333), (736, 1333),
158
+ (768, 1333), (800, 1333)],
159
+ 'multiscale_mode':
160
+ 'value',
161
+ 'keep_ratio':
162
+ True
163
+ }],
164
+ [{
165
+ 'type': 'Resize',
166
+ 'img_scale': [(400, 1333), (500, 1333),
167
+ (600, 1333)],
168
+ 'multiscale_mode': 'value',
169
+ 'keep_ratio': True
170
+ }, {
171
+ 'type': 'RandomCrop',
172
+ 'crop_type': 'absolute_range',
173
+ 'crop_size': (384, 600),
174
+ 'allow_negative_crop': True
175
+ }, {
176
+ 'type':
177
+ 'Resize',
178
+ 'img_scale': [(480, 1333), (512, 1333),
179
+ (544, 1333), (576, 1333),
180
+ (608, 1333), (640, 1333),
181
+ (672, 1333), (704, 1333),
182
+ (736, 1333), (768, 1333),
183
+ (800, 1333)],
184
+ 'multiscale_mode':
185
+ 'value',
186
+ 'override':
187
+ True,
188
+ 'keep_ratio':
189
+ True
190
+ }]]),
191
+ dict(
192
+ type='Normalize',
193
+ mean=[123.675, 116.28, 103.53],
194
+ std=[58.395, 57.12, 57.375],
195
+ to_rgb=True),
196
+ dict(type='Pad', size_divisor=1),
197
+ dict(type='DefaultFormatBundle'),
198
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
199
+ ]),
200
+ val=dict(
201
+ type='CocoDataset',
202
+ ann_file='data/coco/annotations/instances_val2017.json',
203
+ img_prefix='data/coco/val2017/',
204
+ pipeline=[
205
+ dict(type='LoadImageFromFile'),
206
+ dict(
207
+ type='MultiScaleFlipAug',
208
+ img_scale=(1333, 800),
209
+ flip=False,
210
+ transforms=[
211
+ dict(type='Resize', keep_ratio=True),
212
+ dict(type='RandomFlip'),
213
+ dict(
214
+ type='Normalize',
215
+ mean=[123.675, 116.28, 103.53],
216
+ std=[58.395, 57.12, 57.375],
217
+ to_rgb=True),
218
+ dict(type='Pad', size_divisor=1),
219
+ dict(type='ImageToTensor', keys=['img']),
220
+ dict(type='Collect', keys=['img'])
221
+ ])
222
+ ]),
223
+ test=dict(
224
+ type='CocoDataset',
225
+ ann_file='data/coco/annotations/instances_val2017.json',
226
+ img_prefix='data/coco/val2017/',
227
+ pipeline=[
228
+ dict(type='LoadImageFromFile'),
229
+ dict(
230
+ type='MultiScaleFlipAug',
231
+ img_scale=(1333, 800),
232
+ flip=False,
233
+ transforms=[
234
+ dict(type='Resize', keep_ratio=True),
235
+ dict(type='RandomFlip'),
236
+ dict(
237
+ type='Normalize',
238
+ mean=[123.675, 116.28, 103.53],
239
+ std=[58.395, 57.12, 57.375],
240
+ to_rgb=True),
241
+ dict(type='Pad', size_divisor=1),
242
+ dict(type='ImageToTensor', keys=['img']),
243
+ dict(type='Collect', keys=['img'])
244
+ ])
245
+ ]))
246
+ evaluation = dict(
247
+ interval=1, metric='bbox', save_best='auto', gpu_collect=True)
248
+ checkpoint_config = dict(interval=1)
249
+ log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
250
+ custom_hooks = [
251
+ dict(type='NumClassCheckHook'),
252
+ dict(
253
+ type='MMDetWandbHook',
254
+ init_kwargs=dict(project='I2B', group='finetune'),
255
+ interval=50,
256
+ num_eval_images=0,
257
+ log_checkpoint=False)
258
+ ]
259
+ dist_params = dict(backend='nccl')
260
+ log_level = 'INFO'
261
+ load_from = 'work_dirs/selfsup_detr_clusters-as-classes_add-contrastive-temp0.5-weight1.0/final_model.pth'
262
+ resume_from = None
263
+ workflow = [('train', 1)]
264
+ opencv_num_threads = 0
265
+ mp_start_method = 'fork'
266
+ auto_scale_lr = dict(enable=False, base_batch_size=16)
267
+ custom_imports = None
268
+ norm_cfg = dict(type='SyncBN', requires_grad=True)
269
+ optimizer = dict(
270
+ type='AdamW',
271
+ lr=0.0001,
272
+ weight_decay=0.0001,
273
+ paramwise_cfg=dict(
274
+ custom_keys=dict(backbone=dict(lr_mult=0.1, decay_mult=1.0))))
275
+ optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))
276
+ lr_config = dict(policy='step', step=[100])
277
+ runner = dict(type='EpochBasedRunner', max_epochs=150)
278
+ work_dir = 'work_dirs/finetune_detr_150e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0'
279
+ auto_resume = False
280
+ gpu_ids = range(0, 8)
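Relative to the 100-epoch variant, this config trains for 150 epochs with the decay step at epoch 100 and keeps gradient clipping enabled (grad_clip max_norm=0.1, L2 norm), which MMCV applies inside its OptimizerHook. Outside the runner, the equivalent is a single PyTorch call; a hedged sketch on a stand-in module:

import torch
from torch import nn

model = nn.Linear(8, 4)                  # stand-in for the detector
loss = model(torch.randn(2, 8)).sum()
loss.backward()
# Equivalent of optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))
total_norm = nn.utils.clip_grad_norm_(model.parameters(), max_norm=0.1, norm_type=2)
print(float(total_norm))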
finetune/finetune_detr_150e_coco_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0_swav_lr-mul-0.4/20221107_215710.log ADDED
The diff for this file is too large to render. See raw diff
 
finetune/finetune_detr_150e_coco_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0_swav_lr-mul-0.4/20221107_215710.log.json ADDED
The diff for this file is too large to render. See raw diff
 
finetune/finetune_detr_150e_coco_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0_swav_lr-mul-0.4/best_bbox_mAP_epoch_143.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a50ef6c9f605b6c0f9606834e2db57061eb470d15ecc59ebff46e2eb5be4aaf7
+ size 499563098
finetune/finetune_detr_150e_coco_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0_swav_lr-mul-0.4/detr_r50_8x2_150e_coco.py ADDED
@@ -0,0 +1,280 @@
1
+ model = dict(
2
+ type='DETR',
3
+ backbone=dict(
4
+ type='ResNet',
5
+ depth=50,
6
+ num_stages=4,
7
+ out_indices=(3, ),
8
+ frozen_stages=-1,
9
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
10
+ norm_eval=False,
11
+ style='pytorch',
12
+ init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
13
+ bbox_head=dict(
14
+ type='DETRHead',
15
+ num_classes=80,
16
+ in_channels=2048,
17
+ transformer=dict(
18
+ type='Transformer',
19
+ encoder=dict(
20
+ type='DetrTransformerEncoder',
21
+ num_layers=6,
22
+ transformerlayers=dict(
23
+ type='BaseTransformerLayer',
24
+ attn_cfgs=[
25
+ dict(
26
+ type='MultiheadAttention',
27
+ embed_dims=256,
28
+ num_heads=8,
29
+ dropout=0.1)
30
+ ],
31
+ feedforward_channels=2048,
32
+ ffn_dropout=0.1,
33
+ operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
34
+ decoder=dict(
35
+ type='DetrTransformerDecoder',
36
+ return_intermediate=True,
37
+ num_layers=6,
38
+ transformerlayers=dict(
39
+ type='DetrTransformerDecoderLayer',
40
+ attn_cfgs=dict(
41
+ type='MultiheadAttention',
42
+ embed_dims=256,
43
+ num_heads=8,
44
+ dropout=0.1),
45
+ feedforward_channels=2048,
46
+ ffn_dropout=0.1,
47
+ operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
48
+ 'ffn', 'norm')))),
49
+ positional_encoding=dict(
50
+ type='SinePositionalEncoding', num_feats=128, normalize=True),
51
+ loss_cls=dict(
52
+ type='CrossEntropyLoss',
53
+ bg_cls_weight=0.1,
54
+ use_sigmoid=False,
55
+ loss_weight=1.0,
56
+ class_weight=1.0),
57
+ loss_bbox=dict(type='L1Loss', loss_weight=5.0),
58
+ loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
59
+ train_cfg=dict(
60
+ assigner=dict(
61
+ type='HungarianAssigner',
62
+ cls_cost=dict(type='ClassificationCost', weight=1.0),
63
+ reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
64
+ iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))),
65
+ test_cfg=dict(max_per_img=100))
66
+ dataset_type = 'CocoDataset'
67
+ data_root = 'data/coco/'
68
+ img_norm_cfg = dict(
69
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
70
+ train_pipeline = [
71
+ dict(type='LoadImageFromFile'),
72
+ dict(type='LoadAnnotations', with_bbox=True),
73
+ dict(type='RandomFlip', flip_ratio=0.5),
74
+ dict(
75
+ type='AutoAugment',
76
+ policies=[[{
77
+ 'type':
78
+ 'Resize',
79
+ 'img_scale': [(480, 1333), (512, 1333), (544, 1333), (576, 1333),
80
+ (608, 1333), (640, 1333), (672, 1333), (704, 1333),
81
+ (736, 1333), (768, 1333), (800, 1333)],
82
+ 'multiscale_mode':
83
+ 'value',
84
+ 'keep_ratio':
85
+ True
86
+ }],
87
+ [{
88
+ 'type': 'Resize',
89
+ 'img_scale': [(400, 1333), (500, 1333), (600, 1333)],
90
+ 'multiscale_mode': 'value',
91
+ 'keep_ratio': True
92
+ }, {
93
+ 'type': 'RandomCrop',
94
+ 'crop_type': 'absolute_range',
95
+ 'crop_size': (384, 600),
96
+ 'allow_negative_crop': True
97
+ }, {
98
+ 'type':
99
+ 'Resize',
100
+ 'img_scale': [(480, 1333), (512, 1333), (544, 1333),
101
+ (576, 1333), (608, 1333), (640, 1333),
102
+ (672, 1333), (704, 1333), (736, 1333),
103
+ (768, 1333), (800, 1333)],
104
+ 'multiscale_mode':
105
+ 'value',
106
+ 'override':
107
+ True,
108
+ 'keep_ratio':
109
+ True
110
+ }]]),
111
+ dict(
112
+ type='Normalize',
113
+ mean=[123.675, 116.28, 103.53],
114
+ std=[58.395, 57.12, 57.375],
115
+ to_rgb=True),
116
+ dict(type='Pad', size_divisor=1),
117
+ dict(type='DefaultFormatBundle'),
118
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
119
+ ]
120
+ test_pipeline = [
121
+ dict(type='LoadImageFromFile'),
122
+ dict(
123
+ type='MultiScaleFlipAug',
124
+ img_scale=(1333, 800),
125
+ flip=False,
126
+ transforms=[
127
+ dict(type='Resize', keep_ratio=True),
128
+ dict(type='RandomFlip'),
129
+ dict(
130
+ type='Normalize',
131
+ mean=[123.675, 116.28, 103.53],
132
+ std=[58.395, 57.12, 57.375],
133
+ to_rgb=True),
134
+ dict(type='Pad', size_divisor=1),
135
+ dict(type='ImageToTensor', keys=['img']),
136
+ dict(type='Collect', keys=['img'])
137
+ ])
138
+ ]
139
+ data = dict(
140
+ samples_per_gpu=2,
141
+ workers_per_gpu=2,
142
+ train=dict(
143
+ type='CocoDataset',
144
+ ann_file='data/coco/annotations/instances_train2017.json',
145
+ img_prefix='data/coco/train2017/',
146
+ pipeline=[
147
+ dict(type='LoadImageFromFile'),
148
+ dict(type='LoadAnnotations', with_bbox=True),
149
+ dict(type='RandomFlip', flip_ratio=0.5),
150
+ dict(
151
+ type='AutoAugment',
152
+ policies=[[{
153
+ 'type':
154
+ 'Resize',
155
+ 'img_scale': [(480, 1333), (512, 1333), (544, 1333),
156
+ (576, 1333), (608, 1333), (640, 1333),
157
+ (672, 1333), (704, 1333), (736, 1333),
158
+ (768, 1333), (800, 1333)],
159
+ 'multiscale_mode':
160
+ 'value',
161
+ 'keep_ratio':
162
+ True
163
+ }],
164
+ [{
165
+ 'type': 'Resize',
166
+ 'img_scale': [(400, 1333), (500, 1333),
167
+ (600, 1333)],
168
+ 'multiscale_mode': 'value',
169
+ 'keep_ratio': True
170
+ }, {
171
+ 'type': 'RandomCrop',
172
+ 'crop_type': 'absolute_range',
173
+ 'crop_size': (384, 600),
174
+ 'allow_negative_crop': True
175
+ }, {
176
+ 'type':
177
+ 'Resize',
178
+ 'img_scale': [(480, 1333), (512, 1333),
179
+ (544, 1333), (576, 1333),
180
+ (608, 1333), (640, 1333),
181
+ (672, 1333), (704, 1333),
182
+ (736, 1333), (768, 1333),
183
+ (800, 1333)],
184
+ 'multiscale_mode':
185
+ 'value',
186
+ 'override':
187
+ True,
188
+ 'keep_ratio':
189
+ True
190
+ }]]),
191
+ dict(
192
+ type='Normalize',
193
+ mean=[123.675, 116.28, 103.53],
194
+ std=[58.395, 57.12, 57.375],
195
+ to_rgb=True),
196
+ dict(type='Pad', size_divisor=1),
197
+ dict(type='DefaultFormatBundle'),
198
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
199
+ ]),
200
+ val=dict(
201
+ type='CocoDataset',
202
+ ann_file='data/coco/annotations/instances_val2017.json',
203
+ img_prefix='data/coco/val2017/',
204
+ pipeline=[
205
+ dict(type='LoadImageFromFile'),
206
+ dict(
207
+ type='MultiScaleFlipAug',
208
+ img_scale=(1333, 800),
209
+ flip=False,
210
+ transforms=[
211
+ dict(type='Resize', keep_ratio=True),
212
+ dict(type='RandomFlip'),
213
+ dict(
214
+ type='Normalize',
215
+ mean=[123.675, 116.28, 103.53],
216
+ std=[58.395, 57.12, 57.375],
217
+ to_rgb=True),
218
+ dict(type='Pad', size_divisor=1),
219
+ dict(type='ImageToTensor', keys=['img']),
220
+ dict(type='Collect', keys=['img'])
221
+ ])
222
+ ]),
223
+ test=dict(
224
+ type='CocoDataset',
225
+ ann_file='data/coco/annotations/instances_val2017.json',
226
+ img_prefix='data/coco/val2017/',
227
+ pipeline=[
228
+ dict(type='LoadImageFromFile'),
229
+ dict(
230
+ type='MultiScaleFlipAug',
231
+ img_scale=(1333, 800),
232
+ flip=False,
233
+ transforms=[
234
+ dict(type='Resize', keep_ratio=True),
235
+ dict(type='RandomFlip'),
236
+ dict(
237
+ type='Normalize',
238
+ mean=[123.675, 116.28, 103.53],
239
+ std=[58.395, 57.12, 57.375],
240
+ to_rgb=True),
241
+ dict(type='Pad', size_divisor=1),
242
+ dict(type='ImageToTensor', keys=['img']),
243
+ dict(type='Collect', keys=['img'])
244
+ ])
245
+ ]))
246
+ evaluation = dict(
247
+ interval=1, metric='bbox', save_best='auto', gpu_collect=True)
248
+ checkpoint_config = dict(interval=1)
249
+ log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
250
+ custom_hooks = [
251
+ dict(type='NumClassCheckHook'),
252
+ dict(
253
+ type='MMDetWandbHook',
254
+ init_kwargs=dict(project='I2B', group='finetune'),
255
+ interval=50,
256
+ num_eval_images=0,
257
+ log_checkpoint=False)
258
+ ]
259
+ dist_params = dict(backend='nccl')
260
+ log_level = 'INFO'
261
+ load_from = 'pretrain/selfsup_detr_clusters-as-classes_add-contrastive-temp0.5-weight1.0_swav/final_model.pth'
262
+ resume_from = None
263
+ workflow = [('train', 1)]
264
+ opencv_num_threads = 0
265
+ mp_start_method = 'fork'
266
+ auto_scale_lr = dict(enable=False, base_batch_size=16)
267
+ custom_imports = None
268
+ norm_cfg = dict(type='SyncBN', requires_grad=True)
269
+ optimizer = dict(
270
+ type='AdamW',
271
+ lr=0.0002,
272
+ weight_decay=0.0001,
273
+ paramwise_cfg=dict(
274
+ custom_keys=dict(backbone=dict(lr_mult=0.4, decay_mult=1.0))))
275
+ optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))
276
+ lr_config = dict(policy='step', step=[100])
277
+ runner = dict(type='EpochBasedRunner', max_epochs=150)
278
+ work_dir = 'work_dirs/finetune_detr_150e_coco_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0_swav_lr-mul-0.4'
279
+ auto_resume = False
280
+ gpu_ids = range(0, 16)
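
Note (not part of the uploaded diff): a minimal sketch of how the 150-epoch finetuning config above could be loaded and sanity-checked with MMCV 1.x before launching training. The local config path is an assumption based on the folder names in this commit.

from mmcv import Config  # MMCV 1.x config loader used by MMDetection 2.x

# Hypothetical local path to the dumped config shown above
cfg = Config.fromfile(
    'finetune/finetune_detr_150e_coco_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0_swav_lr-mul-0.4/'
    'detr_r50_8x2_150e_coco.py')

# Backbone runs at a reduced rate: lr * lr_mult = 2e-4 * 0.4 = 8e-5
print(cfg.optimizer.lr, cfg.optimizer.paramwise_cfg.custom_keys.backbone.lr_mult)
print(cfg.runner.max_epochs, cfg.lr_config.step)  # 150 epochs, LR step at epoch 100
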
finetune/finetune_detr_50e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0/20221104_003558.log ADDED
The diff for this file is too large to render. See raw diff
 
finetune/finetune_detr_50e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0/20221104_003558.log.json ADDED
The diff for this file is too large to render. See raw diff
 
finetune/finetune_detr_50e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0/best_bbox_mAP_epoch_50.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad26dc9d44ca07dccdacbf55e779be7f3fa85a4f3f47e0193c39e7426b5774b8
3
+ size 499563098
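
Note (not part of the uploaded diff): the three lines above are a Git LFS pointer, not the checkpoint itself. After pulling the real .pth file, it can be verified against the recorded oid; a minimal sketch using only the standard library, with the local path assumed to mirror the repo layout:

import hashlib

# Hypothetical local path to the pulled checkpoint
path = ('finetune/finetune_detr_50e_coco_lr-mult-0.1_selfsup-clusters-as-classes_'
        'add-contrastive-temp0.5-weight1.0/best_bbox_mAP_epoch_50.pth')

sha = hashlib.sha256()
with open(path, 'rb') as f:
    for chunk in iter(lambda: f.read(1 << 20), b''):  # stream in 1 MiB chunks
        sha.update(chunk)

# Should match the oid recorded in the LFS pointer above
print(sha.hexdigest() == 'ad26dc9d44ca07dccdacbf55e779be7f3fa85a4f3f47e0193c39e7426b5774b8')
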
finetune/finetune_detr_50e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0/detr_r50_8x2_50e_coco.py ADDED
@@ -0,0 +1,281 @@
1
+ model = dict(
2
+ type='DETR',
3
+ backbone=dict(
4
+ type='ResNet',
5
+ depth=50,
6
+ num_stages=4,
7
+ out_indices=(3, ),
8
+ frozen_stages=-1,
9
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
10
+ norm_eval=False,
11
+ style='pytorch',
12
+ init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
13
+ bbox_head=dict(
14
+ type='DETRHead',
15
+ num_classes=80,
16
+ in_channels=2048,
17
+ transformer=dict(
18
+ type='Transformer',
19
+ encoder=dict(
20
+ type='DetrTransformerEncoder',
21
+ num_layers=6,
22
+ transformerlayers=dict(
23
+ type='BaseTransformerLayer',
24
+ attn_cfgs=[
25
+ dict(
26
+ type='MultiheadAttention',
27
+ embed_dims=256,
28
+ num_heads=8,
29
+ dropout=0.1)
30
+ ],
31
+ feedforward_channels=2048,
32
+ ffn_dropout=0.1,
33
+ operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
34
+ decoder=dict(
35
+ type='DetrTransformerDecoder',
36
+ return_intermediate=True,
37
+ num_layers=6,
38
+ transformerlayers=dict(
39
+ type='DetrTransformerDecoderLayer',
40
+ attn_cfgs=dict(
41
+ type='MultiheadAttention',
42
+ embed_dims=256,
43
+ num_heads=8,
44
+ dropout=0.1),
45
+ feedforward_channels=2048,
46
+ ffn_dropout=0.1,
47
+ operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
48
+ 'ffn', 'norm')))),
49
+ positional_encoding=dict(
50
+ type='SinePositionalEncoding', num_feats=128, normalize=True),
51
+ loss_cls=dict(
52
+ type='CrossEntropyLoss',
53
+ bg_cls_weight=0.1,
54
+ use_sigmoid=False,
55
+ loss_weight=1.0,
56
+ class_weight=1.0),
57
+ loss_bbox=dict(type='L1Loss', loss_weight=5.0),
58
+ loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
59
+ train_cfg=dict(
60
+ assigner=dict(
61
+ type='HungarianAssigner',
62
+ cls_cost=dict(type='ClassificationCost', weight=1.0),
63
+ reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
64
+ iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))),
65
+ test_cfg=dict(max_per_img=100))
66
+ dataset_type = 'CocoDataset'
67
+ data_root = 'data/coco/'
68
+ img_norm_cfg = dict(
69
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
70
+ train_pipeline = [
71
+ dict(type='LoadImageFromFile'),
72
+ dict(type='LoadAnnotations', with_bbox=True),
73
+ dict(type='RandomFlip', flip_ratio=0.5),
74
+ dict(
75
+ type='AutoAugment',
76
+ policies=[[{
77
+ 'type':
78
+ 'Resize',
79
+ 'img_scale': [(480, 1333), (512, 1333), (544, 1333), (576, 1333),
80
+ (608, 1333), (640, 1333), (672, 1333), (704, 1333),
81
+ (736, 1333), (768, 1333), (800, 1333)],
82
+ 'multiscale_mode':
83
+ 'value',
84
+ 'keep_ratio':
85
+ True
86
+ }],
87
+ [{
88
+ 'type': 'Resize',
89
+ 'img_scale': [(400, 1333), (500, 1333), (600, 1333)],
90
+ 'multiscale_mode': 'value',
91
+ 'keep_ratio': True
92
+ }, {
93
+ 'type': 'RandomCrop',
94
+ 'crop_type': 'absolute_range',
95
+ 'crop_size': (384, 600),
96
+ 'allow_negative_crop': True
97
+ }, {
98
+ 'type':
99
+ 'Resize',
100
+ 'img_scale': [(480, 1333), (512, 1333), (544, 1333),
101
+ (576, 1333), (608, 1333), (640, 1333),
102
+ (672, 1333), (704, 1333), (736, 1333),
103
+ (768, 1333), (800, 1333)],
104
+ 'multiscale_mode':
105
+ 'value',
106
+ 'override':
107
+ True,
108
+ 'keep_ratio':
109
+ True
110
+ }]]),
111
+ dict(
112
+ type='Normalize',
113
+ mean=[123.675, 116.28, 103.53],
114
+ std=[58.395, 57.12, 57.375],
115
+ to_rgb=True),
116
+ dict(type='Pad', size_divisor=1),
117
+ dict(type='DefaultFormatBundle'),
118
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
119
+ ]
120
+ test_pipeline = [
121
+ dict(type='LoadImageFromFile'),
122
+ dict(
123
+ type='MultiScaleFlipAug',
124
+ img_scale=(1333, 800),
125
+ flip=False,
126
+ transforms=[
127
+ dict(type='Resize', keep_ratio=True),
128
+ dict(type='RandomFlip'),
129
+ dict(
130
+ type='Normalize',
131
+ mean=[123.675, 116.28, 103.53],
132
+ std=[58.395, 57.12, 57.375],
133
+ to_rgb=True),
134
+ dict(type='Pad', size_divisor=1),
135
+ dict(type='ImageToTensor', keys=['img']),
136
+ dict(type='Collect', keys=['img'])
137
+ ])
138
+ ]
139
+ data = dict(
140
+ samples_per_gpu=2,
141
+ workers_per_gpu=2,
142
+ train=dict(
143
+ type='CocoDataset',
144
+ ann_file='data/coco/annotations/instances_train2017.json',
145
+ img_prefix='data/coco/train2017/',
146
+ pipeline=[
147
+ dict(type='LoadImageFromFile'),
148
+ dict(type='LoadAnnotations', with_bbox=True),
149
+ dict(type='RandomFlip', flip_ratio=0.5),
150
+ dict(
151
+ type='AutoAugment',
152
+ policies=[[{
153
+ 'type':
154
+ 'Resize',
155
+ 'img_scale': [(480, 1333), (512, 1333), (544, 1333),
156
+ (576, 1333), (608, 1333), (640, 1333),
157
+ (672, 1333), (704, 1333), (736, 1333),
158
+ (768, 1333), (800, 1333)],
159
+ 'multiscale_mode':
160
+ 'value',
161
+ 'keep_ratio':
162
+ True
163
+ }],
164
+ [{
165
+ 'type': 'Resize',
166
+ 'img_scale': [(400, 1333), (500, 1333),
167
+ (600, 1333)],
168
+ 'multiscale_mode': 'value',
169
+ 'keep_ratio': True
170
+ }, {
171
+ 'type': 'RandomCrop',
172
+ 'crop_type': 'absolute_range',
173
+ 'crop_size': (384, 600),
174
+ 'allow_negative_crop': True
175
+ }, {
176
+ 'type':
177
+ 'Resize',
178
+ 'img_scale': [(480, 1333), (512, 1333),
179
+ (544, 1333), (576, 1333),
180
+ (608, 1333), (640, 1333),
181
+ (672, 1333), (704, 1333),
182
+ (736, 1333), (768, 1333),
183
+ (800, 1333)],
184
+ 'multiscale_mode':
185
+ 'value',
186
+ 'override':
187
+ True,
188
+ 'keep_ratio':
189
+ True
190
+ }]]),
191
+ dict(
192
+ type='Normalize',
193
+ mean=[123.675, 116.28, 103.53],
194
+ std=[58.395, 57.12, 57.375],
195
+ to_rgb=True),
196
+ dict(type='Pad', size_divisor=1),
197
+ dict(type='DefaultFormatBundle'),
198
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
199
+ ]),
200
+ val=dict(
201
+ type='CocoDataset',
202
+ ann_file='data/coco/annotations/instances_val2017.json',
203
+ img_prefix='data/coco/val2017/',
204
+ pipeline=[
205
+ dict(type='LoadImageFromFile'),
206
+ dict(
207
+ type='MultiScaleFlipAug',
208
+ img_scale=(1333, 800),
209
+ flip=False,
210
+ transforms=[
211
+ dict(type='Resize', keep_ratio=True),
212
+ dict(type='RandomFlip'),
213
+ dict(
214
+ type='Normalize',
215
+ mean=[123.675, 116.28, 103.53],
216
+ std=[58.395, 57.12, 57.375],
217
+ to_rgb=True),
218
+ dict(type='Pad', size_divisor=1),
219
+ dict(type='ImageToTensor', keys=['img']),
220
+ dict(type='Collect', keys=['img'])
221
+ ])
222
+ ]),
223
+ test=dict(
224
+ type='CocoDataset',
225
+ ann_file='data/coco/annotations/instances_val2017.json',
226
+ img_prefix='data/coco/val2017/',
227
+ pipeline=[
228
+ dict(type='LoadImageFromFile'),
229
+ dict(
230
+ type='MultiScaleFlipAug',
231
+ img_scale=(1333, 800),
232
+ flip=False,
233
+ transforms=[
234
+ dict(type='Resize', keep_ratio=True),
235
+ dict(type='RandomFlip'),
236
+ dict(
237
+ type='Normalize',
238
+ mean=[123.675, 116.28, 103.53],
239
+ std=[58.395, 57.12, 57.375],
240
+ to_rgb=True),
241
+ dict(type='Pad', size_divisor=1),
242
+ dict(type='ImageToTensor', keys=['img']),
243
+ dict(type='Collect', keys=['img'])
244
+ ])
245
+ ]))
246
+ evaluation = dict(
247
+ interval=1, metric='bbox', save_best='auto', gpu_collect=True)
248
+ checkpoint_config = dict(interval=1)
249
+ log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
250
+ custom_hooks = [
251
+ dict(type='NumClassCheckHook'),
252
+ dict(
253
+ type='MMDetWandbHook',
254
+ init_kwargs=dict(project='I2B', group='finetune'),
255
+ interval=50,
256
+ num_eval_images=0,
257
+ log_checkpoint=False)
258
+ ]
259
+ dist_params = dict(backend='nccl')
260
+ log_level = 'INFO'
261
+ load_from = 'work_dirs/selfsup_detr_clusters-as-classes_add-contrastive-temp0.5-weight1.0/final_model.pth'
262
+ resume_from = None
263
+ workflow = [('train', 1)]
264
+ opencv_num_threads = 0
265
+ mp_start_method = 'fork'
266
+ auto_scale_lr = dict(enable=False, base_batch_size=16)
267
+ custom_imports = None
268
+ norm_cfg = dict(type='SyncBN', requires_grad=True)
269
+ optimizer = dict(
270
+ type='AdamW',
271
+ lr=0.0001,
272
+ weight_decay=0.0001,
273
+ paramwise_cfg=dict(
274
+ custom_keys=dict(
275
+ backbone=dict(lr_mult=0.1, decay_mult=1.0), lr_mult=0.1)))
276
+ optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))
277
+ lr_config = dict(policy='step', step=[40])
278
+ runner = dict(type='EpochBasedRunner', max_epochs=50)
279
+ work_dir = 'work_dirs/finetune_detr_50e_coco_lr-mult-0.1_selfsup-clusters-as-classes_add-contrastive-temp0.5-weight1.0'
280
+ auto_resume = False
281
+ gpu_ids = range(0, 8)
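
Note (not part of the uploaded diff): in the 50-epoch config above, the backbone is slowed down through paramwise_cfg. A minimal sketch of the resulting learning rates, using plain arithmetic and assuming MMCV's default step gamma of 0.1; variable names are illustrative only.

base_lr = 1e-4          # optimizer.lr in the config above
backbone_lr_mult = 0.1  # custom_keys['backbone'].lr_mult

backbone_lr = base_lr * backbone_lr_mult  # 1e-5 for ResNet-50 parameters
head_lr = base_lr                         # 1e-4 for the DETR transformer and head

# lr_config uses policy='step' with step=[40] over 50 epochs; default gamma is 0.1
backbone_lr_after_step = backbone_lr * 0.1  # 1e-6 from epoch 40 onward
head_lr_after_step = head_lr * 0.1          # 1e-5 from epoch 40 onward
print(backbone_lr, head_lr, backbone_lr_after_step, head_lr_after_step)
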
finetune/finetune_faster-rcnn_12k_coco/20221022_163550.log ADDED
The diff for this file is too large to render. See raw diff
 
finetune/finetune_faster-rcnn_12k_coco/20221022_163550.log.json ADDED
@@ -0,0 +1,242 @@
1
+ {"env_info": "sys.platform: linux\nPython: 3.7.3 (default, Jan 22 2021, 20:04:44) [GCC 8.3.0]\nCUDA available: True\nGPU 0,1,2,3,4,5,6,7: A100-SXM-80GB\nCUDA_HOME: /usr/local/cuda\nNVCC: Cuda compilation tools, release 11.3, V11.3.109\nGCC: x86_64-linux-gnu-gcc (Debian 8.3.0-6) 8.3.0\nPyTorch: 1.10.0\nPyTorch compiling details: PyTorch built with:\n - GCC 7.3\n - C++ Version: 201402\n - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\n - Intel(R) MKL-DNN v2.2.3 (Git Hash 7336ca9f055cf1bfa13efb658fe15dc9b41f0740)\n - OpenMP 201511 (a.k.a. OpenMP 4.5)\n - LAPACK is enabled (usually provided by MKL)\n - NNPACK is enabled\n - CPU capability usage: AVX512\n - CUDA Runtime 11.3\n - NVCC architecture flags: -gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\n - CuDNN 8.2\n - Magma 2.5.2\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.10.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, \n\nTorchVision: 0.11.1+cu113\nOpenCV: 4.6.0\nMMCV: 1.6.1\nMMCV Compiler: GCC 9.3\nMMCV CUDA Compiler: 11.3\nMMDetection: 2.25.2+a7ef785", "config": "model = dict(\n type='MaskRCNN',\n backbone=dict(\n type='ResNet',\n depth=50,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n frozen_stages=1,\n norm_cfg=dict(type='SyncBN', requires_grad=True),\n norm_eval=True,\n style='pytorch',\n init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n neck=dict(\n type='FPN',\n in_channels=[256, 512, 1024, 2048],\n out_channels=256,\n num_outs=5,\n norm_cfg=dict(type='SyncBN', requires_grad=True)),\n rpn_head=dict(\n type='RPNHead',\n in_channels=256,\n feat_channels=256,\n anchor_generator=dict(\n type='AnchorGenerator',\n scales=[8],\n ratios=[0.5, 1.0, 2.0],\n strides=[4, 8, 16, 32, 64]),\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[0.0, 0.0, 0.0, 0.0],\n target_stds=[1.0, 1.0, 1.0, 1.0]),\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n roi_head=dict(\n type='StandardRoIHead',\n bbox_roi_extractor=dict(\n type='SingleRoIExtractor',\n roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n bbox_head=dict(\n type='Shared4Conv1FCBBoxHead',\n 
in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=80,\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[0.0, 0.0, 0.0, 0.0],\n target_stds=[0.1, 0.1, 0.2, 0.2]),\n reg_class_agnostic=False,\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n mask_roi_extractor=None,\n mask_head=None),\n train_cfg=dict(\n rpn=dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.7,\n neg_iou_thr=0.3,\n min_pos_iou=0.3,\n match_low_quality=True,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=256,\n pos_fraction=0.5,\n neg_pos_ub=-1,\n add_gt_as_proposals=False),\n allowed_border=-1,\n pos_weight=-1,\n debug=False),\n rpn_proposal=dict(\n nms_pre=2000,\n max_per_img=1000,\n nms=dict(type='nms', iou_threshold=0.7),\n min_bbox_size=0),\n rcnn=dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.5,\n match_low_quality=True,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True),\n mask_size=28,\n pos_weight=-1,\n debug=False)),\n test_cfg=dict(\n rpn=dict(\n nms_pre=1000,\n max_per_img=1000,\n nms=dict(type='nms', iou_threshold=0.7),\n min_bbox_size=0),\n rcnn=dict(\n score_thr=0.05,\n nms=dict(type='nms', iou_threshold=0.5),\n max_per_img=100,\n mask_thr_binary=0.5)))\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(1333, 800),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n]\ndata = dict(\n samples_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type='CocoDataset',\n ann_file='data/coco/annotations/instances_train2017.json',\n img_prefix='data/coco/train2017/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])\n ]),\n val=dict(\n type='CocoDataset',\n ann_file='data/coco/annotations/instances_val2017.json',\n img_prefix='data/coco/val2017/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(1333, 800),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 
57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ]),\n test=dict(\n type='CocoDataset',\n ann_file='data/coco/annotations/instances_val2017.json',\n img_prefix='data/coco/val2017/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(1333, 800),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ]))\nevaluation = dict(\n interval=12000, metric='bbox', save_best='auto', gpu_collect=True)\noptimizer = dict(type='SGD', lr=0.03, momentum=0.9, weight_decay=5e-05)\noptimizer_config = dict(grad_clip=None)\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=500,\n warmup_ratio=0.001,\n step=[9000, 11000],\n by_epoch=False)\nrunner = dict(type='IterBasedRunner', max_iters=12000)\ncheckpoint_config = dict(interval=12000)\nlog_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])\ncustom_hooks = [\n dict(type='NumClassCheckHook'),\n dict(\n type='MMDetWandbHook',\n init_kwargs=dict(project='I2B', group='semi-coco'),\n interval=50,\n num_eval_images=0,\n log_checkpoint=False)\n]\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from = 'pretrain/selfsup_mask-rcnn_mstrain-soft-teacher_sampler-4096_temp0.5/final_model.pth'\nresume_from = None\nworkflow = [('train', 1)]\nopencv_num_threads = 0\nmp_start_method = 'fork'\nauto_scale_lr = dict(enable=False, base_batch_size=16)\ncustom_imports = None\nnorm_cfg = dict(type='SyncBN', requires_grad=True)\nwork_dir = 'work_dirs/finetune_faster-rcnn_12k_coco'\nauto_resume = False\ngpu_ids = range(0, 8)\n", "seed": 42, "exp_name": "faster_rcnn_fpn_12k_semi-coco.py", "hook_msgs": {}}
2
+ {"mode": "train", "epoch": 1, "iter": 50, "lr": 0.00297, "memory": 4029, "data_time": 0.00871, "loss_rpn_cls": 0.5004, "loss_rpn_bbox": 0.10321, "loss_cls": 1.45083, "acc": 83.72046, "loss_bbox": 0.04998, "loss": 2.10442, "time": 0.13065}
3
+ {"mode": "train", "epoch": 1, "iter": 100, "lr": 0.00596, "memory": 4062, "data_time": 0.00742, "loss_rpn_cls": 0.22724, "loss_rpn_bbox": 0.0963, "loss_cls": 0.47765, "acc": 93.14209, "loss_bbox": 0.23758, "loss": 1.03878, "time": 0.12515}
4
+ {"mode": "train", "epoch": 1, "iter": 150, "lr": 0.00896, "memory": 4062, "data_time": 0.00716, "loss_rpn_cls": 0.15045, "loss_rpn_bbox": 0.08741, "loss_cls": 0.49875, "acc": 91.99438, "loss_bbox": 0.28502, "loss": 1.02164, "time": 0.14318}
5
+ {"mode": "train", "epoch": 1, "iter": 200, "lr": 0.01196, "memory": 4062, "data_time": 0.00739, "loss_rpn_cls": 0.09823, "loss_rpn_bbox": 0.08639, "loss_cls": 0.58495, "acc": 89.73608, "loss_bbox": 0.37037, "loss": 1.13995, "time": 0.12398}
6
+ {"mode": "train", "epoch": 1, "iter": 250, "lr": 0.01496, "memory": 4062, "data_time": 0.00718, "loss_rpn_cls": 0.09426, "loss_rpn_bbox": 0.08374, "loss_cls": 0.5577, "acc": 89.79077, "loss_bbox": 0.3555, "loss": 1.0912, "time": 0.12554}
7
+ {"mode": "train", "epoch": 1, "iter": 300, "lr": 0.01795, "memory": 4062, "data_time": 0.00719, "loss_rpn_cls": 0.08925, "loss_rpn_bbox": 0.08519, "loss_cls": 0.55754, "acc": 89.90454, "loss_bbox": 0.3238, "loss": 1.05578, "time": 0.12821}
8
+ {"mode": "train", "epoch": 1, "iter": 350, "lr": 0.02095, "memory": 4062, "data_time": 0.00695, "loss_rpn_cls": 0.08902, "loss_rpn_bbox": 0.08342, "loss_cls": 0.52477, "acc": 90.13989, "loss_bbox": 0.30322, "loss": 1.00043, "time": 0.12548}
9
+ {"mode": "train", "epoch": 1, "iter": 400, "lr": 0.02395, "memory": 4062, "data_time": 0.00707, "loss_rpn_cls": 0.08871, "loss_rpn_bbox": 0.08562, "loss_cls": 0.50535, "acc": 89.89819, "loss_bbox": 0.30567, "loss": 0.98535, "time": 0.12565}
10
+ {"mode": "train", "epoch": 1, "iter": 450, "lr": 0.02694, "memory": 4062, "data_time": 0.0071, "loss_rpn_cls": 0.0788, "loss_rpn_bbox": 0.07577, "loss_cls": 0.46605, "acc": 90.59253, "loss_bbox": 0.28171, "loss": 0.90233, "time": 0.12647}
11
+ {"mode": "train", "epoch": 1, "iter": 500, "lr": 0.02994, "memory": 4062, "data_time": 0.00728, "loss_rpn_cls": 0.08262, "loss_rpn_bbox": 0.08365, "loss_cls": 0.46881, "acc": 90.15234, "loss_bbox": 0.29078, "loss": 0.92586, "time": 0.12555}
12
+ {"mode": "train", "epoch": 1, "iter": 550, "lr": 0.03, "memory": 4062, "data_time": 0.00691, "loss_rpn_cls": 0.07897, "loss_rpn_bbox": 0.07984, "loss_cls": 0.47414, "acc": 89.82837, "loss_bbox": 0.29201, "loss": 0.92497, "time": 0.12599}
13
+ {"mode": "train", "epoch": 1, "iter": 600, "lr": 0.03, "memory": 4062, "data_time": 0.00735, "loss_rpn_cls": 0.08161, "loss_rpn_bbox": 0.07705, "loss_cls": 0.43686, "acc": 90.50366, "loss_bbox": 0.27477, "loss": 0.87029, "time": 0.12599}
14
+ {"mode": "train", "epoch": 1, "iter": 650, "lr": 0.03, "memory": 4062, "data_time": 0.007, "loss_rpn_cls": 0.0801, "loss_rpn_bbox": 0.08214, "loss_cls": 0.46587, "acc": 89.79932, "loss_bbox": 0.28519, "loss": 0.91331, "time": 0.12464}
15
+ {"mode": "train", "epoch": 1, "iter": 700, "lr": 0.03, "memory": 4062, "data_time": 0.00712, "loss_rpn_cls": 0.08007, "loss_rpn_bbox": 0.07933, "loss_cls": 0.42723, "acc": 90.28564, "loss_bbox": 0.28151, "loss": 0.86815, "time": 0.12475}
16
+ {"mode": "train", "epoch": 1, "iter": 750, "lr": 0.03, "memory": 4062, "data_time": 0.00714, "loss_rpn_cls": 0.07753, "loss_rpn_bbox": 0.07738, "loss_cls": 0.42494, "acc": 90.28223, "loss_bbox": 0.28204, "loss": 0.86189, "time": 0.1241}
17
+ {"mode": "train", "epoch": 1, "iter": 800, "lr": 0.03, "memory": 4062, "data_time": 0.00713, "loss_rpn_cls": 0.07466, "loss_rpn_bbox": 0.0783, "loss_cls": 0.41823, "acc": 90.23804, "loss_bbox": 0.28505, "loss": 0.85623, "time": 0.12462}
18
+ {"mode": "train", "epoch": 1, "iter": 850, "lr": 0.03, "memory": 4062, "data_time": 0.00684, "loss_rpn_cls": 0.0773, "loss_rpn_bbox": 0.07389, "loss_cls": 0.40628, "acc": 90.45337, "loss_bbox": 0.27241, "loss": 0.82988, "time": 0.12465}
19
+ {"mode": "train", "epoch": 1, "iter": 900, "lr": 0.03, "memory": 4062, "data_time": 0.0071, "loss_rpn_cls": 0.0739, "loss_rpn_bbox": 0.07948, "loss_cls": 0.40342, "acc": 90.31104, "loss_bbox": 0.27638, "loss": 0.83318, "time": 0.12678}
20
+ {"mode": "train", "epoch": 1, "iter": 950, "lr": 0.03, "memory": 4062, "data_time": 0.00672, "loss_rpn_cls": 0.07605, "loss_rpn_bbox": 0.0801, "loss_cls": 0.40499, "acc": 90.37134, "loss_bbox": 0.2765, "loss": 0.83765, "time": 0.12749}
21
+ {"mode": "train", "epoch": 1, "iter": 1000, "lr": 0.03, "memory": 4062, "data_time": 0.0075, "loss_rpn_cls": 0.0721, "loss_rpn_bbox": 0.07509, "loss_cls": 0.39642, "acc": 90.32324, "loss_bbox": 0.27627, "loss": 0.81989, "time": 0.12939}
22
+ {"mode": "train", "epoch": 1, "iter": 1050, "lr": 0.03, "memory": 4062, "data_time": 0.00713, "loss_rpn_cls": 0.07725, "loss_rpn_bbox": 0.07399, "loss_cls": 0.39299, "acc": 90.41211, "loss_bbox": 0.27577, "loss": 0.82, "time": 0.12668}
23
+ {"mode": "train", "epoch": 1, "iter": 1100, "lr": 0.03, "memory": 4062, "data_time": 0.00728, "loss_rpn_cls": 0.07213, "loss_rpn_bbox": 0.07631, "loss_cls": 0.40113, "acc": 90.19849, "loss_bbox": 0.28057, "loss": 0.83014, "time": 0.12435}
24
+ {"mode": "train", "epoch": 1, "iter": 1150, "lr": 0.03, "memory": 4062, "data_time": 0.00728, "loss_rpn_cls": 0.07204, "loss_rpn_bbox": 0.07066, "loss_cls": 0.3779, "acc": 90.53369, "loss_bbox": 0.27757, "loss": 0.79818, "time": 0.12447}
25
+ {"mode": "train", "epoch": 1, "iter": 1200, "lr": 0.03, "memory": 4062, "data_time": 0.00712, "loss_rpn_cls": 0.07314, "loss_rpn_bbox": 0.0754, "loss_cls": 0.37992, "acc": 90.42578, "loss_bbox": 0.26983, "loss": 0.79829, "time": 0.12701}
26
+ {"mode": "train", "epoch": 1, "iter": 1250, "lr": 0.03, "memory": 4062, "data_time": 0.00698, "loss_rpn_cls": 0.06889, "loss_rpn_bbox": 0.07831, "loss_cls": 0.37209, "acc": 90.4668, "loss_bbox": 0.27312, "loss": 0.79242, "time": 0.12628}
27
+ {"mode": "train", "epoch": 1, "iter": 1300, "lr": 0.03, "memory": 4062, "data_time": 0.00695, "loss_rpn_cls": 0.066, "loss_rpn_bbox": 0.07, "loss_cls": 0.37616, "acc": 90.38892, "loss_bbox": 0.27619, "loss": 0.78835, "time": 0.12492}
28
+ {"mode": "train", "epoch": 1, "iter": 1350, "lr": 0.03, "memory": 4062, "data_time": 0.00674, "loss_rpn_cls": 0.07232, "loss_rpn_bbox": 0.07786, "loss_cls": 0.3755, "acc": 90.26514, "loss_bbox": 0.28396, "loss": 0.80964, "time": 0.12501}
29
+ {"mode": "train", "epoch": 1, "iter": 1400, "lr": 0.03, "memory": 4062, "data_time": 0.00682, "loss_rpn_cls": 0.07325, "loss_rpn_bbox": 0.07771, "loss_cls": 0.3831, "acc": 90.22974, "loss_bbox": 0.28444, "loss": 0.81849, "time": 0.1243}
30
+ {"mode": "train", "epoch": 1, "iter": 1450, "lr": 0.03, "memory": 4062, "data_time": 0.00669, "loss_rpn_cls": 0.07464, "loss_rpn_bbox": 0.07715, "loss_cls": 0.37253, "acc": 90.42407, "loss_bbox": 0.27741, "loss": 0.80173, "time": 0.12416}
31
+ {"mode": "train", "epoch": 1, "iter": 1500, "lr": 0.03, "memory": 4062, "data_time": 0.00658, "loss_rpn_cls": 0.07007, "loss_rpn_bbox": 0.07278, "loss_cls": 0.37162, "acc": 90.31934, "loss_bbox": 0.27955, "loss": 0.79402, "time": 0.12608}
32
+ {"mode": "train", "epoch": 1, "iter": 1550, "lr": 0.03, "memory": 4062, "data_time": 0.00681, "loss_rpn_cls": 0.0702, "loss_rpn_bbox": 0.07832, "loss_cls": 0.36688, "acc": 90.49585, "loss_bbox": 0.27146, "loss": 0.78686, "time": 0.12784}
33
+ {"mode": "train", "epoch": 1, "iter": 1600, "lr": 0.03, "memory": 4062, "data_time": 0.00682, "loss_rpn_cls": 0.06484, "loss_rpn_bbox": 0.06892, "loss_cls": 0.36144, "acc": 90.59204, "loss_bbox": 0.27358, "loss": 0.76878, "time": 0.12424}
34
+ {"mode": "train", "epoch": 1, "iter": 1650, "lr": 0.03, "memory": 4062, "data_time": 0.00657, "loss_rpn_cls": 0.06564, "loss_rpn_bbox": 0.07224, "loss_cls": 0.34756, "acc": 91.01074, "loss_bbox": 0.25937, "loss": 0.74482, "time": 0.12565}
35
+ {"mode": "train", "epoch": 1, "iter": 1700, "lr": 0.03, "memory": 4062, "data_time": 0.00668, "loss_rpn_cls": 0.06691, "loss_rpn_bbox": 0.07381, "loss_cls": 0.35868, "acc": 90.59082, "loss_bbox": 0.27199, "loss": 0.77139, "time": 0.12504}
36
+ {"mode": "train", "epoch": 1, "iter": 1750, "lr": 0.03, "memory": 4062, "data_time": 0.00673, "loss_rpn_cls": 0.06968, "loss_rpn_bbox": 0.0748, "loss_cls": 0.37488, "acc": 90.18408, "loss_bbox": 0.29048, "loss": 0.80985, "time": 0.12586}
37
+ {"mode": "train", "epoch": 1, "iter": 1800, "lr": 0.03, "memory": 4062, "data_time": 0.00686, "loss_rpn_cls": 0.06506, "loss_rpn_bbox": 0.07304, "loss_cls": 0.33605, "acc": 91.10498, "loss_bbox": 0.25741, "loss": 0.73156, "time": 0.12818}
38
+ {"mode": "train", "epoch": 1, "iter": 1850, "lr": 0.03, "memory": 4062, "data_time": 0.00671, "loss_rpn_cls": 0.06277, "loss_rpn_bbox": 0.0736, "loss_cls": 0.37249, "acc": 90.28662, "loss_bbox": 0.27966, "loss": 0.78852, "time": 0.12526}
39
+ {"mode": "train", "epoch": 1, "iter": 1900, "lr": 0.03, "memory": 4062, "data_time": 0.00685, "loss_rpn_cls": 0.07559, "loss_rpn_bbox": 0.07904, "loss_cls": 0.35365, "acc": 90.5332, "loss_bbox": 0.27409, "loss": 0.78237, "time": 0.12688}
40
+ {"mode": "train", "epoch": 1, "iter": 1950, "lr": 0.03, "memory": 4062, "data_time": 0.00713, "loss_rpn_cls": 0.06915, "loss_rpn_bbox": 0.07222, "loss_cls": 0.34881, "acc": 90.6167, "loss_bbox": 0.27289, "loss": 0.76306, "time": 0.1266}
41
+ {"mode": "train", "epoch": 1, "iter": 2000, "lr": 0.03, "memory": 4062, "data_time": 0.00676, "loss_rpn_cls": 0.0623, "loss_rpn_bbox": 0.06677, "loss_cls": 0.3421, "acc": 90.92041, "loss_bbox": 0.26263, "loss": 0.73381, "time": 0.12749}
42
+ {"mode": "train", "epoch": 1, "iter": 2050, "lr": 0.03, "memory": 4062, "data_time": 0.00683, "loss_rpn_cls": 0.06632, "loss_rpn_bbox": 0.07268, "loss_cls": 0.36045, "acc": 90.29077, "loss_bbox": 0.2793, "loss": 0.77876, "time": 0.12997}
43
+ {"mode": "train", "epoch": 1, "iter": 2100, "lr": 0.03, "memory": 4062, "data_time": 0.00657, "loss_rpn_cls": 0.06525, "loss_rpn_bbox": 0.07256, "loss_cls": 0.34527, "acc": 90.61768, "loss_bbox": 0.27263, "loss": 0.75571, "time": 0.12826}
44
+ {"mode": "train", "epoch": 1, "iter": 2150, "lr": 0.03, "memory": 4062, "data_time": 0.00653, "loss_rpn_cls": 0.06932, "loss_rpn_bbox": 0.07361, "loss_cls": 0.34899, "acc": 90.71045, "loss_bbox": 0.27236, "loss": 0.76428, "time": 0.12686}
45
+ {"mode": "train", "epoch": 1, "iter": 2200, "lr": 0.03, "memory": 4062, "data_time": 0.00706, "loss_rpn_cls": 0.06824, "loss_rpn_bbox": 0.07525, "loss_cls": 0.34571, "acc": 90.69385, "loss_bbox": 0.27371, "loss": 0.76292, "time": 0.12951}
46
+ {"mode": "train", "epoch": 1, "iter": 2250, "lr": 0.03, "memory": 4062, "data_time": 0.0065, "loss_rpn_cls": 0.07017, "loss_rpn_bbox": 0.0747, "loss_cls": 0.37328, "acc": 90.19409, "loss_bbox": 0.28449, "loss": 0.80264, "time": 0.12735}
47
+ {"mode": "train", "epoch": 1, "iter": 2300, "lr": 0.03, "memory": 4062, "data_time": 0.00661, "loss_rpn_cls": 0.06348, "loss_rpn_bbox": 0.07449, "loss_cls": 0.34822, "acc": 90.5188, "loss_bbox": 0.27295, "loss": 0.75914, "time": 0.12709}
48
+ {"mode": "train", "epoch": 1, "iter": 2350, "lr": 0.03, "memory": 4062, "data_time": 0.00646, "loss_rpn_cls": 0.06535, "loss_rpn_bbox": 0.07423, "loss_cls": 0.35386, "acc": 90.64966, "loss_bbox": 0.26963, "loss": 0.76307, "time": 0.1259}
49
+ {"mode": "train", "epoch": 1, "iter": 2400, "lr": 0.03, "memory": 4062, "data_time": 0.00725, "loss_rpn_cls": 0.06807, "loss_rpn_bbox": 0.07117, "loss_cls": 0.3351, "acc": 91.08179, "loss_bbox": 0.25987, "loss": 0.73421, "time": 0.12522}
50
+ {"mode": "train", "epoch": 1, "iter": 2450, "lr": 0.03, "memory": 4062, "data_time": 0.00753, "loss_rpn_cls": 0.06843, "loss_rpn_bbox": 0.07083, "loss_cls": 0.33747, "acc": 90.94238, "loss_bbox": 0.26478, "loss": 0.7415, "time": 0.12707}
51
+ {"mode": "train", "epoch": 1, "iter": 2500, "lr": 0.03, "memory": 4062, "data_time": 0.00675, "loss_rpn_cls": 0.06768, "loss_rpn_bbox": 0.0766, "loss_cls": 0.33334, "acc": 90.9873, "loss_bbox": 0.25825, "loss": 0.73588, "time": 0.12546}
52
+ {"mode": "train", "epoch": 1, "iter": 2550, "lr": 0.03, "memory": 4062, "data_time": 0.00663, "loss_rpn_cls": 0.06316, "loss_rpn_bbox": 0.06771, "loss_cls": 0.33599, "acc": 90.79321, "loss_bbox": 0.27048, "loss": 0.73735, "time": 0.12494}
53
+ {"mode": "train", "epoch": 1, "iter": 2600, "lr": 0.03, "memory": 4062, "data_time": 0.00658, "loss_rpn_cls": 0.0634, "loss_rpn_bbox": 0.07433, "loss_cls": 0.32644, "acc": 91.04395, "loss_bbox": 0.26424, "loss": 0.7284, "time": 0.12708}
54
+ {"mode": "train", "epoch": 1, "iter": 2650, "lr": 0.03, "memory": 4062, "data_time": 0.00662, "loss_rpn_cls": 0.06534, "loss_rpn_bbox": 0.06786, "loss_cls": 0.35178, "acc": 90.46973, "loss_bbox": 0.26932, "loss": 0.7543, "time": 0.12585}
55
+ {"mode": "train", "epoch": 1, "iter": 2700, "lr": 0.03, "memory": 4062, "data_time": 0.00665, "loss_rpn_cls": 0.06695, "loss_rpn_bbox": 0.07492, "loss_cls": 0.34685, "acc": 90.5918, "loss_bbox": 0.26943, "loss": 0.75815, "time": 0.12618}
56
+ {"mode": "train", "epoch": 1, "iter": 2750, "lr": 0.03, "memory": 4062, "data_time": 0.00661, "loss_rpn_cls": 0.06599, "loss_rpn_bbox": 0.07533, "loss_cls": 0.34692, "acc": 90.47437, "loss_bbox": 0.27989, "loss": 0.76813, "time": 0.12456}
57
+ {"mode": "train", "epoch": 1, "iter": 2800, "lr": 0.03, "memory": 4062, "data_time": 0.00686, "loss_rpn_cls": 0.0658, "loss_rpn_bbox": 0.07292, "loss_cls": 0.35659, "acc": 90.34326, "loss_bbox": 0.28087, "loss": 0.77618, "time": 0.12765}
58
+ {"mode": "train", "epoch": 1, "iter": 2850, "lr": 0.03, "memory": 4062, "data_time": 0.00671, "loss_rpn_cls": 0.06277, "loss_rpn_bbox": 0.07358, "loss_cls": 0.34387, "acc": 90.56543, "loss_bbox": 0.27608, "loss": 0.7563, "time": 0.12432}
59
+ {"mode": "train", "epoch": 1, "iter": 2900, "lr": 0.03, "memory": 4062, "data_time": 0.00673, "loss_rpn_cls": 0.06274, "loss_rpn_bbox": 0.07267, "loss_cls": 0.3341, "acc": 90.84717, "loss_bbox": 0.27158, "loss": 0.7411, "time": 0.12481}
60
+ {"mode": "train", "epoch": 1, "iter": 2950, "lr": 0.03, "memory": 4062, "data_time": 0.00664, "loss_rpn_cls": 0.0606, "loss_rpn_bbox": 0.06713, "loss_cls": 0.32467, "acc": 91.18286, "loss_bbox": 0.26504, "loss": 0.71744, "time": 0.12532}
61
+ {"mode": "train", "epoch": 1, "iter": 3000, "lr": 0.03, "memory": 4062, "data_time": 0.00668, "loss_rpn_cls": 0.06082, "loss_rpn_bbox": 0.06731, "loss_cls": 0.33558, "acc": 90.91602, "loss_bbox": 0.26199, "loss": 0.7257, "time": 0.12504}
62
+ {"mode": "train", "epoch": 1, "iter": 3050, "lr": 0.03, "memory": 4086, "data_time": 0.00677, "loss_rpn_cls": 0.05984, "loss_rpn_bbox": 0.07053, "loss_cls": 0.327, "acc": 91.1438, "loss_bbox": 0.26296, "loss": 0.72033, "time": 0.1262}
63
+ {"mode": "train", "epoch": 1, "iter": 3100, "lr": 0.03, "memory": 4086, "data_time": 0.0067, "loss_rpn_cls": 0.06018, "loss_rpn_bbox": 0.07134, "loss_cls": 0.33664, "acc": 90.68872, "loss_bbox": 0.27123, "loss": 0.7394, "time": 0.12524}
64
+ {"mode": "train", "epoch": 1, "iter": 3150, "lr": 0.03, "memory": 4086, "data_time": 0.00683, "loss_rpn_cls": 0.06002, "loss_rpn_bbox": 0.0672, "loss_cls": 0.30375, "acc": 91.68994, "loss_bbox": 0.24682, "loss": 0.67778, "time": 0.1273}
65
+ {"mode": "train", "epoch": 1, "iter": 3200, "lr": 0.03, "memory": 4086, "data_time": 0.00661, "loss_rpn_cls": 0.06486, "loss_rpn_bbox": 0.07448, "loss_cls": 0.34244, "acc": 90.60327, "loss_bbox": 0.27034, "loss": 0.75212, "time": 0.12516}
66
+ {"mode": "train", "epoch": 1, "iter": 3250, "lr": 0.03, "memory": 4086, "data_time": 0.00693, "loss_rpn_cls": 0.06416, "loss_rpn_bbox": 0.06845, "loss_cls": 0.33274, "acc": 90.86792, "loss_bbox": 0.25985, "loss": 0.72521, "time": 0.12506}
67
+ {"mode": "train", "epoch": 1, "iter": 3300, "lr": 0.03, "memory": 4086, "data_time": 0.0065, "loss_rpn_cls": 0.06129, "loss_rpn_bbox": 0.07204, "loss_cls": 0.33978, "acc": 90.77832, "loss_bbox": 0.27167, "loss": 0.74478, "time": 0.12623}
68
+ {"mode": "train", "epoch": 1, "iter": 3350, "lr": 0.03, "memory": 4086, "data_time": 0.00675, "loss_rpn_cls": 0.05902, "loss_rpn_bbox": 0.06627, "loss_cls": 0.32886, "acc": 90.90845, "loss_bbox": 0.26446, "loss": 0.71861, "time": 0.12569}
69
+ {"mode": "train", "epoch": 1, "iter": 3400, "lr": 0.03, "memory": 4086, "data_time": 0.00696, "loss_rpn_cls": 0.06027, "loss_rpn_bbox": 0.06938, "loss_cls": 0.32262, "acc": 90.98169, "loss_bbox": 0.26169, "loss": 0.71395, "time": 0.12836}
70
+ {"mode": "train", "epoch": 1, "iter": 3450, "lr": 0.03, "memory": 4086, "data_time": 0.0068, "loss_rpn_cls": 0.06474, "loss_rpn_bbox": 0.07061, "loss_cls": 0.3313, "acc": 90.86475, "loss_bbox": 0.262, "loss": 0.72865, "time": 0.12479}
71
+ {"mode": "train", "epoch": 1, "iter": 3500, "lr": 0.03, "memory": 4086, "data_time": 0.00677, "loss_rpn_cls": 0.06194, "loss_rpn_bbox": 0.0668, "loss_cls": 0.31316, "acc": 91.26514, "loss_bbox": 0.2539, "loss": 0.6958, "time": 0.1258}
72
+ {"mode": "train", "epoch": 1, "iter": 3550, "lr": 0.03, "memory": 4086, "data_time": 0.00692, "loss_rpn_cls": 0.06058, "loss_rpn_bbox": 0.06626, "loss_cls": 0.3244, "acc": 90.92236, "loss_bbox": 0.26444, "loss": 0.71566, "time": 0.12553}
73
+ {"mode": "train", "epoch": 1, "iter": 3600, "lr": 0.03, "memory": 4086, "data_time": 0.0068, "loss_rpn_cls": 0.05856, "loss_rpn_bbox": 0.06909, "loss_cls": 0.33541, "acc": 90.69849, "loss_bbox": 0.27089, "loss": 0.73394, "time": 0.12801}
74
+ {"mode": "train", "epoch": 1, "iter": 3650, "lr": 0.03, "memory": 4086, "data_time": 0.00676, "loss_rpn_cls": 0.05899, "loss_rpn_bbox": 0.07097, "loss_cls": 0.322, "acc": 90.87402, "loss_bbox": 0.27124, "loss": 0.7232, "time": 0.1242}
75
+ {"mode": "train", "epoch": 1, "iter": 3700, "lr": 0.03, "memory": 4086, "data_time": 0.00764, "loss_rpn_cls": 0.05514, "loss_rpn_bbox": 0.07033, "loss_cls": 0.33212, "acc": 90.65552, "loss_bbox": 0.27372, "loss": 0.7313, "time": 0.12782}
76
+ {"mode": "train", "epoch": 1, "iter": 3750, "lr": 0.03, "memory": 4086, "data_time": 0.00737, "loss_rpn_cls": 0.05767, "loss_rpn_bbox": 0.06985, "loss_cls": 0.33779, "acc": 90.45264, "loss_bbox": 0.28273, "loss": 0.74804, "time": 0.12553}
77
+ {"mode": "train", "epoch": 1, "iter": 3800, "lr": 0.03, "memory": 4086, "data_time": 0.00678, "loss_rpn_cls": 0.06451, "loss_rpn_bbox": 0.07105, "loss_cls": 0.32126, "acc": 91.13184, "loss_bbox": 0.26509, "loss": 0.72191, "time": 0.12514}
78
+ {"mode": "train", "epoch": 1, "iter": 3850, "lr": 0.03, "memory": 4086, "data_time": 0.00699, "loss_rpn_cls": 0.05838, "loss_rpn_bbox": 0.06936, "loss_cls": 0.32492, "acc": 90.90625, "loss_bbox": 0.26549, "loss": 0.71816, "time": 0.12444}
79
+ {"mode": "train", "epoch": 1, "iter": 3900, "lr": 0.03, "memory": 4086, "data_time": 0.00679, "loss_rpn_cls": 0.06453, "loss_rpn_bbox": 0.07083, "loss_cls": 0.32241, "acc": 91.0769, "loss_bbox": 0.25833, "loss": 0.7161, "time": 0.12471}
80
+ {"mode": "train", "epoch": 1, "iter": 3950, "lr": 0.03, "memory": 4086, "data_time": 0.00684, "loss_rpn_cls": 0.06075, "loss_rpn_bbox": 0.06807, "loss_cls": 0.31327, "acc": 91.10498, "loss_bbox": 0.25341, "loss": 0.69551, "time": 0.12402}
81
+ {"mode": "train", "epoch": 1, "iter": 4000, "lr": 0.03, "memory": 4086, "data_time": 0.0069, "loss_rpn_cls": 0.0565, "loss_rpn_bbox": 0.06347, "loss_cls": 0.3149, "acc": 91.23999, "loss_bbox": 0.25264, "loss": 0.6875, "time": 0.12845}
82
+ {"mode": "train", "epoch": 1, "iter": 4050, "lr": 0.03, "memory": 4086, "data_time": 0.00654, "loss_rpn_cls": 0.05967, "loss_rpn_bbox": 0.06791, "loss_cls": 0.33225, "acc": 90.71216, "loss_bbox": 0.2728, "loss": 0.73263, "time": 0.12427}
83
+ {"mode": "train", "epoch": 1, "iter": 4100, "lr": 0.03, "memory": 4086, "data_time": 0.00673, "loss_rpn_cls": 0.05939, "loss_rpn_bbox": 0.0678, "loss_cls": 0.31974, "acc": 90.94897, "loss_bbox": 0.26059, "loss": 0.70751, "time": 0.12367}
84
+ {"mode": "train", "epoch": 1, "iter": 4150, "lr": 0.03, "memory": 4086, "data_time": 0.00665, "loss_rpn_cls": 0.06051, "loss_rpn_bbox": 0.06896, "loss_cls": 0.33174, "acc": 90.77588, "loss_bbox": 0.26818, "loss": 0.7294, "time": 0.12599}
85
+ {"mode": "train", "epoch": 1, "iter": 4200, "lr": 0.03, "memory": 4086, "data_time": 0.00681, "loss_rpn_cls": 0.05951, "loss_rpn_bbox": 0.07003, "loss_cls": 0.32577, "acc": 90.70068, "loss_bbox": 0.26877, "loss": 0.72407, "time": 0.12467}
86
+ {"mode": "train", "epoch": 1, "iter": 4250, "lr": 0.03, "memory": 4086, "data_time": 0.00684, "loss_rpn_cls": 0.06395, "loss_rpn_bbox": 0.07107, "loss_cls": 0.32072, "acc": 90.84888, "loss_bbox": 0.26184, "loss": 0.71758, "time": 0.1255}
87
+ {"mode": "train", "epoch": 1, "iter": 4300, "lr": 0.03, "memory": 4086, "data_time": 0.00672, "loss_rpn_cls": 0.05981, "loss_rpn_bbox": 0.06813, "loss_cls": 0.32656, "acc": 90.90137, "loss_bbox": 0.26276, "loss": 0.71726, "time": 0.12582}
88
+ {"mode": "train", "epoch": 1, "iter": 4350, "lr": 0.03, "memory": 4086, "data_time": 0.00672, "loss_rpn_cls": 0.05902, "loss_rpn_bbox": 0.06686, "loss_cls": 0.32129, "acc": 90.89966, "loss_bbox": 0.26419, "loss": 0.71136, "time": 0.125}
89
+ {"mode": "train", "epoch": 1, "iter": 4400, "lr": 0.03, "memory": 4086, "data_time": 0.0068, "loss_rpn_cls": 0.06284, "loss_rpn_bbox": 0.07057, "loss_cls": 0.31236, "acc": 91.00659, "loss_bbox": 0.26386, "loss": 0.70963, "time": 0.12676}
90
+ {"mode": "train", "epoch": 1, "iter": 4450, "lr": 0.03, "memory": 4086, "data_time": 0.00682, "loss_rpn_cls": 0.05206, "loss_rpn_bbox": 0.06306, "loss_cls": 0.29791, "acc": 91.72461, "loss_bbox": 0.24329, "loss": 0.65632, "time": 0.12467}
91
+ {"mode": "train", "epoch": 1, "iter": 4500, "lr": 0.03, "memory": 4086, "data_time": 0.00684, "loss_rpn_cls": 0.05524, "loss_rpn_bbox": 0.06607, "loss_cls": 0.31353, "acc": 90.97266, "loss_bbox": 0.26227, "loss": 0.69711, "time": 0.12402}
92
+ {"mode": "train", "epoch": 1, "iter": 4550, "lr": 0.03, "memory": 4086, "data_time": 0.00672, "loss_rpn_cls": 0.0602, "loss_rpn_bbox": 0.0714, "loss_cls": 0.31561, "acc": 91.1604, "loss_bbox": 0.25896, "loss": 0.70617, "time": 0.12794}
93
+ {"mode": "train", "epoch": 1, "iter": 4600, "lr": 0.03, "memory": 4086, "data_time": 0.00666, "loss_rpn_cls": 0.05515, "loss_rpn_bbox": 0.06715, "loss_cls": 0.31143, "acc": 91.15063, "loss_bbox": 0.26057, "loss": 0.69431, "time": 0.12675}
94
+ {"mode": "train", "epoch": 1, "iter": 4650, "lr": 0.03, "memory": 4086, "data_time": 0.00689, "loss_rpn_cls": 0.06124, "loss_rpn_bbox": 0.07377, "loss_cls": 0.33795, "acc": 90.32886, "loss_bbox": 0.27581, "loss": 0.74877, "time": 0.1247}
95
+ {"mode": "train", "epoch": 1, "iter": 4700, "lr": 0.03, "memory": 4086, "data_time": 0.00681, "loss_rpn_cls": 0.05758, "loss_rpn_bbox": 0.06837, "loss_cls": 0.32755, "acc": 90.74146, "loss_bbox": 0.27247, "loss": 0.72597, "time": 0.12509}
96
+ {"mode": "train", "epoch": 1, "iter": 4750, "lr": 0.03, "memory": 4086, "data_time": 0.00685, "loss_rpn_cls": 0.05889, "loss_rpn_bbox": 0.07008, "loss_cls": 0.3201, "acc": 90.86426, "loss_bbox": 0.26974, "loss": 0.71881, "time": 0.12459}
97
+ {"mode": "train", "epoch": 1, "iter": 4800, "lr": 0.03, "memory": 4086, "data_time": 0.0072, "loss_rpn_cls": 0.05589, "loss_rpn_bbox": 0.06586, "loss_cls": 0.3244, "acc": 90.78369, "loss_bbox": 0.26997, "loss": 0.71612, "time": 0.12588}
98
+ {"mode": "train", "epoch": 1, "iter": 4850, "lr": 0.03, "memory": 4086, "data_time": 0.00669, "loss_rpn_cls": 0.06411, "loss_rpn_bbox": 0.07069, "loss_cls": 0.31662, "acc": 91.18994, "loss_bbox": 0.25545, "loss": 0.70687, "time": 0.12352}
99
+ {"mode": "train", "epoch": 1, "iter": 4900, "lr": 0.03, "memory": 4086, "data_time": 0.00706, "loss_rpn_cls": 0.05308, "loss_rpn_bbox": 0.06561, "loss_cls": 0.30892, "acc": 91.2168, "loss_bbox": 0.25947, "loss": 0.68709, "time": 0.12865}
100
+ {"mode": "train", "epoch": 1, "iter": 4950, "lr": 0.03, "memory": 4086, "data_time": 0.00671, "loss_rpn_cls": 0.054, "loss_rpn_bbox": 0.06407, "loss_cls": 0.29765, "acc": 91.23926, "loss_bbox": 0.25225, "loss": 0.66798, "time": 0.12405}
101
+ {"mode": "train", "epoch": 1, "iter": 5000, "lr": 0.03, "memory": 4086, "data_time": 0.00683, "loss_rpn_cls": 0.06301, "loss_rpn_bbox": 0.06665, "loss_cls": 0.30958, "acc": 91.23633, "loss_bbox": 0.25877, "loss": 0.69801, "time": 0.12453}
102
+ {"mode": "train", "epoch": 1, "iter": 5050, "lr": 0.03, "memory": 4086, "data_time": 0.00681, "loss_rpn_cls": 0.05499, "loss_rpn_bbox": 0.06582, "loss_cls": 0.31523, "acc": 91.06592, "loss_bbox": 0.26504, "loss": 0.70108, "time": 0.1254}
103
+ {"mode": "train", "epoch": 1, "iter": 5100, "lr": 0.03, "memory": 4086, "data_time": 0.00679, "loss_rpn_cls": 0.05897, "loss_rpn_bbox": 0.06547, "loss_cls": 0.31699, "acc": 91.11768, "loss_bbox": 0.25777, "loss": 0.69921, "time": 0.12438}
104
+ {"mode": "train", "epoch": 1, "iter": 5150, "lr": 0.03, "memory": 4086, "data_time": 0.00675, "loss_rpn_cls": 0.06034, "loss_rpn_bbox": 0.06942, "loss_cls": 0.3189, "acc": 90.89404, "loss_bbox": 0.26596, "loss": 0.71462, "time": 0.12544}
105
+ {"mode": "train", "epoch": 1, "iter": 5200, "lr": 0.03, "memory": 4086, "data_time": 0.00678, "loss_rpn_cls": 0.05973, "loss_rpn_bbox": 0.06957, "loss_cls": 0.32406, "acc": 90.79395, "loss_bbox": 0.26579, "loss": 0.71915, "time": 0.12708}
106
+ {"mode": "train", "epoch": 1, "iter": 5250, "lr": 0.03, "memory": 4086, "data_time": 0.00678, "loss_rpn_cls": 0.05546, "loss_rpn_bbox": 0.06664, "loss_cls": 0.31332, "acc": 91.00635, "loss_bbox": 0.26214, "loss": 0.69757, "time": 0.12444}
107
+ {"mode": "train", "epoch": 1, "iter": 5300, "lr": 0.03, "memory": 4086, "data_time": 0.0071, "loss_rpn_cls": 0.05586, "loss_rpn_bbox": 0.06429, "loss_cls": 0.32748, "acc": 90.77295, "loss_bbox": 0.26838, "loss": 0.71601, "time": 0.1256}
108
+ {"mode": "train", "epoch": 1, "iter": 5350, "lr": 0.03, "memory": 4086, "data_time": 0.00676, "loss_rpn_cls": 0.05693, "loss_rpn_bbox": 0.06662, "loss_cls": 0.30918, "acc": 91.23096, "loss_bbox": 0.26069, "loss": 0.69342, "time": 0.12417}
109
+ {"mode": "train", "epoch": 1, "iter": 5400, "lr": 0.03, "memory": 4086, "data_time": 0.00719, "loss_rpn_cls": 0.05448, "loss_rpn_bbox": 0.0659, "loss_cls": 0.30277, "acc": 91.43945, "loss_bbox": 0.25534, "loss": 0.6785, "time": 0.12451}
110
+ {"mode": "train", "epoch": 1, "iter": 5450, "lr": 0.03, "memory": 4086, "data_time": 0.00689, "loss_rpn_cls": 0.05703, "loss_rpn_bbox": 0.06837, "loss_cls": 0.31374, "acc": 90.83301, "loss_bbox": 0.26939, "loss": 0.70853, "time": 0.12691}
111
+ {"mode": "train", "epoch": 1, "iter": 5500, "lr": 0.03, "memory": 4086, "data_time": 0.00701, "loss_rpn_cls": 0.05763, "loss_rpn_bbox": 0.06592, "loss_cls": 0.3206, "acc": 91.04468, "loss_bbox": 0.25829, "loss": 0.70245, "time": 0.12427}
112
+ {"mode": "train", "epoch": 1, "iter": 5550, "lr": 0.03, "memory": 4086, "data_time": 0.00722, "loss_rpn_cls": 0.0521, "loss_rpn_bbox": 0.06666, "loss_cls": 0.2992, "acc": 91.23291, "loss_bbox": 0.25475, "loss": 0.67271, "time": 0.12514}
113
+ {"mode": "train", "epoch": 1, "iter": 5600, "lr": 0.03, "memory": 4086, "data_time": 0.00733, "loss_rpn_cls": 0.05802, "loss_rpn_bbox": 0.06882, "loss_cls": 0.30546, "acc": 91.05908, "loss_bbox": 0.26377, "loss": 0.69607, "time": 0.12549}
114
+ {"mode": "train", "epoch": 1, "iter": 5650, "lr": 0.03, "memory": 4086, "data_time": 0.00704, "loss_rpn_cls": 0.05426, "loss_rpn_bbox": 0.06372, "loss_cls": 0.29833, "acc": 91.49805, "loss_bbox": 0.24935, "loss": 0.66567, "time": 0.12734}
115
+ {"mode": "train", "epoch": 1, "iter": 5700, "lr": 0.03, "memory": 4086, "data_time": 0.007, "loss_rpn_cls": 0.05702, "loss_rpn_bbox": 0.06853, "loss_cls": 0.3136, "acc": 90.95996, "loss_bbox": 0.26332, "loss": 0.70247, "time": 0.12443}
116
+ {"mode": "train", "epoch": 1, "iter": 5750, "lr": 0.03, "memory": 4086, "data_time": 0.00726, "loss_rpn_cls": 0.05463, "loss_rpn_bbox": 0.06913, "loss_cls": 0.30177, "acc": 91.18311, "loss_bbox": 0.26184, "loss": 0.68737, "time": 0.12488}
117
+ {"mode": "train", "epoch": 1, "iter": 5800, "lr": 0.03, "memory": 4086, "data_time": 0.00702, "loss_rpn_cls": 0.0526, "loss_rpn_bbox": 0.06444, "loss_cls": 0.29934, "acc": 91.43555, "loss_bbox": 0.25201, "loss": 0.66839, "time": 0.12399}
118
+ {"mode": "train", "epoch": 1, "iter": 5850, "lr": 0.03, "memory": 4086, "data_time": 0.00704, "loss_rpn_cls": 0.054, "loss_rpn_bbox": 0.06436, "loss_cls": 0.30679, "acc": 91.29663, "loss_bbox": 0.25091, "loss": 0.67606, "time": 0.12381}
119
+ {"mode": "train", "epoch": 1, "iter": 5900, "lr": 0.03, "memory": 4086, "data_time": 0.00735, "loss_rpn_cls": 0.0574, "loss_rpn_bbox": 0.06763, "loss_cls": 0.30626, "acc": 91.05591, "loss_bbox": 0.26261, "loss": 0.6939, "time": 0.12495}
120
+ {"mode": "train", "epoch": 1, "iter": 5950, "lr": 0.03, "memory": 4086, "data_time": 0.00705, "loss_rpn_cls": 0.0516, "loss_rpn_bbox": 0.06723, "loss_cls": 0.30595, "acc": 91.04834, "loss_bbox": 0.26553, "loss": 0.69031, "time": 0.12318}
121
+ {"mode": "train", "epoch": 1, "iter": 6000, "lr": 0.03, "memory": 4086, "data_time": 0.00711, "loss_rpn_cls": 0.05614, "loss_rpn_bbox": 0.06779, "loss_cls": 0.30867, "acc": 91.09888, "loss_bbox": 0.26623, "loss": 0.69883, "time": 0.12418}
122
+ {"mode": "train", "epoch": 1, "iter": 6050, "lr": 0.03, "memory": 4086, "data_time": 0.00693, "loss_rpn_cls": 0.06035, "loss_rpn_bbox": 0.06927, "loss_cls": 0.2975, "acc": 91.25024, "loss_bbox": 0.25844, "loss": 0.68556, "time": 0.12539}
123
+ {"mode": "train", "epoch": 1, "iter": 6100, "lr": 0.03, "memory": 4086, "data_time": 0.00741, "loss_rpn_cls": 0.05696, "loss_rpn_bbox": 0.07035, "loss_cls": 0.3063, "acc": 91.25903, "loss_bbox": 0.2551, "loss": 0.68871, "time": 0.12444}
124
+ {"mode": "train", "epoch": 1, "iter": 6150, "lr": 0.03, "memory": 4086, "data_time": 0.0072, "loss_rpn_cls": 0.05323, "loss_rpn_bbox": 0.06459, "loss_cls": 0.29194, "acc": 91.50952, "loss_bbox": 0.25127, "loss": 0.66103, "time": 0.12424}
125
+ {"mode": "train", "epoch": 1, "iter": 6200, "lr": 0.03, "memory": 4086, "data_time": 0.00734, "loss_rpn_cls": 0.05741, "loss_rpn_bbox": 0.06553, "loss_cls": 0.31313, "acc": 90.97876, "loss_bbox": 0.26346, "loss": 0.69953, "time": 0.12475}
126
+ {"mode": "train", "epoch": 1, "iter": 6250, "lr": 0.03, "memory": 4086, "data_time": 0.00705, "loss_rpn_cls": 0.05327, "loss_rpn_bbox": 0.06672, "loss_cls": 0.32483, "acc": 90.91504, "loss_bbox": 0.25686, "loss": 0.70168, "time": 0.12341}
127
+ {"mode": "train", "epoch": 1, "iter": 6300, "lr": 0.03, "memory": 4086, "data_time": 0.00729, "loss_rpn_cls": 0.06077, "loss_rpn_bbox": 0.07124, "loss_cls": 0.31039, "acc": 91.2854, "loss_bbox": 0.25382, "loss": 0.69622, "time": 0.12352}
128
+ {"mode": "train", "epoch": 1, "iter": 6350, "lr": 0.03, "memory": 4086, "data_time": 0.00705, "loss_rpn_cls": 0.05812, "loss_rpn_bbox": 0.06941, "loss_cls": 0.31361, "acc": 91.1394, "loss_bbox": 0.26471, "loss": 0.70584, "time": 0.1239}
129
+ {"mode": "train", "epoch": 1, "iter": 6400, "lr": 0.03, "memory": 4086, "data_time": 0.00694, "loss_rpn_cls": 0.05714, "loss_rpn_bbox": 0.06459, "loss_cls": 0.29279, "acc": 91.47095, "loss_bbox": 0.24783, "loss": 0.66234, "time": 0.12627}
130
+ {"mode": "train", "epoch": 1, "iter": 6450, "lr": 0.03, "memory": 4086, "data_time": 0.00703, "loss_rpn_cls": 0.05207, "loss_rpn_bbox": 0.06472, "loss_cls": 0.29167, "acc": 91.55029, "loss_bbox": 0.24714, "loss": 0.6556, "time": 0.12438}
131
+ {"mode": "train", "epoch": 1, "iter": 6500, "lr": 0.03, "memory": 4086, "data_time": 0.0075, "loss_rpn_cls": 0.05575, "loss_rpn_bbox": 0.06662, "loss_cls": 0.3016, "acc": 91.2002, "loss_bbox": 0.25921, "loss": 0.68318, "time": 0.12905}
132
+ {"mode": "train", "epoch": 1, "iter": 6550, "lr": 0.03, "memory": 4086, "data_time": 0.0072, "loss_rpn_cls": 0.05159, "loss_rpn_bbox": 0.06532, "loss_cls": 0.29235, "acc": 91.49438, "loss_bbox": 0.25428, "loss": 0.66353, "time": 0.12488}
133
+ {"mode": "train", "epoch": 1, "iter": 6600, "lr": 0.03, "memory": 4086, "data_time": 0.00706, "loss_rpn_cls": 0.05628, "loss_rpn_bbox": 0.06974, "loss_cls": 0.3055, "acc": 91.17163, "loss_bbox": 0.25924, "loss": 0.69076, "time": 0.12446}
134
+ {"mode": "train", "epoch": 1, "iter": 6650, "lr": 0.03, "memory": 4086, "data_time": 0.00743, "loss_rpn_cls": 0.05561, "loss_rpn_bbox": 0.06405, "loss_cls": 0.29218, "acc": 91.64258, "loss_bbox": 0.24828, "loss": 0.66012, "time": 0.1257}
135
+ {"mode": "train", "epoch": 1, "iter": 6700, "lr": 0.03, "memory": 4086, "data_time": 0.00722, "loss_rpn_cls": 0.05763, "loss_rpn_bbox": 0.07094, "loss_cls": 0.31711, "acc": 90.9021, "loss_bbox": 0.26362, "loss": 0.7093, "time": 0.12644}
136
+ {"mode": "train", "epoch": 1, "iter": 6750, "lr": 0.03, "memory": 4086, "data_time": 0.00698, "loss_rpn_cls": 0.06045, "loss_rpn_bbox": 0.06863, "loss_cls": 0.30003, "acc": 91.427, "loss_bbox": 0.25431, "loss": 0.68342, "time": 0.12437}
137
+ {"mode": "train", "epoch": 1, "iter": 6800, "lr": 0.03, "memory": 4086, "data_time": 0.00721, "loss_rpn_cls": 0.05398, "loss_rpn_bbox": 0.0677, "loss_cls": 0.30299, "acc": 91.10669, "loss_bbox": 0.26109, "loss": 0.68575, "time": 0.12416}
138
+ {"mode": "train", "epoch": 1, "iter": 6850, "lr": 0.03, "memory": 4086, "data_time": 0.00735, "loss_rpn_cls": 0.05411, "loss_rpn_bbox": 0.06493, "loss_cls": 0.31158, "acc": 91.03589, "loss_bbox": 0.2567, "loss": 0.68732, "time": 0.12388}
139
+ {"mode": "train", "epoch": 1, "iter": 6900, "lr": 0.03, "memory": 4086, "data_time": 0.00753, "loss_rpn_cls": 0.05278, "loss_rpn_bbox": 0.06708, "loss_cls": 0.30723, "acc": 91.15137, "loss_bbox": 0.25395, "loss": 0.68103, "time": 0.12898}
140
+ {"mode": "train", "epoch": 1, "iter": 6950, "lr": 0.03, "memory": 4086, "data_time": 0.00729, "loss_rpn_cls": 0.05577, "loss_rpn_bbox": 0.06376, "loss_cls": 0.319, "acc": 90.76709, "loss_bbox": 0.26778, "loss": 0.70631, "time": 0.1265}
141
+ {"mode": "train", "epoch": 1, "iter": 7000, "lr": 0.03, "memory": 4086, "data_time": 0.0074, "loss_rpn_cls": 0.05219, "loss_rpn_bbox": 0.06553, "loss_cls": 0.28613, "acc": 91.64966, "loss_bbox": 0.2539, "loss": 0.65774, "time": 0.12727}
142
+ {"mode": "train", "epoch": 1, "iter": 7050, "lr": 0.03, "memory": 4086, "data_time": 0.00711, "loss_rpn_cls": 0.05148, "loss_rpn_bbox": 0.06432, "loss_cls": 0.30067, "acc": 91.22412, "loss_bbox": 0.2549, "loss": 0.67136, "time": 0.12655}
143
+ {"mode": "train", "epoch": 1, "iter": 7100, "lr": 0.03, "memory": 4086, "data_time": 0.00694, "loss_rpn_cls": 0.04948, "loss_rpn_bbox": 0.06396, "loss_cls": 0.29618, "acc": 91.22046, "loss_bbox": 0.25754, "loss": 0.66716, "time": 0.12469}
144
+ {"mode": "train", "epoch": 1, "iter": 7150, "lr": 0.03, "memory": 4086, "data_time": 0.00726, "loss_rpn_cls": 0.05911, "loss_rpn_bbox": 0.07119, "loss_cls": 0.30194, "acc": 90.98804, "loss_bbox": 0.26571, "loss": 0.69795, "time": 0.12482}
145
+ {"mode": "train", "epoch": 1, "iter": 7200, "lr": 0.03, "memory": 4086, "data_time": 0.00742, "loss_rpn_cls": 0.05974, "loss_rpn_bbox": 0.06453, "loss_cls": 0.29391, "acc": 91.45825, "loss_bbox": 0.24681, "loss": 0.665, "time": 0.12715}
146
+ {"mode": "train", "epoch": 1, "iter": 7250, "lr": 0.03, "memory": 4086, "data_time": 0.00709, "loss_rpn_cls": 0.05218, "loss_rpn_bbox": 0.06274, "loss_cls": 0.30156, "acc": 91.25415, "loss_bbox": 0.25534, "loss": 0.67182, "time": 0.12886}
147
+ {"mode": "train", "epoch": 1, "iter": 7300, "lr": 0.03, "memory": 4086, "data_time": 0.00728, "loss_rpn_cls": 0.05002, "loss_rpn_bbox": 0.06541, "loss_cls": 0.29537, "acc": 91.43311, "loss_bbox": 0.25007, "loss": 0.66087, "time": 0.1262}
148
+ {"mode": "train", "epoch": 1, "iter": 7350, "lr": 0.03, "memory": 4086, "data_time": 0.0073, "loss_rpn_cls": 0.0562, "loss_rpn_bbox": 0.068, "loss_cls": 0.30547, "acc": 91.0459, "loss_bbox": 0.26852, "loss": 0.69819, "time": 0.12397}
149
+ {"mode": "train", "epoch": 1, "iter": 7400, "lr": 0.03, "memory": 4086, "data_time": 0.00691, "loss_rpn_cls": 0.05016, "loss_rpn_bbox": 0.07082, "loss_cls": 0.29821, "acc": 91.23169, "loss_bbox": 0.26462, "loss": 0.68382, "time": 0.12748}
150
+ {"mode": "train", "epoch": 1, "iter": 7450, "lr": 0.03, "memory": 4086, "data_time": 0.00698, "loss_rpn_cls": 0.05409, "loss_rpn_bbox": 0.0665, "loss_cls": 0.29602, "acc": 91.30591, "loss_bbox": 0.25652, "loss": 0.67314, "time": 0.12465}
151
+ {"mode": "train", "epoch": 1, "iter": 7500, "lr": 0.03, "memory": 4086, "data_time": 0.00705, "loss_rpn_cls": 0.05285, "loss_rpn_bbox": 0.06778, "loss_cls": 0.30454, "acc": 91.0625, "loss_bbox": 0.26398, "loss": 0.68915, "time": 0.12766}
152
+ {"mode": "train", "epoch": 1, "iter": 7550, "lr": 0.03, "memory": 4086, "data_time": 0.00749, "loss_rpn_cls": 0.05033, "loss_rpn_bbox": 0.06422, "loss_cls": 0.29043, "acc": 91.5271, "loss_bbox": 0.25179, "loss": 0.65677, "time": 0.12649}
153
+ {"mode": "train", "epoch": 1, "iter": 7600, "lr": 0.03, "memory": 4086, "data_time": 0.00713, "loss_rpn_cls": 0.05192, "loss_rpn_bbox": 0.06336, "loss_cls": 0.28757, "acc": 91.46924, "loss_bbox": 0.25509, "loss": 0.65794, "time": 0.12386}
154
+ {"mode": "train", "epoch": 1, "iter": 7650, "lr": 0.03, "memory": 4086, "data_time": 0.007, "loss_rpn_cls": 0.05101, "loss_rpn_bbox": 0.0657, "loss_cls": 0.29967, "acc": 91.16357, "loss_bbox": 0.26283, "loss": 0.67921, "time": 0.12374}
155
+ {"mode": "train", "epoch": 1, "iter": 7700, "lr": 0.03, "memory": 4086, "data_time": 0.00713, "loss_rpn_cls": 0.05242, "loss_rpn_bbox": 0.06822, "loss_cls": 0.29025, "acc": 91.38428, "loss_bbox": 0.25342, "loss": 0.6643, "time": 0.12493}
156
+ {"mode": "train", "epoch": 1, "iter": 7750, "lr": 0.03, "memory": 4086, "data_time": 0.00688, "loss_rpn_cls": 0.04998, "loss_rpn_bbox": 0.06341, "loss_cls": 0.28874, "acc": 91.53809, "loss_bbox": 0.2476, "loss": 0.64973, "time": 0.12379}
157
+ {"mode": "train", "epoch": 1, "iter": 7800, "lr": 0.03, "memory": 4086, "data_time": 0.00711, "loss_rpn_cls": 0.0515, "loss_rpn_bbox": 0.06637, "loss_cls": 0.2977, "acc": 91.21411, "loss_bbox": 0.26154, "loss": 0.6771, "time": 0.12575}
158
+ {"mode": "train", "epoch": 1, "iter": 7850, "lr": 0.03, "memory": 4086, "data_time": 0.00712, "loss_rpn_cls": 0.05081, "loss_rpn_bbox": 0.06234, "loss_cls": 0.29554, "acc": 91.4021, "loss_bbox": 0.25698, "loss": 0.66566, "time": 0.12674}
159
+ {"mode": "train", "epoch": 1, "iter": 7900, "lr": 0.03, "memory": 4086, "data_time": 0.00721, "loss_rpn_cls": 0.05303, "loss_rpn_bbox": 0.06708, "loss_cls": 0.30271, "acc": 91.13965, "loss_bbox": 0.26025, "loss": 0.68307, "time": 0.12584}
160
+ {"mode": "train", "epoch": 1, "iter": 7950, "lr": 0.03, "memory": 4086, "data_time": 0.00713, "loss_rpn_cls": 0.04923, "loss_rpn_bbox": 0.06471, "loss_cls": 0.28492, "acc": 91.65161, "loss_bbox": 0.24506, "loss": 0.64392, "time": 0.12424}
161
+ {"mode": "train", "epoch": 1, "iter": 8000, "lr": 0.03, "memory": 4086, "data_time": 0.00712, "loss_rpn_cls": 0.05166, "loss_rpn_bbox": 0.06261, "loss_cls": 0.29626, "acc": 91.24658, "loss_bbox": 0.25961, "loss": 0.67014, "time": 0.12461}
162
+ {"mode": "train", "epoch": 1, "iter": 8050, "lr": 0.03, "memory": 4086, "data_time": 0.00701, "loss_rpn_cls": 0.05662, "loss_rpn_bbox": 0.066, "loss_cls": 0.29961, "acc": 91.15576, "loss_bbox": 0.25284, "loss": 0.67507, "time": 0.12548}
163
+ {"mode": "train", "epoch": 1, "iter": 8100, "lr": 0.03, "memory": 4086, "data_time": 0.00704, "loss_rpn_cls": 0.05146, "loss_rpn_bbox": 0.06862, "loss_cls": 0.29882, "acc": 91.17456, "loss_bbox": 0.26026, "loss": 0.67917, "time": 0.12467}
164
+ {"mode": "train", "epoch": 1, "iter": 8150, "lr": 0.03, "memory": 4086, "data_time": 0.00747, "loss_rpn_cls": 0.05433, "loss_rpn_bbox": 0.06557, "loss_cls": 0.29066, "acc": 91.4248, "loss_bbox": 0.25489, "loss": 0.66545, "time": 0.12433}
165
+ {"mode": "train", "epoch": 1, "iter": 8200, "lr": 0.03, "memory": 4086, "data_time": 0.00759, "loss_rpn_cls": 0.0522, "loss_rpn_bbox": 0.06436, "loss_cls": 0.29238, "acc": 91.44165, "loss_bbox": 0.24878, "loss": 0.65773, "time": 0.12512}
166
+ {"mode": "train", "epoch": 1, "iter": 8250, "lr": 0.03, "memory": 4086, "data_time": 0.00726, "loss_rpn_cls": 0.05659, "loss_rpn_bbox": 0.06603, "loss_cls": 0.30945, "acc": 90.74707, "loss_bbox": 0.27209, "loss": 0.70415, "time": 0.12498}
167
+ {"mode": "train", "epoch": 1, "iter": 8300, "lr": 0.03, "memory": 4086, "data_time": 0.00723, "loss_rpn_cls": 0.05078, "loss_rpn_bbox": 0.06786, "loss_cls": 0.29402, "acc": 91.30103, "loss_bbox": 0.25823, "loss": 0.67089, "time": 0.12899}
168
+ {"mode": "train", "epoch": 1, "iter": 8350, "lr": 0.03, "memory": 4086, "data_time": 0.00699, "loss_rpn_cls": 0.05091, "loss_rpn_bbox": 0.06773, "loss_cls": 0.29637, "acc": 91.20801, "loss_bbox": 0.26066, "loss": 0.67567, "time": 0.12419}
169
+ {"mode": "train", "epoch": 1, "iter": 8400, "lr": 0.03, "memory": 4086, "data_time": 0.00756, "loss_rpn_cls": 0.05131, "loss_rpn_bbox": 0.06437, "loss_cls": 0.29236, "acc": 91.31982, "loss_bbox": 0.25511, "loss": 0.66316, "time": 0.12434}
170
+ {"mode": "train", "epoch": 1, "iter": 8450, "lr": 0.03, "memory": 4086, "data_time": 0.00721, "loss_rpn_cls": 0.05055, "loss_rpn_bbox": 0.06587, "loss_cls": 0.28216, "acc": 91.47266, "loss_bbox": 0.25718, "loss": 0.65576, "time": 0.12562}
171
+ {"mode": "train", "epoch": 1, "iter": 8500, "lr": 0.03, "memory": 4086, "data_time": 0.0073, "loss_rpn_cls": 0.04981, "loss_rpn_bbox": 0.066, "loss_cls": 0.29244, "acc": 91.44678, "loss_bbox": 0.25482, "loss": 0.66308, "time": 0.12425}
172
+ {"mode": "train", "epoch": 1, "iter": 8550, "lr": 0.03, "memory": 4086, "data_time": 0.00749, "loss_rpn_cls": 0.04878, "loss_rpn_bbox": 0.06117, "loss_cls": 0.28808, "acc": 91.56714, "loss_bbox": 0.24849, "loss": 0.64652, "time": 0.12578}
173
+ {"mode": "train", "epoch": 1, "iter": 8600, "lr": 0.03, "memory": 4086, "data_time": 0.00702, "loss_rpn_cls": 0.05736, "loss_rpn_bbox": 0.06929, "loss_cls": 0.29938, "acc": 91.07104, "loss_bbox": 0.26086, "loss": 0.68689, "time": 0.12832}
174
+ {"mode": "train", "epoch": 1, "iter": 8650, "lr": 0.03, "memory": 4086, "data_time": 0.00718, "loss_rpn_cls": 0.04809, "loss_rpn_bbox": 0.0616, "loss_cls": 0.27834, "acc": 91.60693, "loss_bbox": 0.25044, "loss": 0.63847, "time": 0.13081}
175
+ {"mode": "train", "epoch": 1, "iter": 8700, "lr": 0.03, "memory": 4086, "data_time": 0.007, "loss_rpn_cls": 0.05469, "loss_rpn_bbox": 0.06533, "loss_cls": 0.29184, "acc": 91.40259, "loss_bbox": 0.25511, "loss": 0.66697, "time": 0.12419}
176
+ {"mode": "train", "epoch": 1, "iter": 8750, "lr": 0.03, "memory": 4086, "data_time": 0.00712, "loss_rpn_cls": 0.04925, "loss_rpn_bbox": 0.06205, "loss_cls": 0.27592, "acc": 91.70605, "loss_bbox": 0.24502, "loss": 0.63224, "time": 0.12392}
177
+ {"mode": "train", "epoch": 1, "iter": 8800, "lr": 0.03, "memory": 4086, "data_time": 0.00705, "loss_rpn_cls": 0.04938, "loss_rpn_bbox": 0.06078, "loss_cls": 0.28338, "acc": 91.60449, "loss_bbox": 0.25166, "loss": 0.6452, "time": 0.12441}
178
+ {"mode": "train", "epoch": 1, "iter": 8850, "lr": 0.03, "memory": 4086, "data_time": 0.00708, "loss_rpn_cls": 0.05475, "loss_rpn_bbox": 0.06588, "loss_cls": 0.29866, "acc": 90.98779, "loss_bbox": 0.26622, "loss": 0.68552, "time": 0.12648}
179
+ {"mode": "train", "epoch": 1, "iter": 8900, "lr": 0.03, "memory": 4086, "data_time": 0.00726, "loss_rpn_cls": 0.0507, "loss_rpn_bbox": 0.06706, "loss_cls": 0.29759, "acc": 91.03589, "loss_bbox": 0.26102, "loss": 0.67638, "time": 0.12505}
180
+ {"mode": "train", "epoch": 1, "iter": 8950, "lr": 0.03, "memory": 4086, "data_time": 0.00727, "loss_rpn_cls": 0.04829, "loss_rpn_bbox": 0.06423, "loss_cls": 0.27861, "acc": 91.68579, "loss_bbox": 0.24878, "loss": 0.63991, "time": 0.12522}
181
+ {"mode": "train", "epoch": 1, "iter": 9000, "lr": 0.03, "memory": 4086, "data_time": 0.00737, "loss_rpn_cls": 0.05292, "loss_rpn_bbox": 0.06854, "loss_cls": 0.29321, "acc": 91.15771, "loss_bbox": 0.26263, "loss": 0.6773, "time": 0.12712}
182
+ {"mode": "train", "epoch": 1, "iter": 9050, "lr": 0.003, "memory": 4086, "data_time": 0.00756, "loss_rpn_cls": 0.04731, "loss_rpn_bbox": 0.06405, "loss_cls": 0.28391, "acc": 91.323, "loss_bbox": 0.2576, "loss": 0.65287, "time": 0.12506}
183
+ {"mode": "train", "epoch": 1, "iter": 9100, "lr": 0.003, "memory": 4086, "data_time": 0.0076, "loss_rpn_cls": 0.04624, "loss_rpn_bbox": 0.06, "loss_cls": 0.26564, "acc": 91.74878, "loss_bbox": 0.24754, "loss": 0.61942, "time": 0.12505}
184
+ {"mode": "train", "epoch": 1, "iter": 9150, "lr": 0.003, "memory": 4086, "data_time": 0.00752, "loss_rpn_cls": 0.04861, "loss_rpn_bbox": 0.06437, "loss_cls": 0.27286, "acc": 91.65454, "loss_bbox": 0.24795, "loss": 0.63379, "time": 0.12774}
185
+ {"mode": "train", "epoch": 1, "iter": 9200, "lr": 0.003, "memory": 4086, "data_time": 0.0075, "loss_rpn_cls": 0.04643, "loss_rpn_bbox": 0.06202, "loss_cls": 0.26017, "acc": 91.80078, "loss_bbox": 0.25287, "loss": 0.62149, "time": 0.12628}
186
+ {"mode": "train", "epoch": 1, "iter": 9250, "lr": 0.003, "memory": 4086, "data_time": 0.00774, "loss_rpn_cls": 0.04382, "loss_rpn_bbox": 0.05886, "loss_cls": 0.26827, "acc": 91.53662, "loss_bbox": 0.2531, "loss": 0.62405, "time": 0.12839}
187
+ {"mode": "train", "epoch": 1, "iter": 9300, "lr": 0.003, "memory": 4086, "data_time": 0.00773, "loss_rpn_cls": 0.04594, "loss_rpn_bbox": 0.06403, "loss_cls": 0.27312, "acc": 91.44824, "loss_bbox": 0.25831, "loss": 0.6414, "time": 0.12747}
188
+ {"mode": "train", "epoch": 1, "iter": 9350, "lr": 0.003, "memory": 4086, "data_time": 0.00807, "loss_rpn_cls": 0.04335, "loss_rpn_bbox": 0.05946, "loss_cls": 0.26772, "acc": 91.72974, "loss_bbox": 0.24967, "loss": 0.62021, "time": 0.12638}
189
+ {"mode": "train", "epoch": 1, "iter": 9400, "lr": 0.003, "memory": 4086, "data_time": 0.00807, "loss_rpn_cls": 0.04561, "loss_rpn_bbox": 0.06281, "loss_cls": 0.25674, "acc": 91.91724, "loss_bbox": 0.24643, "loss": 0.61159, "time": 0.12755}
190
+ {"mode": "train", "epoch": 1, "iter": 9450, "lr": 0.003, "memory": 4086, "data_time": 0.00781, "loss_rpn_cls": 0.04217, "loss_rpn_bbox": 0.06191, "loss_cls": 0.26106, "acc": 91.84937, "loss_bbox": 0.24931, "loss": 0.61445, "time": 0.12531}
191
+ {"mode": "train", "epoch": 1, "iter": 9500, "lr": 0.003, "memory": 4086, "data_time": 0.00768, "loss_rpn_cls": 0.04587, "loss_rpn_bbox": 0.06398, "loss_cls": 0.27613, "acc": 91.44409, "loss_bbox": 0.25608, "loss": 0.64205, "time": 0.12542}
192
+ {"mode": "train", "epoch": 1, "iter": 9550, "lr": 0.003, "memory": 4086, "data_time": 0.00717, "loss_rpn_cls": 0.04567, "loss_rpn_bbox": 0.06044, "loss_cls": 0.26172, "acc": 91.67749, "loss_bbox": 0.25451, "loss": 0.62235, "time": 0.12693}
193
+ {"mode": "train", "epoch": 1, "iter": 9600, "lr": 0.003, "memory": 4086, "data_time": 0.00713, "loss_rpn_cls": 0.04253, "loss_rpn_bbox": 0.05444, "loss_cls": 0.25444, "acc": 92.1001, "loss_bbox": 0.23776, "loss": 0.58917, "time": 0.12527}
194
+ {"mode": "train", "epoch": 1, "iter": 9650, "lr": 0.003, "memory": 4086, "data_time": 0.00749, "loss_rpn_cls": 0.04348, "loss_rpn_bbox": 0.05794, "loss_cls": 0.25661, "acc": 91.87427, "loss_bbox": 0.24369, "loss": 0.60173, "time": 0.12347}
195
+ {"mode": "train", "epoch": 1, "iter": 9700, "lr": 0.003, "memory": 4086, "data_time": 0.00721, "loss_rpn_cls": 0.04301, "loss_rpn_bbox": 0.06072, "loss_cls": 0.25332, "acc": 91.98608, "loss_bbox": 0.24438, "loss": 0.60144, "time": 0.12496}
196
+ {"mode": "train", "epoch": 1, "iter": 9750, "lr": 0.003, "memory": 4086, "data_time": 0.0076, "loss_rpn_cls": 0.04384, "loss_rpn_bbox": 0.06023, "loss_cls": 0.25452, "acc": 91.91211, "loss_bbox": 0.24349, "loss": 0.60208, "time": 0.12441}
197
+ {"mode": "train", "epoch": 1, "iter": 9800, "lr": 0.003, "memory": 4086, "data_time": 0.00773, "loss_rpn_cls": 0.04337, "loss_rpn_bbox": 0.06105, "loss_cls": 0.26257, "acc": 91.63794, "loss_bbox": 0.25024, "loss": 0.61723, "time": 0.12435}
198
+ {"mode": "train", "epoch": 1, "iter": 9850, "lr": 0.003, "memory": 4086, "data_time": 0.00758, "loss_rpn_cls": 0.03933, "loss_rpn_bbox": 0.05864, "loss_cls": 0.2424, "acc": 92.30713, "loss_bbox": 0.22981, "loss": 0.57018, "time": 0.12391}
199
+ {"mode": "train", "epoch": 1, "iter": 9900, "lr": 0.003, "memory": 4086, "data_time": 0.00749, "loss_rpn_cls": 0.04283, "loss_rpn_bbox": 0.05851, "loss_cls": 0.25776, "acc": 91.78491, "loss_bbox": 0.24656, "loss": 0.60565, "time": 0.12793}
200
+ {"mode": "train", "epoch": 1, "iter": 9950, "lr": 0.003, "memory": 4086, "data_time": 0.0076, "loss_rpn_cls": 0.04, "loss_rpn_bbox": 0.05756, "loss_cls": 0.23983, "acc": 92.43457, "loss_bbox": 0.23398, "loss": 0.57136, "time": 0.12608}
201
+ {"mode": "train", "epoch": 1, "iter": 10000, "lr": 0.003, "memory": 4086, "data_time": 0.00738, "loss_rpn_cls": 0.04284, "loss_rpn_bbox": 0.05963, "loss_cls": 0.25938, "acc": 91.85962, "loss_bbox": 0.24545, "loss": 0.6073, "time": 0.12469}
202
+ {"mode": "train", "epoch": 1, "iter": 10050, "lr": 0.003, "memory": 4086, "data_time": 0.00738, "loss_rpn_cls": 0.04326, "loss_rpn_bbox": 0.05799, "loss_cls": 0.26143, "acc": 91.84985, "loss_bbox": 0.25236, "loss": 0.61504, "time": 0.12699}
203
+ {"mode": "train", "epoch": 1, "iter": 10100, "lr": 0.003, "memory": 4086, "data_time": 0.00722, "loss_rpn_cls": 0.04335, "loss_rpn_bbox": 0.05897, "loss_cls": 0.25024, "acc": 92.00537, "loss_bbox": 0.23829, "loss": 0.59085, "time": 0.13002}
204
+ {"mode": "train", "epoch": 1, "iter": 10150, "lr": 0.003, "memory": 4086, "data_time": 0.00748, "loss_rpn_cls": 0.03917, "loss_rpn_bbox": 0.05565, "loss_cls": 0.2491, "acc": 92.04248, "loss_bbox": 0.24292, "loss": 0.58684, "time": 0.12516}
205
+ {"mode": "train", "epoch": 1, "iter": 10200, "lr": 0.003, "memory": 4086, "data_time": 0.0072, "loss_rpn_cls": 0.04008, "loss_rpn_bbox": 0.05838, "loss_cls": 0.25286, "acc": 92.01294, "loss_bbox": 0.24283, "loss": 0.59415, "time": 0.12445}
206
+ {"mode": "train", "epoch": 1, "iter": 10250, "lr": 0.003, "memory": 4086, "data_time": 0.00713, "loss_rpn_cls": 0.04256, "loss_rpn_bbox": 0.05995, "loss_cls": 0.26333, "acc": 91.76855, "loss_bbox": 0.25115, "loss": 0.61699, "time": 0.13022}
207
+ {"mode": "train", "epoch": 1, "iter": 10300, "lr": 0.003, "memory": 4086, "data_time": 0.00737, "loss_rpn_cls": 0.04139, "loss_rpn_bbox": 0.0583, "loss_cls": 0.25534, "acc": 91.83276, "loss_bbox": 0.24235, "loss": 0.59738, "time": 0.12616}
208
+ {"mode": "train", "epoch": 1, "iter": 10350, "lr": 0.003, "memory": 4086, "data_time": 0.00725, "loss_rpn_cls": 0.04302, "loss_rpn_bbox": 0.05732, "loss_cls": 0.2641, "acc": 91.73877, "loss_bbox": 0.24714, "loss": 0.61158, "time": 0.12374}
209
+ {"mode": "train", "epoch": 1, "iter": 10400, "lr": 0.003, "memory": 4086, "data_time": 0.0074, "loss_rpn_cls": 0.0402, "loss_rpn_bbox": 0.05458, "loss_cls": 0.25281, "acc": 91.9519, "loss_bbox": 0.24233, "loss": 0.58992, "time": 0.12512}
210
+ {"mode": "train", "epoch": 1, "iter": 10450, "lr": 0.003, "memory": 4086, "data_time": 0.00722, "loss_rpn_cls": 0.04237, "loss_rpn_bbox": 0.05826, "loss_cls": 0.2505, "acc": 92.03052, "loss_bbox": 0.24151, "loss": 0.59265, "time": 0.12463}
211
+ {"mode": "train", "epoch": 1, "iter": 10500, "lr": 0.003, "memory": 4086, "data_time": 0.00708, "loss_rpn_cls": 0.03979, "loss_rpn_bbox": 0.0561, "loss_cls": 0.24844, "acc": 92.1521, "loss_bbox": 0.2408, "loss": 0.58513, "time": 0.12643}
212
+ {"mode": "train", "epoch": 1, "iter": 10550, "lr": 0.003, "memory": 4086, "data_time": 0.00685, "loss_rpn_cls": 0.04416, "loss_rpn_bbox": 0.05909, "loss_cls": 0.25061, "acc": 92.17847, "loss_bbox": 0.23852, "loss": 0.59239, "time": 0.12637}
213
+ {"mode": "train", "epoch": 1, "iter": 10600, "lr": 0.003, "memory": 4086, "data_time": 0.00738, "loss_rpn_cls": 0.04234, "loss_rpn_bbox": 0.05808, "loss_cls": 0.24569, "acc": 92.22144, "loss_bbox": 0.23841, "loss": 0.58452, "time": 0.12628}
214
+ {"mode": "train", "epoch": 1, "iter": 10650, "lr": 0.003, "memory": 4086, "data_time": 0.00733, "loss_rpn_cls": 0.04445, "loss_rpn_bbox": 0.06213, "loss_cls": 0.25859, "acc": 91.87695, "loss_bbox": 0.24249, "loss": 0.60767, "time": 0.12613}
215
+ {"mode": "train", "epoch": 1, "iter": 10700, "lr": 0.003, "memory": 4086, "data_time": 0.00706, "loss_rpn_cls": 0.04298, "loss_rpn_bbox": 0.06013, "loss_cls": 0.25484, "acc": 91.93164, "loss_bbox": 0.24705, "loss": 0.605, "time": 0.125}
216
+ {"mode": "train", "epoch": 1, "iter": 10750, "lr": 0.003, "memory": 4086, "data_time": 0.00713, "loss_rpn_cls": 0.04448, "loss_rpn_bbox": 0.06122, "loss_cls": 0.27062, "acc": 91.48975, "loss_bbox": 0.25864, "loss": 0.63496, "time": 0.12556}
217
+ {"mode": "train", "epoch": 1, "iter": 10800, "lr": 0.003, "memory": 4086, "data_time": 0.00713, "loss_rpn_cls": 0.0418, "loss_rpn_bbox": 0.05812, "loss_cls": 0.25141, "acc": 91.92725, "loss_bbox": 0.25018, "loss": 0.60151, "time": 0.12644}
218
+ {"mode": "train", "epoch": 1, "iter": 10850, "lr": 0.003, "memory": 4086, "data_time": 0.00707, "loss_rpn_cls": 0.03736, "loss_rpn_bbox": 0.05273, "loss_cls": 0.24332, "acc": 92.39819, "loss_bbox": 0.22895, "loss": 0.56237, "time": 0.12403}
219
+ {"mode": "train", "epoch": 1, "iter": 10900, "lr": 0.003, "memory": 4086, "data_time": 0.00739, "loss_rpn_cls": 0.03984, "loss_rpn_bbox": 0.05928, "loss_cls": 0.2516, "acc": 92.02686, "loss_bbox": 0.23986, "loss": 0.59058, "time": 0.12513}
220
+ {"mode": "train", "epoch": 1, "iter": 10950, "lr": 0.003, "memory": 4086, "data_time": 0.00759, "loss_rpn_cls": 0.03981, "loss_rpn_bbox": 0.06248, "loss_cls": 0.25731, "acc": 91.75781, "loss_bbox": 0.25326, "loss": 0.61287, "time": 0.12644}
221
+ {"mode": "train", "epoch": 1, "iter": 11000, "lr": 0.003, "memory": 4086, "data_time": 0.00782, "loss_rpn_cls": 0.04215, "loss_rpn_bbox": 0.05917, "loss_cls": 0.24897, "acc": 91.95361, "loss_bbox": 0.24399, "loss": 0.59428, "time": 0.12597}
222
+ {"mode": "train", "epoch": 1, "iter": 11050, "lr": 0.0003, "memory": 4086, "data_time": 0.00779, "loss_rpn_cls": 0.04095, "loss_rpn_bbox": 0.05971, "loss_cls": 0.26036, "acc": 91.73682, "loss_bbox": 0.25067, "loss": 0.61168, "time": 0.12637}
223
+ {"mode": "train", "epoch": 1, "iter": 11100, "lr": 0.0003, "memory": 4086, "data_time": 0.00789, "loss_rpn_cls": 0.04053, "loss_rpn_bbox": 0.05824, "loss_cls": 0.25094, "acc": 91.97681, "loss_bbox": 0.24273, "loss": 0.59245, "time": 0.12598}
224
+ {"mode": "train", "epoch": 1, "iter": 11150, "lr": 0.0003, "memory": 4086, "data_time": 0.00764, "loss_rpn_cls": 0.04148, "loss_rpn_bbox": 0.05709, "loss_cls": 0.23714, "acc": 92.38574, "loss_bbox": 0.22856, "loss": 0.56427, "time": 0.12483}
225
+ {"mode": "train", "epoch": 1, "iter": 11200, "lr": 0.0003, "memory": 4086, "data_time": 0.00766, "loss_rpn_cls": 0.04074, "loss_rpn_bbox": 0.05867, "loss_cls": 0.24298, "acc": 92.23975, "loss_bbox": 0.23597, "loss": 0.57835, "time": 0.128}
226
+ {"mode": "train", "epoch": 1, "iter": 11250, "lr": 0.0003, "memory": 4086, "data_time": 0.00794, "loss_rpn_cls": 0.04008, "loss_rpn_bbox": 0.05704, "loss_cls": 0.24908, "acc": 92.0874, "loss_bbox": 0.24255, "loss": 0.58875, "time": 0.12495}
227
+ {"mode": "train", "epoch": 1, "iter": 11300, "lr": 0.0003, "memory": 4086, "data_time": 0.00752, "loss_rpn_cls": 0.04036, "loss_rpn_bbox": 0.05512, "loss_cls": 0.24726, "acc": 92.22876, "loss_bbox": 0.23788, "loss": 0.58062, "time": 0.12783}
228
+ {"mode": "train", "epoch": 1, "iter": 11350, "lr": 0.0003, "memory": 4086, "data_time": 0.00722, "loss_rpn_cls": 0.04193, "loss_rpn_bbox": 0.05825, "loss_cls": 0.25594, "acc": 91.97485, "loss_bbox": 0.24435, "loss": 0.60047, "time": 0.12561}
229
+ {"mode": "train", "epoch": 1, "iter": 11400, "lr": 0.0003, "memory": 4086, "data_time": 0.00706, "loss_rpn_cls": 0.0426, "loss_rpn_bbox": 0.06118, "loss_cls": 0.26107, "acc": 91.83228, "loss_bbox": 0.2458, "loss": 0.61064, "time": 0.12497}
230
+ {"mode": "train", "epoch": 1, "iter": 11450, "lr": 0.0003, "memory": 4086, "data_time": 0.00698, "loss_rpn_cls": 0.0397, "loss_rpn_bbox": 0.05726, "loss_cls": 0.25467, "acc": 91.85107, "loss_bbox": 0.24942, "loss": 0.60105, "time": 0.12513}
231
+ {"mode": "train", "epoch": 1, "iter": 11500, "lr": 0.0003, "memory": 4086, "data_time": 0.00706, "loss_rpn_cls": 0.04038, "loss_rpn_bbox": 0.05906, "loss_cls": 0.25317, "acc": 91.86792, "loss_bbox": 0.25307, "loss": 0.60569, "time": 0.12637}
232
+ {"mode": "train", "epoch": 1, "iter": 11550, "lr": 0.0003, "memory": 4086, "data_time": 0.007, "loss_rpn_cls": 0.03953, "loss_rpn_bbox": 0.05689, "loss_cls": 0.24935, "acc": 92.18823, "loss_bbox": 0.24071, "loss": 0.58648, "time": 0.1269}
233
+ {"mode": "train", "epoch": 1, "iter": 11600, "lr": 0.0003, "memory": 4086, "data_time": 0.00686, "loss_rpn_cls": 0.04277, "loss_rpn_bbox": 0.05851, "loss_cls": 0.24777, "acc": 92.02466, "loss_bbox": 0.24709, "loss": 0.59615, "time": 0.12472}
234
+ {"mode": "train", "epoch": 1, "iter": 11650, "lr": 0.0003, "memory": 4086, "data_time": 0.00686, "loss_rpn_cls": 0.04217, "loss_rpn_bbox": 0.05947, "loss_cls": 0.25302, "acc": 91.94556, "loss_bbox": 0.24634, "loss": 0.601, "time": 0.12587}
235
+ {"mode": "train", "epoch": 1, "iter": 11700, "lr": 0.0003, "memory": 4086, "data_time": 0.00684, "loss_rpn_cls": 0.04237, "loss_rpn_bbox": 0.05937, "loss_cls": 0.25698, "acc": 91.80957, "loss_bbox": 0.24823, "loss": 0.60696, "time": 0.12459}
236
+ {"mode": "train", "epoch": 1, "iter": 11750, "lr": 0.0003, "memory": 4086, "data_time": 0.00688, "loss_rpn_cls": 0.0435, "loss_rpn_bbox": 0.06226, "loss_cls": 0.25516, "acc": 91.76685, "loss_bbox": 0.25191, "loss": 0.61283, "time": 0.12718}
237
+ {"mode": "train", "epoch": 1, "iter": 11800, "lr": 0.0003, "memory": 4086, "data_time": 0.00688, "loss_rpn_cls": 0.04439, "loss_rpn_bbox": 0.05969, "loss_cls": 0.24676, "acc": 92.08594, "loss_bbox": 0.24233, "loss": 0.59316, "time": 0.12543}
238
+ {"mode": "train", "epoch": 1, "iter": 11850, "lr": 0.0003, "memory": 4086, "data_time": 0.00696, "loss_rpn_cls": 0.04321, "loss_rpn_bbox": 0.05657, "loss_cls": 0.25807, "acc": 91.79932, "loss_bbox": 0.24744, "loss": 0.60529, "time": 0.1244}
239
+ {"mode": "train", "epoch": 1, "iter": 11900, "lr": 0.0003, "memory": 4086, "data_time": 0.00679, "loss_rpn_cls": 0.04002, "loss_rpn_bbox": 0.05604, "loss_cls": 0.25288, "acc": 91.80713, "loss_bbox": 0.24632, "loss": 0.59526, "time": 0.1242}
240
+ {"mode": "train", "epoch": 1, "iter": 11950, "lr": 0.0003, "memory": 4086, "data_time": 0.00669, "loss_rpn_cls": 0.03918, "loss_rpn_bbox": 0.05533, "loss_cls": 0.25328, "acc": 91.90625, "loss_bbox": 0.24626, "loss": 0.59404, "time": 0.1232}
241
+ {"mode": "train", "epoch": 1, "iter": 12000, "lr": 0.0003, "memory": 4086, "data_time": 0.00689, "loss_rpn_cls": 0.04113, "loss_rpn_bbox": 0.05834, "loss_cls": 0.25107, "acc": 92.00781, "loss_bbox": 0.24318, "loss": 0.59372, "time": 0.13826}
242
+ {"mode": "val", "epoch": 1, "iter": 625, "lr": 0.0003, "bbox_mAP": 0.306, "bbox_mAP_50": 0.49, "bbox_mAP_75": 0.327, "bbox_mAP_s": 0.169, "bbox_mAP_m": 0.336, "bbox_mAP_l": 0.395, "bbox_mAP_copypaste": "0.306 0.490 0.327 0.169 0.336 0.395"}
finetune/finetune_faster-rcnn_12k_coco/best_bbox_mAP_iter_12000.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1b56f08c8cf0b7a3ccc07eced45e58595da0249a3d5070ffd61943db4a20acd1
3
+ size 344045801
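The .pth entries in this repo are Git LFS pointers (version/oid/size) rather than the weights themselves. Once the real checkpoint has been fetched, it can be inspected with plain PyTorch; a minimal sketch, assuming the usual MMCV/MMDetection checkpoint layout with 'meta' and 'state_dict' keys (the local path is a placeholder):

import torch

ckpt = torch.load("best_bbox_mAP_iter_12000.pth", map_location="cpu")  # placeholder local path
# MMCV-saved checkpoints usually bundle training metadata alongside the weights.
print(ckpt.get("meta", {}).get("mmdet_version"))
print(len(ckpt["state_dict"]), "parameter tensors")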
finetune/finetune_faster-rcnn_12k_coco/faster_rcnn_fpn_12k_semi-coco.py ADDED
@@ -0,0 +1,248 @@
1
+ model = dict(
2
+ type='MaskRCNN',
3
+ backbone=dict(
4
+ type='ResNet',
5
+ depth=50,
6
+ num_stages=4,
7
+ out_indices=(0, 1, 2, 3),
8
+ frozen_stages=1,
9
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
10
+ norm_eval=True,
11
+ style='pytorch',
12
+ init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
13
+ neck=dict(
14
+ type='FPN',
15
+ in_channels=[256, 512, 1024, 2048],
16
+ out_channels=256,
17
+ num_outs=5,
18
+ norm_cfg=dict(type='SyncBN', requires_grad=True)),
19
+ rpn_head=dict(
20
+ type='RPNHead',
21
+ in_channels=256,
22
+ feat_channels=256,
23
+ anchor_generator=dict(
24
+ type='AnchorGenerator',
25
+ scales=[8],
26
+ ratios=[0.5, 1.0, 2.0],
27
+ strides=[4, 8, 16, 32, 64]),
28
+ bbox_coder=dict(
29
+ type='DeltaXYWHBBoxCoder',
30
+ target_means=[0.0, 0.0, 0.0, 0.0],
31
+ target_stds=[1.0, 1.0, 1.0, 1.0]),
32
+ loss_cls=dict(
33
+ type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
34
+ loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
35
+ roi_head=dict(
36
+ type='StandardRoIHead',
37
+ bbox_roi_extractor=dict(
38
+ type='SingleRoIExtractor',
39
+ roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
40
+ out_channels=256,
41
+ featmap_strides=[4, 8, 16, 32]),
42
+ bbox_head=dict(
43
+ type='Shared4Conv1FCBBoxHead',
44
+ in_channels=256,
45
+ fc_out_channels=1024,
46
+ roi_feat_size=7,
47
+ num_classes=80,
48
+ bbox_coder=dict(
49
+ type='DeltaXYWHBBoxCoder',
50
+ target_means=[0.0, 0.0, 0.0, 0.0],
51
+ target_stds=[0.1, 0.1, 0.2, 0.2]),
52
+ reg_class_agnostic=False,
53
+ loss_cls=dict(
54
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
55
+ loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
56
+ mask_roi_extractor=None,
57
+ mask_head=None),
58
+ train_cfg=dict(
59
+ rpn=dict(
60
+ assigner=dict(
61
+ type='MaxIoUAssigner',
62
+ pos_iou_thr=0.7,
63
+ neg_iou_thr=0.3,
64
+ min_pos_iou=0.3,
65
+ match_low_quality=True,
66
+ ignore_iof_thr=-1),
67
+ sampler=dict(
68
+ type='RandomSampler',
69
+ num=256,
70
+ pos_fraction=0.5,
71
+ neg_pos_ub=-1,
72
+ add_gt_as_proposals=False),
73
+ allowed_border=-1,
74
+ pos_weight=-1,
75
+ debug=False),
76
+ rpn_proposal=dict(
77
+ nms_pre=2000,
78
+ max_per_img=1000,
79
+ nms=dict(type='nms', iou_threshold=0.7),
80
+ min_bbox_size=0),
81
+ rcnn=dict(
82
+ assigner=dict(
83
+ type='MaxIoUAssigner',
84
+ pos_iou_thr=0.5,
85
+ neg_iou_thr=0.5,
86
+ min_pos_iou=0.5,
87
+ match_low_quality=True,
88
+ ignore_iof_thr=-1),
89
+ sampler=dict(
90
+ type='RandomSampler',
91
+ num=512,
92
+ pos_fraction=0.25,
93
+ neg_pos_ub=-1,
94
+ add_gt_as_proposals=True),
95
+ mask_size=28,
96
+ pos_weight=-1,
97
+ debug=False)),
98
+ test_cfg=dict(
99
+ rpn=dict(
100
+ nms_pre=1000,
101
+ max_per_img=1000,
102
+ nms=dict(type='nms', iou_threshold=0.7),
103
+ min_bbox_size=0),
104
+ rcnn=dict(
105
+ score_thr=0.05,
106
+ nms=dict(type='nms', iou_threshold=0.5),
107
+ max_per_img=100,
108
+ mask_thr_binary=0.5)))
109
+ dataset_type = 'CocoDataset'
110
+ data_root = 'data/coco/'
111
+ img_norm_cfg = dict(
112
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
113
+ train_pipeline = [
114
+ dict(type='LoadImageFromFile'),
115
+ dict(type='LoadAnnotations', with_bbox=True),
116
+ dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
117
+ dict(type='RandomFlip', flip_ratio=0.5),
118
+ dict(
119
+ type='Normalize',
120
+ mean=[123.675, 116.28, 103.53],
121
+ std=[58.395, 57.12, 57.375],
122
+ to_rgb=True),
123
+ dict(type='Pad', size_divisor=32),
124
+ dict(type='DefaultFormatBundle'),
125
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
126
+ ]
127
+ test_pipeline = [
128
+ dict(type='LoadImageFromFile'),
129
+ dict(
130
+ type='MultiScaleFlipAug',
131
+ img_scale=(1333, 800),
132
+ flip=False,
133
+ transforms=[
134
+ dict(type='Resize', keep_ratio=True),
135
+ dict(type='RandomFlip'),
136
+ dict(
137
+ type='Normalize',
138
+ mean=[123.675, 116.28, 103.53],
139
+ std=[58.395, 57.12, 57.375],
140
+ to_rgb=True),
141
+ dict(type='Pad', size_divisor=32),
142
+ dict(type='ImageToTensor', keys=['img']),
143
+ dict(type='Collect', keys=['img'])
144
+ ])
145
+ ]
146
+ data = dict(
147
+ samples_per_gpu=2,
148
+ workers_per_gpu=2,
149
+ train=dict(
150
+ type='CocoDataset',
151
+ ann_file='data/coco/annotations/instances_train2017.json',
152
+ img_prefix='data/coco/train2017/',
153
+ pipeline=[
154
+ dict(type='LoadImageFromFile'),
155
+ dict(type='LoadAnnotations', with_bbox=True),
156
+ dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
157
+ dict(type='RandomFlip', flip_ratio=0.5),
158
+ dict(
159
+ type='Normalize',
160
+ mean=[123.675, 116.28, 103.53],
161
+ std=[58.395, 57.12, 57.375],
162
+ to_rgb=True),
163
+ dict(type='Pad', size_divisor=32),
164
+ dict(type='DefaultFormatBundle'),
165
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
166
+ ]),
167
+ val=dict(
168
+ type='CocoDataset',
169
+ ann_file='data/coco/annotations/instances_val2017.json',
170
+ img_prefix='data/coco/val2017/',
171
+ pipeline=[
172
+ dict(type='LoadImageFromFile'),
173
+ dict(
174
+ type='MultiScaleFlipAug',
175
+ img_scale=(1333, 800),
176
+ flip=False,
177
+ transforms=[
178
+ dict(type='Resize', keep_ratio=True),
179
+ dict(type='RandomFlip'),
180
+ dict(
181
+ type='Normalize',
182
+ mean=[123.675, 116.28, 103.53],
183
+ std=[58.395, 57.12, 57.375],
184
+ to_rgb=True),
185
+ dict(type='Pad', size_divisor=32),
186
+ dict(type='ImageToTensor', keys=['img']),
187
+ dict(type='Collect', keys=['img'])
188
+ ])
189
+ ]),
190
+ test=dict(
191
+ type='CocoDataset',
192
+ ann_file='data/coco/annotations/instances_val2017.json',
193
+ img_prefix='data/coco/val2017/',
194
+ pipeline=[
195
+ dict(type='LoadImageFromFile'),
196
+ dict(
197
+ type='MultiScaleFlipAug',
198
+ img_scale=(1333, 800),
199
+ flip=False,
200
+ transforms=[
201
+ dict(type='Resize', keep_ratio=True),
202
+ dict(type='RandomFlip'),
203
+ dict(
204
+ type='Normalize',
205
+ mean=[123.675, 116.28, 103.53],
206
+ std=[58.395, 57.12, 57.375],
207
+ to_rgb=True),
208
+ dict(type='Pad', size_divisor=32),
209
+ dict(type='ImageToTensor', keys=['img']),
210
+ dict(type='Collect', keys=['img'])
211
+ ])
212
+ ]))
213
+ evaluation = dict(
214
+ interval=12000, metric='bbox', save_best='auto', gpu_collect=True)
215
+ optimizer = dict(type='SGD', lr=0.03, momentum=0.9, weight_decay=5e-05)
216
+ optimizer_config = dict(grad_clip=None)
217
+ lr_config = dict(
218
+ policy='step',
219
+ warmup='linear',
220
+ warmup_iters=500,
221
+ warmup_ratio=0.001,
222
+ step=[9000, 11000],
223
+ by_epoch=False)
224
+ runner = dict(type='IterBasedRunner', max_iters=12000)
225
+ checkpoint_config = dict(interval=12000)
226
+ log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
227
+ custom_hooks = [
228
+ dict(type='NumClassCheckHook'),
229
+ dict(
230
+ type='MMDetWandbHook',
231
+ init_kwargs=dict(project='I2B', group='semi-coco'),
232
+ interval=50,
233
+ num_eval_images=0,
234
+ log_checkpoint=False)
235
+ ]
236
+ dist_params = dict(backend='nccl')
237
+ log_level = 'INFO'
238
+ load_from = 'pretrain/selfsup_mask-rcnn_mstrain-soft-teacher_sampler-4096_temp0.5/final_model.pth'
239
+ resume_from = None
240
+ workflow = [('train', 1)]
241
+ opencv_num_threads = 0
242
+ mp_start_method = 'fork'
243
+ auto_scale_lr = dict(enable=False, base_batch_size=16)
244
+ custom_imports = None
245
+ norm_cfg = dict(type='SyncBN', requires_grad=True)
246
+ work_dir = 'work_dirs/finetune_faster-rcnn_12k_coco'
247
+ auto_resume = False
248
+ gpu_ids = range(0, 8)
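Since the file above is a complete, self-contained MMDetection config, it can be reloaded and tweaked with mmcv's Config API before launching another run. A sketch under that assumption (the checkpoint path and the shortened schedule are illustrative values, not settings used in this repo):

from mmcv import Config

cfg = Config.fromfile(
    'finetune/finetune_faster-rcnn_12k_coco/faster_rcnn_fpn_12k_semi-coco.py')
cfg.load_from = 'path/to/your_pretrained.pth'  # hypothetical checkpoint
cfg.runner.max_iters = 6000                    # e.g. a shorter 6k-iteration schedule
cfg.lr_config.step = [4500, 5500]              # keep the decays proportional to the schedule
print(cfg.pretty_text[:400])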
finetune/finetune_faster-rcnn_1x_coco_lr3e-2_wd5e-5/20221024_212434.log ADDED
The diff for this file is too large to render. See raw diff
 
finetune/finetune_faster-rcnn_1x_coco_lr3e-2_wd5e-5/20221024_212434.log.json ADDED
The diff for this file is too large to render. See raw diff
 
finetune/finetune_faster-rcnn_1x_coco_lr3e-2_wd5e-5/best_bbox_mAP_epoch_12.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5696a2ed63a4ab5bb500bfbd76e57bffbb11a892a27d518c8a0ef35fe6b181ad
3
+ size 344045929
finetune/finetune_faster-rcnn_1x_coco_lr3e-2_wd5e-5/faster_rcnn_r50_fpn_1x_coco.py ADDED
@@ -0,0 +1,249 @@
1
+ model = dict(
2
+ type='MaskRCNN',
3
+ backbone=dict(
4
+ type='ResNet',
5
+ depth=50,
6
+ num_stages=4,
7
+ out_indices=(0, 1, 2, 3),
8
+ frozen_stages=1,
9
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
10
+ norm_eval=True,
11
+ style='pytorch',
12
+ init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
13
+ neck=dict(
14
+ type='FPN',
15
+ in_channels=[256, 512, 1024, 2048],
16
+ out_channels=256,
17
+ num_outs=5,
18
+ norm_cfg=dict(type='SyncBN', requires_grad=True)),
19
+ rpn_head=dict(
20
+ type='RPNHead',
21
+ in_channels=256,
22
+ feat_channels=256,
23
+ anchor_generator=dict(
24
+ type='AnchorGenerator',
25
+ scales=[8],
26
+ ratios=[0.5, 1.0, 2.0],
27
+ strides=[4, 8, 16, 32, 64]),
28
+ bbox_coder=dict(
29
+ type='DeltaXYWHBBoxCoder',
30
+ target_means=[0.0, 0.0, 0.0, 0.0],
31
+ target_stds=[1.0, 1.0, 1.0, 1.0]),
32
+ loss_cls=dict(
33
+ type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
34
+ loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
35
+ roi_head=dict(
36
+ type='StandardRoIHead',
37
+ bbox_roi_extractor=dict(
38
+ type='SingleRoIExtractor',
39
+ roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
40
+ out_channels=256,
41
+ featmap_strides=[4, 8, 16, 32]),
42
+ bbox_head=dict(
43
+ type='Shared4Conv1FCBBoxHead',
44
+ in_channels=256,
45
+ fc_out_channels=1024,
46
+ roi_feat_size=7,
47
+ num_classes=80,
48
+ bbox_coder=dict(
49
+ type='DeltaXYWHBBoxCoder',
50
+ target_means=[0.0, 0.0, 0.0, 0.0],
51
+ target_stds=[0.1, 0.1, 0.2, 0.2]),
52
+ reg_class_agnostic=False,
53
+ loss_cls=dict(
54
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
55
+ loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
56
+ mask_roi_extractor=None,
57
+ mask_head=None),
58
+ train_cfg=dict(
59
+ rpn=dict(
60
+ assigner=dict(
61
+ type='MaxIoUAssigner',
62
+ pos_iou_thr=0.7,
63
+ neg_iou_thr=0.3,
64
+ min_pos_iou=0.3,
65
+ match_low_quality=True,
66
+ ignore_iof_thr=-1),
67
+ sampler=dict(
68
+ type='RandomSampler',
69
+ num=256,
70
+ pos_fraction=0.5,
71
+ neg_pos_ub=-1,
72
+ add_gt_as_proposals=False),
73
+ allowed_border=-1,
74
+ pos_weight=-1,
75
+ debug=False),
76
+ rpn_proposal=dict(
77
+ nms_pre=2000,
78
+ max_per_img=1000,
79
+ nms=dict(type='nms', iou_threshold=0.7),
80
+ min_bbox_size=0),
81
+ rcnn=dict(
82
+ assigner=dict(
83
+ type='MaxIoUAssigner',
84
+ pos_iou_thr=0.5,
85
+ neg_iou_thr=0.5,
86
+ min_pos_iou=0.5,
87
+ match_low_quality=True,
88
+ ignore_iof_thr=-1),
89
+ sampler=dict(
90
+ type='RandomSampler',
91
+ num=512,
92
+ pos_fraction=0.25,
93
+ neg_pos_ub=-1,
94
+ add_gt_as_proposals=True),
95
+ mask_size=28,
96
+ pos_weight=-1,
97
+ debug=False)),
98
+ test_cfg=dict(
99
+ rpn=dict(
100
+ nms_pre=1000,
101
+ max_per_img=1000,
102
+ nms=dict(type='nms', iou_threshold=0.7),
103
+ min_bbox_size=0),
104
+ rcnn=dict(
105
+ score_thr=0.05,
106
+ nms=dict(type='nms', iou_threshold=0.5),
107
+ max_per_img=100,
108
+ mask_thr_binary=0.5)))
109
+ dataset_type = 'CocoDataset'
110
+ data_root = 'data/coco/'
111
+ img_norm_cfg = dict(
112
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
113
+ train_pipeline = [
114
+ dict(type='LoadImageFromFile'),
115
+ dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
116
+ dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
117
+ dict(type='RandomFlip', flip_ratio=0.5),
118
+ dict(
119
+ type='Normalize',
120
+ mean=[123.675, 116.28, 103.53],
121
+ std=[58.395, 57.12, 57.375],
122
+ to_rgb=True),
123
+ dict(type='Pad', size_divisor=32),
124
+ dict(type='DefaultFormatBundle'),
125
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])
126
+ ]
127
+ test_pipeline = [
128
+ dict(type='LoadImageFromFile'),
129
+ dict(
130
+ type='MultiScaleFlipAug',
131
+ img_scale=(1333, 800),
132
+ flip=False,
133
+ transforms=[
134
+ dict(type='Resize', keep_ratio=True),
135
+ dict(type='RandomFlip'),
136
+ dict(
137
+ type='Normalize',
138
+ mean=[123.675, 116.28, 103.53],
139
+ std=[58.395, 57.12, 57.375],
140
+ to_rgb=True),
141
+ dict(type='Pad', size_divisor=32),
142
+ dict(type='ImageToTensor', keys=['img']),
143
+ dict(type='Collect', keys=['img'])
144
+ ])
145
+ ]
146
+ data = dict(
147
+ samples_per_gpu=2,
148
+ workers_per_gpu=2,
149
+ train=dict(
150
+ type='CocoDataset',
151
+ ann_file='data/coco/annotations/instances_train2017.json',
152
+ img_prefix='data/coco/train2017/',
153
+ pipeline=[
154
+ dict(type='LoadImageFromFile'),
155
+ dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
156
+ dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
157
+ dict(type='RandomFlip', flip_ratio=0.5),
158
+ dict(
159
+ type='Normalize',
160
+ mean=[123.675, 116.28, 103.53],
161
+ std=[58.395, 57.12, 57.375],
162
+ to_rgb=True),
163
+ dict(type='Pad', size_divisor=32),
164
+ dict(type='DefaultFormatBundle'),
165
+ dict(
166
+ type='Collect',
167
+ keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])
168
+ ]),
169
+ val=dict(
170
+ type='CocoDataset',
171
+ ann_file='data/coco/annotations/instances_val2017.json',
172
+ img_prefix='data/coco/val2017/',
173
+ pipeline=[
174
+ dict(type='LoadImageFromFile'),
175
+ dict(
176
+ type='MultiScaleFlipAug',
177
+ img_scale=(1333, 800),
178
+ flip=False,
179
+ transforms=[
180
+ dict(type='Resize', keep_ratio=True),
181
+ dict(type='RandomFlip'),
182
+ dict(
183
+ type='Normalize',
184
+ mean=[123.675, 116.28, 103.53],
185
+ std=[58.395, 57.12, 57.375],
186
+ to_rgb=True),
187
+ dict(type='Pad', size_divisor=32),
188
+ dict(type='ImageToTensor', keys=['img']),
189
+ dict(type='Collect', keys=['img'])
190
+ ])
191
+ ]),
192
+ test=dict(
193
+ type='CocoDataset',
194
+ ann_file='data/coco/annotations/instances_val2017.json',
195
+ img_prefix='data/coco/val2017/',
196
+ pipeline=[
197
+ dict(type='LoadImageFromFile'),
198
+ dict(
199
+ type='MultiScaleFlipAug',
200
+ img_scale=(1333, 800),
201
+ flip=False,
202
+ transforms=[
203
+ dict(type='Resize', keep_ratio=True),
204
+ dict(type='RandomFlip'),
205
+ dict(
206
+ type='Normalize',
207
+ mean=[123.675, 116.28, 103.53],
208
+ std=[58.395, 57.12, 57.375],
209
+ to_rgb=True),
210
+ dict(type='Pad', size_divisor=32),
211
+ dict(type='ImageToTensor', keys=['img']),
212
+ dict(type='Collect', keys=['img'])
213
+ ])
214
+ ]))
215
+ evaluation = dict(
216
+ metric='bbox', save_best='auto', gpu_collect=True, interval=1)
217
+ optimizer = dict(type='SGD', lr=0.03, momentum=0.9, weight_decay=5e-05)
218
+ optimizer_config = dict(grad_clip=None)
219
+ lr_config = dict(
220
+ policy='step',
221
+ warmup='linear',
222
+ warmup_iters=500,
223
+ warmup_ratio=0.001,
224
+ step=[8, 11])
225
+ runner = dict(type='EpochBasedRunner', max_epochs=12)
226
+ checkpoint_config = dict(interval=1)
227
+ log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
228
+ custom_hooks = [
229
+ dict(type='NumClassCheckHook'),
230
+ dict(
231
+ type='MMDetWandbHook',
232
+ init_kwargs=dict(project='I2B', group='finetune'),
233
+ interval=50,
234
+ num_eval_images=0,
235
+ log_checkpoint=False)
236
+ ]
237
+ dist_params = dict(backend='nccl')
238
+ log_level = 'INFO'
239
+ load_from = 'pretrain/selfsup_mask-rcnn_mstrain-soft-teacher_sampler-4096_temp0.5/keep_all.pth'
240
+ resume_from = None
241
+ workflow = [('train', 1)]
242
+ opencv_num_threads = 0
243
+ mp_start_method = 'fork'
244
+ auto_scale_lr = dict(enable=False, base_batch_size=16)
245
+ custom_imports = None
246
+ norm_cfg = dict(type='SyncBN', requires_grad=True)
247
+ work_dir = 'work_dirs/finetune_faster-rcnn_1x_coco_lr3e-2_wd5e-5_keep-all'
248
+ auto_resume = False
249
+ gpu_ids = range(0, 8)
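Unlike the 12k iteration-based run, this config uses an epoch-based 1x schedule: a base lr of 0.03 decayed by 10x at epochs 8 and 11, after a 500-iteration linear warmup starting at 0.001 of the base lr. A self-contained sketch of that schedule, written only to illustrate the numbers (it mirrors, but is not, MMCV's step LR updater):

def step_lr(base_lr, epoch, iteration, steps=(8, 11),
            warmup_iters=500, warmup_ratio=0.001, gamma=0.1):
    """Learning rate under a step policy with linear warmup (illustrative)."""
    # Post-warmup rate: decay by `gamma` at every milestone epoch already passed.
    regular = base_lr * gamma ** sum(epoch >= s for s in steps)
    if iteration < warmup_iters:
        # Linear warmup from warmup_ratio * base_lr up to the regular rate.
        k = (1 - iteration / warmup_iters) * (1 - warmup_ratio)
        return regular * (1 - k)
    return regular

print(step_lr(0.03, epoch=0, iteration=0))      # 3e-05 at the first warmup step
print(step_lr(0.03, epoch=5, iteration=5000))   # 0.03 during the main phase
print(step_lr(0.03, epoch=11, iteration=90000)) # 0.0003 after both decays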
finetune/finetune_fcos_12k_voc0712_lr1.5e-2_wd5e-5/20221003_230350.log ADDED
@@ -0,0 +1,1257 @@
1
+ 2022-10-03 23:03:50,973 - mmdet - INFO - Environment info:
2
+ ------------------------------------------------------------
3
+ sys.platform: linux
4
+ Python: 3.7.3 (default, Jan 22 2021, 20:04:44) [GCC 8.3.0]
5
+ CUDA available: True
6
+ GPU 0,1,2,3,4,5,6,7: A100-SXM-80GB
7
+ CUDA_HOME: /usr/local/cuda
8
+ NVCC: Cuda compilation tools, release 11.3, V11.3.109
9
+ GCC: x86_64-linux-gnu-gcc (Debian 8.3.0-6) 8.3.0
10
+ PyTorch: 1.10.0
11
+ PyTorch compiling details: PyTorch built with:
12
+ - GCC 7.3
13
+ - C++ Version: 201402
14
+ - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications
15
+ - Intel(R) MKL-DNN v2.2.3 (Git Hash 7336ca9f055cf1bfa13efb658fe15dc9b41f0740)
16
+ - OpenMP 201511 (a.k.a. OpenMP 4.5)
17
+ - LAPACK is enabled (usually provided by MKL)
18
+ - NNPACK is enabled
19
+ - CPU capability usage: AVX512
20
+ - CUDA Runtime 11.3
21
+ - NVCC architecture flags: -gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86
22
+ - CuDNN 8.2
23
+ - Magma 2.5.2
24
+ - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.10.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON,
25
+
26
+ TorchVision: 0.11.1+cu113
27
+ OpenCV: 4.6.0
28
+ MMCV: 1.6.1
29
+ MMCV Compiler: GCC 9.3
30
+ MMCV CUDA Compiler: 11.3
31
+ MMDetection: 2.25.2+87c120c
32
+ ------------------------------------------------------------
33
+
34
+ 2022-10-03 23:03:52,207 - mmdet - INFO - Distributed training: True
35
+ 2022-10-03 23:03:53,323 - mmdet - INFO - Config:
36
+ model = dict(
37
+ type='FCOS',
38
+ backbone=dict(
39
+ type='ResNet',
40
+ depth=50,
41
+ num_stages=4,
42
+ out_indices=(0, 1, 2, 3),
43
+ frozen_stages=1,
44
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
45
+ norm_eval=True,
46
+ style='pytorch',
47
+ init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
48
+ neck=dict(
49
+ type='FPN',
50
+ in_channels=[256, 512, 1024, 2048],
51
+ out_channels=256,
52
+ start_level=1,
53
+ add_extra_convs='on_output',
54
+ num_outs=5,
55
+ relu_before_extra_convs=True,
56
+ norm_cfg=dict(type='SyncBN', requires_grad=True)),
57
+ bbox_head=dict(
58
+ type='FCOSHead',
59
+ num_classes=20,
60
+ in_channels=256,
61
+ stacked_convs=4,
62
+ feat_channels=256,
63
+ strides=[8, 16, 32, 64, 128],
64
+ loss_cls=dict(
65
+ type='FocalLoss',
66
+ use_sigmoid=True,
67
+ gamma=2.0,
68
+ alpha=0.25,
69
+ loss_weight=1.0),
70
+ loss_bbox=dict(type='IoULoss', loss_weight=1.0),
71
+ loss_centerness=dict(
72
+ type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
73
+ train_cfg=dict(
74
+ assigner=dict(
75
+ type='MaxIoUAssigner',
76
+ pos_iou_thr=0.5,
77
+ neg_iou_thr=0.4,
78
+ min_pos_iou=0,
79
+ ignore_iof_thr=-1),
80
+ allowed_border=-1,
81
+ pos_weight=-1,
82
+ debug=False),
83
+ test_cfg=dict(
84
+ nms_pre=1000,
85
+ min_bbox_size=0,
86
+ score_thr=0.05,
87
+ nms=dict(type='nms', iou_threshold=0.5),
88
+ max_per_img=100))
89
+ dataset_type = 'VOCDataset'
90
+ data_root = 'data/VOCdevkit/'
91
+ img_norm_cfg = dict(
92
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
93
+ train_pipeline = [
94
+ dict(type='LoadImageFromFile'),
95
+ dict(type='LoadAnnotations', with_bbox=True),
96
+ dict(
97
+ type='Resize',
98
+ img_scale=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),
99
+ (1333, 608), (1333, 640), (1333, 672), (1333, 704),
100
+ (1333, 736), (1333, 768), (1333, 800)],
101
+ multiscale_mode='value',
102
+ keep_ratio=True),
103
+ dict(type='RandomFlip', flip_ratio=0.5),
104
+ dict(
105
+ type='Normalize',
106
+ mean=[123.675, 116.28, 103.53],
107
+ std=[58.395, 57.12, 57.375],
108
+ to_rgb=True),
109
+ dict(type='Pad', size_divisor=32),
110
+ dict(type='DefaultFormatBundle'),
111
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
112
+ ]
113
+ test_pipeline = [
114
+ dict(type='LoadImageFromFile'),
115
+ dict(
116
+ type='MultiScaleFlipAug',
117
+ img_scale=(1333, 800),
118
+ flip=False,
119
+ transforms=[
120
+ dict(type='Resize', keep_ratio=True),
121
+ dict(type='RandomFlip'),
122
+ dict(
123
+ type='Normalize',
124
+ mean=[123.675, 116.28, 103.53],
125
+ std=[58.395, 57.12, 57.375],
126
+ to_rgb=True),
127
+ dict(type='Pad', size_divisor=32),
128
+ dict(type='ImageToTensor', keys=['img']),
129
+ dict(type='Collect', keys=['img'])
130
+ ])
131
+ ]
132
+ data = dict(
133
+ samples_per_gpu=2,
134
+ workers_per_gpu=2,
135
+ train=dict(
136
+ type='VOCDataset',
137
+ ann_file=[
138
+ 'data/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt',
139
+ 'data/VOCdevkit/VOC2012/ImageSets/Main/trainval.txt'
140
+ ],
141
+ img_prefix=['data/VOCdevkit/VOC2007/', 'data/VOCdevkit/VOC2012/'],
142
+ pipeline=[
143
+ dict(type='LoadImageFromFile'),
144
+ dict(type='LoadAnnotations', with_bbox=True),
145
+ dict(
146
+ type='Resize',
147
+ img_scale=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),
148
+ (1333, 608), (1333, 640), (1333, 672), (1333, 704),
149
+ (1333, 736), (1333, 768), (1333, 800)],
150
+ multiscale_mode='value',
151
+ keep_ratio=True),
152
+ dict(type='RandomFlip', flip_ratio=0.5),
153
+ dict(
154
+ type='Normalize',
155
+ mean=[123.675, 116.28, 103.53],
156
+ std=[58.395, 57.12, 57.375],
157
+ to_rgb=True),
158
+ dict(type='Pad', size_divisor=32),
159
+ dict(type='DefaultFormatBundle'),
160
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
161
+ ]),
162
+ val=dict(
163
+ type='VOCDataset',
164
+ ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
165
+ img_prefix='data/VOCdevkit/VOC2007/',
166
+ pipeline=[
167
+ dict(type='LoadImageFromFile'),
168
+ dict(
169
+ type='MultiScaleFlipAug',
170
+ img_scale=(1333, 800),
171
+ flip=False,
172
+ transforms=[
173
+ dict(type='Resize', keep_ratio=True),
174
+ dict(type='RandomFlip'),
175
+ dict(
176
+ type='Normalize',
177
+ mean=[123.675, 116.28, 103.53],
178
+ std=[58.395, 57.12, 57.375],
179
+ to_rgb=True),
180
+ dict(type='Pad', size_divisor=32),
181
+ dict(type='ImageToTensor', keys=['img']),
182
+ dict(type='Collect', keys=['img'])
183
+ ])
184
+ ]),
185
+ test=dict(
186
+ type='VOCDataset',
187
+ ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
188
+ img_prefix='data/VOCdevkit/VOC2007/',
189
+ pipeline=[
190
+ dict(type='LoadImageFromFile'),
191
+ dict(
192
+ type='MultiScaleFlipAug',
193
+ img_scale=(1333, 800),
194
+ flip=False,
195
+ transforms=[
196
+ dict(type='Resize', keep_ratio=True),
197
+ dict(type='RandomFlip'),
198
+ dict(
199
+ type='Normalize',
200
+ mean=[123.675, 116.28, 103.53],
201
+ std=[58.395, 57.12, 57.375],
202
+ to_rgb=True),
203
+ dict(type='Pad', size_divisor=32),
204
+ dict(type='ImageToTensor', keys=['img']),
205
+ dict(type='Collect', keys=['img'])
206
+ ])
207
+ ]))
208
+ evaluation = dict(interval=12000, metric='mAP', save_best='auto')
209
+ optimizer = dict(
210
+ type='SGD',
211
+ lr=0.015,
212
+ momentum=0.9,
213
+ weight_decay=5e-05,
214
+ paramwise_cfg=dict(bias_lr_mult=2.0, bias_decay_mult=0.0))
215
+ optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
216
+ lr_config = dict(
217
+ policy='step',
218
+ warmup='linear',
219
+ warmup_iters=500,
220
+ warmup_ratio=0.001,
221
+ step=[9000, 11000],
222
+ by_epoch=False)
223
+ runner = dict(type='IterBasedRunner', max_iters=12000)
224
+ checkpoint_config = dict(interval=12000)
225
+ log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
226
+ custom_hooks = [
227
+ dict(type='NumClassCheckHook'),
228
+ dict(
229
+ type='MMDetWandbHook',
230
+ init_kwargs=dict(project='I2B', group='finetune'),
231
+ interval=50,
232
+ num_eval_images=0,
233
+ log_checkpoint=False)
234
+ ]
235
+ dist_params = dict(backend='nccl')
236
+ log_level = 'INFO'
237
+ load_from = 'pretrain/selfsup_fcos_mstrain-soft-teacher_sampler-2048_temp0.5/final_model.pth'
238
+ resume_from = None
239
+ workflow = [('train', 1)]
240
+ opencv_num_threads = 0
241
+ mp_start_method = 'fork'
242
+ auto_scale_lr = dict(enable=False, base_batch_size=16)
243
+ custom_imports = None
244
+ norm_cfg = dict(type='SyncBN', requires_grad=True)
245
+ work_dir = 'work_dirs/finetune_fcos_12k_voc0712_lr1.5e-2_wd5e-5'
246
+ auto_resume = False
247
+ gpu_ids = range(0, 8)
248
+
249
+ 2022-10-03 23:03:53,326 - mmdet - INFO - Set random seed to 42, deterministic: False
250
+ 2022-10-03 23:03:53,597 - mmdet - INFO - initialize ResNet with init_cfg {'type': 'Pretrained', 'checkpoint': 'torchvision://resnet50'}
251
+ 2022-10-03 23:04:07,756 - mmdet - INFO - initialize FPN with init_cfg {'type': 'Xavier', 'layer': 'Conv2d', 'distribution': 'uniform'}
252
+ 2022-10-03 23:04:07,780 - mmdet - INFO - initialize FCOSHead with init_cfg {'type': 'Normal', 'layer': 'Conv2d', 'std': 0.01, 'override': {'type': 'Normal', 'name': 'conv_cls', 'std': 0.01, 'bias_prob': 0.01}}
253
+ Name of parameter - Initialization information
254
+
255
+ backbone.conv1.weight - torch.Size([64, 3, 7, 7]):
256
+ PretrainedInit: load from torchvision://resnet50
257
+
258
+ backbone.bn1.weight - torch.Size([64]):
259
+ PretrainedInit: load from torchvision://resnet50
260
+
261
+ backbone.bn1.bias - torch.Size([64]):
262
+ PretrainedInit: load from torchvision://resnet50
263
+
264
+ backbone.layer1.0.conv1.weight - torch.Size([64, 64, 1, 1]):
265
+ PretrainedInit: load from torchvision://resnet50
266
+
267
+ backbone.layer1.0.bn1.weight - torch.Size([64]):
268
+ PretrainedInit: load from torchvision://resnet50
269
+
270
+ backbone.layer1.0.bn1.bias - torch.Size([64]):
271
+ PretrainedInit: load from torchvision://resnet50
272
+
273
+ backbone.layer1.0.conv2.weight - torch.Size([64, 64, 3, 3]):
274
+ PretrainedInit: load from torchvision://resnet50
275
+
276
+ backbone.layer1.0.bn2.weight - torch.Size([64]):
277
+ PretrainedInit: load from torchvision://resnet50
278
+
279
+ backbone.layer1.0.bn2.bias - torch.Size([64]):
280
+ PretrainedInit: load from torchvision://resnet50
281
+
282
+ backbone.layer1.0.conv3.weight - torch.Size([256, 64, 1, 1]):
283
+ PretrainedInit: load from torchvision://resnet50
284
+
285
+ backbone.layer1.0.bn3.weight - torch.Size([256]):
286
+ PretrainedInit: load from torchvision://resnet50
287
+
288
+ backbone.layer1.0.bn3.bias - torch.Size([256]):
289
+ PretrainedInit: load from torchvision://resnet50
290
+
291
+ backbone.layer1.0.downsample.0.weight - torch.Size([256, 64, 1, 1]):
292
+ PretrainedInit: load from torchvision://resnet50
293
+
294
+ backbone.layer1.0.downsample.1.weight - torch.Size([256]):
295
+ PretrainedInit: load from torchvision://resnet50
296
+
297
+ backbone.layer1.0.downsample.1.bias - torch.Size([256]):
298
+ PretrainedInit: load from torchvision://resnet50
299
+
300
+ backbone.layer1.1.conv1.weight - torch.Size([64, 256, 1, 1]):
301
+ PretrainedInit: load from torchvision://resnet50
302
+
303
+ backbone.layer1.1.bn1.weight - torch.Size([64]):
304
+ PretrainedInit: load from torchvision://resnet50
305
+
306
+ backbone.layer1.1.bn1.bias - torch.Size([64]):
307
+ PretrainedInit: load from torchvision://resnet50
308
+
309
+ backbone.layer1.1.conv2.weight - torch.Size([64, 64, 3, 3]):
310
+ PretrainedInit: load from torchvision://resnet50
311
+
312
+ backbone.layer1.1.bn2.weight - torch.Size([64]):
313
+ PretrainedInit: load from torchvision://resnet50
314
+
315
+ backbone.layer1.1.bn2.bias - torch.Size([64]):
316
+ PretrainedInit: load from torchvision://resnet50
317
+
318
+ backbone.layer1.1.conv3.weight - torch.Size([256, 64, 1, 1]):
319
+ PretrainedInit: load from torchvision://resnet50
320
+
321
+ backbone.layer1.1.bn3.weight - torch.Size([256]):
322
+ PretrainedInit: load from torchvision://resnet50
323
+
324
+ backbone.layer1.1.bn3.bias - torch.Size([256]):
325
+ PretrainedInit: load from torchvision://resnet50
326
+
327
+ backbone.layer1.2.conv1.weight - torch.Size([64, 256, 1, 1]):
328
+ PretrainedInit: load from torchvision://resnet50
329
+
330
+ backbone.layer1.2.bn1.weight - torch.Size([64]):
331
+ PretrainedInit: load from torchvision://resnet50
332
+
333
+ backbone.layer1.2.bn1.bias - torch.Size([64]):
334
+ PretrainedInit: load from torchvision://resnet50
335
+
336
+ backbone.layer1.2.conv2.weight - torch.Size([64, 64, 3, 3]):
337
+ PretrainedInit: load from torchvision://resnet50
338
+
339
+ backbone.layer1.2.bn2.weight - torch.Size([64]):
340
+ PretrainedInit: load from torchvision://resnet50
341
+
342
+ backbone.layer1.2.bn2.bias - torch.Size([64]):
343
+ PretrainedInit: load from torchvision://resnet50
344
+
345
+ backbone.layer1.2.conv3.weight - torch.Size([256, 64, 1, 1]):
346
+ PretrainedInit: load from torchvision://resnet50
347
+
348
+ backbone.layer1.2.bn3.weight - torch.Size([256]):
349
+ PretrainedInit: load from torchvision://resnet50
350
+
351
+ backbone.layer1.2.bn3.bias - torch.Size([256]):
352
+ PretrainedInit: load from torchvision://resnet50
353
+
354
+ backbone.layer2.0.conv1.weight - torch.Size([128, 256, 1, 1]):
355
+ PretrainedInit: load from torchvision://resnet50
356
+
357
+ backbone.layer2.0.bn1.weight - torch.Size([128]):
358
+ PretrainedInit: load from torchvision://resnet50
359
+
360
+ backbone.layer2.0.bn1.bias - torch.Size([128]):
361
+ PretrainedInit: load from torchvision://resnet50
362
+
363
+ backbone.layer2.0.conv2.weight - torch.Size([128, 128, 3, 3]):
364
+ PretrainedInit: load from torchvision://resnet50
365
+
366
+ backbone.layer2.0.bn2.weight - torch.Size([128]):
367
+ PretrainedInit: load from torchvision://resnet50
368
+
369
+ backbone.layer2.0.bn2.bias - torch.Size([128]):
370
+ PretrainedInit: load from torchvision://resnet50
371
+
372
+ backbone.layer2.0.conv3.weight - torch.Size([512, 128, 1, 1]):
373
+ PretrainedInit: load from torchvision://resnet50
374
+
375
+ backbone.layer2.0.bn3.weight - torch.Size([512]):
376
+ PretrainedInit: load from torchvision://resnet50
377
+
378
+ backbone.layer2.0.bn3.bias - torch.Size([512]):
379
+ PretrainedInit: load from torchvision://resnet50
380
+
381
+ backbone.layer2.0.downsample.0.weight - torch.Size([512, 256, 1, 1]):
382
+ PretrainedInit: load from torchvision://resnet50
383
+
384
+ backbone.layer2.0.downsample.1.weight - torch.Size([512]):
385
+ PretrainedInit: load from torchvision://resnet50
386
+
387
+ backbone.layer2.0.downsample.1.bias - torch.Size([512]):
388
+ PretrainedInit: load from torchvision://resnet50
389
+
390
+ backbone.layer2.1.conv1.weight - torch.Size([128, 512, 1, 1]):
391
+ PretrainedInit: load from torchvision://resnet50
392
+
393
+ backbone.layer2.1.bn1.weight - torch.Size([128]):
394
+ PretrainedInit: load from torchvision://resnet50
395
+
396
+ backbone.layer2.1.bn1.bias - torch.Size([128]):
397
+ PretrainedInit: load from torchvision://resnet50
398
+
399
+ backbone.layer2.1.conv2.weight - torch.Size([128, 128, 3, 3]):
400
+ PretrainedInit: load from torchvision://resnet50
401
+
402
+ backbone.layer2.1.bn2.weight - torch.Size([128]):
403
+ PretrainedInit: load from torchvision://resnet50
404
+
405
+ backbone.layer2.1.bn2.bias - torch.Size([128]):
406
+ PretrainedInit: load from torchvision://resnet50
407
+
408
+ backbone.layer2.1.conv3.weight - torch.Size([512, 128, 1, 1]):
409
+ PretrainedInit: load from torchvision://resnet50
410
+
411
+ backbone.layer2.1.bn3.weight - torch.Size([512]):
412
+ PretrainedInit: load from torchvision://resnet50
413
+
414
+ backbone.layer2.1.bn3.bias - torch.Size([512]):
415
+ PretrainedInit: load from torchvision://resnet50
416
+
417
+ backbone.layer2.2.conv1.weight - torch.Size([128, 512, 1, 1]):
418
+ PretrainedInit: load from torchvision://resnet50
419
+
420
+ backbone.layer2.2.bn1.weight - torch.Size([128]):
421
+ PretrainedInit: load from torchvision://resnet50
422
+
423
+ backbone.layer2.2.bn1.bias - torch.Size([128]):
424
+ PretrainedInit: load from torchvision://resnet50
425
+
426
+ backbone.layer2.2.conv2.weight - torch.Size([128, 128, 3, 3]):
427
+ PretrainedInit: load from torchvision://resnet50
428
+
429
+ backbone.layer2.2.bn2.weight - torch.Size([128]):
430
+ PretrainedInit: load from torchvision://resnet50
431
+
432
+ backbone.layer2.2.bn2.bias - torch.Size([128]):
433
+ PretrainedInit: load from torchvision://resnet50
434
+
435
+ backbone.layer2.2.conv3.weight - torch.Size([512, 128, 1, 1]):
436
+ PretrainedInit: load from torchvision://resnet50
437
+
438
+ backbone.layer2.2.bn3.weight - torch.Size([512]):
439
+ PretrainedInit: load from torchvision://resnet50
440
+
441
+ backbone.layer2.2.bn3.bias - torch.Size([512]):
442
+ PretrainedInit: load from torchvision://resnet50
443
+
444
+ backbone.layer2.3.conv1.weight - torch.Size([128, 512, 1, 1]):
445
+ PretrainedInit: load from torchvision://resnet50
446
+
447
+ backbone.layer2.3.bn1.weight - torch.Size([128]):
448
+ PretrainedInit: load from torchvision://resnet50
449
+
450
+ backbone.layer2.3.bn1.bias - torch.Size([128]):
451
+ PretrainedInit: load from torchvision://resnet50
452
+
453
+ backbone.layer2.3.conv2.weight - torch.Size([128, 128, 3, 3]):
454
+ PretrainedInit: load from torchvision://resnet50
455
+
456
+ backbone.layer2.3.bn2.weight - torch.Size([128]):
457
+ PretrainedInit: load from torchvision://resnet50
458
+
459
+ backbone.layer2.3.bn2.bias - torch.Size([128]):
460
+ PretrainedInit: load from torchvision://resnet50
461
+
462
+ backbone.layer2.3.conv3.weight - torch.Size([512, 128, 1, 1]):
463
+ PretrainedInit: load from torchvision://resnet50
464
+
465
+ backbone.layer2.3.bn3.weight - torch.Size([512]):
466
+ PretrainedInit: load from torchvision://resnet50
467
+
468
+ backbone.layer2.3.bn3.bias - torch.Size([512]):
469
+ PretrainedInit: load from torchvision://resnet50
470
+
471
+ backbone.layer3.0.conv1.weight - torch.Size([256, 512, 1, 1]):
472
+ PretrainedInit: load from torchvision://resnet50
473
+
474
+ backbone.layer3.0.bn1.weight - torch.Size([256]):
475
+ PretrainedInit: load from torchvision://resnet50
476
+
477
+ backbone.layer3.0.bn1.bias - torch.Size([256]):
478
+ PretrainedInit: load from torchvision://resnet50
479
+
480
+ backbone.layer3.0.conv2.weight - torch.Size([256, 256, 3, 3]):
481
+ PretrainedInit: load from torchvision://resnet50
482
+
483
+ backbone.layer3.0.bn2.weight - torch.Size([256]):
484
+ PretrainedInit: load from torchvision://resnet50
485
+
486
+ backbone.layer3.0.bn2.bias - torch.Size([256]):
487
+ PretrainedInit: load from torchvision://resnet50
488
+
489
+ backbone.layer3.0.conv3.weight - torch.Size([1024, 256, 1, 1]):
490
+ PretrainedInit: load from torchvision://resnet50
491
+
492
+ backbone.layer3.0.bn3.weight - torch.Size([1024]):
493
+ PretrainedInit: load from torchvision://resnet50
494
+
495
+ backbone.layer3.0.bn3.bias - torch.Size([1024]):
496
+ PretrainedInit: load from torchvision://resnet50
497
+
498
+ backbone.layer3.0.downsample.0.weight - torch.Size([1024, 512, 1, 1]):
499
+ PretrainedInit: load from torchvision://resnet50
500
+
501
+ backbone.layer3.0.downsample.1.weight - torch.Size([1024]):
502
+ PretrainedInit: load from torchvision://resnet50
503
+
504
+ backbone.layer3.0.downsample.1.bias - torch.Size([1024]):
505
+ PretrainedInit: load from torchvision://resnet50
506
+
507
+ backbone.layer3.1.conv1.weight - torch.Size([256, 1024, 1, 1]):
508
+ PretrainedInit: load from torchvision://resnet50
509
+
510
+ backbone.layer3.1.bn1.weight - torch.Size([256]):
511
+ PretrainedInit: load from torchvision://resnet50
512
+
513
+ backbone.layer3.1.bn1.bias - torch.Size([256]):
514
+ PretrainedInit: load from torchvision://resnet50
515
+
516
+ backbone.layer3.1.conv2.weight - torch.Size([256, 256, 3, 3]):
517
+ PretrainedInit: load from torchvision://resnet50
518
+
519
+ backbone.layer3.1.bn2.weight - torch.Size([256]):
520
+ PretrainedInit: load from torchvision://resnet50
521
+
522
+ backbone.layer3.1.bn2.bias - torch.Size([256]):
523
+ PretrainedInit: load from torchvision://resnet50
524
+
525
+ backbone.layer3.1.conv3.weight - torch.Size([1024, 256, 1, 1]):
526
+ PretrainedInit: load from torchvision://resnet50
527
+
528
+ backbone.layer3.1.bn3.weight - torch.Size([1024]):
529
+ PretrainedInit: load from torchvision://resnet50
530
+
531
+ backbone.layer3.1.bn3.bias - torch.Size([1024]):
532
+ PretrainedInit: load from torchvision://resnet50
533
+
534
+ backbone.layer3.2.conv1.weight - torch.Size([256, 1024, 1, 1]):
535
+ PretrainedInit: load from torchvision://resnet50
536
+
537
+ backbone.layer3.2.bn1.weight - torch.Size([256]):
538
+ PretrainedInit: load from torchvision://resnet50
539
+
540
+ backbone.layer3.2.bn1.bias - torch.Size([256]):
541
+ PretrainedInit: load from torchvision://resnet50
542
+
543
+ backbone.layer3.2.conv2.weight - torch.Size([256, 256, 3, 3]):
544
+ PretrainedInit: load from torchvision://resnet50
545
+
546
+ backbone.layer3.2.bn2.weight - torch.Size([256]):
547
+ PretrainedInit: load from torchvision://resnet50
548
+
549
+ backbone.layer3.2.bn2.bias - torch.Size([256]):
550
+ PretrainedInit: load from torchvision://resnet50
551
+
552
+ backbone.layer3.2.conv3.weight - torch.Size([1024, 256, 1, 1]):
553
+ PretrainedInit: load from torchvision://resnet50
554
+
555
+ backbone.layer3.2.bn3.weight - torch.Size([1024]):
556
+ PretrainedInit: load from torchvision://resnet50
557
+
558
+ backbone.layer3.2.bn3.bias - torch.Size([1024]):
559
+ PretrainedInit: load from torchvision://resnet50
560
+
561
+ backbone.layer3.3.conv1.weight - torch.Size([256, 1024, 1, 1]):
562
+ PretrainedInit: load from torchvision://resnet50
563
+
564
+ backbone.layer3.3.bn1.weight - torch.Size([256]):
565
+ PretrainedInit: load from torchvision://resnet50
566
+
567
+ backbone.layer3.3.bn1.bias - torch.Size([256]):
568
+ PretrainedInit: load from torchvision://resnet50
569
+
570
+ backbone.layer3.3.conv2.weight - torch.Size([256, 256, 3, 3]):
571
+ PretrainedInit: load from torchvision://resnet50
572
+
573
+ backbone.layer3.3.bn2.weight - torch.Size([256]):
574
+ PretrainedInit: load from torchvision://resnet50
575
+
576
+ backbone.layer3.3.bn2.bias - torch.Size([256]):
577
+ PretrainedInit: load from torchvision://resnet50
578
+
579
+ backbone.layer3.3.conv3.weight - torch.Size([1024, 256, 1, 1]):
580
+ PretrainedInit: load from torchvision://resnet50
581
+
582
+ backbone.layer3.3.bn3.weight - torch.Size([1024]):
583
+ PretrainedInit: load from torchvision://resnet50
584
+
585
+ backbone.layer3.3.bn3.bias - torch.Size([1024]):
586
+ PretrainedInit: load from torchvision://resnet50
587
+
588
+ backbone.layer3.4.conv1.weight - torch.Size([256, 1024, 1, 1]):
589
+ PretrainedInit: load from torchvision://resnet50
590
+
591
+ backbone.layer3.4.bn1.weight - torch.Size([256]):
592
+ PretrainedInit: load from torchvision://resnet50
593
+
594
+ backbone.layer3.4.bn1.bias - torch.Size([256]):
595
+ PretrainedInit: load from torchvision://resnet50
596
+
597
+ backbone.layer3.4.conv2.weight - torch.Size([256, 256, 3, 3]):
598
+ PretrainedInit: load from torchvision://resnet50
599
+
600
+ backbone.layer3.4.bn2.weight - torch.Size([256]):
601
+ PretrainedInit: load from torchvision://resnet50
602
+
603
+ backbone.layer3.4.bn2.bias - torch.Size([256]):
604
+ PretrainedInit: load from torchvision://resnet50
605
+
606
+ backbone.layer3.4.conv3.weight - torch.Size([1024, 256, 1, 1]):
607
+ PretrainedInit: load from torchvision://resnet50
608
+
609
+ backbone.layer3.4.bn3.weight - torch.Size([1024]):
610
+ PretrainedInit: load from torchvision://resnet50
611
+
612
+ backbone.layer3.4.bn3.bias - torch.Size([1024]):
613
+ PretrainedInit: load from torchvision://resnet50
614
+
615
+ backbone.layer3.5.conv1.weight - torch.Size([256, 1024, 1, 1]):
616
+ PretrainedInit: load from torchvision://resnet50
617
+
618
+ backbone.layer3.5.bn1.weight - torch.Size([256]):
619
+ PretrainedInit: load from torchvision://resnet50
620
+
621
+ backbone.layer3.5.bn1.bias - torch.Size([256]):
622
+ PretrainedInit: load from torchvision://resnet50
623
+
624
+ backbone.layer3.5.conv2.weight - torch.Size([256, 256, 3, 3]):
625
+ PretrainedInit: load from torchvision://resnet50
626
+
627
+ backbone.layer3.5.bn2.weight - torch.Size([256]):
628
+ PretrainedInit: load from torchvision://resnet50
629
+
630
+ backbone.layer3.5.bn2.bias - torch.Size([256]):
631
+ PretrainedInit: load from torchvision://resnet50
632
+
633
+ backbone.layer3.5.conv3.weight - torch.Size([1024, 256, 1, 1]):
634
+ PretrainedInit: load from torchvision://resnet50
635
+
636
+ backbone.layer3.5.bn3.weight - torch.Size([1024]):
637
+ PretrainedInit: load from torchvision://resnet50
638
+
639
+ backbone.layer3.5.bn3.bias - torch.Size([1024]):
640
+ PretrainedInit: load from torchvision://resnet50
641
+
642
+ backbone.layer4.0.conv1.weight - torch.Size([512, 1024, 1, 1]):
643
+ PretrainedInit: load from torchvision://resnet50
644
+
645
+ backbone.layer4.0.bn1.weight - torch.Size([512]):
646
+ PretrainedInit: load from torchvision://resnet50
647
+
648
+ backbone.layer4.0.bn1.bias - torch.Size([512]):
649
+ PretrainedInit: load from torchvision://resnet50
650
+
651
+ backbone.layer4.0.conv2.weight - torch.Size([512, 512, 3, 3]):
652
+ PretrainedInit: load from torchvision://resnet50
653
+
654
+ backbone.layer4.0.bn2.weight - torch.Size([512]):
655
+ PretrainedInit: load from torchvision://resnet50
656
+
657
+ backbone.layer4.0.bn2.bias - torch.Size([512]):
658
+ PretrainedInit: load from torchvision://resnet50
659
+
660
+ backbone.layer4.0.conv3.weight - torch.Size([2048, 512, 1, 1]):
661
+ PretrainedInit: load from torchvision://resnet50
662
+
663
+ backbone.layer4.0.bn3.weight - torch.Size([2048]):
664
+ PretrainedInit: load from torchvision://resnet50
665
+
666
+ backbone.layer4.0.bn3.bias - torch.Size([2048]):
667
+ PretrainedInit: load from torchvision://resnet50
668
+
669
+ backbone.layer4.0.downsample.0.weight - torch.Size([2048, 1024, 1, 1]):
670
+ PretrainedInit: load from torchvision://resnet50
671
+
672
+ backbone.layer4.0.downsample.1.weight - torch.Size([2048]):
673
+ PretrainedInit: load from torchvision://resnet50
674
+
675
+ backbone.layer4.0.downsample.1.bias - torch.Size([2048]):
676
+ PretrainedInit: load from torchvision://resnet50
677
+
678
+ backbone.layer4.1.conv1.weight - torch.Size([512, 2048, 1, 1]):
679
+ PretrainedInit: load from torchvision://resnet50
680
+
681
+ backbone.layer4.1.bn1.weight - torch.Size([512]):
682
+ PretrainedInit: load from torchvision://resnet50
683
+
684
+ backbone.layer4.1.bn1.bias - torch.Size([512]):
685
+ PretrainedInit: load from torchvision://resnet50
686
+
687
+ backbone.layer4.1.conv2.weight - torch.Size([512, 512, 3, 3]):
688
+ PretrainedInit: load from torchvision://resnet50
689
+
690
+ backbone.layer4.1.bn2.weight - torch.Size([512]):
691
+ PretrainedInit: load from torchvision://resnet50
692
+
693
+ backbone.layer4.1.bn2.bias - torch.Size([512]):
694
+ PretrainedInit: load from torchvision://resnet50
695
+
696
+ backbone.layer4.1.conv3.weight - torch.Size([2048, 512, 1, 1]):
697
+ PretrainedInit: load from torchvision://resnet50
698
+
699
+ backbone.layer4.1.bn3.weight - torch.Size([2048]):
700
+ PretrainedInit: load from torchvision://resnet50
701
+
702
+ backbone.layer4.1.bn3.bias - torch.Size([2048]):
703
+ PretrainedInit: load from torchvision://resnet50
704
+
705
+ backbone.layer4.2.conv1.weight - torch.Size([512, 2048, 1, 1]):
706
+ PretrainedInit: load from torchvision://resnet50
707
+
708
+ backbone.layer4.2.bn1.weight - torch.Size([512]):
709
+ PretrainedInit: load from torchvision://resnet50
710
+
711
+ backbone.layer4.2.bn1.bias - torch.Size([512]):
712
+ PretrainedInit: load from torchvision://resnet50
713
+
714
+ backbone.layer4.2.conv2.weight - torch.Size([512, 512, 3, 3]):
715
+ PretrainedInit: load from torchvision://resnet50
716
+
717
+ backbone.layer4.2.bn2.weight - torch.Size([512]):
718
+ PretrainedInit: load from torchvision://resnet50
719
+
720
+ backbone.layer4.2.bn2.bias - torch.Size([512]):
721
+ PretrainedInit: load from torchvision://resnet50
722
+
723
+ backbone.layer4.2.conv3.weight - torch.Size([2048, 512, 1, 1]):
724
+ PretrainedInit: load from torchvision://resnet50
725
+
726
+ backbone.layer4.2.bn3.weight - torch.Size([2048]):
727
+ PretrainedInit: load from torchvision://resnet50
728
+
729
+ backbone.layer4.2.bn3.bias - torch.Size([2048]):
730
+ PretrainedInit: load from torchvision://resnet50
731
+
732
+ neck.lateral_convs.0.conv.weight - torch.Size([256, 512, 1, 1]):
733
+ XavierInit: gain=1, distribution=uniform, bias=0
734
+
735
+ neck.lateral_convs.0.bn.weight - torch.Size([256]):
736
+ The value is the same before and after calling `init_weights` of FCOS
737
+
738
+ neck.lateral_convs.0.bn.bias - torch.Size([256]):
739
+ The value is the same before and after calling `init_weights` of FCOS
740
+
741
+ neck.lateral_convs.1.conv.weight - torch.Size([256, 1024, 1, 1]):
742
+ XavierInit: gain=1, distribution=uniform, bias=0
743
+
744
+ neck.lateral_convs.1.bn.weight - torch.Size([256]):
745
+ The value is the same before and after calling `init_weights` of FCOS
746
+
747
+ neck.lateral_convs.1.bn.bias - torch.Size([256]):
748
+ The value is the same before and after calling `init_weights` of FCOS
749
+
750
+ neck.lateral_convs.2.conv.weight - torch.Size([256, 2048, 1, 1]):
751
+ XavierInit: gain=1, distribution=uniform, bias=0
752
+
753
+ neck.lateral_convs.2.bn.weight - torch.Size([256]):
754
+ The value is the same before and after calling `init_weights` of FCOS
755
+
756
+ neck.lateral_convs.2.bn.bias - torch.Size([256]):
757
+ The value is the same before and after calling `init_weights` of FCOS
758
+
759
+ neck.fpn_convs.0.conv.weight - torch.Size([256, 256, 3, 3]):
760
+ XavierInit: gain=1, distribution=uniform, bias=0
761
+
762
+ neck.fpn_convs.0.bn.weight - torch.Size([256]):
763
+ The value is the same before and after calling `init_weights` of FCOS
764
+
765
+ neck.fpn_convs.0.bn.bias - torch.Size([256]):
766
+ The value is the same before and after calling `init_weights` of FCOS
767
+
768
+ neck.fpn_convs.1.conv.weight - torch.Size([256, 256, 3, 3]):
769
+ XavierInit: gain=1, distribution=uniform, bias=0
770
+
771
+ neck.fpn_convs.1.bn.weight - torch.Size([256]):
772
+ The value is the same before and after calling `init_weights` of FCOS
773
+
774
+ neck.fpn_convs.1.bn.bias - torch.Size([256]):
775
+ The value is the same before and after calling `init_weights` of FCOS
776
+
777
+ neck.fpn_convs.2.conv.weight - torch.Size([256, 256, 3, 3]):
778
+ XavierInit: gain=1, distribution=uniform, bias=0
779
+
780
+ neck.fpn_convs.2.bn.weight - torch.Size([256]):
781
+ The value is the same before and after calling `init_weights` of FCOS
782
+
783
+ neck.fpn_convs.2.bn.bias - torch.Size([256]):
784
+ The value is the same before and after calling `init_weights` of FCOS
785
+
786
+ neck.fpn_convs.3.conv.weight - torch.Size([256, 256, 3, 3]):
787
+ XavierInit: gain=1, distribution=uniform, bias=0
788
+
789
+ neck.fpn_convs.3.bn.weight - torch.Size([256]):
790
+ The value is the same before and after calling `init_weights` of FCOS
791
+
792
+ neck.fpn_convs.3.bn.bias - torch.Size([256]):
793
+ The value is the same before and after calling `init_weights` of FCOS
794
+
795
+ neck.fpn_convs.4.conv.weight - torch.Size([256, 256, 3, 3]):
796
+ XavierInit: gain=1, distribution=uniform, bias=0
797
+
798
+ neck.fpn_convs.4.bn.weight - torch.Size([256]):
799
+ The value is the same before and after calling `init_weights` of FCOS
800
+
801
+ neck.fpn_convs.4.bn.bias - torch.Size([256]):
802
+ The value is the same before and after calling `init_weights` of FCOS
803
+
804
+ bbox_head.cls_convs.0.conv.weight - torch.Size([256, 256, 3, 3]):
805
+ NormalInit: mean=0, std=0.01, bias=0
806
+
807
+ bbox_head.cls_convs.0.gn.weight - torch.Size([256]):
808
+ The value is the same before and after calling `init_weights` of FCOS
809
+
810
+ bbox_head.cls_convs.0.gn.bias - torch.Size([256]):
811
+ The value is the same before and after calling `init_weights` of FCOS
812
+
813
+ bbox_head.cls_convs.1.conv.weight - torch.Size([256, 256, 3, 3]):
814
+ NormalInit: mean=0, std=0.01, bias=0
815
+
816
+ bbox_head.cls_convs.1.gn.weight - torch.Size([256]):
817
+ The value is the same before and after calling `init_weights` of FCOS
818
+
819
+ bbox_head.cls_convs.1.gn.bias - torch.Size([256]):
820
+ The value is the same before and after calling `init_weights` of FCOS
821
+
822
+ bbox_head.cls_convs.2.conv.weight - torch.Size([256, 256, 3, 3]):
823
+ NormalInit: mean=0, std=0.01, bias=0
824
+
825
+ bbox_head.cls_convs.2.gn.weight - torch.Size([256]):
826
+ The value is the same before and after calling `init_weights` of FCOS
827
+
828
+ bbox_head.cls_convs.2.gn.bias - torch.Size([256]):
829
+ The value is the same before and after calling `init_weights` of FCOS
830
+
831
+ bbox_head.cls_convs.3.conv.weight - torch.Size([256, 256, 3, 3]):
832
+ NormalInit: mean=0, std=0.01, bias=0
833
+
834
+ bbox_head.cls_convs.3.gn.weight - torch.Size([256]):
835
+ The value is the same before and after calling `init_weights` of FCOS
836
+
837
+ bbox_head.cls_convs.3.gn.bias - torch.Size([256]):
838
+ The value is the same before and after calling `init_weights` of FCOS
839
+
840
+ bbox_head.reg_convs.0.conv.weight - torch.Size([256, 256, 3, 3]):
841
+ NormalInit: mean=0, std=0.01, bias=0
842
+
843
+ bbox_head.reg_convs.0.gn.weight - torch.Size([256]):
844
+ The value is the same before and after calling `init_weights` of FCOS
845
+
846
+ bbox_head.reg_convs.0.gn.bias - torch.Size([256]):
847
+ The value is the same before and after calling `init_weights` of FCOS
848
+
849
+ bbox_head.reg_convs.1.conv.weight - torch.Size([256, 256, 3, 3]):
850
+ NormalInit: mean=0, std=0.01, bias=0
851
+
852
+ bbox_head.reg_convs.1.gn.weight - torch.Size([256]):
853
+ The value is the same before and after calling `init_weights` of FCOS
854
+
855
+ bbox_head.reg_convs.1.gn.bias - torch.Size([256]):
856
+ The value is the same before and after calling `init_weights` of FCOS
857
+
858
+ bbox_head.reg_convs.2.conv.weight - torch.Size([256, 256, 3, 3]):
859
+ NormalInit: mean=0, std=0.01, bias=0
860
+
861
+ bbox_head.reg_convs.2.gn.weight - torch.Size([256]):
862
+ The value is the same before and after calling `init_weights` of FCOS
863
+
864
+ bbox_head.reg_convs.2.gn.bias - torch.Size([256]):
865
+ The value is the same before and after calling `init_weights` of FCOS
866
+
867
+ bbox_head.reg_convs.3.conv.weight - torch.Size([256, 256, 3, 3]):
868
+ NormalInit: mean=0, std=0.01, bias=0
869
+
870
+ bbox_head.reg_convs.3.gn.weight - torch.Size([256]):
871
+ The value is the same before and after calling `init_weights` of FCOS
872
+
873
+ bbox_head.reg_convs.3.gn.bias - torch.Size([256]):
874
+ The value is the same before and after calling `init_weights` of FCOS
875
+
876
+ bbox_head.conv_cls.weight - torch.Size([20, 256, 3, 3]):
877
+ NormalInit: mean=0, std=0.01, bias=-4.59511985013459
878
+
879
+ bbox_head.conv_cls.bias - torch.Size([20]):
880
+ NormalInit: mean=0, std=0.01, bias=-4.59511985013459
881
+
882
+ bbox_head.conv_reg.weight - torch.Size([4, 256, 3, 3]):
883
+ NormalInit: mean=0, std=0.01, bias=0
884
+
885
+ bbox_head.conv_reg.bias - torch.Size([4]):
886
+ NormalInit: mean=0, std=0.01, bias=0
887
+
888
+ bbox_head.conv_centerness.weight - torch.Size([1, 256, 3, 3]):
889
+ NormalInit: mean=0, std=0.01, bias=0
890
+
891
+ bbox_head.conv_centerness.bias - torch.Size([1]):
892
+ NormalInit: mean=0, std=0.01, bias=0
893
+
894
+ bbox_head.scales.0.scale - torch.Size([]):
895
+ The value is the same before and after calling `init_weights` of FCOS
896
+
897
+ bbox_head.scales.1.scale - torch.Size([]):
898
+ The value is the same before and after calling `init_weights` of FCOS
899
+
900
+ bbox_head.scales.2.scale - torch.Size([]):
901
+ The value is the same before and after calling `init_weights` of FCOS
902
+
903
+ bbox_head.scales.3.scale - torch.Size([]):
904
+ The value is the same before and after calling `init_weights` of FCOS
905
+
906
+ bbox_head.scales.4.scale - torch.Size([]):
907
+ The value is the same before and after calling `init_weights` of FCOS
908
+ 2022-10-03 23:04:09,685 - mmdet - INFO - Automatic scaling of learning rate (LR) has been disabled.
909
+ 2022-10-03 23:04:10,655 - mmdet - INFO - load checkpoint from local path: pretrain/selfsup_fcos_mstrain-soft-teacher_sampler-2048_temp0.5/final_model.pth
910
+ 2022-10-03 23:04:10,809 - mmdet - WARNING - The model and loaded state dict do not match exactly
911
+
912
+ unexpected key in source state_dict: neck.lateral_convs.0.conv.bias, neck.lateral_convs.1.conv.bias, neck.lateral_convs.2.conv.bias, neck.fpn_convs.0.conv.bias, neck.fpn_convs.1.conv.bias, neck.fpn_convs.2.conv.bias, neck.fpn_convs.3.conv.bias, neck.fpn_convs.4.conv.bias
913
+
914
+ missing keys in source state_dict: neck.lateral_convs.0.bn.weight, neck.lateral_convs.0.bn.bias, neck.lateral_convs.0.bn.running_mean, neck.lateral_convs.0.bn.running_var, neck.lateral_convs.1.bn.weight, neck.lateral_convs.1.bn.bias, neck.lateral_convs.1.bn.running_mean, neck.lateral_convs.1.bn.running_var, neck.lateral_convs.2.bn.weight, neck.lateral_convs.2.bn.bias, neck.lateral_convs.2.bn.running_mean, neck.lateral_convs.2.bn.running_var, neck.fpn_convs.0.bn.weight, neck.fpn_convs.0.bn.bias, neck.fpn_convs.0.bn.running_mean, neck.fpn_convs.0.bn.running_var, neck.fpn_convs.1.bn.weight, neck.fpn_convs.1.bn.bias, neck.fpn_convs.1.bn.running_mean, neck.fpn_convs.1.bn.running_var, neck.fpn_convs.2.bn.weight, neck.fpn_convs.2.bn.bias, neck.fpn_convs.2.bn.running_mean, neck.fpn_convs.2.bn.running_var, neck.fpn_convs.3.bn.weight, neck.fpn_convs.3.bn.bias, neck.fpn_convs.3.bn.running_mean, neck.fpn_convs.3.bn.running_var, neck.fpn_convs.4.bn.weight, neck.fpn_convs.4.bn.bias, neck.fpn_convs.4.bn.running_mean, neck.fpn_convs.4.bn.running_var, bbox_head.conv_cls.weight, bbox_head.conv_cls.bias, bbox_head.conv_reg.weight, bbox_head.conv_reg.bias, bbox_head.conv_centerness.weight, bbox_head.conv_centerness.bias
915
+
916
+ 2022-10-03 23:04:10,814 - mmdet - INFO - Start running, host: tiger@n136-144-086, work_dir: /home/tiger/code/mmdet/work_dirs/finetune_fcos_12k_voc0712_lr1.5e-2_wd5e-5
917
+ 2022-10-03 23:04:10,815 - mmdet - INFO - Hooks will be executed in the following order:
918
+ before_run:
919
+ (VERY_HIGH ) StepLrUpdaterHook
920
+ (NORMAL ) CheckpointHook
921
+ (NORMAL ) MMDetWandbHook
922
+ (LOW ) DistEvalHook
923
+ (VERY_LOW ) TextLoggerHook
924
+ --------------------
925
+ before_train_epoch:
926
+ (VERY_HIGH ) StepLrUpdaterHook
927
+ (NORMAL ) NumClassCheckHook
928
+ (NORMAL ) MMDetWandbHook
929
+ (LOW ) IterTimerHook
930
+ (LOW ) DistEvalHook
931
+ (VERY_LOW ) TextLoggerHook
932
+ --------------------
933
+ before_train_iter:
934
+ (VERY_HIGH ) StepLrUpdaterHook
935
+ (LOW ) IterTimerHook
936
+ (LOW ) DistEvalHook
937
+ --------------------
938
+ after_train_iter:
939
+ (ABOVE_NORMAL) OptimizerHook
940
+ (NORMAL ) CheckpointHook
941
+ (NORMAL ) MMDetWandbHook
942
+ (LOW ) IterTimerHook
943
+ (LOW ) DistEvalHook
944
+ (VERY_LOW ) TextLoggerHook
945
+ --------------------
946
+ after_train_epoch:
947
+ (NORMAL ) CheckpointHook
948
+ (NORMAL ) MMDetWandbHook
949
+ (LOW ) DistEvalHook
950
+ (VERY_LOW ) TextLoggerHook
951
+ --------------------
952
+ before_val_epoch:
953
+ (NORMAL ) NumClassCheckHook
954
+ (NORMAL ) MMDetWandbHook
955
+ (LOW ) IterTimerHook
956
+ (VERY_LOW ) TextLoggerHook
957
+ --------------------
958
+ before_val_iter:
959
+ (LOW ) IterTimerHook
960
+ --------------------
961
+ after_val_iter:
962
+ (LOW ) IterTimerHook
963
+ --------------------
964
+ after_val_epoch:
965
+ (NORMAL ) MMDetWandbHook
966
+ (VERY_LOW ) TextLoggerHook
967
+ --------------------
968
+ after_run:
969
+ (NORMAL ) MMDetWandbHook
970
+ (VERY_LOW ) TextLoggerHook
971
+ --------------------
972
+ 2022-10-03 23:04:10,815 - mmdet - INFO - workflow: [('train', 1)], max: 12000 iters
973
+ 2022-10-03 23:04:10,815 - mmdet - INFO - Checkpoints will be saved to /home/tiger/code/mmdet/work_dirs/finetune_fcos_12k_voc0712_lr1.5e-2_wd5e-5 by HardDiskBackend.
974
+ 2022-10-03 23:04:17,705 - mmdet - INFO - Iter [50/12000] lr: 1.484e-03, eta: 0:22:43, time: 0.114, data_time: 0.007, memory: 3374, loss_cls: 1.0537, loss_bbox: 4.8491, loss_centerness: 0.6754, loss: 6.5782, grad_norm: 12.3657
975
+ 2022-10-03 23:04:23,217 - mmdet - INFO - Iter [100/12000] lr: 2.982e-03, eta: 0:22:14, time: 0.110, data_time: 0.006, memory: 3374, loss_cls: 0.6791, loss_bbox: 0.8601, loss_centerness: 0.6573, loss: 2.1965, grad_norm: 4.5347
976
+ 2022-10-03 23:04:28,829 - mmdet - INFO - Iter [150/12000] lr: 4.481e-03, eta: 0:22:09, time: 0.112, data_time: 0.006, memory: 3374, loss_cls: 0.5333, loss_bbox: 0.7146, loss_centerness: 0.6428, loss: 1.8907, grad_norm: 5.2160
977
+ 2022-10-03 23:04:34,160 - mmdet - INFO - Iter [200/12000] lr: 5.979e-03, eta: 0:21:47, time: 0.107, data_time: 0.006, memory: 3374, loss_cls: 0.4749, loss_bbox: 0.7338, loss_centerness: 0.6323, loss: 1.8410, grad_norm: 7.3390
978
+ 2022-10-03 23:04:39,394 - mmdet - INFO - Iter [250/12000] lr: 7.478e-03, eta: 0:21:27, time: 0.105, data_time: 0.006, memory: 3374, loss_cls: 0.4591, loss_bbox: 0.6353, loss_centerness: 0.6251, loss: 1.7196, grad_norm: 6.4957
979
+ 2022-10-03 23:04:44,999 - mmdet - INFO - Iter [300/12000] lr: 8.976e-03, eta: 0:21:26, time: 0.112, data_time: 0.007, memory: 3374, loss_cls: 0.4273, loss_bbox: 0.6177, loss_centerness: 0.6211, loss: 1.6661, grad_norm: 6.4898
980
+ 2022-10-03 23:04:50,570 - mmdet - INFO - Iter [350/12000] lr: 1.047e-02, eta: 0:21:23, time: 0.111, data_time: 0.006, memory: 3374, loss_cls: 0.4455, loss_bbox: 0.6623, loss_centerness: 0.6199, loss: 1.7277, grad_norm: 6.3315
981
+ 2022-10-03 23:04:56,146 - mmdet - INFO - Iter [400/12000] lr: 1.197e-02, eta: 0:21:20, time: 0.112, data_time: 0.006, memory: 3374, loss_cls: 0.4226, loss_bbox: 0.5643, loss_centerness: 0.6222, loss: 1.6091, grad_norm: 4.8027
982
+ 2022-10-03 23:05:01,689 - mmdet - INFO - Iter [450/12000] lr: 1.347e-02, eta: 0:21:15, time: 0.111, data_time: 0.006, memory: 3374, loss_cls: 0.4131, loss_bbox: 0.6746, loss_centerness: 0.6191, loss: 1.7067, grad_norm: 6.1644
983
+ 2022-10-03 23:05:08,483 - mmdet - INFO - Iter [500/12000] lr: 1.497e-02, eta: 0:21:39, time: 0.136, data_time: 0.006, memory: 3374, loss_cls: 0.3854, loss_bbox: 0.6034, loss_centerness: 0.6164, loss: 1.6052, grad_norm: 5.1157
984
+ 2022-10-03 23:05:13,624 - mmdet - INFO - Iter [550/12000] lr: 1.500e-02, eta: 0:21:22, time: 0.103, data_time: 0.006, memory: 3374, loss_cls: 0.3810, loss_bbox: 0.6026, loss_centerness: 0.6148, loss: 1.5985, grad_norm: 5.2547
985
+ 2022-10-03 23:05:18,837 - mmdet - INFO - Iter [600/12000] lr: 1.500e-02, eta: 0:21:09, time: 0.104, data_time: 0.006, memory: 3374, loss_cls: 0.3759, loss_bbox: 0.6033, loss_centerness: 0.6159, loss: 1.5950, grad_norm: 5.4132
986
+ 2022-10-03 23:05:24,127 - mmdet - INFO - Iter [650/12000] lr: 1.500e-02, eta: 0:20:59, time: 0.106, data_time: 0.006, memory: 3374, loss_cls: 0.3536, loss_bbox: 0.5552, loss_centerness: 0.6144, loss: 1.5232, grad_norm: 4.6439
987
+ 2022-10-03 23:05:29,329 - mmdet - INFO - Iter [700/12000] lr: 1.500e-02, eta: 0:20:48, time: 0.104, data_time: 0.006, memory: 3374, loss_cls: 0.3611, loss_bbox: 0.5496, loss_centerness: 0.6123, loss: 1.5230, grad_norm: 4.9982
988
+ 2022-10-03 23:05:34,597 - mmdet - INFO - Iter [750/12000] lr: 1.500e-02, eta: 0:20:38, time: 0.105, data_time: 0.006, memory: 3374, loss_cls: 0.3628, loss_bbox: 0.6455, loss_centerness: 0.6124, loss: 1.6208, grad_norm: 5.6424
989
+ 2022-10-03 23:05:39,916 - mmdet - INFO - Iter [800/12000] lr: 1.500e-02, eta: 0:20:30, time: 0.106, data_time: 0.006, memory: 3374, loss_cls: 0.3525, loss_bbox: 0.5528, loss_centerness: 0.6113, loss: 1.5166, grad_norm: 4.6742
990
+ 2022-10-03 23:05:44,962 - mmdet - INFO - Iter [850/12000] lr: 1.500e-02, eta: 0:20:19, time: 0.101, data_time: 0.006, memory: 3374, loss_cls: 0.3506, loss_bbox: 0.5777, loss_centerness: 0.6112, loss: 1.5394, grad_norm: 5.0777
991
+ 2022-10-03 23:05:50,213 - mmdet - INFO - Iter [900/12000] lr: 1.500e-02, eta: 0:20:11, time: 0.105, data_time: 0.006, memory: 3374, loss_cls: 0.3315, loss_bbox: 0.6549, loss_centerness: 0.6092, loss: 1.5956, grad_norm: 5.1478
992
+ 2022-10-03 23:05:55,528 - mmdet - INFO - Iter [950/12000] lr: 1.500e-02, eta: 0:20:04, time: 0.106, data_time: 0.006, memory: 3374, loss_cls: 0.3390, loss_bbox: 0.5597, loss_centerness: 0.6095, loss: 1.5081, grad_norm: 4.3107
993
+ 2022-10-03 23:06:00,561 - mmdet - INFO - Exp name: fcos_mstrain_12k_voc0712.py
994
+ 2022-10-03 23:06:00,562 - mmdet - INFO - Iter [1000/12000] lr: 1.500e-02, eta: 0:19:54, time: 0.101, data_time: 0.006, memory: 3374, loss_cls: 0.3277, loss_bbox: 0.4881, loss_centerness: 0.6082, loss: 1.4240, grad_norm: 4.3779
995
+ 2022-10-03 23:06:05,627 - mmdet - INFO - Iter [1050/12000] lr: 1.500e-02, eta: 0:19:44, time: 0.101, data_time: 0.006, memory: 3374, loss_cls: 0.3301, loss_bbox: 0.5140, loss_centerness: 0.6098, loss: 1.4539, grad_norm: 4.5959
996
+ 2022-10-03 23:06:10,997 - mmdet - INFO - Iter [1100/12000] lr: 1.500e-02, eta: 0:19:39, time: 0.107, data_time: 0.006, memory: 3374, loss_cls: 0.3150, loss_bbox: 0.5068, loss_centerness: 0.6070, loss: 1.4287, grad_norm: 4.3244
997
+ 2022-10-03 23:06:16,223 - mmdet - INFO - Iter [1150/12000] lr: 1.500e-02, eta: 0:19:31, time: 0.105, data_time: 0.006, memory: 3374, loss_cls: 0.3163, loss_bbox: 0.5167, loss_centerness: 0.6070, loss: 1.4400, grad_norm: 4.7621
998
+ 2022-10-03 23:06:21,607 - mmdet - INFO - Iter [1200/12000] lr: 1.500e-02, eta: 0:19:26, time: 0.108, data_time: 0.006, memory: 3374, loss_cls: 0.3054, loss_bbox: 0.4738, loss_centerness: 0.6063, loss: 1.3855, grad_norm: 3.7848
999
+ 2022-10-03 23:06:26,663 - mmdet - INFO - Iter [1250/12000] lr: 1.500e-02, eta: 0:19:18, time: 0.101, data_time: 0.006, memory: 3374, loss_cls: 0.2982, loss_bbox: 0.5563, loss_centerness: 0.6080, loss: 1.4626, grad_norm: 5.4164
1000
+ 2022-10-03 23:06:32,004 - mmdet - INFO - Iter [1300/12000] lr: 1.500e-02, eta: 0:19:12, time: 0.107, data_time: 0.006, memory: 3374, loss_cls: 0.2959, loss_bbox: 0.4749, loss_centerness: 0.6046, loss: 1.3755, grad_norm: 4.0394
1001
+ 2022-10-03 23:06:37,278 - mmdet - INFO - Iter [1350/12000] lr: 1.500e-02, eta: 0:19:06, time: 0.105, data_time: 0.006, memory: 3374, loss_cls: 0.3065, loss_bbox: 0.4300, loss_centerness: 0.6061, loss: 1.3425, grad_norm: 3.6915
1002
+ 2022-10-03 23:06:42,277 - mmdet - INFO - Iter [1400/12000] lr: 1.500e-02, eta: 0:18:57, time: 0.100, data_time: 0.006, memory: 3374, loss_cls: 0.3188, loss_bbox: 0.4513, loss_centerness: 0.6068, loss: 1.3769, grad_norm: 3.7263
1003
+ 2022-10-03 23:06:47,648 - mmdet - INFO - Iter [1450/12000] lr: 1.500e-02, eta: 0:18:52, time: 0.107, data_time: 0.006, memory: 3374, loss_cls: 0.3040, loss_bbox: 0.4776, loss_centerness: 0.6043, loss: 1.3859, grad_norm: 4.1767
1004
+ 2022-10-03 23:06:53,019 - mmdet - INFO - Iter [1500/12000] lr: 1.500e-02, eta: 0:18:47, time: 0.107, data_time: 0.006, memory: 3374, loss_cls: 0.2978, loss_bbox: 0.4800, loss_centerness: 0.6047, loss: 1.3824, grad_norm: 4.1199
1005
+ 2022-10-03 23:06:58,344 - mmdet - INFO - Iter [1550/12000] lr: 1.500e-02, eta: 0:18:41, time: 0.106, data_time: 0.006, memory: 3374, loss_cls: 0.2975, loss_bbox: 0.4485, loss_centerness: 0.6025, loss: 1.3485, grad_norm: 3.9451
1006
+ 2022-10-03 23:07:03,761 - mmdet - INFO - Iter [1600/12000] lr: 1.500e-02, eta: 0:18:36, time: 0.108, data_time: 0.006, memory: 3374, loss_cls: 0.2885, loss_bbox: 0.4370, loss_centerness: 0.6048, loss: 1.3304, grad_norm: 3.3962
1007
+ 2022-10-03 23:07:09,400 - mmdet - INFO - Iter [1650/12000] lr: 1.500e-02, eta: 0:18:32, time: 0.113, data_time: 0.006, memory: 3374, loss_cls: 0.2843, loss_bbox: 0.4135, loss_centerness: 0.6059, loss: 1.3038, grad_norm: 3.2543
1008
+ 2022-10-03 23:07:15,080 - mmdet - INFO - Iter [1700/12000] lr: 1.500e-02, eta: 0:18:29, time: 0.114, data_time: 0.006, memory: 3374, loss_cls: 0.2856, loss_bbox: 0.4120, loss_centerness: 0.6029, loss: 1.3005, grad_norm: 3.1262
1009
+ 2022-10-03 23:07:20,292 - mmdet - INFO - Iter [1750/12000] lr: 1.500e-02, eta: 0:18:22, time: 0.104, data_time: 0.006, memory: 3374, loss_cls: 0.2861, loss_bbox: 0.4194, loss_centerness: 0.6016, loss: 1.3071, grad_norm: 3.4000
1010
+ 2022-10-03 23:07:25,374 - mmdet - INFO - Iter [1800/12000] lr: 1.500e-02, eta: 0:18:15, time: 0.102, data_time: 0.006, memory: 3374, loss_cls: 0.2892, loss_bbox: 0.3962, loss_centerness: 0.6040, loss: 1.2893, grad_norm: 2.8839
1011
+ 2022-10-03 23:07:30,704 - mmdet - INFO - Iter [1850/12000] lr: 1.500e-02, eta: 0:18:10, time: 0.107, data_time: 0.006, memory: 3374, loss_cls: 0.2739, loss_bbox: 0.3708, loss_centerness: 0.6029, loss: 1.2476, grad_norm: 2.6212
1012
+ 2022-10-03 23:07:35,850 - mmdet - INFO - Iter [1900/12000] lr: 1.500e-02, eta: 0:18:03, time: 0.103, data_time: 0.006, memory: 3374, loss_cls: 0.2914, loss_bbox: 0.4107, loss_centerness: 0.6050, loss: 1.3071, grad_norm: 3.1236
1013
+ 2022-10-03 23:07:40,977 - mmdet - INFO - Iter [1950/12000] lr: 1.500e-02, eta: 0:17:56, time: 0.103, data_time: 0.006, memory: 3374, loss_cls: 0.2795, loss_bbox: 0.4096, loss_centerness: 0.6022, loss: 1.2914, grad_norm: 3.0628
1014
+ 2022-10-03 23:07:46,032 - mmdet - INFO - Exp name: fcos_mstrain_12k_voc0712.py
1015
+ 2022-10-03 23:07:46,033 - mmdet - INFO - Iter [2000/12000] lr: 1.500e-02, eta: 0:17:50, time: 0.101, data_time: 0.006, memory: 3374, loss_cls: 0.2805, loss_bbox: 0.3808, loss_centerness: 0.6005, loss: 1.2618, grad_norm: 2.8152
1016
+ 2022-10-03 23:07:51,065 - mmdet - INFO - Iter [2050/12000] lr: 1.500e-02, eta: 0:17:43, time: 0.101, data_time: 0.006, memory: 3374, loss_cls: 0.2772, loss_bbox: 0.3860, loss_centerness: 0.5991, loss: 1.2624, grad_norm: 2.9319
1017
+ 2022-10-03 23:07:56,283 - mmdet - INFO - Iter [2100/12000] lr: 1.500e-02, eta: 0:17:37, time: 0.104, data_time: 0.006, memory: 3374, loss_cls: 0.2703, loss_bbox: 0.3860, loss_centerness: 0.6015, loss: 1.2578, grad_norm: 2.6650
1018
+ 2022-10-03 23:08:01,375 - mmdet - INFO - Iter [2150/12000] lr: 1.500e-02, eta: 0:17:30, time: 0.102, data_time: 0.006, memory: 3374, loss_cls: 0.2569, loss_bbox: 0.3632, loss_centerness: 0.5993, loss: 1.2194, grad_norm: 2.6891
1019
+ 2022-10-03 23:08:06,399 - mmdet - INFO - Iter [2200/12000] lr: 1.500e-02, eta: 0:17:24, time: 0.100, data_time: 0.006, memory: 3374, loss_cls: 0.2682, loss_bbox: 0.3759, loss_centerness: 0.6009, loss: 1.2450, grad_norm: 2.7334
1020
+ 2022-10-03 23:08:11,513 - mmdet - INFO - Iter [2250/12000] lr: 1.500e-02, eta: 0:17:17, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.2589, loss_bbox: 0.3624, loss_centerness: 0.5985, loss: 1.2198, grad_norm: 2.7641
1021
+ 2022-10-03 23:08:16,930 - mmdet - INFO - Iter [2300/12000] lr: 1.500e-02, eta: 0:17:12, time: 0.108, data_time: 0.006, memory: 3375, loss_cls: 0.2619, loss_bbox: 0.3805, loss_centerness: 0.5989, loss: 1.2414, grad_norm: 2.8289
1022
+ 2022-10-03 23:08:22,089 - mmdet - INFO - Iter [2350/12000] lr: 1.500e-02, eta: 0:17:06, time: 0.103, data_time: 0.006, memory: 3375, loss_cls: 0.2718, loss_bbox: 0.3752, loss_centerness: 0.6001, loss: 1.2471, grad_norm: 2.7427
1023
+ 2022-10-03 23:08:27,145 - mmdet - INFO - Iter [2400/12000] lr: 1.500e-02, eta: 0:17:00, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2640, loss_bbox: 0.3638, loss_centerness: 0.5997, loss: 1.2274, grad_norm: 2.5183
1024
+ 2022-10-03 23:08:32,180 - mmdet - INFO - Iter [2450/12000] lr: 1.500e-02, eta: 0:16:54, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2464, loss_bbox: 0.3551, loss_centerness: 0.6002, loss: 1.2016, grad_norm: 2.4714
1025
+ 2022-10-03 23:08:37,449 - mmdet - INFO - Iter [2500/12000] lr: 1.500e-02, eta: 0:16:48, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.2611, loss_bbox: 0.3776, loss_centerness: 0.6007, loss: 1.2394, grad_norm: 2.7616
1026
+ 2022-10-03 23:08:42,790 - mmdet - INFO - Iter [2550/12000] lr: 1.500e-02, eta: 0:16:43, time: 0.107, data_time: 0.006, memory: 3375, loss_cls: 0.2558, loss_bbox: 0.3706, loss_centerness: 0.6010, loss: 1.2273, grad_norm: 2.6622
1027
+ 2022-10-03 23:08:48,168 - mmdet - INFO - Iter [2600/12000] lr: 1.500e-02, eta: 0:16:38, time: 0.108, data_time: 0.006, memory: 3375, loss_cls: 0.2528, loss_bbox: 0.3718, loss_centerness: 0.6008, loss: 1.2255, grad_norm: 2.6662
1028
+ 2022-10-03 23:08:53,369 - mmdet - INFO - Iter [2650/12000] lr: 1.500e-02, eta: 0:16:32, time: 0.104, data_time: 0.006, memory: 3375, loss_cls: 0.2571, loss_bbox: 0.3540, loss_centerness: 0.5993, loss: 1.2104, grad_norm: 2.6098
1029
+ 2022-10-03 23:08:58,471 - mmdet - INFO - Iter [2700/12000] lr: 1.500e-02, eta: 0:16:26, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.2488, loss_bbox: 0.3670, loss_centerness: 0.5980, loss: 1.2138, grad_norm: 2.7511
1030
+ 2022-10-03 23:09:03,858 - mmdet - INFO - Iter [2750/12000] lr: 1.500e-02, eta: 0:16:21, time: 0.108, data_time: 0.006, memory: 3375, loss_cls: 0.2875, loss_bbox: 0.3567, loss_centerness: 0.5974, loss: 1.2417, grad_norm: 2.8518
1031
+ 2022-10-03 23:09:09,385 - mmdet - INFO - Iter [2800/12000] lr: 1.500e-02, eta: 0:16:17, time: 0.111, data_time: 0.006, memory: 3375, loss_cls: 0.2600, loss_bbox: 0.3723, loss_centerness: 0.5989, loss: 1.2311, grad_norm: 2.7242
1032
+ 2022-10-03 23:09:14,773 - mmdet - INFO - Iter [2850/12000] lr: 1.500e-02, eta: 0:16:12, time: 0.108, data_time: 0.006, memory: 3375, loss_cls: 0.2638, loss_bbox: 0.3606, loss_centerness: 0.6012, loss: 1.2255, grad_norm: 2.5991
1033
+ 2022-10-03 23:09:19,960 - mmdet - INFO - Iter [2900/12000] lr: 1.500e-02, eta: 0:16:06, time: 0.104, data_time: 0.006, memory: 3375, loss_cls: 0.2658, loss_bbox: 0.3596, loss_centerness: 0.5990, loss: 1.2244, grad_norm: 2.6344
1034
+ 2022-10-03 23:09:25,447 - mmdet - INFO - Iter [2950/12000] lr: 1.500e-02, eta: 0:16:01, time: 0.110, data_time: 0.006, memory: 3375, loss_cls: 0.2623, loss_bbox: 0.3663, loss_centerness: 0.5994, loss: 1.2280, grad_norm: 2.5495
1035
+ 2022-10-03 23:09:30,697 - mmdet - INFO - Exp name: fcos_mstrain_12k_voc0712.py
1036
+ 2022-10-03 23:09:30,697 - mmdet - INFO - Iter [3000/12000] lr: 1.500e-02, eta: 0:15:56, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.2467, loss_bbox: 0.3484, loss_centerness: 0.5985, loss: 1.1936, grad_norm: 2.3818
1037
+ 2022-10-03 23:09:35,762 - mmdet - INFO - Iter [3050/12000] lr: 1.500e-02, eta: 0:15:50, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2489, loss_bbox: 0.3615, loss_centerness: 0.5986, loss: 1.2090, grad_norm: 2.6018
1038
+ 2022-10-03 23:09:40,840 - mmdet - INFO - Iter [3100/12000] lr: 1.500e-02, eta: 0:15:44, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.2385, loss_bbox: 0.3538, loss_centerness: 0.5981, loss: 1.1904, grad_norm: 2.6130
1039
+ 2022-10-03 23:09:46,209 - mmdet - INFO - Iter [3150/12000] lr: 1.500e-02, eta: 0:15:38, time: 0.107, data_time: 0.006, memory: 3375, loss_cls: 0.2298, loss_bbox: 0.3453, loss_centerness: 0.5973, loss: 1.1723, grad_norm: 2.5113
1040
+ 2022-10-03 23:09:51,589 - mmdet - INFO - Iter [3200/12000] lr: 1.500e-02, eta: 0:15:33, time: 0.108, data_time: 0.006, memory: 3375, loss_cls: 0.2480, loss_bbox: 0.3494, loss_centerness: 0.5984, loss: 1.1958, grad_norm: 2.5547
1041
+ 2022-10-03 23:09:56,892 - mmdet - INFO - Iter [3250/12000] lr: 1.500e-02, eta: 0:15:28, time: 0.106, data_time: 0.006, memory: 3375, loss_cls: 0.2448, loss_bbox: 0.3371, loss_centerness: 0.5989, loss: 1.1808, grad_norm: 2.4596
1042
+ 2022-10-03 23:10:01,982 - mmdet - INFO - Iter [3300/12000] lr: 1.500e-02, eta: 0:15:22, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.2546, loss_bbox: 0.3607, loss_centerness: 0.5988, loss: 1.2141, grad_norm: 2.7104
1043
+ 2022-10-03 23:10:07,046 - mmdet - INFO - Iter [3350/12000] lr: 1.500e-02, eta: 0:15:16, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2385, loss_bbox: 0.3381, loss_centerness: 0.5972, loss: 1.1738, grad_norm: 2.4207
1044
+ 2022-10-03 23:10:12,276 - mmdet - INFO - Iter [3400/12000] lr: 1.500e-02, eta: 0:15:11, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.2361, loss_bbox: 0.3271, loss_centerness: 0.5959, loss: 1.1592, grad_norm: 2.3731
1045
+ 2022-10-03 23:10:17,293 - mmdet - INFO - Iter [3450/12000] lr: 1.500e-02, eta: 0:15:05, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.2472, loss_bbox: 0.3672, loss_centerness: 0.5965, loss: 1.2109, grad_norm: 2.8299
1046
+ 2022-10-03 23:10:22,449 - mmdet - INFO - Iter [3500/12000] lr: 1.500e-02, eta: 0:14:59, time: 0.103, data_time: 0.006, memory: 3375, loss_cls: 0.2285, loss_bbox: 0.3372, loss_centerness: 0.5958, loss: 1.1614, grad_norm: 2.3995
1047
+ 2022-10-03 23:10:27,682 - mmdet - INFO - Iter [3550/12000] lr: 1.500e-02, eta: 0:14:54, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.2323, loss_bbox: 0.3379, loss_centerness: 0.5953, loss: 1.1656, grad_norm: 2.7294
1048
+ 2022-10-03 23:10:32,728 - mmdet - INFO - Iter [3600/12000] lr: 1.500e-02, eta: 0:14:48, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2352, loss_bbox: 0.3351, loss_centerness: 0.5969, loss: 1.1672, grad_norm: 2.6776
1049
+ 2022-10-03 23:10:37,762 - mmdet - INFO - Iter [3650/12000] lr: 1.500e-02, eta: 0:14:42, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2406, loss_bbox: 0.3351, loss_centerness: 0.5969, loss: 1.1726, grad_norm: 2.5880
1050
+ 2022-10-03 23:10:42,994 - mmdet - INFO - Iter [3700/12000] lr: 1.500e-02, eta: 0:14:37, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.2356, loss_bbox: 0.3268, loss_centerness: 0.5963, loss: 1.1587, grad_norm: 2.3722
1051
+ 2022-10-03 23:10:48,338 - mmdet - INFO - Iter [3750/12000] lr: 1.500e-02, eta: 0:14:31, time: 0.107, data_time: 0.006, memory: 3375, loss_cls: 0.2314, loss_bbox: 0.3354, loss_centerness: 0.5952, loss: 1.1620, grad_norm: 2.5687
1052
+ 2022-10-03 23:10:53,688 - mmdet - INFO - Iter [3800/12000] lr: 1.500e-02, eta: 0:14:26, time: 0.107, data_time: 0.006, memory: 3375, loss_cls: 0.2388, loss_bbox: 0.3303, loss_centerness: 0.5958, loss: 1.1649, grad_norm: 2.5392
1053
+ 2022-10-03 23:10:58,875 - mmdet - INFO - Iter [3850/12000] lr: 1.500e-02, eta: 0:14:21, time: 0.104, data_time: 0.007, memory: 3375, loss_cls: 0.2336, loss_bbox: 0.3207, loss_centerness: 0.5965, loss: 1.1509, grad_norm: 2.3348
1054
+ 2022-10-03 23:11:04,134 - mmdet - INFO - Iter [3900/12000] lr: 1.500e-02, eta: 0:14:15, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.2293, loss_bbox: 0.3234, loss_centerness: 0.5946, loss: 1.1473, grad_norm: 2.3656
1055
+ 2022-10-03 23:11:09,316 - mmdet - INFO - Iter [3950/12000] lr: 1.500e-02, eta: 0:14:10, time: 0.104, data_time: 0.006, memory: 3375, loss_cls: 0.2271, loss_bbox: 0.3254, loss_centerness: 0.5963, loss: 1.1489, grad_norm: 2.3502
1056
+ 2022-10-03 23:11:14,373 - mmdet - INFO - Exp name: fcos_mstrain_12k_voc0712.py
1057
+ 2022-10-03 23:11:14,373 - mmdet - INFO - Iter [4000/12000] lr: 1.500e-02, eta: 0:14:04, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2390, loss_bbox: 0.3419, loss_centerness: 0.5963, loss: 1.1772, grad_norm: 2.6043
1058
+ 2022-10-03 23:11:19,472 - mmdet - INFO - Iter [4050/12000] lr: 1.500e-02, eta: 0:13:59, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.2309, loss_bbox: 0.3362, loss_centerness: 0.5982, loss: 1.1653, grad_norm: 2.5254
1059
+ 2022-10-03 23:11:24,712 - mmdet - INFO - Iter [4100/12000] lr: 1.500e-02, eta: 0:13:53, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.2314, loss_bbox: 0.3494, loss_centerness: 0.5960, loss: 1.1768, grad_norm: 2.6908
1060
+ 2022-10-03 23:11:30,137 - mmdet - INFO - Iter [4150/12000] lr: 1.500e-02, eta: 0:13:48, time: 0.108, data_time: 0.006, memory: 3375, loss_cls: 0.2227, loss_bbox: 0.3214, loss_centerness: 0.5937, loss: 1.1378, grad_norm: 2.5844
1061
+ 2022-10-03 23:11:35,406 - mmdet - INFO - Iter [4200/12000] lr: 1.500e-02, eta: 0:13:43, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.2212, loss_bbox: 0.3142, loss_centerness: 0.5922, loss: 1.1276, grad_norm: 2.5120
1062
+ 2022-10-03 23:11:40,647 - mmdet - INFO - Iter [4250/12000] lr: 1.500e-02, eta: 0:13:38, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.2243, loss_bbox: 0.3261, loss_centerness: 0.5936, loss: 1.1441, grad_norm: 2.5729
1063
+ 2022-10-03 23:11:45,717 - mmdet - INFO - Iter [4300/12000] lr: 1.500e-02, eta: 0:13:32, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2274, loss_bbox: 0.3149, loss_centerness: 0.5937, loss: 1.1361, grad_norm: 2.5403
1064
+ 2022-10-03 23:11:50,857 - mmdet - INFO - Iter [4350/12000] lr: 1.500e-02, eta: 0:13:26, time: 0.103, data_time: 0.006, memory: 3375, loss_cls: 0.2301, loss_bbox: 0.3325, loss_centerness: 0.5947, loss: 1.1573, grad_norm: 2.6587
1065
+ 2022-10-03 23:11:55,876 - mmdet - INFO - Iter [4400/12000] lr: 1.500e-02, eta: 0:13:21, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.2274, loss_bbox: 0.3267, loss_centerness: 0.5945, loss: 1.1486, grad_norm: 2.4360
1066
+ 2022-10-03 23:12:00,878 - mmdet - INFO - Iter [4450/12000] lr: 1.500e-02, eta: 0:13:15, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.2180, loss_bbox: 0.3155, loss_centerness: 0.5942, loss: 1.1278, grad_norm: 2.4964
1067
+ 2022-10-03 23:12:05,906 - mmdet - INFO - Iter [4500/12000] lr: 1.500e-02, eta: 0:13:09, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2253, loss_bbox: 0.3083, loss_centerness: 0.5952, loss: 1.1288, grad_norm: 2.3991
1068
+ 2022-10-03 23:12:10,944 - mmdet - INFO - Iter [4550/12000] lr: 1.500e-02, eta: 0:13:04, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2151, loss_bbox: 0.3198, loss_centerness: 0.5954, loss: 1.1303, grad_norm: 2.4637
1069
+ 2022-10-03 23:12:16,026 - mmdet - INFO - Iter [4600/12000] lr: 1.500e-02, eta: 0:12:58, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.2297, loss_bbox: 0.3213, loss_centerness: 0.5944, loss: 1.1455, grad_norm: 2.5831
1070
+ 2022-10-03 23:12:21,092 - mmdet - INFO - Iter [4650/12000] lr: 1.500e-02, eta: 0:12:53, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2188, loss_bbox: 0.3081, loss_centerness: 0.5925, loss: 1.1193, grad_norm: 2.2491
1071
+ 2022-10-03 23:12:26,399 - mmdet - INFO - Iter [4700/12000] lr: 1.500e-02, eta: 0:12:47, time: 0.106, data_time: 0.007, memory: 3375, loss_cls: 0.2252, loss_bbox: 0.3300, loss_centerness: 0.5961, loss: 1.1514, grad_norm: 2.5577
1072
+ 2022-10-03 23:12:31,477 - mmdet - INFO - Iter [4750/12000] lr: 1.500e-02, eta: 0:12:42, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.2234, loss_bbox: 0.3177, loss_centerness: 0.5950, loss: 1.1361, grad_norm: 2.4445
1073
+ 2022-10-03 23:12:36,463 - mmdet - INFO - Iter [4800/12000] lr: 1.500e-02, eta: 0:12:36, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.2094, loss_bbox: 0.3308, loss_centerness: 0.5942, loss: 1.1344, grad_norm: 2.5108
1074
+ 2022-10-03 23:12:41,538 - mmdet - INFO - Iter [4850/12000] lr: 1.500e-02, eta: 0:12:31, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2180, loss_bbox: 0.2990, loss_centerness: 0.5935, loss: 1.1104, grad_norm: 2.3976
1075
+ 2022-10-03 23:12:46,590 - mmdet - INFO - Iter [4900/12000] lr: 1.500e-02, eta: 0:12:25, time: 0.101, data_time: 0.007, memory: 3375, loss_cls: 0.2187, loss_bbox: 0.3108, loss_centerness: 0.5938, loss: 1.1232, grad_norm: 2.4357
1076
+ 2022-10-03 23:12:51,755 - mmdet - INFO - Iter [4950/12000] lr: 1.500e-02, eta: 0:12:20, time: 0.103, data_time: 0.006, memory: 3375, loss_cls: 0.2167, loss_bbox: 0.3118, loss_centerness: 0.5939, loss: 1.1224, grad_norm: 2.4076
1077
+ 2022-10-03 23:12:56,821 - mmdet - INFO - Exp name: fcos_mstrain_12k_voc0712.py
1078
+ 2022-10-03 23:12:56,821 - mmdet - INFO - Iter [5000/12000] lr: 1.500e-02, eta: 0:12:14, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2298, loss_bbox: 0.3248, loss_centerness: 0.5956, loss: 1.1501, grad_norm: 2.5838
1079
+ 2022-10-03 23:13:01,852 - mmdet - INFO - Iter [5050/12000] lr: 1.500e-02, eta: 0:12:09, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2233, loss_bbox: 0.3420, loss_centerness: 0.5964, loss: 1.1616, grad_norm: 2.4916
1080
+ 2022-10-03 23:13:06,953 - mmdet - INFO - Iter [5100/12000] lr: 1.500e-02, eta: 0:12:03, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.2165, loss_bbox: 0.3110, loss_centerness: 0.5922, loss: 1.1196, grad_norm: 2.4828
1081
+ 2022-10-03 23:13:11,976 - mmdet - INFO - Iter [5150/12000] lr: 1.500e-02, eta: 0:11:58, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.2346, loss_bbox: 0.3290, loss_centerness: 0.5967, loss: 1.1603, grad_norm: 2.5321
1082
+ 2022-10-03 23:13:16,971 - mmdet - INFO - Iter [5200/12000] lr: 1.500e-02, eta: 0:11:52, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.2181, loss_bbox: 0.3112, loss_centerness: 0.5931, loss: 1.1224, grad_norm: 2.4412
1083
+ 2022-10-03 23:13:22,234 - mmdet - INFO - Iter [5250/12000] lr: 1.500e-02, eta: 0:11:47, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.2033, loss_bbox: 0.2971, loss_centerness: 0.5904, loss: 1.0909, grad_norm: 2.3256
1084
+ 2022-10-03 23:13:27,473 - mmdet - INFO - Iter [5300/12000] lr: 1.500e-02, eta: 0:11:42, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.2147, loss_bbox: 0.3131, loss_centerness: 0.5909, loss: 1.1187, grad_norm: 2.7216
1085
+ 2022-10-03 23:13:32,537 - mmdet - INFO - Iter [5350/12000] lr: 1.500e-02, eta: 0:11:36, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2090, loss_bbox: 0.3241, loss_centerness: 0.5906, loss: 1.1237, grad_norm: 2.6115
1086
+ 2022-10-03 23:13:37,672 - mmdet - INFO - Iter [5400/12000] lr: 1.500e-02, eta: 0:11:31, time: 0.103, data_time: 0.006, memory: 3375, loss_cls: 0.1970, loss_bbox: 0.2998, loss_centerness: 0.5933, loss: 1.0901, grad_norm: 2.3276
1087
+ 2022-10-03 23:13:42,654 - mmdet - INFO - Iter [5450/12000] lr: 1.500e-02, eta: 0:11:25, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.2139, loss_bbox: 0.3126, loss_centerness: 0.5936, loss: 1.1201, grad_norm: 2.6115
1088
+ 2022-10-03 23:13:47,792 - mmdet - INFO - Iter [5500/12000] lr: 1.500e-02, eta: 0:11:20, time: 0.103, data_time: 0.006, memory: 3375, loss_cls: 0.2196, loss_bbox: 0.2999, loss_centerness: 0.5937, loss: 1.1133, grad_norm: 2.3717
1089
+ 2022-10-03 23:13:52,833 - mmdet - INFO - Iter [5550/12000] lr: 1.500e-02, eta: 0:11:14, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2144, loss_bbox: 0.3060, loss_centerness: 0.5949, loss: 1.1154, grad_norm: 2.4405
1090
+ 2022-10-03 23:13:57,884 - mmdet - INFO - Iter [5600/12000] lr: 1.500e-02, eta: 0:11:09, time: 0.101, data_time: 0.007, memory: 3375, loss_cls: 0.2133, loss_bbox: 0.3024, loss_centerness: 0.5918, loss: 1.1075, grad_norm: 2.5034
1091
+ 2022-10-03 23:14:03,262 - mmdet - INFO - Iter [5650/12000] lr: 1.500e-02, eta: 0:11:04, time: 0.108, data_time: 0.006, memory: 3375, loss_cls: 0.1986, loss_bbox: 0.3024, loss_centerness: 0.5913, loss: 1.0922, grad_norm: 2.3927
1092
+ 2022-10-03 23:14:08,662 - mmdet - INFO - Iter [5700/12000] lr: 1.500e-02, eta: 0:10:59, time: 0.108, data_time: 0.006, memory: 3375, loss_cls: 0.2162, loss_bbox: 0.3056, loss_centerness: 0.5944, loss: 1.1162, grad_norm: 2.5149
1093
+ 2022-10-03 23:14:13,780 - mmdet - INFO - Iter [5750/12000] lr: 1.500e-02, eta: 0:10:54, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.2090, loss_bbox: 0.3121, loss_centerness: 0.5934, loss: 1.1145, grad_norm: 2.3781
1094
+ 2022-10-03 23:14:18,881 - mmdet - INFO - Iter [5800/12000] lr: 1.500e-02, eta: 0:10:48, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.2121, loss_bbox: 0.3099, loss_centerness: 0.5922, loss: 1.1142, grad_norm: 2.6576
1095
+ 2022-10-03 23:14:24,445 - mmdet - INFO - Iter [5850/12000] lr: 1.500e-02, eta: 0:10:43, time: 0.111, data_time: 0.006, memory: 3375, loss_cls: 0.2230, loss_bbox: 0.3037, loss_centerness: 0.5928, loss: 1.1195, grad_norm: 2.5658
1096
+ 2022-10-03 23:14:29,669 - mmdet - INFO - Iter [5900/12000] lr: 1.500e-02, eta: 0:10:38, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.2123, loss_bbox: 0.3129, loss_centerness: 0.5938, loss: 1.1190, grad_norm: 2.6781
1097
+ 2022-10-03 23:14:34,710 - mmdet - INFO - Iter [5950/12000] lr: 1.500e-02, eta: 0:10:33, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2086, loss_bbox: 0.3025, loss_centerness: 0.5915, loss: 1.1027, grad_norm: 2.4293
1098
+ 2022-10-03 23:14:39,785 - mmdet - INFO - Exp name: fcos_mstrain_12k_voc0712.py
1099
+ 2022-10-03 23:14:39,785 - mmdet - INFO - Iter [6000/12000] lr: 1.500e-02, eta: 0:10:27, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2063, loss_bbox: 0.3019, loss_centerness: 0.5926, loss: 1.1008, grad_norm: 2.4104
1100
+ 2022-10-03 23:14:44,827 - mmdet - INFO - Iter [6050/12000] lr: 1.500e-02, eta: 0:10:22, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2097, loss_bbox: 0.3115, loss_centerness: 0.5933, loss: 1.1145, grad_norm: 2.4511
1101
+ 2022-10-03 23:14:49,852 - mmdet - INFO - Iter [6100/12000] lr: 1.500e-02, eta: 0:10:16, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2069, loss_bbox: 0.2894, loss_centerness: 0.5897, loss: 1.0860, grad_norm: 2.3739
1102
+ 2022-10-03 23:14:54,937 - mmdet - INFO - Iter [6150/12000] lr: 1.500e-02, eta: 0:10:11, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.2026, loss_bbox: 0.3039, loss_centerness: 0.5934, loss: 1.0999, grad_norm: 2.4252
1103
+ 2022-10-03 23:15:00,058 - mmdet - INFO - Iter [6200/12000] lr: 1.500e-02, eta: 0:10:06, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.1982, loss_bbox: 0.2990, loss_centerness: 0.5923, loss: 1.0895, grad_norm: 2.4156
1104
+ 2022-10-03 23:15:05,099 - mmdet - INFO - Iter [6250/12000] lr: 1.500e-02, eta: 0:10:00, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.1977, loss_bbox: 0.3022, loss_centerness: 0.5897, loss: 1.0897, grad_norm: 2.5230
1105
+ 2022-10-03 23:15:10,411 - mmdet - INFO - Iter [6300/12000] lr: 1.500e-02, eta: 0:09:55, time: 0.106, data_time: 0.006, memory: 3375, loss_cls: 0.2001, loss_bbox: 0.2945, loss_centerness: 0.5892, loss: 1.0838, grad_norm: 2.5836
1106
+ 2022-10-03 23:15:15,557 - mmdet - INFO - Iter [6350/12000] lr: 1.500e-02, eta: 0:09:50, time: 0.103, data_time: 0.006, memory: 3375, loss_cls: 0.1997, loss_bbox: 0.2807, loss_centerness: 0.5900, loss: 1.0703, grad_norm: 2.3159
1107
+ 2022-10-03 23:15:21,104 - mmdet - INFO - Iter [6400/12000] lr: 1.500e-02, eta: 0:09:45, time: 0.111, data_time: 0.006, memory: 3375, loss_cls: 0.1946, loss_bbox: 0.2813, loss_centerness: 0.5894, loss: 1.0652, grad_norm: 2.4935
1108
+ 2022-10-03 23:15:26,515 - mmdet - INFO - Iter [6450/12000] lr: 1.500e-02, eta: 0:09:40, time: 0.108, data_time: 0.006, memory: 3375, loss_cls: 0.1993, loss_bbox: 0.2951, loss_centerness: 0.5912, loss: 1.0856, grad_norm: 2.3950
1109
+ 2022-10-03 23:15:31,748 - mmdet - INFO - Iter [6500/12000] lr: 1.500e-02, eta: 0:09:35, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.2053, loss_bbox: 0.3171, loss_centerness: 0.5932, loss: 1.1157, grad_norm: 2.6241
1110
+ 2022-10-03 23:15:36,910 - mmdet - INFO - Iter [6550/12000] lr: 1.500e-02, eta: 0:09:29, time: 0.103, data_time: 0.006, memory: 3375, loss_cls: 0.1946, loss_bbox: 0.2933, loss_centerness: 0.5913, loss: 1.0792, grad_norm: 2.3415
1111
+ 2022-10-03 23:15:41,932 - mmdet - INFO - Iter [6600/12000] lr: 1.500e-02, eta: 0:09:24, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.1969, loss_bbox: 0.2923, loss_centerness: 0.5900, loss: 1.0792, grad_norm: 2.4594
1112
+ 2022-10-03 23:15:47,006 - mmdet - INFO - Iter [6650/12000] lr: 1.500e-02, eta: 0:09:19, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.1952, loss_bbox: 0.2876, loss_centerness: 0.5895, loss: 1.0723, grad_norm: 2.4592
1113
+ 2022-10-03 23:15:52,065 - mmdet - INFO - Iter [6700/12000] lr: 1.500e-02, eta: 0:09:13, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2037, loss_bbox: 0.2893, loss_centerness: 0.5903, loss: 1.0833, grad_norm: 2.5001
1114
+ 2022-10-03 23:15:57,122 - mmdet - INFO - Iter [6750/12000] lr: 1.500e-02, eta: 0:09:08, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2060, loss_bbox: 0.3081, loss_centerness: 0.5938, loss: 1.1079, grad_norm: 2.4866
1115
+ 2022-10-03 23:16:02,117 - mmdet - INFO - Iter [6800/12000] lr: 1.500e-02, eta: 0:09:03, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.1931, loss_bbox: 0.2915, loss_centerness: 0.5892, loss: 1.0738, grad_norm: 2.4755
1116
+ 2022-10-03 23:16:07,139 - mmdet - INFO - Iter [6850/12000] lr: 1.500e-02, eta: 0:08:57, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.1999, loss_bbox: 0.3040, loss_centerness: 0.5905, loss: 1.0945, grad_norm: 2.5293
1117
+ 2022-10-03 23:16:12,233 - mmdet - INFO - Iter [6900/12000] lr: 1.500e-02, eta: 0:08:52, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.1905, loss_bbox: 0.3017, loss_centerness: 0.5918, loss: 1.0839, grad_norm: 2.4225
1118
+ 2022-10-03 23:16:17,543 - mmdet - INFO - Iter [6950/12000] lr: 1.500e-02, eta: 0:08:47, time: 0.106, data_time: 0.006, memory: 3375, loss_cls: 0.1933, loss_bbox: 0.2995, loss_centerness: 0.5909, loss: 1.0837, grad_norm: 2.4788
1119
+ 2022-10-03 23:16:22,605 - mmdet - INFO - Exp name: fcos_mstrain_12k_voc0712.py
1120
+ 2022-10-03 23:16:22,605 - mmdet - INFO - Iter [7000/12000] lr: 1.500e-02, eta: 0:08:41, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.2045, loss_bbox: 0.2978, loss_centerness: 0.5918, loss: 1.0941, grad_norm: 2.5168
1121
+ 2022-10-03 23:16:27,624 - mmdet - INFO - Iter [7050/12000] lr: 1.500e-02, eta: 0:08:36, time: 0.100, data_time: 0.007, memory: 3375, loss_cls: 0.1988, loss_bbox: 0.2890, loss_centerness: 0.5895, loss: 1.0774, grad_norm: 2.4041
1122
+ 2022-10-03 23:16:32,749 - mmdet - INFO - Iter [7100/12000] lr: 1.500e-02, eta: 0:08:31, time: 0.102, data_time: 0.007, memory: 3375, loss_cls: 0.1999, loss_bbox: 0.2849, loss_centerness: 0.5911, loss: 1.0759, grad_norm: 2.4509
1123
+ 2022-10-03 23:16:37,770 - mmdet - INFO - Iter [7150/12000] lr: 1.500e-02, eta: 0:08:25, time: 0.100, data_time: 0.007, memory: 3375, loss_cls: 0.2015, loss_bbox: 0.2874, loss_centerness: 0.5904, loss: 1.0793, grad_norm: 2.3829
1124
+ 2022-10-03 23:16:42,849 - mmdet - INFO - Iter [7200/12000] lr: 1.500e-02, eta: 0:08:20, time: 0.102, data_time: 0.007, memory: 3375, loss_cls: 0.2032, loss_bbox: 0.3085, loss_centerness: 0.5915, loss: 1.1032, grad_norm: 2.7239
1125
+ 2022-10-03 23:16:47,768 - mmdet - INFO - Iter [7250/12000] lr: 1.500e-02, eta: 0:08:15, time: 0.098, data_time: 0.007, memory: 3375, loss_cls: 0.1963, loss_bbox: 0.2936, loss_centerness: 0.5903, loss: 1.0802, grad_norm: 2.4589
1126
+ 2022-10-03 23:16:53,108 - mmdet - INFO - Iter [7300/12000] lr: 1.500e-02, eta: 0:08:10, time: 0.107, data_time: 0.007, memory: 3375, loss_cls: 0.1790, loss_bbox: 0.2900, loss_centerness: 0.5881, loss: 1.0571, grad_norm: 2.4370
1127
+ 2022-10-03 23:16:58,574 - mmdet - INFO - Iter [7350/12000] lr: 1.500e-02, eta: 0:08:04, time: 0.109, data_time: 0.006, memory: 3375, loss_cls: 0.1870, loss_bbox: 0.2812, loss_centerness: 0.5882, loss: 1.0564, grad_norm: 2.4754
1128
+ 2022-10-03 23:17:03,591 - mmdet - INFO - Iter [7400/12000] lr: 1.500e-02, eta: 0:07:59, time: 0.100, data_time: 0.007, memory: 3375, loss_cls: 0.1883, loss_bbox: 0.2878, loss_centerness: 0.5876, loss: 1.0637, grad_norm: 2.4153
1129
+ 2022-10-03 23:17:08,694 - mmdet - INFO - Iter [7450/12000] lr: 1.500e-02, eta: 0:07:54, time: 0.102, data_time: 0.007, memory: 3375, loss_cls: 0.1916, loss_bbox: 0.2811, loss_centerness: 0.5907, loss: 1.0635, grad_norm: 2.3504
1130
+ 2022-10-03 23:17:13,736 - mmdet - INFO - Iter [7500/12000] lr: 1.500e-02, eta: 0:07:49, time: 0.101, data_time: 0.007, memory: 3375, loss_cls: 0.1960, loss_bbox: 0.2863, loss_centerness: 0.5908, loss: 1.0732, grad_norm: 2.4362
1131
+ 2022-10-03 23:17:19,114 - mmdet - INFO - Iter [7550/12000] lr: 1.500e-02, eta: 0:07:43, time: 0.107, data_time: 0.007, memory: 3375, loss_cls: 0.1868, loss_bbox: 0.2858, loss_centerness: 0.5872, loss: 1.0598, grad_norm: 2.5911
1132
+ 2022-10-03 23:17:24,208 - mmdet - INFO - Iter [7600/12000] lr: 1.500e-02, eta: 0:07:38, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.1832, loss_bbox: 0.2850, loss_centerness: 0.5891, loss: 1.0573, grad_norm: 2.4779
1133
+ 2022-10-03 23:17:29,438 - mmdet - INFO - Iter [7650/12000] lr: 1.500e-02, eta: 0:07:33, time: 0.105, data_time: 0.007, memory: 3375, loss_cls: 0.1894, loss_bbox: 0.2913, loss_centerness: 0.5899, loss: 1.0707, grad_norm: 2.5428
1134
+ 2022-10-03 23:17:34,437 - mmdet - INFO - Iter [7700/12000] lr: 1.500e-02, eta: 0:07:28, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.1969, loss_bbox: 0.2840, loss_centerness: 0.5881, loss: 1.0691, grad_norm: 2.6974
1135
+ 2022-10-03 23:17:39,440 - mmdet - INFO - Iter [7750/12000] lr: 1.500e-02, eta: 0:07:22, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.1931, loss_bbox: 0.3048, loss_centerness: 0.5913, loss: 1.0892, grad_norm: 2.6059
1136
+ 2022-10-03 23:17:44,815 - mmdet - INFO - Iter [7800/12000] lr: 1.500e-02, eta: 0:07:17, time: 0.108, data_time: 0.006, memory: 3375, loss_cls: 0.1955, loss_bbox: 0.2941, loss_centerness: 0.5893, loss: 1.0789, grad_norm: 2.6701
1137
+ 2022-10-03 23:17:50,063 - mmdet - INFO - Iter [7850/12000] lr: 1.500e-02, eta: 0:07:12, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.1865, loss_bbox: 0.2720, loss_centerness: 0.5877, loss: 1.0462, grad_norm: 2.3621
1138
+ 2022-10-03 23:17:54,985 - mmdet - INFO - Iter [7900/12000] lr: 1.500e-02, eta: 0:07:07, time: 0.098, data_time: 0.006, memory: 3375, loss_cls: 0.1968, loss_bbox: 0.2950, loss_centerness: 0.5900, loss: 1.0818, grad_norm: 2.6020
1139
+ 2022-10-03 23:18:00,052 - mmdet - INFO - Iter [7950/12000] lr: 1.500e-02, eta: 0:07:01, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.1923, loss_bbox: 0.2947, loss_centerness: 0.5895, loss: 1.0765, grad_norm: 2.6275
1140
+ 2022-10-03 23:18:05,119 - mmdet - INFO - Exp name: fcos_mstrain_12k_voc0712.py
1141
+ 2022-10-03 23:18:05,119 - mmdet - INFO - Iter [8000/12000] lr: 1.500e-02, eta: 0:06:56, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.1962, loss_bbox: 0.2897, loss_centerness: 0.5913, loss: 1.0771, grad_norm: 2.4957
1142
+ 2022-10-03 23:18:10,151 - mmdet - INFO - Iter [8050/12000] lr: 1.500e-02, eta: 0:06:51, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.1973, loss_bbox: 0.2891, loss_centerness: 0.5909, loss: 1.0773, grad_norm: 2.5776
1143
+ 2022-10-03 23:18:15,373 - mmdet - INFO - Iter [8100/12000] lr: 1.500e-02, eta: 0:06:46, time: 0.104, data_time: 0.006, memory: 3375, loss_cls: 0.1929, loss_bbox: 0.2841, loss_centerness: 0.5903, loss: 1.0673, grad_norm: 2.5122
1144
+ 2022-10-03 23:18:20,522 - mmdet - INFO - Iter [8150/12000] lr: 1.500e-02, eta: 0:06:40, time: 0.103, data_time: 0.006, memory: 3375, loss_cls: 0.1960, loss_bbox: 0.2843, loss_centerness: 0.5869, loss: 1.0673, grad_norm: 2.6011
1145
+ 2022-10-03 23:18:25,561 - mmdet - INFO - Iter [8200/12000] lr: 1.500e-02, eta: 0:06:35, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.1907, loss_bbox: 0.2878, loss_centerness: 0.5906, loss: 1.0691, grad_norm: 2.4059
1146
+ 2022-10-03 23:18:30,632 - mmdet - INFO - Iter [8250/12000] lr: 1.500e-02, eta: 0:06:30, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.1973, loss_bbox: 0.2953, loss_centerness: 0.5907, loss: 1.0833, grad_norm: 2.3690
1147
+ 2022-10-03 23:18:35,602 - mmdet - INFO - Iter [8300/12000] lr: 1.500e-02, eta: 0:06:24, time: 0.099, data_time: 0.006, memory: 3375, loss_cls: 0.1849, loss_bbox: 0.2971, loss_centerness: 0.5895, loss: 1.0716, grad_norm: 2.5134
1148
+ 2022-10-03 23:18:40,600 - mmdet - INFO - Iter [8350/12000] lr: 1.500e-02, eta: 0:06:19, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.1811, loss_bbox: 0.2672, loss_centerness: 0.5855, loss: 1.0339, grad_norm: 2.4261
1149
+ 2022-10-03 23:18:45,619 - mmdet - INFO - Iter [8400/12000] lr: 1.500e-02, eta: 0:06:14, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.1758, loss_bbox: 0.2833, loss_centerness: 0.5882, loss: 1.0473, grad_norm: 2.6022
1150
+ 2022-10-03 23:18:50,700 - mmdet - INFO - Iter [8450/12000] lr: 1.500e-02, eta: 0:06:09, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.1811, loss_bbox: 0.2762, loss_centerness: 0.5893, loss: 1.0466, grad_norm: 2.3019
1151
+ 2022-10-03 23:18:55,706 - mmdet - INFO - Iter [8500/12000] lr: 1.500e-02, eta: 0:06:03, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.1827, loss_bbox: 0.3011, loss_centerness: 0.5893, loss: 1.0731, grad_norm: 2.5493
1152
+ 2022-10-03 23:19:00,820 - mmdet - INFO - Iter [8550/12000] lr: 1.500e-02, eta: 0:05:58, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.1774, loss_bbox: 0.2718, loss_centerness: 0.5866, loss: 1.0358, grad_norm: 2.5059
1153
+ 2022-10-03 23:19:06,073 - mmdet - INFO - Iter [8600/12000] lr: 1.500e-02, eta: 0:05:53, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.1807, loss_bbox: 0.2822, loss_centerness: 0.5873, loss: 1.0503, grad_norm: 2.5063
1154
+ 2022-10-03 23:19:11,427 - mmdet - INFO - Iter [8650/12000] lr: 1.500e-02, eta: 0:05:48, time: 0.107, data_time: 0.006, memory: 3375, loss_cls: 0.1833, loss_bbox: 0.2826, loss_centerness: 0.5887, loss: 1.0546, grad_norm: 2.5176
1155
+ 2022-10-03 23:19:16,627 - mmdet - INFO - Iter [8700/12000] lr: 1.500e-02, eta: 0:05:43, time: 0.104, data_time: 0.006, memory: 3375, loss_cls: 0.1896, loss_bbox: 0.2769, loss_centerness: 0.5886, loss: 1.0551, grad_norm: 2.4687
1156
+ 2022-10-03 23:19:21,661 - mmdet - INFO - Iter [8750/12000] lr: 1.500e-02, eta: 0:05:37, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.1801, loss_bbox: 0.2793, loss_centerness: 0.5897, loss: 1.0492, grad_norm: 2.3412
1157
+ 2022-10-03 23:19:26,707 - mmdet - INFO - Iter [8800/12000] lr: 1.500e-02, eta: 0:05:32, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.1750, loss_bbox: 0.2684, loss_centerness: 0.5876, loss: 1.0310, grad_norm: 2.4336
1158
+ 2022-10-03 23:19:31,728 - mmdet - INFO - Iter [8850/12000] lr: 1.500e-02, eta: 0:05:27, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.1809, loss_bbox: 0.2817, loss_centerness: 0.5880, loss: 1.0506, grad_norm: 2.6375
1159
+ 2022-10-03 23:19:36,771 - mmdet - INFO - Iter [8900/12000] lr: 1.500e-02, eta: 0:05:22, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.1835, loss_bbox: 0.2802, loss_centerness: 0.5887, loss: 1.0524, grad_norm: 2.5153
1160
+ 2022-10-03 23:19:41,994 - mmdet - INFO - Iter [8950/12000] lr: 1.500e-02, eta: 0:05:16, time: 0.104, data_time: 0.006, memory: 3375, loss_cls: 0.1850, loss_bbox: 0.2791, loss_centerness: 0.5898, loss: 1.0540, grad_norm: 2.4977
1161
+ 2022-10-03 23:19:47,205 - mmdet - INFO - Exp name: fcos_mstrain_12k_voc0712.py
1162
+ 2022-10-03 23:19:47,206 - mmdet - INFO - Iter [9000/12000] lr: 1.500e-02, eta: 0:05:11, time: 0.104, data_time: 0.006, memory: 3375, loss_cls: 0.1873, loss_bbox: 0.2851, loss_centerness: 0.5910, loss: 1.0634, grad_norm: 2.3387
1163
+ 2022-10-03 23:19:52,498 - mmdet - INFO - Iter [9050/12000] lr: 1.500e-03, eta: 0:05:06, time: 0.106, data_time: 0.006, memory: 3375, loss_cls: 0.1636, loss_bbox: 0.2461, loss_centerness: 0.5873, loss: 0.9969, grad_norm: 1.9895
1164
+ 2022-10-03 23:19:57,639 - mmdet - INFO - Iter [9100/12000] lr: 1.500e-03, eta: 0:05:01, time: 0.103, data_time: 0.006, memory: 3375, loss_cls: 0.1572, loss_bbox: 0.2387, loss_centerness: 0.5853, loss: 0.9812, grad_norm: 1.8355
1165
+ 2022-10-03 23:20:02,708 - mmdet - INFO - Iter [9150/12000] lr: 1.500e-03, eta: 0:04:56, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.1616, loss_bbox: 0.2397, loss_centerness: 0.5860, loss: 0.9873, grad_norm: 1.8689
1166
+ 2022-10-03 23:20:08,033 - mmdet - INFO - Iter [9200/12000] lr: 1.500e-03, eta: 0:04:50, time: 0.106, data_time: 0.006, memory: 3375, loss_cls: 0.1540, loss_bbox: 0.2367, loss_centerness: 0.5874, loss: 0.9782, grad_norm: 1.8470
1167
+ 2022-10-03 23:20:13,407 - mmdet - INFO - Iter [9250/12000] lr: 1.500e-03, eta: 0:04:45, time: 0.107, data_time: 0.006, memory: 3375, loss_cls: 0.1511, loss_bbox: 0.2405, loss_centerness: 0.5854, loss: 0.9771, grad_norm: 1.8256
1168
+ 2022-10-03 23:20:18,697 - mmdet - INFO - Iter [9300/12000] lr: 1.500e-03, eta: 0:04:40, time: 0.106, data_time: 0.006, memory: 3375, loss_cls: 0.1552, loss_bbox: 0.2260, loss_centerness: 0.5852, loss: 0.9664, grad_norm: 1.7953
1169
+ 2022-10-03 23:20:23,716 - mmdet - INFO - Iter [9350/12000] lr: 1.500e-03, eta: 0:04:35, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.1468, loss_bbox: 0.2259, loss_centerness: 0.5828, loss: 0.9555, grad_norm: 1.8571
1170
+ 2022-10-03 23:20:28,841 - mmdet - INFO - Iter [9400/12000] lr: 1.500e-03, eta: 0:04:30, time: 0.102, data_time: 0.007, memory: 3375, loss_cls: 0.1413, loss_bbox: 0.2216, loss_centerness: 0.5825, loss: 0.9454, grad_norm: 1.7513
1171
+ 2022-10-03 23:20:33,859 - mmdet - INFO - Iter [9450/12000] lr: 1.500e-03, eta: 0:04:24, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.1407, loss_bbox: 0.2201, loss_centerness: 0.5808, loss: 0.9416, grad_norm: 1.7782
1172
+ 2022-10-03 23:20:39,102 - mmdet - INFO - Iter [9500/12000] lr: 1.500e-03, eta: 0:04:19, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.1407, loss_bbox: 0.2198, loss_centerness: 0.5811, loss: 0.9416, grad_norm: 1.7229
1173
+ 2022-10-03 23:20:44,395 - mmdet - INFO - Iter [9550/12000] lr: 1.500e-03, eta: 0:04:14, time: 0.106, data_time: 0.006, memory: 3375, loss_cls: 0.1402, loss_bbox: 0.2166, loss_centerness: 0.5823, loss: 0.9391, grad_norm: 1.7978
1174
+ 2022-10-03 23:20:49,562 - mmdet - INFO - Iter [9600/12000] lr: 1.500e-03, eta: 0:04:09, time: 0.103, data_time: 0.006, memory: 3375, loss_cls: 0.1398, loss_bbox: 0.2233, loss_centerness: 0.5835, loss: 0.9466, grad_norm: 1.7395
1175
+ 2022-10-03 23:20:54,590 - mmdet - INFO - Iter [9650/12000] lr: 1.500e-03, eta: 0:04:04, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.1310, loss_bbox: 0.2175, loss_centerness: 0.5841, loss: 0.9325, grad_norm: 1.7419
1176
+ 2022-10-03 23:20:59,841 - mmdet - INFO - Iter [9700/12000] lr: 1.500e-03, eta: 0:03:58, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.1369, loss_bbox: 0.2151, loss_centerness: 0.5828, loss: 0.9348, grad_norm: 1.7701
1177
+ 2022-10-03 23:21:05,135 - mmdet - INFO - Iter [9750/12000] lr: 1.500e-03, eta: 0:03:53, time: 0.106, data_time: 0.006, memory: 3375, loss_cls: 0.1395, loss_bbox: 0.2123, loss_centerness: 0.5825, loss: 0.9343, grad_norm: 1.8823
1178
+ 2022-10-03 23:21:10,242 - mmdet - INFO - Iter [9800/12000] lr: 1.500e-03, eta: 0:03:48, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.1400, loss_bbox: 0.2160, loss_centerness: 0.5829, loss: 0.9390, grad_norm: 1.7530
1179
+ 2022-10-03 23:21:15,244 - mmdet - INFO - Iter [9850/12000] lr: 1.500e-03, eta: 0:03:43, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.1402, loss_bbox: 0.2162, loss_centerness: 0.5829, loss: 0.9394, grad_norm: 1.8387
1180
+ 2022-10-03 23:21:20,277 - mmdet - INFO - Iter [9900/12000] lr: 1.500e-03, eta: 0:03:38, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.1377, loss_bbox: 0.2241, loss_centerness: 0.5840, loss: 0.9459, grad_norm: 1.7914
1181
+ 2022-10-03 23:21:25,462 - mmdet - INFO - Iter [9950/12000] lr: 1.500e-03, eta: 0:03:32, time: 0.104, data_time: 0.006, memory: 3375, loss_cls: 0.1374, loss_bbox: 0.2235, loss_centerness: 0.5838, loss: 0.9447, grad_norm: 1.7652
1182
+ 2022-10-03 23:21:31,026 - mmdet - INFO - Exp name: fcos_mstrain_12k_voc0712.py
1183
+ 2022-10-03 23:21:31,026 - mmdet - INFO - Iter [10000/12000] lr: 1.500e-03, eta: 0:03:27, time: 0.111, data_time: 0.006, memory: 3375, loss_cls: 0.1387, loss_bbox: 0.2153, loss_centerness: 0.5808, loss: 0.9347, grad_norm: 1.8058
1184
+ 2022-10-03 23:21:36,386 - mmdet - INFO - Iter [10050/12000] lr: 1.500e-03, eta: 0:03:22, time: 0.107, data_time: 0.006, memory: 3375, loss_cls: 0.1394, loss_bbox: 0.2170, loss_centerness: 0.5833, loss: 0.9398, grad_norm: 1.8790
1185
+ 2022-10-03 23:21:41,757 - mmdet - INFO - Iter [10100/12000] lr: 1.500e-03, eta: 0:03:17, time: 0.107, data_time: 0.006, memory: 3375, loss_cls: 0.1397, loss_bbox: 0.2223, loss_centerness: 0.5848, loss: 0.9468, grad_norm: 1.8400
1186
+ 2022-10-03 23:21:47,164 - mmdet - INFO - Iter [10150/12000] lr: 1.500e-03, eta: 0:03:12, time: 0.108, data_time: 0.006, memory: 3375, loss_cls: 0.1342, loss_bbox: 0.2153, loss_centerness: 0.5826, loss: 0.9321, grad_norm: 1.7860
1187
+ 2022-10-03 23:21:52,515 - mmdet - INFO - Iter [10200/12000] lr: 1.500e-03, eta: 0:03:07, time: 0.107, data_time: 0.006, memory: 3375, loss_cls: 0.1382, loss_bbox: 0.2165, loss_centerness: 0.5828, loss: 0.9375, grad_norm: 1.8351
1188
+ 2022-10-03 23:21:57,603 - mmdet - INFO - Iter [10250/12000] lr: 1.500e-03, eta: 0:03:01, time: 0.102, data_time: 0.007, memory: 3375, loss_cls: 0.1360, loss_bbox: 0.2164, loss_centerness: 0.5822, loss: 0.9347, grad_norm: 1.8078
1189
+ 2022-10-03 23:22:02,711 - mmdet - INFO - Iter [10300/12000] lr: 1.500e-03, eta: 0:02:56, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.1381, loss_bbox: 0.2194, loss_centerness: 0.5833, loss: 0.9407, grad_norm: 1.8150
1190
+ 2022-10-03 23:22:07,799 - mmdet - INFO - Iter [10350/12000] lr: 1.500e-03, eta: 0:02:51, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.1409, loss_bbox: 0.2201, loss_centerness: 0.5846, loss: 0.9456, grad_norm: 1.8660
1191
+ 2022-10-03 23:22:12,820 - mmdet - INFO - Iter [10400/12000] lr: 1.500e-03, eta: 0:02:46, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.1360, loss_bbox: 0.2176, loss_centerness: 0.5821, loss: 0.9357, grad_norm: 1.8105
1192
+ 2022-10-03 23:22:17,815 - mmdet - INFO - Iter [10450/12000] lr: 1.500e-03, eta: 0:02:41, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.1321, loss_bbox: 0.2089, loss_centerness: 0.5828, loss: 0.9239, grad_norm: 1.7748
1193
+ 2022-10-03 23:22:22,853 - mmdet - INFO - Iter [10500/12000] lr: 1.500e-03, eta: 0:02:35, time: 0.101, data_time: 0.007, memory: 3375, loss_cls: 0.1363, loss_bbox: 0.2266, loss_centerness: 0.5853, loss: 0.9483, grad_norm: 1.7853
1194
+ 2022-10-03 23:22:27,885 - mmdet - INFO - Iter [10550/12000] lr: 1.500e-03, eta: 0:02:30, time: 0.101, data_time: 0.007, memory: 3375, loss_cls: 0.1343, loss_bbox: 0.2127, loss_centerness: 0.5820, loss: 0.9290, grad_norm: 1.8488
1195
+ 2022-10-03 23:22:32,971 - mmdet - INFO - Iter [10600/12000] lr: 1.500e-03, eta: 0:02:25, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.1314, loss_bbox: 0.2115, loss_centerness: 0.5817, loss: 0.9246, grad_norm: 1.8739
1196
+ 2022-10-03 23:22:38,025 - mmdet - INFO - Iter [10650/12000] lr: 1.500e-03, eta: 0:02:20, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.1326, loss_bbox: 0.2134, loss_centerness: 0.5831, loss: 0.9291, grad_norm: 1.8480
1197
+ 2022-10-03 23:22:43,120 - mmdet - INFO - Iter [10700/12000] lr: 1.500e-03, eta: 0:02:14, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.1348, loss_bbox: 0.2162, loss_centerness: 0.5823, loss: 0.9333, grad_norm: 1.8150
1198
+ 2022-10-03 23:22:48,378 - mmdet - INFO - Iter [10750/12000] lr: 1.500e-03, eta: 0:02:09, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.1357, loss_bbox: 0.2097, loss_centerness: 0.5817, loss: 0.9272, grad_norm: 1.8364
1199
+ 2022-10-03 23:22:53,464 - mmdet - INFO - Iter [10800/12000] lr: 1.500e-03, eta: 0:02:04, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.1358, loss_bbox: 0.2174, loss_centerness: 0.5816, loss: 0.9347, grad_norm: 1.8194
1200
+ 2022-10-03 23:22:58,782 - mmdet - INFO - Iter [10850/12000] lr: 1.500e-03, eta: 0:01:59, time: 0.106, data_time: 0.007, memory: 3375, loss_cls: 0.1340, loss_bbox: 0.2063, loss_centerness: 0.5809, loss: 0.9212, grad_norm: 1.8542
1201
+ 2022-10-03 23:23:04,036 - mmdet - INFO - Iter [10900/12000] lr: 1.500e-03, eta: 0:01:54, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.1306, loss_bbox: 0.2119, loss_centerness: 0.5821, loss: 0.9246, grad_norm: 1.8321
1202
+ 2022-10-03 23:23:09,311 - mmdet - INFO - Iter [10950/12000] lr: 1.500e-03, eta: 0:01:49, time: 0.106, data_time: 0.006, memory: 3375, loss_cls: 0.1319, loss_bbox: 0.2146, loss_centerness: 0.5822, loss: 0.9287, grad_norm: 1.8227
1203
+ 2022-10-03 23:23:14,275 - mmdet - INFO - Exp name: fcos_mstrain_12k_voc0712.py
1204
+ 2022-10-03 23:23:14,275 - mmdet - INFO - Iter [11000/12000] lr: 1.500e-03, eta: 0:01:43, time: 0.099, data_time: 0.006, memory: 3375, loss_cls: 0.1372, loss_bbox: 0.2184, loss_centerness: 0.5836, loss: 0.9391, grad_norm: 1.8240
1205
+ 2022-10-03 23:23:19,434 - mmdet - INFO - Iter [11050/12000] lr: 1.500e-04, eta: 0:01:38, time: 0.103, data_time: 0.006, memory: 3375, loss_cls: 0.1325, loss_bbox: 0.2064, loss_centerness: 0.5815, loss: 0.9204, grad_norm: 1.8704
1206
+ 2022-10-03 23:23:24,889 - mmdet - INFO - Iter [11100/12000] lr: 1.500e-04, eta: 0:01:33, time: 0.109, data_time: 0.006, memory: 3375, loss_cls: 0.1328, loss_bbox: 0.2112, loss_centerness: 0.5817, loss: 0.9256, grad_norm: 1.7883
1207
+ 2022-10-03 23:23:30,285 - mmdet - INFO - Iter [11150/12000] lr: 1.500e-04, eta: 0:01:28, time: 0.108, data_time: 0.006, memory: 3375, loss_cls: 0.1279, loss_bbox: 0.2029, loss_centerness: 0.5819, loss: 0.9127, grad_norm: 1.7456
1208
+ 2022-10-03 23:23:35,379 - mmdet - INFO - Iter [11200/12000] lr: 1.500e-04, eta: 0:01:23, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.1327, loss_bbox: 0.2078, loss_centerness: 0.5804, loss: 0.9210, grad_norm: 1.7849
1209
+ 2022-10-03 23:23:40,596 - mmdet - INFO - Iter [11250/12000] lr: 1.500e-04, eta: 0:01:17, time: 0.104, data_time: 0.006, memory: 3375, loss_cls: 0.1286, loss_bbox: 0.2092, loss_centerness: 0.5804, loss: 0.9182, grad_norm: 1.7203
1210
+ 2022-10-03 23:23:45,639 - mmdet - INFO - Iter [11300/12000] lr: 1.500e-04, eta: 0:01:12, time: 0.101, data_time: 0.006, memory: 3375, loss_cls: 0.1283, loss_bbox: 0.2036, loss_centerness: 0.5813, loss: 0.9133, grad_norm: 1.7688
1211
+ 2022-10-03 23:23:50,736 - mmdet - INFO - Iter [11350/12000] lr: 1.500e-04, eta: 0:01:07, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.1354, loss_bbox: 0.2096, loss_centerness: 0.5816, loss: 0.9266, grad_norm: 1.7905
1212
+ 2022-10-03 23:23:55,741 - mmdet - INFO - Iter [11400/12000] lr: 1.500e-04, eta: 0:01:02, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.1284, loss_bbox: 0.2049, loss_centerness: 0.5813, loss: 0.9146, grad_norm: 1.6940
1213
+ 2022-10-03 23:24:00,882 - mmdet - INFO - Iter [11450/12000] lr: 1.500e-04, eta: 0:00:57, time: 0.103, data_time: 0.006, memory: 3375, loss_cls: 0.1310, loss_bbox: 0.2129, loss_centerness: 0.5837, loss: 0.9276, grad_norm: 1.7064
1214
+ 2022-10-03 23:24:06,044 - mmdet - INFO - Iter [11500/12000] lr: 1.500e-04, eta: 0:00:51, time: 0.103, data_time: 0.006, memory: 3375, loss_cls: 0.1273, loss_bbox: 0.2013, loss_centerness: 0.5800, loss: 0.9086, grad_norm: 1.7567
1215
+ 2022-10-03 23:24:11,063 - mmdet - INFO - Iter [11550/12000] lr: 1.500e-04, eta: 0:00:46, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.1315, loss_bbox: 0.2136, loss_centerness: 0.5837, loss: 0.9288, grad_norm: 1.7617
1216
+ 2022-10-03 23:24:16,060 - mmdet - INFO - Iter [11600/12000] lr: 1.500e-04, eta: 0:00:41, time: 0.100, data_time: 0.006, memory: 3375, loss_cls: 0.1280, loss_bbox: 0.2022, loss_centerness: 0.5803, loss: 0.9105, grad_norm: 1.7358
1217
+ 2022-10-03 23:24:21,325 - mmdet - INFO - Iter [11650/12000] lr: 1.500e-04, eta: 0:00:36, time: 0.105, data_time: 0.007, memory: 3375, loss_cls: 0.1357, loss_bbox: 0.2126, loss_centerness: 0.5832, loss: 0.9316, grad_norm: 1.8105
1218
+ 2022-10-03 23:24:26,491 - mmdet - INFO - Iter [11700/12000] lr: 1.500e-04, eta: 0:00:31, time: 0.103, data_time: 0.006, memory: 3375, loss_cls: 0.1233, loss_bbox: 0.2019, loss_centerness: 0.5799, loss: 0.9051, grad_norm: 1.6989
1219
+ 2022-10-03 23:24:31,573 - mmdet - INFO - Iter [11750/12000] lr: 1.500e-04, eta: 0:00:25, time: 0.102, data_time: 0.006, memory: 3375, loss_cls: 0.1286, loss_bbox: 0.2183, loss_centerness: 0.5824, loss: 0.9292, grad_norm: 1.6568
1220
+ 2022-10-03 23:24:36,544 - mmdet - INFO - Iter [11800/12000] lr: 1.500e-04, eta: 0:00:20, time: 0.099, data_time: 0.007, memory: 3375, loss_cls: 0.1289, loss_bbox: 0.2059, loss_centerness: 0.5816, loss: 0.9164, grad_norm: 1.7113
1221
+ 2022-10-03 23:24:41,573 - mmdet - INFO - Iter [11850/12000] lr: 1.500e-04, eta: 0:00:15, time: 0.101, data_time: 0.007, memory: 3375, loss_cls: 0.1272, loss_bbox: 0.2092, loss_centerness: 0.5814, loss: 0.9177, grad_norm: 1.7427
1222
+ 2022-10-03 23:24:46,727 - mmdet - INFO - Iter [11900/12000] lr: 1.500e-04, eta: 0:00:10, time: 0.103, data_time: 0.007, memory: 3375, loss_cls: 0.1298, loss_bbox: 0.2030, loss_centerness: 0.5815, loss: 0.9143, grad_norm: 1.7562
1223
+ 2022-10-03 23:24:51,989 - mmdet - INFO - Iter [11950/12000] lr: 1.500e-04, eta: 0:00:05, time: 0.105, data_time: 0.006, memory: 3375, loss_cls: 0.1301, loss_bbox: 0.2108, loss_centerness: 0.5819, loss: 0.9228, grad_norm: 1.7248
1224
+ 2022-10-03 23:24:57,103 - mmdet - INFO - Saving checkpoint at 12000 iterations
1225
+ 2022-10-03 23:24:57,589 - mmdet - INFO - Exp name: fcos_mstrain_12k_voc0712.py
1226
+ 2022-10-03 23:24:57,589 - mmdet - INFO - Iter [12000/12000] lr: 1.500e-04, eta: 0:00:00, time: 0.112, data_time: 0.006, memory: 3375, loss_cls: 0.1293, loss_bbox: 0.2050, loss_centerness: 0.5805, loss: 0.9149, grad_norm: 1.7602
1227
+ 2022-10-03 23:25:20,639 - mmdet - INFO -
1228
+ +-------------+------+-------+--------+-------+
1229
+ | class | gts | dets | recall | ap |
1230
+ +-------------+------+-------+--------+-------+
1231
+ | aeroplane | 285 | 6095 | 0.993 | 0.874 |
1232
+ | bicycle | 337 | 9704 | 0.988 | 0.819 |
1233
+ | bird | 459 | 9515 | 0.980 | 0.833 |
1234
+ | boat | 263 | 12281 | 0.962 | 0.736 |
1235
+ | bottle | 469 | 17289 | 0.932 | 0.705 |
1236
+ | bus | 213 | 7837 | 0.981 | 0.851 |
1237
+ | car | 1201 | 23829 | 0.990 | 0.883 |
1238
+ | cat | 358 | 6674 | 0.992 | 0.885 |
1239
+ | chair | 756 | 31606 | 0.963 | 0.661 |
1240
+ | cow | 244 | 6770 | 1.000 | 0.861 |
1241
+ | diningtable | 206 | 17352 | 0.942 | 0.701 |
1242
+ | dog | 489 | 8899 | 1.000 | 0.880 |
1243
+ | horse | 348 | 8452 | 0.989 | 0.822 |
1244
+ | motorbike | 325 | 9942 | 0.972 | 0.826 |
1245
+ | person | 4528 | 57741 | 0.978 | 0.854 |
1246
+ | pottedplant | 480 | 19546 | 0.931 | 0.586 |
1247
+ | sheep | 242 | 6822 | 0.983 | 0.837 |
1248
+ | sofa | 239 | 11389 | 0.975 | 0.716 |
1249
+ | train | 282 | 7815 | 0.986 | 0.861 |
1250
+ | tvmonitor | 308 | 10825 | 0.974 | 0.841 |
1251
+ +-------------+------+-------+--------+-------+
1252
+ | mAP | | | | 0.802 |
1253
+ +-------------+------+-------+--------+-------+
1254
+ 2022-10-03 23:25:21,153 - mmdet - INFO - Now best checkpoint is saved as best_mAP_iter_12000.pth.
1255
+ 2022-10-03 23:25:21,154 - mmdet - INFO - Best mAP is 0.8015 at 12000 iter.
1256
+ 2022-10-03 23:25:21,154 - mmdet - INFO - Exp name: fcos_mstrain_12k_voc0712.py
1257
+ 2022-10-03 23:25:21,154 - mmdet - INFO - Iter(val) [619] mAP: 0.8015, AP50: 0.8020
finetune/finetune_fcos_12k_voc0712_lr1.5e-2_wd5e-5/20221003_230350.log.json ADDED
@@ -0,0 +1,242 @@
1
+ {"env_info": "sys.platform: linux\nPython: 3.7.3 (default, Jan 22 2021, 20:04:44) [GCC 8.3.0]\nCUDA available: True\nGPU 0,1,2,3,4,5,6,7: A100-SXM-80GB\nCUDA_HOME: /usr/local/cuda\nNVCC: Cuda compilation tools, release 11.3, V11.3.109\nGCC: x86_64-linux-gnu-gcc (Debian 8.3.0-6) 8.3.0\nPyTorch: 1.10.0\nPyTorch compiling details: PyTorch built with:\n - GCC 7.3\n - C++ Version: 201402\n - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\n - Intel(R) MKL-DNN v2.2.3 (Git Hash 7336ca9f055cf1bfa13efb658fe15dc9b41f0740)\n - OpenMP 201511 (a.k.a. OpenMP 4.5)\n - LAPACK is enabled (usually provided by MKL)\n - NNPACK is enabled\n - CPU capability usage: AVX512\n - CUDA Runtime 11.3\n - NVCC architecture flags: -gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\n - CuDNN 8.2\n - Magma 2.5.2\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.10.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, \n\nTorchVision: 0.11.1+cu113\nOpenCV: 4.6.0\nMMCV: 1.6.1\nMMCV Compiler: GCC 9.3\nMMCV CUDA Compiler: 11.3\nMMDetection: 2.25.2+87c120c", "config": "model = dict(\n type='FCOS',\n backbone=dict(\n type='ResNet',\n depth=50,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n frozen_stages=1,\n norm_cfg=dict(type='SyncBN', requires_grad=True),\n norm_eval=True,\n style='pytorch',\n init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n neck=dict(\n type='FPN',\n in_channels=[256, 512, 1024, 2048],\n out_channels=256,\n start_level=1,\n add_extra_convs='on_output',\n num_outs=5,\n relu_before_extra_convs=True,\n norm_cfg=dict(type='SyncBN', requires_grad=True)),\n bbox_head=dict(\n type='FCOSHead',\n num_classes=20,\n in_channels=256,\n stacked_convs=4,\n feat_channels=256,\n strides=[8, 16, 32, 64, 128],\n loss_cls=dict(\n type='FocalLoss',\n use_sigmoid=True,\n gamma=2.0,\n alpha=0.25,\n loss_weight=1.0),\n loss_bbox=dict(type='IoULoss', loss_weight=1.0),\n loss_centerness=dict(\n type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),\n train_cfg=dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.5,\n neg_iou_thr=0.4,\n min_pos_iou=0,\n ignore_iof_thr=-1),\n allowed_border=-1,\n pos_weight=-1,\n debug=False),\n test_cfg=dict(\n nms_pre=1000,\n min_bbox_size=0,\n score_thr=0.05,\n 
nms=dict(type='nms', iou_threshold=0.5),\n max_per_img=100))\ndataset_type = 'VOCDataset'\ndata_root = 'data/VOCdevkit/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(\n type='Resize',\n img_scale=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),\n (1333, 608), (1333, 640), (1333, 672), (1333, 704),\n (1333, 736), (1333, 768), (1333, 800)],\n multiscale_mode='value',\n keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(1333, 800),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n]\ndata = dict(\n samples_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type='VOCDataset',\n ann_file=[\n 'data/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt',\n 'data/VOCdevkit/VOC2012/ImageSets/Main/trainval.txt'\n ],\n img_prefix=['data/VOCdevkit/VOC2007/', 'data/VOCdevkit/VOC2012/'],\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(\n type='Resize',\n img_scale=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),\n (1333, 608), (1333, 640), (1333, 672), (1333, 704),\n (1333, 736), (1333, 768), (1333, 800)],\n multiscale_mode='value',\n keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])\n ]),\n val=dict(\n type='VOCDataset',\n ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',\n img_prefix='data/VOCdevkit/VOC2007/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(1333, 800),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ]),\n test=dict(\n type='VOCDataset',\n ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',\n img_prefix='data/VOCdevkit/VOC2007/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(1333, 800),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ]))\nevaluation = dict(interval=12000, metric='mAP', save_best='auto')\noptimizer = dict(\n type='SGD',\n lr=0.015,\n momentum=0.9,\n weight_decay=5e-05,\n paramwise_cfg=dict(bias_lr_mult=2.0, 
bias_decay_mult=0.0))\noptimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=500,\n warmup_ratio=0.001,\n step=[9000, 11000],\n by_epoch=False)\nrunner = dict(type='IterBasedRunner', max_iters=12000)\ncheckpoint_config = dict(interval=12000)\nlog_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])\ncustom_hooks = [\n dict(type='NumClassCheckHook'),\n dict(\n type='MMDetWandbHook',\n init_kwargs=dict(project='I2B', group='finetune'),\n interval=50,\n num_eval_images=0,\n log_checkpoint=False)\n]\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from = 'pretrain/selfsup_fcos_mstrain-soft-teacher_sampler-2048_temp0.5/final_model.pth'\nresume_from = None\nworkflow = [('train', 1)]\nopencv_num_threads = 0\nmp_start_method = 'fork'\nauto_scale_lr = dict(enable=False, base_batch_size=16)\ncustom_imports = None\nnorm_cfg = dict(type='SyncBN', requires_grad=True)\nwork_dir = 'work_dirs/finetune_fcos_12k_voc0712_lr1.5e-2_wd5e-5'\nauto_resume = False\ngpu_ids = range(0, 8)\n", "seed": 42, "exp_name": "fcos_mstrain_12k_voc0712.py", "hook_msgs": {}}
2
+ {"mode": "train", "epoch": 1, "iter": 50, "lr": 0.00148, "memory": 3374, "data_time": 0.00727, "loss_cls": 1.05367, "loss_bbox": 4.84913, "loss_centerness": 0.67539, "loss": 6.57819, "grad_norm": 12.3657, "time": 0.11406}
3
+ {"mode": "train", "epoch": 1, "iter": 100, "lr": 0.00298, "memory": 3374, "data_time": 0.00608, "loss_cls": 0.67905, "loss_bbox": 0.86012, "loss_centerness": 0.65732, "loss": 2.19649, "grad_norm": 4.53465, "time": 0.11025}
4
+ {"mode": "train", "epoch": 1, "iter": 150, "lr": 0.00448, "memory": 3374, "data_time": 0.00605, "loss_cls": 0.53331, "loss_bbox": 0.7146, "loss_centerness": 0.64282, "loss": 1.89073, "grad_norm": 5.21603, "time": 0.11219}
5
+ {"mode": "train", "epoch": 1, "iter": 200, "lr": 0.00598, "memory": 3374, "data_time": 0.00584, "loss_cls": 0.47495, "loss_bbox": 0.73378, "loss_centerness": 0.63228, "loss": 1.841, "grad_norm": 7.339, "time": 0.10665}
6
+ {"mode": "train", "epoch": 1, "iter": 250, "lr": 0.00748, "memory": 3374, "data_time": 0.00619, "loss_cls": 0.45914, "loss_bbox": 0.63532, "loss_centerness": 0.6251, "loss": 1.71957, "grad_norm": 6.49574, "time": 0.10468}
7
+ {"mode": "train", "epoch": 1, "iter": 300, "lr": 0.00898, "memory": 3374, "data_time": 0.0066, "loss_cls": 0.42725, "loss_bbox": 0.6177, "loss_centerness": 0.62114, "loss": 1.66609, "grad_norm": 6.48983, "time": 0.11211}
8
+ {"mode": "train", "epoch": 1, "iter": 350, "lr": 0.01047, "memory": 3374, "data_time": 0.00592, "loss_cls": 0.44549, "loss_bbox": 0.6623, "loss_centerness": 0.61994, "loss": 1.72773, "grad_norm": 6.3315, "time": 0.11141}
9
+ {"mode": "train", "epoch": 1, "iter": 400, "lr": 0.01197, "memory": 3374, "data_time": 0.00627, "loss_cls": 0.42264, "loss_bbox": 0.56427, "loss_centerness": 0.62219, "loss": 1.60911, "grad_norm": 4.80269, "time": 0.11151}
10
+ {"mode": "train", "epoch": 1, "iter": 450, "lr": 0.01347, "memory": 3374, "data_time": 0.00618, "loss_cls": 0.41305, "loss_bbox": 0.67455, "loss_centerness": 0.61906, "loss": 1.70666, "grad_norm": 6.16443, "time": 0.11086}
11
+ {"mode": "train", "epoch": 1, "iter": 500, "lr": 0.01497, "memory": 3374, "data_time": 0.00619, "loss_cls": 0.38544, "loss_bbox": 0.60339, "loss_centerness": 0.61639, "loss": 1.60522, "grad_norm": 5.11566, "time": 0.13587}
12
+ {"mode": "train", "epoch": 1, "iter": 550, "lr": 0.015, "memory": 3374, "data_time": 0.0063, "loss_cls": 0.381, "loss_bbox": 0.60264, "loss_centerness": 0.61482, "loss": 1.59846, "grad_norm": 5.25467, "time": 0.10278}
13
+ {"mode": "train", "epoch": 1, "iter": 600, "lr": 0.015, "memory": 3374, "data_time": 0.00614, "loss_cls": 0.37585, "loss_bbox": 0.60326, "loss_centerness": 0.6159, "loss": 1.59501, "grad_norm": 5.41321, "time": 0.10429}
14
+ {"mode": "train", "epoch": 1, "iter": 650, "lr": 0.015, "memory": 3374, "data_time": 0.00624, "loss_cls": 0.35361, "loss_bbox": 0.55519, "loss_centerness": 0.61445, "loss": 1.52325, "grad_norm": 4.64388, "time": 0.1058}
15
+ {"mode": "train", "epoch": 1, "iter": 700, "lr": 0.015, "memory": 3374, "data_time": 0.00626, "loss_cls": 0.36112, "loss_bbox": 0.54957, "loss_centerness": 0.61232, "loss": 1.52301, "grad_norm": 4.99818, "time": 0.10405}
16
+ {"mode": "train", "epoch": 1, "iter": 750, "lr": 0.015, "memory": 3374, "data_time": 0.00619, "loss_cls": 0.36281, "loss_bbox": 0.64551, "loss_centerness": 0.61243, "loss": 1.62075, "grad_norm": 5.64236, "time": 0.10535}
17
+ {"mode": "train", "epoch": 1, "iter": 800, "lr": 0.015, "memory": 3374, "data_time": 0.00618, "loss_cls": 0.35247, "loss_bbox": 0.55278, "loss_centerness": 0.61134, "loss": 1.51659, "grad_norm": 4.67419, "time": 0.10637}
18
+ {"mode": "train", "epoch": 1, "iter": 850, "lr": 0.015, "memory": 3374, "data_time": 0.00623, "loss_cls": 0.35056, "loss_bbox": 0.57767, "loss_centerness": 0.61117, "loss": 1.5394, "grad_norm": 5.07766, "time": 0.10093}
19
+ {"mode": "train", "epoch": 1, "iter": 900, "lr": 0.015, "memory": 3374, "data_time": 0.00627, "loss_cls": 0.33146, "loss_bbox": 0.65489, "loss_centerness": 0.60925, "loss": 1.59559, "grad_norm": 5.14782, "time": 0.10501}
20
+ {"mode": "train", "epoch": 1, "iter": 950, "lr": 0.015, "memory": 3374, "data_time": 0.0062, "loss_cls": 0.33895, "loss_bbox": 0.55966, "loss_centerness": 0.60947, "loss": 1.50809, "grad_norm": 4.31073, "time": 0.10629}
21
+ {"mode": "train", "epoch": 1, "iter": 1000, "lr": 0.015, "memory": 3374, "data_time": 0.006, "loss_cls": 0.32771, "loss_bbox": 0.48808, "loss_centerness": 0.6082, "loss": 1.424, "grad_norm": 4.37789, "time": 0.10066}
22
+ {"mode": "train", "epoch": 1, "iter": 1050, "lr": 0.015, "memory": 3374, "data_time": 0.00609, "loss_cls": 0.3301, "loss_bbox": 0.51401, "loss_centerness": 0.60978, "loss": 1.45389, "grad_norm": 4.59589, "time": 0.10133}
23
+ {"mode": "train", "epoch": 1, "iter": 1100, "lr": 0.015, "memory": 3374, "data_time": 0.00617, "loss_cls": 0.31499, "loss_bbox": 0.50677, "loss_centerness": 0.60696, "loss": 1.42873, "grad_norm": 4.32437, "time": 0.10732}
24
+ {"mode": "train", "epoch": 1, "iter": 1150, "lr": 0.015, "memory": 3374, "data_time": 0.00628, "loss_cls": 0.31626, "loss_bbox": 0.5167, "loss_centerness": 0.60705, "loss": 1.44001, "grad_norm": 4.76207, "time": 0.10458}
25
+ {"mode": "train", "epoch": 1, "iter": 1200, "lr": 0.015, "memory": 3374, "data_time": 0.00621, "loss_cls": 0.30538, "loss_bbox": 0.47383, "loss_centerness": 0.60626, "loss": 1.38547, "grad_norm": 3.78477, "time": 0.10769}
26
+ {"mode": "train", "epoch": 1, "iter": 1250, "lr": 0.015, "memory": 3374, "data_time": 0.00626, "loss_cls": 0.29819, "loss_bbox": 0.55634, "loss_centerness": 0.60804, "loss": 1.46257, "grad_norm": 5.41641, "time": 0.10109}
27
+ {"mode": "train", "epoch": 1, "iter": 1300, "lr": 0.015, "memory": 3374, "data_time": 0.00594, "loss_cls": 0.29594, "loss_bbox": 0.47493, "loss_centerness": 0.60465, "loss": 1.37552, "grad_norm": 4.03944, "time": 0.10683}
28
+ {"mode": "train", "epoch": 1, "iter": 1350, "lr": 0.015, "memory": 3374, "data_time": 0.00597, "loss_cls": 0.30646, "loss_bbox": 0.43001, "loss_centerness": 0.60606, "loss": 1.34253, "grad_norm": 3.69153, "time": 0.10547}
29
+ {"mode": "train", "epoch": 1, "iter": 1400, "lr": 0.015, "memory": 3374, "data_time": 0.00608, "loss_cls": 0.31884, "loss_bbox": 0.45126, "loss_centerness": 0.60683, "loss": 1.37693, "grad_norm": 3.72633, "time": 0.09996}
30
+ {"mode": "train", "epoch": 1, "iter": 1450, "lr": 0.015, "memory": 3374, "data_time": 0.00601, "loss_cls": 0.30405, "loss_bbox": 0.47756, "loss_centerness": 0.60425, "loss": 1.38586, "grad_norm": 4.17673, "time": 0.10745}
31
+ {"mode": "train", "epoch": 1, "iter": 1500, "lr": 0.015, "memory": 3374, "data_time": 0.00632, "loss_cls": 0.29778, "loss_bbox": 0.47997, "loss_centerness": 0.6047, "loss": 1.38245, "grad_norm": 4.11991, "time": 0.10741}
32
+ {"mode": "train", "epoch": 1, "iter": 1550, "lr": 0.015, "memory": 3374, "data_time": 0.00632, "loss_cls": 0.29752, "loss_bbox": 0.44845, "loss_centerness": 0.60251, "loss": 1.34848, "grad_norm": 3.94515, "time": 0.1065}
33
+ {"mode": "train", "epoch": 1, "iter": 1600, "lr": 0.015, "memory": 3374, "data_time": 0.00626, "loss_cls": 0.28853, "loss_bbox": 0.437, "loss_centerness": 0.60482, "loss": 1.33036, "grad_norm": 3.39618, "time": 0.10835}
34
+ {"mode": "train", "epoch": 1, "iter": 1650, "lr": 0.015, "memory": 3374, "data_time": 0.00608, "loss_cls": 0.28435, "loss_bbox": 0.41355, "loss_centerness": 0.60592, "loss": 1.30381, "grad_norm": 3.25428, "time": 0.11275}
35
+ {"mode": "train", "epoch": 1, "iter": 1700, "lr": 0.015, "memory": 3374, "data_time": 0.00616, "loss_cls": 0.2856, "loss_bbox": 0.41199, "loss_centerness": 0.60287, "loss": 1.30046, "grad_norm": 3.12621, "time": 0.11362}
36
+ {"mode": "train", "epoch": 1, "iter": 1750, "lr": 0.015, "memory": 3374, "data_time": 0.0061, "loss_cls": 0.28613, "loss_bbox": 0.4194, "loss_centerness": 0.60157, "loss": 1.30711, "grad_norm": 3.39995, "time": 0.10424}
37
+ {"mode": "train", "epoch": 1, "iter": 1800, "lr": 0.015, "memory": 3374, "data_time": 0.0062, "loss_cls": 0.28919, "loss_bbox": 0.39617, "loss_centerness": 0.60398, "loss": 1.28934, "grad_norm": 2.88392, "time": 0.10162}
38
+ {"mode": "train", "epoch": 1, "iter": 1850, "lr": 0.015, "memory": 3374, "data_time": 0.00605, "loss_cls": 0.27388, "loss_bbox": 0.37082, "loss_centerness": 0.6029, "loss": 1.2476, "grad_norm": 2.62123, "time": 0.10661}
39
+ {"mode": "train", "epoch": 1, "iter": 1900, "lr": 0.015, "memory": 3374, "data_time": 0.00602, "loss_cls": 0.2914, "loss_bbox": 0.4107, "loss_centerness": 0.60497, "loss": 1.30707, "grad_norm": 3.12365, "time": 0.10292}
40
+ {"mode": "train", "epoch": 1, "iter": 1950, "lr": 0.015, "memory": 3374, "data_time": 0.00614, "loss_cls": 0.27951, "loss_bbox": 0.40965, "loss_centerness": 0.60223, "loss": 1.29139, "grad_norm": 3.06282, "time": 0.10253}
41
+ {"mode": "train", "epoch": 1, "iter": 2000, "lr": 0.015, "memory": 3374, "data_time": 0.00614, "loss_cls": 0.28046, "loss_bbox": 0.38076, "loss_centerness": 0.60054, "loss": 1.26176, "grad_norm": 2.81525, "time": 0.1011}
42
+ {"mode": "train", "epoch": 1, "iter": 2050, "lr": 0.015, "memory": 3374, "data_time": 0.00623, "loss_cls": 0.27724, "loss_bbox": 0.38601, "loss_centerness": 0.59912, "loss": 1.26237, "grad_norm": 2.93185, "time": 0.10064}
43
+ {"mode": "train", "epoch": 1, "iter": 2100, "lr": 0.015, "memory": 3374, "data_time": 0.00635, "loss_cls": 0.27031, "loss_bbox": 0.38604, "loss_centerness": 0.60149, "loss": 1.25784, "grad_norm": 2.66497, "time": 0.10436}
44
+ {"mode": "train", "epoch": 1, "iter": 2150, "lr": 0.015, "memory": 3374, "data_time": 0.00621, "loss_cls": 0.25687, "loss_bbox": 0.36319, "loss_centerness": 0.59932, "loss": 1.21938, "grad_norm": 2.68911, "time": 0.10183}
45
+ {"mode": "train", "epoch": 1, "iter": 2200, "lr": 0.015, "memory": 3374, "data_time": 0.00618, "loss_cls": 0.26823, "loss_bbox": 0.37592, "loss_centerness": 0.60088, "loss": 1.24503, "grad_norm": 2.73337, "time": 0.10048}
46
+ {"mode": "train", "epoch": 1, "iter": 2250, "lr": 0.015, "memory": 3375, "data_time": 0.00599, "loss_cls": 0.25893, "loss_bbox": 0.36243, "loss_centerness": 0.59845, "loss": 1.21981, "grad_norm": 2.76414, "time": 0.10227}
47
+ {"mode": "train", "epoch": 1, "iter": 2300, "lr": 0.015, "memory": 3375, "data_time": 0.00614, "loss_cls": 0.26192, "loss_bbox": 0.38052, "loss_centerness": 0.59894, "loss": 1.24137, "grad_norm": 2.82893, "time": 0.10835}
48
+ {"mode": "train", "epoch": 1, "iter": 2350, "lr": 0.015, "memory": 3375, "data_time": 0.00608, "loss_cls": 0.27182, "loss_bbox": 0.37517, "loss_centerness": 0.60014, "loss": 1.24714, "grad_norm": 2.74275, "time": 0.10317}
49
+ {"mode": "train", "epoch": 1, "iter": 2400, "lr": 0.015, "memory": 3375, "data_time": 0.00614, "loss_cls": 0.26396, "loss_bbox": 0.36376, "loss_centerness": 0.59972, "loss": 1.22744, "grad_norm": 2.51831, "time": 0.10111}
50
+ {"mode": "train", "epoch": 1, "iter": 2450, "lr": 0.015, "memory": 3375, "data_time": 0.00591, "loss_cls": 0.24635, "loss_bbox": 0.35507, "loss_centerness": 0.6002, "loss": 1.20163, "grad_norm": 2.47139, "time": 0.10065}
51
+ {"mode": "train", "epoch": 1, "iter": 2500, "lr": 0.015, "memory": 3375, "data_time": 0.0062, "loss_cls": 0.26106, "loss_bbox": 0.37758, "loss_centerness": 0.60074, "loss": 1.23937, "grad_norm": 2.76157, "time": 0.10543}
52
+ {"mode": "train", "epoch": 1, "iter": 2550, "lr": 0.015, "memory": 3375, "data_time": 0.00618, "loss_cls": 0.25576, "loss_bbox": 0.37056, "loss_centerness": 0.60097, "loss": 1.2273, "grad_norm": 2.66215, "time": 0.10682}
53
+ {"mode": "train", "epoch": 1, "iter": 2600, "lr": 0.015, "memory": 3375, "data_time": 0.00608, "loss_cls": 0.25285, "loss_bbox": 0.37179, "loss_centerness": 0.60085, "loss": 1.22549, "grad_norm": 2.66622, "time": 0.10757}
54
+ {"mode": "train", "epoch": 1, "iter": 2650, "lr": 0.015, "memory": 3375, "data_time": 0.00612, "loss_cls": 0.25713, "loss_bbox": 0.35402, "loss_centerness": 0.59926, "loss": 1.21041, "grad_norm": 2.6098, "time": 0.10399}
55
+ {"mode": "train", "epoch": 1, "iter": 2700, "lr": 0.015, "memory": 3375, "data_time": 0.00626, "loss_cls": 0.24883, "loss_bbox": 0.36702, "loss_centerness": 0.59797, "loss": 1.21381, "grad_norm": 2.75112, "time": 0.10203}
56
+ {"mode": "train", "epoch": 1, "iter": 2750, "lr": 0.015, "memory": 3375, "data_time": 0.00595, "loss_cls": 0.28755, "loss_bbox": 0.35672, "loss_centerness": 0.59744, "loss": 1.24171, "grad_norm": 2.85178, "time": 0.10774}
57
+ {"mode": "train", "epoch": 1, "iter": 2800, "lr": 0.015, "memory": 3375, "data_time": 0.00605, "loss_cls": 0.25995, "loss_bbox": 0.37229, "loss_centerness": 0.59889, "loss": 1.23112, "grad_norm": 2.7242, "time": 0.11054}
58
+ {"mode": "train", "epoch": 1, "iter": 2850, "lr": 0.015, "memory": 3375, "data_time": 0.00625, "loss_cls": 0.26379, "loss_bbox": 0.36059, "loss_centerness": 0.60115, "loss": 1.22553, "grad_norm": 2.59911, "time": 0.10775}
59
+ {"mode": "train", "epoch": 1, "iter": 2900, "lr": 0.015, "memory": 3375, "data_time": 0.00613, "loss_cls": 0.26577, "loss_bbox": 0.35956, "loss_centerness": 0.59903, "loss": 1.22436, "grad_norm": 2.63438, "time": 0.1037}
60
+ {"mode": "train", "epoch": 1, "iter": 2950, "lr": 0.015, "memory": 3375, "data_time": 0.00634, "loss_cls": 0.26231, "loss_bbox": 0.36625, "loss_centerness": 0.59943, "loss": 1.22799, "grad_norm": 2.54951, "time": 0.10977}
61
+ {"mode": "train", "epoch": 1, "iter": 3000, "lr": 0.015, "memory": 3375, "data_time": 0.00638, "loss_cls": 0.24668, "loss_bbox": 0.3484, "loss_centerness": 0.59853, "loss": 1.19361, "grad_norm": 2.38182, "time": 0.105}
62
+ {"mode": "train", "epoch": 1, "iter": 3050, "lr": 0.015, "memory": 3375, "data_time": 0.00619, "loss_cls": 0.2489, "loss_bbox": 0.3615, "loss_centerness": 0.59859, "loss": 1.20899, "grad_norm": 2.60184, "time": 0.1013}
63
+ {"mode": "train", "epoch": 1, "iter": 3100, "lr": 0.015, "memory": 3375, "data_time": 0.00604, "loss_cls": 0.23851, "loss_bbox": 0.35379, "loss_centerness": 0.59812, "loss": 1.19042, "grad_norm": 2.613, "time": 0.10157}
64
+ {"mode": "train", "epoch": 1, "iter": 3150, "lr": 0.015, "memory": 3375, "data_time": 0.00605, "loss_cls": 0.22975, "loss_bbox": 0.3453, "loss_centerness": 0.59729, "loss": 1.17234, "grad_norm": 2.51127, "time": 0.10737}
65
+ {"mode": "train", "epoch": 1, "iter": 3200, "lr": 0.015, "memory": 3375, "data_time": 0.0063, "loss_cls": 0.24802, "loss_bbox": 0.34935, "loss_centerness": 0.59841, "loss": 1.19579, "grad_norm": 2.55473, "time": 0.10758}
66
+ {"mode": "train", "epoch": 1, "iter": 3250, "lr": 0.015, "memory": 3375, "data_time": 0.00615, "loss_cls": 0.24478, "loss_bbox": 0.33711, "loss_centerness": 0.59891, "loss": 1.18081, "grad_norm": 2.45956, "time": 0.10608}
67
+ {"mode": "train", "epoch": 1, "iter": 3300, "lr": 0.015, "memory": 3375, "data_time": 0.0062, "loss_cls": 0.25464, "loss_bbox": 0.36066, "loss_centerness": 0.59882, "loss": 1.21413, "grad_norm": 2.71043, "time": 0.10178}
68
+ {"mode": "train", "epoch": 1, "iter": 3350, "lr": 0.015, "memory": 3375, "data_time": 0.00588, "loss_cls": 0.23852, "loss_bbox": 0.33813, "loss_centerness": 0.59717, "loss": 1.17381, "grad_norm": 2.42068, "time": 0.10128}
69
+ {"mode": "train", "epoch": 1, "iter": 3400, "lr": 0.015, "memory": 3375, "data_time": 0.00626, "loss_cls": 0.23614, "loss_bbox": 0.3271, "loss_centerness": 0.59594, "loss": 1.15918, "grad_norm": 2.37305, "time": 0.10461}
70
+ {"mode": "train", "epoch": 1, "iter": 3450, "lr": 0.015, "memory": 3375, "data_time": 0.00629, "loss_cls": 0.24715, "loss_bbox": 0.36722, "loss_centerness": 0.59653, "loss": 1.2109, "grad_norm": 2.82987, "time": 0.10034}
71
+ {"mode": "train", "epoch": 1, "iter": 3500, "lr": 0.015, "memory": 3375, "data_time": 0.00631, "loss_cls": 0.22846, "loss_bbox": 0.33719, "loss_centerness": 0.59579, "loss": 1.16144, "grad_norm": 2.39953, "time": 0.10309}
72
+ {"mode": "train", "epoch": 1, "iter": 3550, "lr": 0.015, "memory": 3375, "data_time": 0.00644, "loss_cls": 0.23233, "loss_bbox": 0.33793, "loss_centerness": 0.59533, "loss": 1.1656, "grad_norm": 2.72938, "time": 0.10464}
73
+ {"mode": "train", "epoch": 1, "iter": 3600, "lr": 0.015, "memory": 3375, "data_time": 0.00638, "loss_cls": 0.23519, "loss_bbox": 0.3351, "loss_centerness": 0.5969, "loss": 1.16719, "grad_norm": 2.67764, "time": 0.10095}
74
+ {"mode": "train", "epoch": 1, "iter": 3650, "lr": 0.015, "memory": 3375, "data_time": 0.00621, "loss_cls": 0.24062, "loss_bbox": 0.33509, "loss_centerness": 0.59692, "loss": 1.17263, "grad_norm": 2.58796, "time": 0.10067}
75
+ {"mode": "train", "epoch": 1, "iter": 3700, "lr": 0.015, "memory": 3375, "data_time": 0.00648, "loss_cls": 0.23562, "loss_bbox": 0.32676, "loss_centerness": 0.59629, "loss": 1.15867, "grad_norm": 2.37223, "time": 0.10463}
76
+ {"mode": "train", "epoch": 1, "iter": 3750, "lr": 0.015, "memory": 3375, "data_time": 0.00626, "loss_cls": 0.23139, "loss_bbox": 0.33541, "loss_centerness": 0.59518, "loss": 1.16198, "grad_norm": 2.56868, "time": 0.10687}
77
+ {"mode": "train", "epoch": 1, "iter": 3800, "lr": 0.015, "memory": 3375, "data_time": 0.00628, "loss_cls": 0.23881, "loss_bbox": 0.33033, "loss_centerness": 0.59578, "loss": 1.16492, "grad_norm": 2.5392, "time": 0.10699}
78
+ {"mode": "train", "epoch": 1, "iter": 3850, "lr": 0.015, "memory": 3375, "data_time": 0.00654, "loss_cls": 0.23365, "loss_bbox": 0.32073, "loss_centerness": 0.59648, "loss": 1.15086, "grad_norm": 2.3348, "time": 0.10376}
79
+ {"mode": "train", "epoch": 1, "iter": 3900, "lr": 0.015, "memory": 3375, "data_time": 0.00619, "loss_cls": 0.22927, "loss_bbox": 0.32344, "loss_centerness": 0.59456, "loss": 1.14726, "grad_norm": 2.36562, "time": 0.10518}
80
+ {"mode": "train", "epoch": 1, "iter": 3950, "lr": 0.015, "memory": 3375, "data_time": 0.00635, "loss_cls": 0.22709, "loss_bbox": 0.32544, "loss_centerness": 0.59635, "loss": 1.14888, "grad_norm": 2.35018, "time": 0.10364}
81
+ {"mode": "train", "epoch": 1, "iter": 4000, "lr": 0.015, "memory": 3375, "data_time": 0.00619, "loss_cls": 0.23902, "loss_bbox": 0.34186, "loss_centerness": 0.5963, "loss": 1.17718, "grad_norm": 2.60435, "time": 0.10113}
82
+ {"mode": "train", "epoch": 1, "iter": 4050, "lr": 0.015, "memory": 3375, "data_time": 0.00624, "loss_cls": 0.23092, "loss_bbox": 0.33621, "loss_centerness": 0.5982, "loss": 1.16534, "grad_norm": 2.52537, "time": 0.10192}
83
+ {"mode": "train", "epoch": 1, "iter": 4100, "lr": 0.015, "memory": 3375, "data_time": 0.00645, "loss_cls": 0.23137, "loss_bbox": 0.34944, "loss_centerness": 0.59602, "loss": 1.17684, "grad_norm": 2.69075, "time": 0.10484}
84
+ {"mode": "train", "epoch": 1, "iter": 4150, "lr": 0.015, "memory": 3375, "data_time": 0.00637, "loss_cls": 0.22275, "loss_bbox": 0.32141, "loss_centerness": 0.59367, "loss": 1.13783, "grad_norm": 2.58436, "time": 0.1085}
85
+ {"mode": "train", "epoch": 1, "iter": 4200, "lr": 0.015, "memory": 3375, "data_time": 0.00632, "loss_cls": 0.22118, "loss_bbox": 0.31418, "loss_centerness": 0.59222, "loss": 1.12758, "grad_norm": 2.51202, "time": 0.10538}
86
+ {"mode": "train", "epoch": 1, "iter": 4250, "lr": 0.015, "memory": 3375, "data_time": 0.00639, "loss_cls": 0.22434, "loss_bbox": 0.32612, "loss_centerness": 0.5936, "loss": 1.14406, "grad_norm": 2.57291, "time": 0.10482}
87
+ {"mode": "train", "epoch": 1, "iter": 4300, "lr": 0.015, "memory": 3375, "data_time": 0.00614, "loss_cls": 0.22744, "loss_bbox": 0.31494, "loss_centerness": 0.59373, "loss": 1.13611, "grad_norm": 2.5403, "time": 0.10139}
88
+ {"mode": "train", "epoch": 1, "iter": 4350, "lr": 0.015, "memory": 3375, "data_time": 0.00614, "loss_cls": 0.23015, "loss_bbox": 0.33246, "loss_centerness": 0.5947, "loss": 1.15732, "grad_norm": 2.65875, "time": 0.10279}
89
+ {"mode": "train", "epoch": 1, "iter": 4400, "lr": 0.015, "memory": 3375, "data_time": 0.00618, "loss_cls": 0.22744, "loss_bbox": 0.32671, "loss_centerness": 0.59447, "loss": 1.14862, "grad_norm": 2.43597, "time": 0.10038}
90
+ {"mode": "train", "epoch": 1, "iter": 4450, "lr": 0.015, "memory": 3375, "data_time": 0.00603, "loss_cls": 0.218, "loss_bbox": 0.31555, "loss_centerness": 0.59422, "loss": 1.12776, "grad_norm": 2.49639, "time": 0.10003}
91
+ {"mode": "train", "epoch": 1, "iter": 4500, "lr": 0.015, "memory": 3375, "data_time": 0.00634, "loss_cls": 0.22531, "loss_bbox": 0.30833, "loss_centerness": 0.59519, "loss": 1.12883, "grad_norm": 2.39912, "time": 0.10057}
92
+ {"mode": "train", "epoch": 1, "iter": 4550, "lr": 0.015, "memory": 3375, "data_time": 0.00632, "loss_cls": 0.21511, "loss_bbox": 0.31985, "loss_centerness": 0.59535, "loss": 1.13031, "grad_norm": 2.46371, "time": 0.10075}
93
+ {"mode": "train", "epoch": 1, "iter": 4600, "lr": 0.015, "memory": 3375, "data_time": 0.00643, "loss_cls": 0.22971, "loss_bbox": 0.32132, "loss_centerness": 0.59443, "loss": 1.14545, "grad_norm": 2.58306, "time": 0.10164}
94
+ {"mode": "train", "epoch": 1, "iter": 4650, "lr": 0.015, "memory": 3375, "data_time": 0.00644, "loss_cls": 0.21877, "loss_bbox": 0.30812, "loss_centerness": 0.59245, "loss": 1.11934, "grad_norm": 2.24905, "time": 0.10133}
95
+ {"mode": "train", "epoch": 1, "iter": 4700, "lr": 0.015, "memory": 3375, "data_time": 0.00662, "loss_cls": 0.22523, "loss_bbox": 0.33003, "loss_centerness": 0.59614, "loss": 1.1514, "grad_norm": 2.55765, "time": 0.10613}
96
+ {"mode": "train", "epoch": 1, "iter": 4750, "lr": 0.015, "memory": 3375, "data_time": 0.00642, "loss_cls": 0.22336, "loss_bbox": 0.31766, "loss_centerness": 0.59504, "loss": 1.13606, "grad_norm": 2.44447, "time": 0.10157}
97
+ {"mode": "train", "epoch": 1, "iter": 4800, "lr": 0.015, "memory": 3375, "data_time": 0.00625, "loss_cls": 0.20943, "loss_bbox": 0.33077, "loss_centerness": 0.5942, "loss": 1.1344, "grad_norm": 2.5108, "time": 0.09971}
98
+ {"mode": "train", "epoch": 1, "iter": 4850, "lr": 0.015, "memory": 3375, "data_time": 0.0065, "loss_cls": 0.21797, "loss_bbox": 0.29898, "loss_centerness": 0.59347, "loss": 1.11042, "grad_norm": 2.39757, "time": 0.10149}
99
+ {"mode": "train", "epoch": 1, "iter": 4900, "lr": 0.015, "memory": 3375, "data_time": 0.00652, "loss_cls": 0.21865, "loss_bbox": 0.31082, "loss_centerness": 0.59377, "loss": 1.12324, "grad_norm": 2.43567, "time": 0.10105}
100
+ {"mode": "train", "epoch": 1, "iter": 4950, "lr": 0.015, "memory": 3375, "data_time": 0.0063, "loss_cls": 0.21673, "loss_bbox": 0.31178, "loss_centerness": 0.59391, "loss": 1.12242, "grad_norm": 2.4076, "time": 0.10326}
101
+ {"mode": "train", "epoch": 1, "iter": 5000, "lr": 0.015, "memory": 3375, "data_time": 0.00635, "loss_cls": 0.2298, "loss_bbox": 0.32477, "loss_centerness": 0.59557, "loss": 1.15014, "grad_norm": 2.58385, "time": 0.10134}
102
+ {"mode": "train", "epoch": 1, "iter": 5050, "lr": 0.015, "memory": 3375, "data_time": 0.00621, "loss_cls": 0.22325, "loss_bbox": 0.342, "loss_centerness": 0.59638, "loss": 1.16164, "grad_norm": 2.49164, "time": 0.10061}
103
+ {"mode": "train", "epoch": 1, "iter": 5100, "lr": 0.015, "memory": 3375, "data_time": 0.00609, "loss_cls": 0.21648, "loss_bbox": 0.31098, "loss_centerness": 0.59217, "loss": 1.11962, "grad_norm": 2.48281, "time": 0.10201}
104
+ {"mode": "train", "epoch": 1, "iter": 5150, "lr": 0.015, "memory": 3375, "data_time": 0.00639, "loss_cls": 0.23459, "loss_bbox": 0.32901, "loss_centerness": 0.59666, "loss": 1.16025, "grad_norm": 2.5321, "time": 0.10048}
105
+ {"mode": "train", "epoch": 1, "iter": 5200, "lr": 0.015, "memory": 3375, "data_time": 0.00627, "loss_cls": 0.21814, "loss_bbox": 0.31118, "loss_centerness": 0.59305, "loss": 1.12238, "grad_norm": 2.44123, "time": 0.09989}
106
+ {"mode": "train", "epoch": 1, "iter": 5250, "lr": 0.015, "memory": 3375, "data_time": 0.00638, "loss_cls": 0.20335, "loss_bbox": 0.29714, "loss_centerness": 0.59042, "loss": 1.0909, "grad_norm": 2.32562, "time": 0.10526}
107
+ {"mode": "train", "epoch": 1, "iter": 5300, "lr": 0.015, "memory": 3375, "data_time": 0.00632, "loss_cls": 0.21468, "loss_bbox": 0.3131, "loss_centerness": 0.5909, "loss": 1.11868, "grad_norm": 2.72161, "time": 0.10479}
108
+ {"mode": "train", "epoch": 1, "iter": 5350, "lr": 0.015, "memory": 3375, "data_time": 0.00628, "loss_cls": 0.20904, "loss_bbox": 0.32411, "loss_centerness": 0.59059, "loss": 1.12374, "grad_norm": 2.61154, "time": 0.10126}
109
+ {"mode": "train", "epoch": 1, "iter": 5400, "lr": 0.015, "memory": 3375, "data_time": 0.00622, "loss_cls": 0.19696, "loss_bbox": 0.29982, "loss_centerness": 0.59331, "loss": 1.09009, "grad_norm": 2.32762, "time": 0.1027}
110
+ {"mode": "train", "epoch": 1, "iter": 5450, "lr": 0.015, "memory": 3375, "data_time": 0.00621, "loss_cls": 0.21395, "loss_bbox": 0.31259, "loss_centerness": 0.59356, "loss": 1.1201, "grad_norm": 2.61151, "time": 0.09963}
111
+ {"mode": "train", "epoch": 1, "iter": 5500, "lr": 0.015, "memory": 3375, "data_time": 0.00613, "loss_cls": 0.21961, "loss_bbox": 0.29994, "loss_centerness": 0.5937, "loss": 1.11325, "grad_norm": 2.37175, "time": 0.10271}
112
+ {"mode": "train", "epoch": 1, "iter": 5550, "lr": 0.015, "memory": 3375, "data_time": 0.00621, "loss_cls": 0.21441, "loss_bbox": 0.30605, "loss_centerness": 0.5949, "loss": 1.11536, "grad_norm": 2.44051, "time": 0.10087}
113
+ {"mode": "train", "epoch": 1, "iter": 5600, "lr": 0.015, "memory": 3375, "data_time": 0.00653, "loss_cls": 0.21331, "loss_bbox": 0.30238, "loss_centerness": 0.59178, "loss": 1.10747, "grad_norm": 2.50337, "time": 0.101}
114
+ {"mode": "train", "epoch": 1, "iter": 5650, "lr": 0.015, "memory": 3375, "data_time": 0.00638, "loss_cls": 0.1986, "loss_bbox": 0.30236, "loss_centerness": 0.59128, "loss": 1.09224, "grad_norm": 2.39265, "time": 0.10758}
115
+ {"mode": "train", "epoch": 1, "iter": 5700, "lr": 0.015, "memory": 3375, "data_time": 0.00641, "loss_cls": 0.21615, "loss_bbox": 0.30565, "loss_centerness": 0.59437, "loss": 1.11618, "grad_norm": 2.51489, "time": 0.10799}
116
+ {"mode": "train", "epoch": 1, "iter": 5750, "lr": 0.015, "memory": 3375, "data_time": 0.00634, "loss_cls": 0.20899, "loss_bbox": 0.3121, "loss_centerness": 0.59337, "loss": 1.11446, "grad_norm": 2.37811, "time": 0.10237}
117
+ {"mode": "train", "epoch": 1, "iter": 5800, "lr": 0.015, "memory": 3375, "data_time": 0.00626, "loss_cls": 0.2121, "loss_bbox": 0.30988, "loss_centerness": 0.59221, "loss": 1.1142, "grad_norm": 2.65762, "time": 0.10201}
118
+ {"mode": "train", "epoch": 1, "iter": 5850, "lr": 0.015, "memory": 3375, "data_time": 0.00637, "loss_cls": 0.22302, "loss_bbox": 0.30368, "loss_centerness": 0.59277, "loss": 1.11948, "grad_norm": 2.56575, "time": 0.11122}
119
+ {"mode": "train", "epoch": 1, "iter": 5900, "lr": 0.015, "memory": 3375, "data_time": 0.00632, "loss_cls": 0.21231, "loss_bbox": 0.31288, "loss_centerness": 0.5938, "loss": 1.11899, "grad_norm": 2.67807, "time": 0.10453}
120
+ {"mode": "train", "epoch": 1, "iter": 5950, "lr": 0.015, "memory": 3375, "data_time": 0.00629, "loss_cls": 0.20863, "loss_bbox": 0.30254, "loss_centerness": 0.5915, "loss": 1.10267, "grad_norm": 2.42933, "time": 0.10083}
121
+ {"mode": "train", "epoch": 1, "iter": 6000, "lr": 0.015, "memory": 3375, "data_time": 0.00618, "loss_cls": 0.20635, "loss_bbox": 0.30186, "loss_centerness": 0.59259, "loss": 1.1008, "grad_norm": 2.41039, "time": 0.1015}
122
+ {"mode": "train", "epoch": 1, "iter": 6050, "lr": 0.015, "memory": 3375, "data_time": 0.00628, "loss_cls": 0.20965, "loss_bbox": 0.3115, "loss_centerness": 0.59334, "loss": 1.11449, "grad_norm": 2.45113, "time": 0.10083}
123
+ {"mode": "train", "epoch": 1, "iter": 6100, "lr": 0.015, "memory": 3375, "data_time": 0.00621, "loss_cls": 0.20693, "loss_bbox": 0.28935, "loss_centerness": 0.58975, "loss": 1.08603, "grad_norm": 2.37388, "time": 0.1005}
124
+ {"mode": "train", "epoch": 1, "iter": 6150, "lr": 0.015, "memory": 3375, "data_time": 0.00612, "loss_cls": 0.20255, "loss_bbox": 0.30394, "loss_centerness": 0.59342, "loss": 1.09991, "grad_norm": 2.42524, "time": 0.1017}
125
+ {"mode": "train", "epoch": 1, "iter": 6200, "lr": 0.015, "memory": 3375, "data_time": 0.00644, "loss_cls": 0.19824, "loss_bbox": 0.29895, "loss_centerness": 0.59232, "loss": 1.08951, "grad_norm": 2.41559, "time": 0.10242}
126
+ {"mode": "train", "epoch": 1, "iter": 6250, "lr": 0.015, "memory": 3375, "data_time": 0.00626, "loss_cls": 0.19775, "loss_bbox": 0.3022, "loss_centerness": 0.58973, "loss": 1.08968, "grad_norm": 2.52301, "time": 0.1008}
127
+ {"mode": "train", "epoch": 1, "iter": 6300, "lr": 0.015, "memory": 3375, "data_time": 0.00624, "loss_cls": 0.2001, "loss_bbox": 0.29448, "loss_centerness": 0.58922, "loss": 1.0838, "grad_norm": 2.58361, "time": 0.10622}
128
+ {"mode": "train", "epoch": 1, "iter": 6350, "lr": 0.015, "memory": 3375, "data_time": 0.00635, "loss_cls": 0.19966, "loss_bbox": 0.2807, "loss_centerness": 0.58999, "loss": 1.07034, "grad_norm": 2.31589, "time": 0.10294}
129
+ {"mode": "train", "epoch": 1, "iter": 6400, "lr": 0.015, "memory": 3375, "data_time": 0.00646, "loss_cls": 0.19457, "loss_bbox": 0.28127, "loss_centerness": 0.58938, "loss": 1.06521, "grad_norm": 2.49348, "time": 0.11093}
130
+ {"mode": "train", "epoch": 1, "iter": 6450, "lr": 0.015, "memory": 3375, "data_time": 0.00625, "loss_cls": 0.19928, "loss_bbox": 0.29511, "loss_centerness": 0.59116, "loss": 1.08555, "grad_norm": 2.39498, "time": 0.10821}
131
+ {"mode": "train", "epoch": 1, "iter": 6500, "lr": 0.015, "memory": 3375, "data_time": 0.00605, "loss_cls": 0.2053, "loss_bbox": 0.31715, "loss_centerness": 0.59323, "loss": 1.11568, "grad_norm": 2.6241, "time": 0.10467}
132
+ {"mode": "train", "epoch": 1, "iter": 6550, "lr": 0.015, "memory": 3375, "data_time": 0.00621, "loss_cls": 0.19461, "loss_bbox": 0.29334, "loss_centerness": 0.5913, "loss": 1.07924, "grad_norm": 2.34148, "time": 0.10323}
133
+ {"mode": "train", "epoch": 1, "iter": 6600, "lr": 0.015, "memory": 3375, "data_time": 0.00604, "loss_cls": 0.19686, "loss_bbox": 0.29225, "loss_centerness": 0.59004, "loss": 1.07916, "grad_norm": 2.45944, "time": 0.10042}
134
+ {"mode": "train", "epoch": 1, "iter": 6650, "lr": 0.015, "memory": 3375, "data_time": 0.00637, "loss_cls": 0.19516, "loss_bbox": 0.28764, "loss_centerness": 0.58953, "loss": 1.07233, "grad_norm": 2.45918, "time": 0.10149}
135
+ {"mode": "train", "epoch": 1, "iter": 6700, "lr": 0.015, "memory": 3375, "data_time": 0.00623, "loss_cls": 0.20365, "loss_bbox": 0.2893, "loss_centerness": 0.5903, "loss": 1.08326, "grad_norm": 2.50012, "time": 0.10117}
136
+ {"mode": "train", "epoch": 1, "iter": 6750, "lr": 0.015, "memory": 3375, "data_time": 0.00625, "loss_cls": 0.20603, "loss_bbox": 0.30809, "loss_centerness": 0.59378, "loss": 1.1079, "grad_norm": 2.4866, "time": 0.1011}
137
+ {"mode": "train", "epoch": 1, "iter": 6800, "lr": 0.015, "memory": 3375, "data_time": 0.00622, "loss_cls": 0.19311, "loss_bbox": 0.29149, "loss_centerness": 0.58915, "loss": 1.07375, "grad_norm": 2.47546, "time": 0.09993}
138
+ {"mode": "train", "epoch": 1, "iter": 6850, "lr": 0.015, "memory": 3375, "data_time": 0.00637, "loss_cls": 0.19994, "loss_bbox": 0.30404, "loss_centerness": 0.59051, "loss": 1.09448, "grad_norm": 2.52933, "time": 0.10044}
139
+ {"mode": "train", "epoch": 1, "iter": 6900, "lr": 0.015, "memory": 3375, "data_time": 0.00648, "loss_cls": 0.19046, "loss_bbox": 0.30168, "loss_centerness": 0.59178, "loss": 1.08391, "grad_norm": 2.42246, "time": 0.10186}
140
+ {"mode": "train", "epoch": 1, "iter": 6950, "lr": 0.015, "memory": 3375, "data_time": 0.00644, "loss_cls": 0.19325, "loss_bbox": 0.29945, "loss_centerness": 0.59095, "loss": 1.08365, "grad_norm": 2.4788, "time": 0.1062}
141
+ {"mode": "train", "epoch": 1, "iter": 7000, "lr": 0.015, "memory": 3375, "data_time": 0.00649, "loss_cls": 0.20445, "loss_bbox": 0.29784, "loss_centerness": 0.59176, "loss": 1.09406, "grad_norm": 2.51684, "time": 0.10123}
142
+ {"mode": "train", "epoch": 1, "iter": 7050, "lr": 0.015, "memory": 3375, "data_time": 0.00659, "loss_cls": 0.19883, "loss_bbox": 0.28902, "loss_centerness": 0.58954, "loss": 1.07738, "grad_norm": 2.40415, "time": 0.1004}
143
+ {"mode": "train", "epoch": 1, "iter": 7100, "lr": 0.015, "memory": 3375, "data_time": 0.00695, "loss_cls": 0.19991, "loss_bbox": 0.28487, "loss_centerness": 0.59112, "loss": 1.0759, "grad_norm": 2.45093, "time": 0.10247}
144
+ {"mode": "train", "epoch": 1, "iter": 7150, "lr": 0.015, "memory": 3375, "data_time": 0.00697, "loss_cls": 0.20151, "loss_bbox": 0.2874, "loss_centerness": 0.59044, "loss": 1.07934, "grad_norm": 2.38288, "time": 0.1004}
145
+ {"mode": "train", "epoch": 1, "iter": 7200, "lr": 0.015, "memory": 3375, "data_time": 0.00732, "loss_cls": 0.20318, "loss_bbox": 0.30848, "loss_centerness": 0.59152, "loss": 1.10318, "grad_norm": 2.72387, "time": 0.10161}
146
+ {"mode": "train", "epoch": 1, "iter": 7250, "lr": 0.015, "memory": 3375, "data_time": 0.00706, "loss_cls": 0.19628, "loss_bbox": 0.29361, "loss_centerness": 0.59032, "loss": 1.08022, "grad_norm": 2.45891, "time": 0.09833}
147
+ {"mode": "train", "epoch": 1, "iter": 7300, "lr": 0.015, "memory": 3375, "data_time": 0.00675, "loss_cls": 0.17899, "loss_bbox": 0.28999, "loss_centerness": 0.58813, "loss": 1.05711, "grad_norm": 2.43698, "time": 0.10685}
148
+ {"mode": "train", "epoch": 1, "iter": 7350, "lr": 0.015, "memory": 3375, "data_time": 0.00638, "loss_cls": 0.18704, "loss_bbox": 0.28119, "loss_centerness": 0.58816, "loss": 1.05639, "grad_norm": 2.47545, "time": 0.10931}
149
+ {"mode": "train", "epoch": 1, "iter": 7400, "lr": 0.015, "memory": 3375, "data_time": 0.00693, "loss_cls": 0.18828, "loss_bbox": 0.28777, "loss_centerness": 0.58763, "loss": 1.06367, "grad_norm": 2.41534, "time": 0.10032}
150
+ {"mode": "train", "epoch": 1, "iter": 7450, "lr": 0.015, "memory": 3375, "data_time": 0.00654, "loss_cls": 0.19165, "loss_bbox": 0.28112, "loss_centerness": 0.59074, "loss": 1.0635, "grad_norm": 2.35038, "time": 0.10206}
151
+ {"mode": "train", "epoch": 1, "iter": 7500, "lr": 0.015, "memory": 3375, "data_time": 0.00657, "loss_cls": 0.19602, "loss_bbox": 0.28633, "loss_centerness": 0.59084, "loss": 1.0732, "grad_norm": 2.43617, "time": 0.10084}
152
+ {"mode": "train", "epoch": 1, "iter": 7550, "lr": 0.015, "memory": 3375, "data_time": 0.00682, "loss_cls": 0.18683, "loss_bbox": 0.28579, "loss_centerness": 0.58722, "loss": 1.05984, "grad_norm": 2.59111, "time": 0.1075}
153
+ {"mode": "train", "epoch": 1, "iter": 7600, "lr": 0.015, "memory": 3375, "data_time": 0.00645, "loss_cls": 0.1832, "loss_bbox": 0.28496, "loss_centerness": 0.58912, "loss": 1.05729, "grad_norm": 2.47794, "time": 0.10194}
154
+ {"mode": "train", "epoch": 1, "iter": 7650, "lr": 0.015, "memory": 3375, "data_time": 0.00696, "loss_cls": 0.18944, "loss_bbox": 0.29133, "loss_centerness": 0.58993, "loss": 1.0707, "grad_norm": 2.5428, "time": 0.10458}
155
+ {"mode": "train", "epoch": 1, "iter": 7700, "lr": 0.015, "memory": 3375, "data_time": 0.00621, "loss_cls": 0.19691, "loss_bbox": 0.28403, "loss_centerness": 0.58813, "loss": 1.06907, "grad_norm": 2.6974, "time": 0.09997}
156
+ {"mode": "train", "epoch": 1, "iter": 7750, "lr": 0.015, "memory": 3375, "data_time": 0.00631, "loss_cls": 0.19306, "loss_bbox": 0.30479, "loss_centerness": 0.59132, "loss": 1.08916, "grad_norm": 2.60589, "time": 0.10007}
157
+ {"mode": "train", "epoch": 1, "iter": 7800, "lr": 0.015, "memory": 3375, "data_time": 0.00616, "loss_cls": 0.19549, "loss_bbox": 0.29408, "loss_centerness": 0.58934, "loss": 1.07892, "grad_norm": 2.67007, "time": 0.1075}
158
+ {"mode": "train", "epoch": 1, "iter": 7850, "lr": 0.015, "memory": 3375, "data_time": 0.00622, "loss_cls": 0.18654, "loss_bbox": 0.27202, "loss_centerness": 0.58766, "loss": 1.04622, "grad_norm": 2.36205, "time": 0.10495}
159
+ {"mode": "train", "epoch": 1, "iter": 7900, "lr": 0.015, "memory": 3375, "data_time": 0.00637, "loss_cls": 0.19684, "loss_bbox": 0.29504, "loss_centerness": 0.58997, "loss": 1.08185, "grad_norm": 2.60201, "time": 0.09843}
160
+ {"mode": "train", "epoch": 1, "iter": 7950, "lr": 0.015, "memory": 3375, "data_time": 0.00647, "loss_cls": 0.19227, "loss_bbox": 0.2947, "loss_centerness": 0.58955, "loss": 1.07652, "grad_norm": 2.62747, "time": 0.10135}
161
+ {"mode": "train", "epoch": 1, "iter": 8000, "lr": 0.015, "memory": 3375, "data_time": 0.00637, "loss_cls": 0.19619, "loss_bbox": 0.28967, "loss_centerness": 0.59128, "loss": 1.07714, "grad_norm": 2.49571, "time": 0.10128}
162
+ {"mode": "train", "epoch": 1, "iter": 8050, "lr": 0.015, "memory": 3375, "data_time": 0.00641, "loss_cls": 0.1973, "loss_bbox": 0.28911, "loss_centerness": 0.59087, "loss": 1.07729, "grad_norm": 2.57755, "time": 0.10068}
163
+ {"mode": "train", "epoch": 1, "iter": 8100, "lr": 0.015, "memory": 3375, "data_time": 0.00626, "loss_cls": 0.19291, "loss_bbox": 0.28406, "loss_centerness": 0.59035, "loss": 1.06731, "grad_norm": 2.51216, "time": 0.10444}
164
+ {"mode": "train", "epoch": 1, "iter": 8150, "lr": 0.015, "memory": 3375, "data_time": 0.00647, "loss_cls": 0.19604, "loss_bbox": 0.28428, "loss_centerness": 0.58694, "loss": 1.06725, "grad_norm": 2.60108, "time": 0.10298}
165
+ {"mode": "train", "epoch": 1, "iter": 8200, "lr": 0.015, "memory": 3375, "data_time": 0.0063, "loss_cls": 0.19074, "loss_bbox": 0.28775, "loss_centerness": 0.59065, "loss": 1.06914, "grad_norm": 2.40589, "time": 0.10077}
166
+ {"mode": "train", "epoch": 1, "iter": 8250, "lr": 0.015, "memory": 3375, "data_time": 0.00627, "loss_cls": 0.19729, "loss_bbox": 0.2953, "loss_centerness": 0.59073, "loss": 1.08333, "grad_norm": 2.36902, "time": 0.10143}
167
+ {"mode": "train", "epoch": 1, "iter": 8300, "lr": 0.015, "memory": 3375, "data_time": 0.00607, "loss_cls": 0.1849, "loss_bbox": 0.29715, "loss_centerness": 0.58954, "loss": 1.07159, "grad_norm": 2.51339, "time": 0.09932}
168
+ {"mode": "train", "epoch": 1, "iter": 8350, "lr": 0.015, "memory": 3375, "data_time": 0.0063, "loss_cls": 0.1811, "loss_bbox": 0.26724, "loss_centerness": 0.58554, "loss": 1.03388, "grad_norm": 2.42606, "time": 0.10002}
169
+ {"mode": "train", "epoch": 1, "iter": 8400, "lr": 0.015, "memory": 3375, "data_time": 0.00627, "loss_cls": 0.1758, "loss_bbox": 0.28332, "loss_centerness": 0.58819, "loss": 1.04732, "grad_norm": 2.60223, "time": 0.10039}
170
+ {"mode": "train", "epoch": 1, "iter": 8450, "lr": 0.015, "memory": 3375, "data_time": 0.00612, "loss_cls": 0.1811, "loss_bbox": 0.27624, "loss_centerness": 0.5893, "loss": 1.04665, "grad_norm": 2.30193, "time": 0.10162}
171
+ {"mode": "train", "epoch": 1, "iter": 8500, "lr": 0.015, "memory": 3375, "data_time": 0.0063, "loss_cls": 0.18271, "loss_bbox": 0.30111, "loss_centerness": 0.58932, "loss": 1.07314, "grad_norm": 2.54934, "time": 0.1001}
172
+ {"mode": "train", "epoch": 1, "iter": 8550, "lr": 0.015, "memory": 3375, "data_time": 0.00622, "loss_cls": 0.17741, "loss_bbox": 0.27182, "loss_centerness": 0.58657, "loss": 1.0358, "grad_norm": 2.50591, "time": 0.1023}
173
+ {"mode": "train", "epoch": 1, "iter": 8600, "lr": 0.015, "memory": 3375, "data_time": 0.00605, "loss_cls": 0.18075, "loss_bbox": 0.28219, "loss_centerness": 0.58735, "loss": 1.05028, "grad_norm": 2.50633, "time": 0.10505}
174
+ {"mode": "train", "epoch": 1, "iter": 8650, "lr": 0.015, "memory": 3375, "data_time": 0.00616, "loss_cls": 0.18326, "loss_bbox": 0.28263, "loss_centerness": 0.58868, "loss": 1.05457, "grad_norm": 2.5176, "time": 0.10706}
175
+ {"mode": "train", "epoch": 1, "iter": 8700, "lr": 0.015, "memory": 3375, "data_time": 0.00623, "loss_cls": 0.18959, "loss_bbox": 0.27692, "loss_centerness": 0.58858, "loss": 1.05509, "grad_norm": 2.46866, "time": 0.10401}
176
+ {"mode": "train", "epoch": 1, "iter": 8750, "lr": 0.015, "memory": 3375, "data_time": 0.00624, "loss_cls": 0.18009, "loss_bbox": 0.27933, "loss_centerness": 0.58975, "loss": 1.04916, "grad_norm": 2.34117, "time": 0.10067}
177
+ {"mode": "train", "epoch": 1, "iter": 8800, "lr": 0.015, "memory": 3375, "data_time": 0.00635, "loss_cls": 0.175, "loss_bbox": 0.26839, "loss_centerness": 0.58756, "loss": 1.03096, "grad_norm": 2.43363, "time": 0.10091}
178
+ {"mode": "train", "epoch": 1, "iter": 8850, "lr": 0.015, "memory": 3375, "data_time": 0.00627, "loss_cls": 0.18094, "loss_bbox": 0.28168, "loss_centerness": 0.58801, "loss": 1.05063, "grad_norm": 2.63755, "time": 0.10043}
179
+ {"mode": "train", "epoch": 1, "iter": 8900, "lr": 0.015, "memory": 3375, "data_time": 0.00621, "loss_cls": 0.18354, "loss_bbox": 0.28015, "loss_centerness": 0.58874, "loss": 1.05243, "grad_norm": 2.51529, "time": 0.10083}
180
+ {"mode": "train", "epoch": 1, "iter": 8950, "lr": 0.015, "memory": 3375, "data_time": 0.00604, "loss_cls": 0.18504, "loss_bbox": 0.27912, "loss_centerness": 0.58981, "loss": 1.05398, "grad_norm": 2.49768, "time": 0.10448}
181
+ {"mode": "train", "epoch": 1, "iter": 9000, "lr": 0.015, "memory": 3375, "data_time": 0.00618, "loss_cls": 0.18728, "loss_bbox": 0.28509, "loss_centerness": 0.59104, "loss": 1.06341, "grad_norm": 2.33872, "time": 0.10424}
182
+ {"mode": "train", "epoch": 1, "iter": 9050, "lr": 0.0015, "memory": 3375, "data_time": 0.00643, "loss_cls": 0.16358, "loss_bbox": 0.24606, "loss_centerness": 0.58728, "loss": 0.99691, "grad_norm": 1.98953, "time": 0.10584}
183
+ {"mode": "train", "epoch": 1, "iter": 9100, "lr": 0.0015, "memory": 3375, "data_time": 0.00629, "loss_cls": 0.15721, "loss_bbox": 0.23867, "loss_centerness": 0.58531, "loss": 0.98119, "grad_norm": 1.83553, "time": 0.10281}
184
+ {"mode": "train", "epoch": 1, "iter": 9150, "lr": 0.0015, "memory": 3375, "data_time": 0.00632, "loss_cls": 0.16164, "loss_bbox": 0.23974, "loss_centerness": 0.58596, "loss": 0.98733, "grad_norm": 1.8689, "time": 0.10139}
185
+ {"mode": "train", "epoch": 1, "iter": 9200, "lr": 0.0015, "memory": 3375, "data_time": 0.00626, "loss_cls": 0.15399, "loss_bbox": 0.23674, "loss_centerness": 0.58744, "loss": 0.97818, "grad_norm": 1.84704, "time": 0.10649}
186
+ {"mode": "train", "epoch": 1, "iter": 9250, "lr": 0.0015, "memory": 3375, "data_time": 0.00632, "loss_cls": 0.15113, "loss_bbox": 0.24054, "loss_centerness": 0.58539, "loss": 0.97706, "grad_norm": 1.82562, "time": 0.10748}
187
+ {"mode": "train", "epoch": 1, "iter": 9300, "lr": 0.0015, "memory": 3375, "data_time": 0.00629, "loss_cls": 0.1552, "loss_bbox": 0.22599, "loss_centerness": 0.58517, "loss": 0.96636, "grad_norm": 1.79527, "time": 0.10579}
188
+ {"mode": "train", "epoch": 1, "iter": 9350, "lr": 0.0015, "memory": 3375, "data_time": 0.0064, "loss_cls": 0.14679, "loss_bbox": 0.22591, "loss_centerness": 0.58278, "loss": 0.95548, "grad_norm": 1.85706, "time": 0.10038}
189
+ {"mode": "train", "epoch": 1, "iter": 9400, "lr": 0.0015, "memory": 3375, "data_time": 0.00652, "loss_cls": 0.14132, "loss_bbox": 0.22163, "loss_centerness": 0.58247, "loss": 0.94542, "grad_norm": 1.7513, "time": 0.10249}
190
+ {"mode": "train", "epoch": 1, "iter": 9450, "lr": 0.0015, "memory": 3375, "data_time": 0.00619, "loss_cls": 0.1407, "loss_bbox": 0.22011, "loss_centerness": 0.58083, "loss": 0.94163, "grad_norm": 1.77815, "time": 0.10036}
191
+ {"mode": "train", "epoch": 1, "iter": 9500, "lr": 0.0015, "memory": 3375, "data_time": 0.00619, "loss_cls": 0.14069, "loss_bbox": 0.21975, "loss_centerness": 0.58114, "loss": 0.94158, "grad_norm": 1.72292, "time": 0.10485}
192
+ {"mode": "train", "epoch": 1, "iter": 9550, "lr": 0.0015, "memory": 3375, "data_time": 0.00644, "loss_cls": 0.14023, "loss_bbox": 0.21662, "loss_centerness": 0.58228, "loss": 0.93914, "grad_norm": 1.79777, "time": 0.10588}
193
+ {"mode": "train", "epoch": 1, "iter": 9600, "lr": 0.0015, "memory": 3375, "data_time": 0.00626, "loss_cls": 0.13979, "loss_bbox": 0.22333, "loss_centerness": 0.5835, "loss": 0.94662, "grad_norm": 1.73949, "time": 0.10332}
194
+ {"mode": "train", "epoch": 1, "iter": 9650, "lr": 0.0015, "memory": 3375, "data_time": 0.0063, "loss_cls": 0.13098, "loss_bbox": 0.21748, "loss_centerness": 0.58405, "loss": 0.93251, "grad_norm": 1.74186, "time": 0.10056}
195
+ {"mode": "train", "epoch": 1, "iter": 9700, "lr": 0.0015, "memory": 3375, "data_time": 0.00636, "loss_cls": 0.13692, "loss_bbox": 0.21515, "loss_centerness": 0.58276, "loss": 0.93483, "grad_norm": 1.7701, "time": 0.105}
196
+ {"mode": "train", "epoch": 1, "iter": 9750, "lr": 0.0015, "memory": 3375, "data_time": 0.00628, "loss_cls": 0.13946, "loss_bbox": 0.21226, "loss_centerness": 0.58254, "loss": 0.93427, "grad_norm": 1.88229, "time": 0.10588}
197
+ {"mode": "train", "epoch": 1, "iter": 9800, "lr": 0.0015, "memory": 3375, "data_time": 0.00624, "loss_cls": 0.14004, "loss_bbox": 0.21601, "loss_centerness": 0.58294, "loss": 0.939, "grad_norm": 1.75304, "time": 0.10209}
198
+ {"mode": "train", "epoch": 1, "iter": 9850, "lr": 0.0015, "memory": 3375, "data_time": 0.00625, "loss_cls": 0.14022, "loss_bbox": 0.21624, "loss_centerness": 0.58291, "loss": 0.93936, "grad_norm": 1.83872, "time": 0.10009}
199
+ {"mode": "train", "epoch": 1, "iter": 9900, "lr": 0.0015, "memory": 3375, "data_time": 0.00632, "loss_cls": 0.13774, "loss_bbox": 0.22411, "loss_centerness": 0.58401, "loss": 0.94586, "grad_norm": 1.79141, "time": 0.10065}
200
+ {"mode": "train", "epoch": 1, "iter": 9950, "lr": 0.0015, "memory": 3375, "data_time": 0.00628, "loss_cls": 0.13736, "loss_bbox": 0.22349, "loss_centerness": 0.58381, "loss": 0.94467, "grad_norm": 1.76524, "time": 0.10369}
201
+ {"mode": "train", "epoch": 1, "iter": 10000, "lr": 0.0015, "memory": 3375, "data_time": 0.00614, "loss_cls": 0.13868, "loss_bbox": 0.21526, "loss_centerness": 0.5808, "loss": 0.93474, "grad_norm": 1.80581, "time": 0.11126}
202
+ {"mode": "train", "epoch": 1, "iter": 10050, "lr": 0.0015, "memory": 3375, "data_time": 0.0064, "loss_cls": 0.13944, "loss_bbox": 0.21702, "loss_centerness": 0.5833, "loss": 0.93976, "grad_norm": 1.879, "time": 0.10721}
203
+ {"mode": "train", "epoch": 1, "iter": 10100, "lr": 0.0015, "memory": 3375, "data_time": 0.00624, "loss_cls": 0.13971, "loss_bbox": 0.22227, "loss_centerness": 0.58484, "loss": 0.94681, "grad_norm": 1.84002, "time": 0.10742}
204
+ {"mode": "train", "epoch": 1, "iter": 10150, "lr": 0.0015, "memory": 3375, "data_time": 0.00626, "loss_cls": 0.13419, "loss_bbox": 0.21528, "loss_centerness": 0.58264, "loss": 0.9321, "grad_norm": 1.78602, "time": 0.10808}
205
+ {"mode": "train", "epoch": 1, "iter": 10200, "lr": 0.0015, "memory": 3375, "data_time": 0.00636, "loss_cls": 0.1382, "loss_bbox": 0.21649, "loss_centerness": 0.58285, "loss": 0.93753, "grad_norm": 1.83511, "time": 0.10707}
206
+ {"mode": "train", "epoch": 1, "iter": 10250, "lr": 0.0015, "memory": 3375, "data_time": 0.00659, "loss_cls": 0.13603, "loss_bbox": 0.21641, "loss_centerness": 0.58221, "loss": 0.93465, "grad_norm": 1.80781, "time": 0.10173}
207
+ {"mode": "train", "epoch": 1, "iter": 10300, "lr": 0.0015, "memory": 3375, "data_time": 0.00623, "loss_cls": 0.13807, "loss_bbox": 0.21937, "loss_centerness": 0.58326, "loss": 0.9407, "grad_norm": 1.81499, "time": 0.10216}
208
+ {"mode": "train", "epoch": 1, "iter": 10350, "lr": 0.0015, "memory": 3375, "data_time": 0.00631, "loss_cls": 0.14088, "loss_bbox": 0.22011, "loss_centerness": 0.5846, "loss": 0.9456, "grad_norm": 1.86602, "time": 0.10176}
209
+ {"mode": "train", "epoch": 1, "iter": 10400, "lr": 0.0015, "memory": 3375, "data_time": 0.00636, "loss_cls": 0.13596, "loss_bbox": 0.21764, "loss_centerness": 0.58213, "loss": 0.93573, "grad_norm": 1.81047, "time": 0.10042}
210
+ {"mode": "train", "epoch": 1, "iter": 10450, "lr": 0.0015, "memory": 3375, "data_time": 0.0062, "loss_cls": 0.13215, "loss_bbox": 0.20892, "loss_centerness": 0.58279, "loss": 0.92385, "grad_norm": 1.77483, "time": 0.0999}
211
+ {"mode": "train", "epoch": 1, "iter": 10500, "lr": 0.0015, "memory": 3375, "data_time": 0.00652, "loss_cls": 0.13633, "loss_bbox": 0.22661, "loss_centerness": 0.58533, "loss": 0.94826, "grad_norm": 1.7853, "time": 0.10076}
212
+ {"mode": "train", "epoch": 1, "iter": 10550, "lr": 0.0015, "memory": 3375, "data_time": 0.00654, "loss_cls": 0.13431, "loss_bbox": 0.21272, "loss_centerness": 0.58198, "loss": 0.92901, "grad_norm": 1.8488, "time": 0.10065}
213
+ {"mode": "train", "epoch": 1, "iter": 10600, "lr": 0.0015, "memory": 3375, "data_time": 0.00618, "loss_cls": 0.13136, "loss_bbox": 0.21154, "loss_centerness": 0.58173, "loss": 0.92463, "grad_norm": 1.87392, "time": 0.10172}
214
+ {"mode": "train", "epoch": 1, "iter": 10650, "lr": 0.0015, "memory": 3375, "data_time": 0.00611, "loss_cls": 0.13259, "loss_bbox": 0.21336, "loss_centerness": 0.58311, "loss": 0.92906, "grad_norm": 1.84798, "time": 0.10107}
215
+ {"mode": "train", "epoch": 1, "iter": 10700, "lr": 0.0015, "memory": 3375, "data_time": 0.0063, "loss_cls": 0.13479, "loss_bbox": 0.21621, "loss_centerness": 0.5823, "loss": 0.9333, "grad_norm": 1.81503, "time": 0.10189}
216
+ {"mode": "train", "epoch": 1, "iter": 10750, "lr": 0.0015, "memory": 3375, "data_time": 0.00616, "loss_cls": 0.1357, "loss_bbox": 0.20975, "loss_centerness": 0.5817, "loss": 0.92716, "grad_norm": 1.83638, "time": 0.10515}
217
+ {"mode": "train", "epoch": 1, "iter": 10800, "lr": 0.0015, "memory": 3375, "data_time": 0.00619, "loss_cls": 0.13575, "loss_bbox": 0.21737, "loss_centerness": 0.58161, "loss": 0.93473, "grad_norm": 1.81943, "time": 0.10173}
218
+ {"mode": "train", "epoch": 1, "iter": 10850, "lr": 0.0015, "memory": 3375, "data_time": 0.00656, "loss_cls": 0.13402, "loss_bbox": 0.20626, "loss_centerness": 0.58094, "loss": 0.92121, "grad_norm": 1.85421, "time": 0.10636}
219
+ {"mode": "train", "epoch": 1, "iter": 10900, "lr": 0.0015, "memory": 3375, "data_time": 0.00618, "loss_cls": 0.13062, "loss_bbox": 0.2119, "loss_centerness": 0.5821, "loss": 0.92461, "grad_norm": 1.8321, "time": 0.10503}
220
+ {"mode": "train", "epoch": 1, "iter": 10950, "lr": 0.0015, "memory": 3375, "data_time": 0.00613, "loss_cls": 0.13188, "loss_bbox": 0.21462, "loss_centerness": 0.58224, "loss": 0.92875, "grad_norm": 1.82268, "time": 0.10555}
221
+ {"mode": "train", "epoch": 1, "iter": 11000, "lr": 0.0015, "memory": 3375, "data_time": 0.00601, "loss_cls": 0.13717, "loss_bbox": 0.2184, "loss_centerness": 0.58357, "loss": 0.93914, "grad_norm": 1.82401, "time": 0.09926}
222
+ {"mode": "train", "epoch": 1, "iter": 11050, "lr": 0.00015, "memory": 3375, "data_time": 0.00632, "loss_cls": 0.13248, "loss_bbox": 0.20644, "loss_centerness": 0.58149, "loss": 0.9204, "grad_norm": 1.87043, "time": 0.10316}
223
+ {"mode": "train", "epoch": 1, "iter": 11100, "lr": 0.00015, "memory": 3375, "data_time": 0.00628, "loss_cls": 0.13282, "loss_bbox": 0.21117, "loss_centerness": 0.58166, "loss": 0.92564, "grad_norm": 1.78833, "time": 0.1091}
224
+ {"mode": "train", "epoch": 1, "iter": 11150, "lr": 0.00015, "memory": 3375, "data_time": 0.00612, "loss_cls": 0.12788, "loss_bbox": 0.2029, "loss_centerness": 0.58191, "loss": 0.91269, "grad_norm": 1.74556, "time": 0.10792}
225
+ {"mode": "train", "epoch": 1, "iter": 11200, "lr": 0.00015, "memory": 3375, "data_time": 0.0062, "loss_cls": 0.13274, "loss_bbox": 0.20783, "loss_centerness": 0.58043, "loss": 0.92101, "grad_norm": 1.78492, "time": 0.10187}
226
+ {"mode": "train", "epoch": 1, "iter": 11250, "lr": 0.00015, "memory": 3375, "data_time": 0.00624, "loss_cls": 0.12857, "loss_bbox": 0.2092, "loss_centerness": 0.5804, "loss": 0.91818, "grad_norm": 1.72029, "time": 0.10435}
227
+ {"mode": "train", "epoch": 1, "iter": 11300, "lr": 0.00015, "memory": 3375, "data_time": 0.00625, "loss_cls": 0.12827, "loss_bbox": 0.20364, "loss_centerness": 0.58134, "loss": 0.91326, "grad_norm": 1.76875, "time": 0.10085}
228
+ {"mode": "train", "epoch": 1, "iter": 11350, "lr": 0.00015, "memory": 3375, "data_time": 0.00629, "loss_cls": 0.13537, "loss_bbox": 0.20962, "loss_centerness": 0.58162, "loss": 0.92661, "grad_norm": 1.79054, "time": 0.10196}
229
+ {"mode": "train", "epoch": 1, "iter": 11400, "lr": 0.00015, "memory": 3375, "data_time": 0.00628, "loss_cls": 0.12844, "loss_bbox": 0.20489, "loss_centerness": 0.58127, "loss": 0.9146, "grad_norm": 1.69398, "time": 0.10006}
230
+ {"mode": "train", "epoch": 1, "iter": 11450, "lr": 0.00015, "memory": 3375, "data_time": 0.00631, "loss_cls": 0.13101, "loss_bbox": 0.21286, "loss_centerness": 0.58373, "loss": 0.9276, "grad_norm": 1.70636, "time": 0.10284}
231
+ {"mode": "train", "epoch": 1, "iter": 11500, "lr": 0.00015, "memory": 3375, "data_time": 0.00615, "loss_cls": 0.12725, "loss_bbox": 0.20134, "loss_centerness": 0.58003, "loss": 0.90862, "grad_norm": 1.75666, "time": 0.10323}
232
+ {"mode": "train", "epoch": 1, "iter": 11550, "lr": 0.00015, "memory": 3375, "data_time": 0.00632, "loss_cls": 0.13145, "loss_bbox": 0.21364, "loss_centerness": 0.5837, "loss": 0.92879, "grad_norm": 1.76175, "time": 0.10038}
233
+ {"mode": "train", "epoch": 1, "iter": 11600, "lr": 0.00015, "memory": 3375, "data_time": 0.00628, "loss_cls": 0.12798, "loss_bbox": 0.2022, "loss_centerness": 0.58031, "loss": 0.91049, "grad_norm": 1.73584, "time": 0.09994}
234
+ {"mode": "train", "epoch": 1, "iter": 11650, "lr": 0.00015, "memory": 3375, "data_time": 0.00676, "loss_cls": 0.13575, "loss_bbox": 0.21264, "loss_centerness": 0.58319, "loss": 0.93158, "grad_norm": 1.81048, "time": 0.10531}
235
+ {"mode": "train", "epoch": 1, "iter": 11700, "lr": 0.00015, "memory": 3375, "data_time": 0.00637, "loss_cls": 0.12329, "loss_bbox": 0.20193, "loss_centerness": 0.57987, "loss": 0.90509, "grad_norm": 1.69889, "time": 0.10331}
236
+ {"mode": "train", "epoch": 1, "iter": 11750, "lr": 0.00015, "memory": 3375, "data_time": 0.00629, "loss_cls": 0.12858, "loss_bbox": 0.21826, "loss_centerness": 0.5824, "loss": 0.92924, "grad_norm": 1.65682, "time": 0.10163}
237
+ {"mode": "train", "epoch": 1, "iter": 11800, "lr": 0.00015, "memory": 3375, "data_time": 0.00665, "loss_cls": 0.12892, "loss_bbox": 0.20593, "loss_centerness": 0.58157, "loss": 0.91642, "grad_norm": 1.71129, "time": 0.09941}
238
+ {"mode": "train", "epoch": 1, "iter": 11850, "lr": 0.00015, "memory": 3375, "data_time": 0.00664, "loss_cls": 0.12715, "loss_bbox": 0.20916, "loss_centerness": 0.58141, "loss": 0.91772, "grad_norm": 1.74266, "time": 0.10058}
239
+ {"mode": "train", "epoch": 1, "iter": 11900, "lr": 0.00015, "memory": 3375, "data_time": 0.00675, "loss_cls": 0.12976, "loss_bbox": 0.20303, "loss_centerness": 0.58149, "loss": 0.91427, "grad_norm": 1.75621, "time": 0.10307}
240
+ {"mode": "train", "epoch": 1, "iter": 11950, "lr": 0.00015, "memory": 3375, "data_time": 0.00649, "loss_cls": 0.1301, "loss_bbox": 0.21077, "loss_centerness": 0.58191, "loss": 0.92278, "grad_norm": 1.72479, "time": 0.10525}
241
+ {"mode": "train", "epoch": 1, "iter": 12000, "lr": 0.00015, "memory": 3375, "data_time": 0.00648, "loss_cls": 0.12932, "loss_bbox": 0.20505, "loss_centerness": 0.58048, "loss": 0.91485, "grad_norm": 1.76024, "time": 0.112}
242
+ {"mode": "val", "epoch": 1, "iter": 619, "lr": 0.00015, "mAP": 0.80154, "AP50": 0.802}
finetune/finetune_fcos_12k_voc0712_lr1.5e-2_wd5e-5/best_mAP_iter_12000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fbc0568ef8db056c9a3cf3e6902d5dca30325aff3c882b3fdacb3d798e175737
+ size 256817773
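This pointer means the checkpoint itself is stored in Git LFS; only the oid/size metadata appears in the diff. A minimal sketch for fetching the ~257 MB file with huggingface_hub; the repo_id and repo_type are placeholders to be replaced with the repository this commit belongs to:

from huggingface_hub import hf_hub_download

ckpt_path = hf_hub_download(
    repo_id="<user>/<repo>",  # placeholder: the repository this commit belongs to
    filename="finetune/finetune_fcos_12k_voc0712_lr1.5e-2_wd5e-5/best_mAP_iter_12000.pth",
    repo_type="model",  # assumption: use "dataset" if the repo is hosted as a dataset
)
print(ckpt_path)  # local cache path of the ~257 MB checkpoint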
finetune/finetune_fcos_12k_voc0712_lr1.5e-2_wd5e-5/fcos_mstrain_12k_voc0712.py ADDED
@@ -0,0 +1,212 @@
1
+ model = dict(
2
+ type='FCOS',
3
+ backbone=dict(
4
+ type='ResNet',
5
+ depth=50,
6
+ num_stages=4,
7
+ out_indices=(0, 1, 2, 3),
8
+ frozen_stages=1,
9
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
10
+ norm_eval=True,
11
+ style='pytorch',
12
+ init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
13
+ neck=dict(
14
+ type='FPN',
15
+ in_channels=[256, 512, 1024, 2048],
16
+ out_channels=256,
17
+ start_level=1,
18
+ add_extra_convs='on_output',
19
+ num_outs=5,
20
+ relu_before_extra_convs=True,
21
+ norm_cfg=dict(type='SyncBN', requires_grad=True)),
22
+ bbox_head=dict(
23
+ type='FCOSHead',
24
+ num_classes=20,
25
+ in_channels=256,
26
+ stacked_convs=4,
27
+ feat_channels=256,
28
+ strides=[8, 16, 32, 64, 128],
29
+ loss_cls=dict(
30
+ type='FocalLoss',
31
+ use_sigmoid=True,
32
+ gamma=2.0,
33
+ alpha=0.25,
34
+ loss_weight=1.0),
35
+ loss_bbox=dict(type='IoULoss', loss_weight=1.0),
36
+ loss_centerness=dict(
37
+ type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
38
+ train_cfg=dict(
39
+ assigner=dict(
40
+ type='MaxIoUAssigner',
41
+ pos_iou_thr=0.5,
42
+ neg_iou_thr=0.4,
43
+ min_pos_iou=0,
44
+ ignore_iof_thr=-1),
45
+ allowed_border=-1,
46
+ pos_weight=-1,
47
+ debug=False),
48
+ test_cfg=dict(
49
+ nms_pre=1000,
50
+ min_bbox_size=0,
51
+ score_thr=0.05,
52
+ nms=dict(type='nms', iou_threshold=0.5),
53
+ max_per_img=100))
54
+ dataset_type = 'VOCDataset'
55
+ data_root = 'data/VOCdevkit/'
56
+ img_norm_cfg = dict(
57
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
58
+ train_pipeline = [
59
+ dict(type='LoadImageFromFile'),
60
+ dict(type='LoadAnnotations', with_bbox=True),
61
+ dict(
62
+ type='Resize',
63
+ img_scale=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),
64
+ (1333, 608), (1333, 640), (1333, 672), (1333, 704),
65
+ (1333, 736), (1333, 768), (1333, 800)],
66
+ multiscale_mode='value',
67
+ keep_ratio=True),
68
+ dict(type='RandomFlip', flip_ratio=0.5),
69
+ dict(
70
+ type='Normalize',
71
+ mean=[123.675, 116.28, 103.53],
72
+ std=[58.395, 57.12, 57.375],
73
+ to_rgb=True),
74
+ dict(type='Pad', size_divisor=32),
75
+ dict(type='DefaultFormatBundle'),
76
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
77
+ ]
78
+ test_pipeline = [
79
+ dict(type='LoadImageFromFile'),
80
+ dict(
81
+ type='MultiScaleFlipAug',
82
+ img_scale=(1333, 800),
83
+ flip=False,
84
+ transforms=[
85
+ dict(type='Resize', keep_ratio=True),
86
+ dict(type='RandomFlip'),
87
+ dict(
88
+ type='Normalize',
89
+ mean=[123.675, 116.28, 103.53],
90
+ std=[58.395, 57.12, 57.375],
91
+ to_rgb=True),
92
+ dict(type='Pad', size_divisor=32),
93
+ dict(type='ImageToTensor', keys=['img']),
94
+ dict(type='Collect', keys=['img'])
95
+ ])
96
+ ]
97
+ data = dict(
98
+ samples_per_gpu=2,
99
+ workers_per_gpu=2,
100
+ train=dict(
101
+ type='VOCDataset',
102
+ ann_file=[
103
+ 'data/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt',
104
+ 'data/VOCdevkit/VOC2012/ImageSets/Main/trainval.txt'
105
+ ],
106
+ img_prefix=['data/VOCdevkit/VOC2007/', 'data/VOCdevkit/VOC2012/'],
107
+ pipeline=[
108
+ dict(type='LoadImageFromFile'),
109
+ dict(type='LoadAnnotations', with_bbox=True),
110
+ dict(
111
+ type='Resize',
112
+ img_scale=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),
113
+ (1333, 608), (1333, 640), (1333, 672), (1333, 704),
114
+ (1333, 736), (1333, 768), (1333, 800)],
115
+ multiscale_mode='value',
116
+ keep_ratio=True),
117
+ dict(type='RandomFlip', flip_ratio=0.5),
118
+ dict(
119
+ type='Normalize',
120
+ mean=[123.675, 116.28, 103.53],
121
+ std=[58.395, 57.12, 57.375],
122
+ to_rgb=True),
123
+ dict(type='Pad', size_divisor=32),
124
+ dict(type='DefaultFormatBundle'),
125
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
126
+ ]),
127
+ val=dict(
128
+ type='VOCDataset',
129
+ ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
130
+ img_prefix='data/VOCdevkit/VOC2007/',
131
+ pipeline=[
132
+ dict(type='LoadImageFromFile'),
133
+ dict(
134
+ type='MultiScaleFlipAug',
135
+ img_scale=(1333, 800),
136
+ flip=False,
137
+ transforms=[
138
+ dict(type='Resize', keep_ratio=True),
139
+ dict(type='RandomFlip'),
140
+ dict(
141
+ type='Normalize',
142
+ mean=[123.675, 116.28, 103.53],
143
+ std=[58.395, 57.12, 57.375],
144
+ to_rgb=True),
145
+ dict(type='Pad', size_divisor=32),
146
+ dict(type='ImageToTensor', keys=['img']),
147
+ dict(type='Collect', keys=['img'])
148
+ ])
149
+ ]),
150
+ test=dict(
151
+ type='VOCDataset',
152
+ ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
153
+ img_prefix='data/VOCdevkit/VOC2007/',
154
+ pipeline=[
155
+ dict(type='LoadImageFromFile'),
156
+ dict(
157
+ type='MultiScaleFlipAug',
158
+ img_scale=(1333, 800),
159
+ flip=False,
160
+ transforms=[
161
+ dict(type='Resize', keep_ratio=True),
162
+ dict(type='RandomFlip'),
163
+ dict(
164
+ type='Normalize',
165
+ mean=[123.675, 116.28, 103.53],
166
+ std=[58.395, 57.12, 57.375],
167
+ to_rgb=True),
168
+ dict(type='Pad', size_divisor=32),
169
+ dict(type='ImageToTensor', keys=['img']),
170
+ dict(type='Collect', keys=['img'])
171
+ ])
172
+ ]))
173
+ evaluation = dict(interval=12000, metric='mAP', save_best='auto')
174
+ optimizer = dict(
175
+ type='SGD',
176
+ lr=0.015,
177
+ momentum=0.9,
178
+ weight_decay=5e-05,
179
+ paramwise_cfg=dict(bias_lr_mult=2.0, bias_decay_mult=0.0))
180
+ optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
181
+ lr_config = dict(
182
+ policy='step',
183
+ warmup='linear',
184
+ warmup_iters=500,
185
+ warmup_ratio=0.001,
186
+ step=[9000, 11000],
187
+ by_epoch=False)
188
+ runner = dict(type='IterBasedRunner', max_iters=12000)
189
+ checkpoint_config = dict(interval=12000)
190
+ log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
191
+ custom_hooks = [
192
+ dict(type='NumClassCheckHook'),
193
+ dict(
194
+ type='MMDetWandbHook',
195
+ init_kwargs=dict(project='I2B', group='finetune'),
196
+ interval=50,
197
+ num_eval_images=0,
198
+ log_checkpoint=False)
199
+ ]
200
+ dist_params = dict(backend='nccl')
201
+ log_level = 'INFO'
202
+ load_from = 'pretrain/selfsup_fcos_mstrain-soft-teacher_sampler-2048_temp0.5/final_model.pth'
203
+ resume_from = None
204
+ workflow = [('train', 1)]
205
+ opencv_num_threads = 0
206
+ mp_start_method = 'fork'
207
+ auto_scale_lr = dict(enable=False, base_batch_size=16)
208
+ custom_imports = None
209
+ norm_cfg = dict(type='SyncBN', requires_grad=True)
210
+ work_dir = 'work_dirs/finetune_fcos_12k_voc0712_lr1.5e-2_wd5e-5'
211
+ auto_resume = False
212
+ gpu_ids = range(0, 8)
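The lr_config above explains the learning-rate trace in the log: 500 iterations of linear warmup, then step drops after iterations 9000 and 11000, which is exactly where the logged lr falls from 0.015 to 0.0015 and then 0.00015. A small sketch of that schedule; the decay factor gamma=0.1 is an assumption (it is mmcv's default and is not set explicitly in this config):

def fcos_voc_lr(it, base_lr=0.015, warmup_iters=500, warmup_ratio=0.001,
                steps=(9000, 11000), gamma=0.1):
    # Step decay: multiply by gamma for every milestone already passed.
    # gamma=0.1 is an assumed default, not an explicit value in the config above.
    lr = base_lr * gamma ** sum(it >= s for s in steps)
    if it < warmup_iters:
        # mmcv-style linear warmup from base_lr * warmup_ratio up to base_lr.
        lr *= 1 - (1 - it / warmup_iters) * (1 - warmup_ratio)
    return lr

for it in (100, 3600, 9050, 11050, 12000):
    print(it, round(fcos_voc_lr(it), 6))  # 9050 -> 0.0015, 11050 -> 0.00015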
finetune/finetune_fcos_1x_coco_lr1.5e-2_wd5e.5/20221104_113744.log ADDED
The diff for this file is too large to render. See raw diff
 
finetune/finetune_fcos_1x_coco_lr1.5e-2_wd5e.5/20221104_113744.log.json ADDED
The diff for this file is too large to render. See raw diff
 
finetune/finetune_fcos_1x_coco_lr1.5e-2_wd5e.5/best_bbox_mAP_epoch_12.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a23cb08dcfde3078a0a804e45beff69bb475ef7ebb6268249ceec76b21052a53
+ size 257924589
finetune/finetune_fcos_1x_coco_lr1.5e-2_wd5e.5/fcos_r50_fpn_1x_coco.py ADDED
@@ -0,0 +1,197 @@
1
+ model = dict(
2
+ type='FCOS',
3
+ backbone=dict(
4
+ type='ResNet',
5
+ depth=50,
6
+ num_stages=4,
7
+ out_indices=(0, 1, 2, 3),
8
+ frozen_stages=1,
9
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
10
+ norm_eval=True,
11
+ style='pytorch',
12
+ init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
13
+ neck=dict(
14
+ type='FPN',
15
+ in_channels=[256, 512, 1024, 2048],
16
+ out_channels=256,
17
+ start_level=1,
18
+ add_extra_convs='on_output',
19
+ num_outs=5,
20
+ relu_before_extra_convs=True,
21
+ norm_cfg=dict(type='SyncBN', requires_grad=True)),
22
+ bbox_head=dict(
23
+ type='FCOSHead',
24
+ num_classes=80,
25
+ in_channels=256,
26
+ stacked_convs=4,
27
+ feat_channels=256,
28
+ strides=[8, 16, 32, 64, 128],
29
+ loss_cls=dict(
30
+ type='FocalLoss',
31
+ use_sigmoid=True,
32
+ gamma=2.0,
33
+ alpha=0.25,
34
+ loss_weight=1.0),
35
+ loss_bbox=dict(type='IoULoss', loss_weight=1.0),
36
+ loss_centerness=dict(
37
+ type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
38
+ train_cfg=dict(
39
+ assigner=dict(
40
+ type='MaxIoUAssigner',
41
+ pos_iou_thr=0.5,
42
+ neg_iou_thr=0.4,
43
+ min_pos_iou=0,
44
+ ignore_iof_thr=-1),
45
+ allowed_border=-1,
46
+ pos_weight=-1,
47
+ debug=False),
48
+ test_cfg=dict(
49
+ nms_pre=1000,
50
+ min_bbox_size=0,
51
+ score_thr=0.05,
52
+ nms=dict(type='nms', iou_threshold=0.5),
53
+ max_per_img=100))
54
+ dataset_type = 'CocoDataset'
55
+ data_root = 'data/coco/'
56
+ img_norm_cfg = dict(
57
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
58
+ train_pipeline = [
59
+ dict(type='LoadImageFromFile'),
60
+ dict(type='LoadAnnotations', with_bbox=True),
61
+ dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
62
+ dict(type='RandomFlip', flip_ratio=0.5),
63
+ dict(
64
+ type='Normalize',
65
+ mean=[123.675, 116.28, 103.53],
66
+ std=[58.395, 57.12, 57.375],
67
+ to_rgb=True),
68
+ dict(type='Pad', size_divisor=32),
69
+ dict(type='DefaultFormatBundle'),
70
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
71
+ ]
72
+ test_pipeline = [
73
+ dict(type='LoadImageFromFile'),
74
+ dict(
75
+ type='MultiScaleFlipAug',
76
+ img_scale=(1333, 800),
77
+ flip=False,
78
+ transforms=[
79
+ dict(type='Resize', keep_ratio=True),
80
+ dict(type='RandomFlip'),
81
+ dict(
82
+ type='Normalize',
83
+ mean=[123.675, 116.28, 103.53],
84
+ std=[58.395, 57.12, 57.375],
85
+ to_rgb=True),
86
+ dict(type='Pad', size_divisor=32),
87
+ dict(type='ImageToTensor', keys=['img']),
88
+ dict(type='Collect', keys=['img'])
89
+ ])
90
+ ]
91
+ data = dict(
92
+ samples_per_gpu=2,
93
+ workers_per_gpu=2,
94
+ train=dict(
95
+ type='CocoDataset',
96
+ ann_file='data/coco/annotations/instances_train2017.json',
97
+ img_prefix='data/coco/train2017/',
98
+ pipeline=[
99
+ dict(type='LoadImageFromFile'),
100
+ dict(type='LoadAnnotations', with_bbox=True),
101
+ dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
102
+ dict(type='RandomFlip', flip_ratio=0.5),
103
+ dict(
104
+ type='Normalize',
105
+ mean=[123.675, 116.28, 103.53],
106
+ std=[58.395, 57.12, 57.375],
107
+ to_rgb=True),
108
+ dict(type='Pad', size_divisor=32),
109
+ dict(type='DefaultFormatBundle'),
110
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
111
+ ]),
112
+ val=dict(
113
+ type='CocoDataset',
114
+ ann_file='data/coco/annotations/instances_val2017.json',
115
+ img_prefix='data/coco/val2017/',
116
+ pipeline=[
117
+ dict(type='LoadImageFromFile'),
118
+ dict(
119
+ type='MultiScaleFlipAug',
120
+ img_scale=(1333, 800),
121
+ flip=False,
122
+ transforms=[
123
+ dict(type='Resize', keep_ratio=True),
124
+ dict(type='RandomFlip'),
125
+ dict(
126
+ type='Normalize',
127
+ mean=[123.675, 116.28, 103.53],
128
+ std=[58.395, 57.12, 57.375],
129
+ to_rgb=True),
130
+ dict(type='Pad', size_divisor=32),
131
+ dict(type='ImageToTensor', keys=['img']),
132
+ dict(type='Collect', keys=['img'])
133
+ ])
134
+ ]),
135
+ test=dict(
136
+ type='CocoDataset',
137
+ ann_file='data/coco/annotations/instances_val2017.json',
138
+ img_prefix='data/coco/val2017/',
139
+ pipeline=[
140
+ dict(type='LoadImageFromFile'),
141
+ dict(
142
+ type='MultiScaleFlipAug',
143
+ img_scale=(1333, 800),
144
+ flip=False,
145
+ transforms=[
146
+ dict(type='Resize', keep_ratio=True),
147
+ dict(type='RandomFlip'),
148
+ dict(
149
+ type='Normalize',
150
+ mean=[123.675, 116.28, 103.53],
151
+ std=[58.395, 57.12, 57.375],
152
+ to_rgb=True),
153
+ dict(type='Pad', size_divisor=32),
154
+ dict(type='ImageToTensor', keys=['img']),
155
+ dict(type='Collect', keys=['img'])
156
+ ])
157
+ ]))
158
+ evaluation = dict(
159
+ interval=1, metric='bbox', save_best='auto', gpu_collect=True)
160
+ optimizer = dict(
161
+ type='SGD',
162
+ lr=0.015,
163
+ momentum=0.9,
164
+ weight_decay=5e-05,
165
+ paramwise_cfg=dict(bias_lr_mult=2.0, bias_decay_mult=0.0))
166
+ optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
167
+ lr_config = dict(
168
+ policy='step',
169
+ warmup='linear',
170
+ warmup_iters=500,
171
+ warmup_ratio=0.001,
172
+ step=[8, 11])
173
+ runner = dict(type='EpochBasedRunner', max_epochs=12)
174
+ checkpoint_config = dict(interval=1)
175
+ log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
176
+ custom_hooks = [
177
+ dict(type='NumClassCheckHook'),
178
+ dict(
179
+ type='MMDetWandbHook',
180
+ init_kwargs=dict(project='I2B', group='finetune'),
181
+ interval=50,
182
+ num_eval_images=0,
183
+ log_checkpoint=False)
184
+ ]
185
+ dist_params = dict(backend='nccl')
186
+ log_level = 'INFO'
187
+ load_from = 'pretrain/selfsup_fcos_mstrain-soft-teacher_sampler-2048_temp0.5/final_model.pth'
188
+ resume_from = None
189
+ workflow = [('train', 1)]
190
+ opencv_num_threads = 0
191
+ mp_start_method = 'fork'
192
+ auto_scale_lr = dict(enable=False, base_batch_size=16)
193
+ custom_imports = None
194
+ norm_cfg = dict(type='SyncBN', requires_grad=True)
195
+ work_dir = 'work_dirs/finetune_fcos_1x_coco_lr1.5e-2_wd5e.5'
196
+ auto_resume = False
197
+ gpu_ids = range(0, 8)
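With this config and the best_bbox_mAP_epoch_12.pth checkpoint added above, single-image inference can be run through mmdetection 2.x's high-level API. A minimal sketch under the assumption that the repo files are available locally; the image path is a placeholder, and since the config uses SyncBN, the model may need its norm layers reverted to plain BN for single-GPU use:

from mmdet.apis import init_detector, inference_detector

config_file = "finetune/finetune_fcos_1x_coco_lr1.5e-2_wd5e.5/fcos_r50_fpn_1x_coco.py"
checkpoint_file = "finetune/finetune_fcos_1x_coco_lr1.5e-2_wd5e.5/best_bbox_mAP_epoch_12.pth"

model = init_detector(config_file, checkpoint_file, device="cuda:0")
result = inference_detector(model, "demo.jpg")  # placeholder image path
# `result` is a list with one array of [x1, y1, x2, y2, score] rows per COCO class.
print(len(result))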
finetune/finetune_mask-rcnn_12k_voc0712_lr3e-2_wd5e-5/20221003_234250.log ADDED
The diff for this file is too large to render. See raw diff
 
finetune/finetune_mask-rcnn_12k_voc0712_lr3e-2_wd5e-5/20221003_234250.log.json ADDED
@@ -0,0 +1,242 @@
1
+ {"env_info": "sys.platform: linux\nPython: 3.7.3 (default, Jan 22 2021, 20:04:44) [GCC 8.3.0]\nCUDA available: True\nGPU 0,1,2,3,4,5,6,7: A100-SXM-80GB\nCUDA_HOME: /usr/local/cuda\nNVCC: Cuda compilation tools, release 11.3, V11.3.109\nGCC: x86_64-linux-gnu-gcc (Debian 8.3.0-6) 8.3.0\nPyTorch: 1.10.0\nPyTorch compiling details: PyTorch built with:\n - GCC 7.3\n - C++ Version: 201402\n - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\n - Intel(R) MKL-DNN v2.2.3 (Git Hash 7336ca9f055cf1bfa13efb658fe15dc9b41f0740)\n - OpenMP 201511 (a.k.a. OpenMP 4.5)\n - LAPACK is enabled (usually provided by MKL)\n - NNPACK is enabled\n - CPU capability usage: AVX512\n - CUDA Runtime 11.3\n - NVCC architecture flags: -gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\n - CuDNN 8.2\n - Magma 2.5.2\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.10.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, \n\nTorchVision: 0.11.1+cu113\nOpenCV: 4.6.0\nMMCV: 1.6.1\nMMCV Compiler: GCC 9.3\nMMCV CUDA Compiler: 11.3\nMMDetection: 2.25.2+87c120c", "config": "model = dict(\n type='MaskRCNN',\n backbone=dict(\n type='ResNet',\n depth=50,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n frozen_stages=1,\n norm_cfg=dict(type='SyncBN', requires_grad=True),\n norm_eval=True,\n style='pytorch',\n init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n neck=dict(\n type='FPN',\n in_channels=[256, 512, 1024, 2048],\n out_channels=256,\n num_outs=5,\n norm_cfg=dict(type='SyncBN', requires_grad=True)),\n rpn_head=dict(\n type='RPNHead',\n in_channels=256,\n feat_channels=256,\n anchor_generator=dict(\n type='AnchorGenerator',\n scales=[8],\n ratios=[0.5, 1.0, 2.0],\n strides=[4, 8, 16, 32, 64]),\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[0.0, 0.0, 0.0, 0.0],\n target_stds=[1.0, 1.0, 1.0, 1.0]),\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n roi_head=dict(\n type='StandardRoIHead',\n bbox_roi_extractor=dict(\n type='SingleRoIExtractor',\n roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n bbox_head=dict(\n type='Shared4Conv1FCBBoxHead',\n 
in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=20,\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[0.0, 0.0, 0.0, 0.0],\n target_stds=[0.1, 0.1, 0.2, 0.2]),\n reg_class_agnostic=False,\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n mask_roi_extractor=None,\n mask_head=None),\n train_cfg=dict(\n rpn=dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.7,\n neg_iou_thr=0.3,\n min_pos_iou=0.3,\n match_low_quality=True,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=256,\n pos_fraction=0.5,\n neg_pos_ub=-1,\n add_gt_as_proposals=False),\n allowed_border=-1,\n pos_weight=-1,\n debug=False),\n rpn_proposal=dict(\n nms_pre=2000,\n max_per_img=1000,\n nms=dict(type='nms', iou_threshold=0.7),\n min_bbox_size=0),\n rcnn=dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.5,\n match_low_quality=True,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True),\n mask_size=28,\n pos_weight=-1,\n debug=False)),\n test_cfg=dict(\n rpn=dict(\n nms_pre=1000,\n max_per_img=1000,\n nms=dict(type='nms', iou_threshold=0.7),\n min_bbox_size=0),\n rcnn=dict(\n score_thr=0.05,\n nms=dict(type='nms', iou_threshold=0.5),\n max_per_img=100,\n mask_thr_binary=0.5)))\ndataset_type = 'VOCDataset'\ndata_root = 'data/VOCdevkit/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(\n type='Resize',\n img_scale=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),\n (1333, 608), (1333, 640), (1333, 672), (1333, 704),\n (1333, 736), (1333, 768), (1333, 800)],\n multiscale_mode='value',\n keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(1333, 800),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n]\ndata = dict(\n samples_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type='VOCDataset',\n ann_file=[\n 'data/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt',\n 'data/VOCdevkit/VOC2012/ImageSets/Main/trainval.txt'\n ],\n img_prefix=['data/VOCdevkit/VOC2007/', 'data/VOCdevkit/VOC2012/'],\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(\n type='Resize',\n img_scale=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),\n (1333, 608), (1333, 640), (1333, 672), (1333, 704),\n (1333, 736), (1333, 768), (1333, 800)],\n multiscale_mode='value',\n keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', 
keys=['img', 'gt_bboxes', 'gt_labels'])\n ]),\n val=dict(\n type='VOCDataset',\n ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',\n img_prefix='data/VOCdevkit/VOC2007/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(1333, 800),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ]),\n test=dict(\n type='VOCDataset',\n ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',\n img_prefix='data/VOCdevkit/VOC2007/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(1333, 800),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ]))\nevaluation = dict(interval=12000, metric='mAP', save_best='auto')\noptimizer = dict(type='SGD', lr=0.03, momentum=0.9, weight_decay=5e-05)\noptimizer_config = dict(grad_clip=None)\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=500,\n warmup_ratio=0.001,\n step=[9000, 11000],\n by_epoch=False)\nrunner = dict(type='IterBasedRunner', max_iters=12000)\ncheckpoint_config = dict(interval=12000)\nlog_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])\ncustom_hooks = [\n dict(type='NumClassCheckHook'),\n dict(\n type='MMDetWandbHook',\n init_kwargs=dict(project='I2B', group='finetune'),\n interval=50,\n num_eval_images=0,\n log_checkpoint=False)\n]\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from = 'pretrain/selfsup_mask-rcnn_mstrain-soft-teacher_sampler-4096_temp0.5/final_model.pth'\nresume_from = None\nworkflow = [('train', 1)]\nopencv_num_threads = 0\nmp_start_method = 'fork'\nauto_scale_lr = dict(enable=False, base_batch_size=16)\ncustom_imports = None\nnorm_cfg = dict(type='SyncBN', requires_grad=True)\nwork_dir = 'work_dirs/finetune_mask-rcnn__12k_voc0712_lr3e-2_wd5e-5'\nauto_resume = False\ngpu_ids = range(0, 8)\n", "seed": 42, "exp_name": "mask_rcnn_mstrain_12k_voc0712.py", "hook_msgs": {}}
+ {"mode": "train", "epoch": 1, "iter": 50, "lr": 0.00297, "memory": 3991, "data_time": 0.00644, "loss_rpn_cls": 0.41043, "loss_rpn_bbox": 0.03144, "loss_cls": 1.08654, "acc": 84.58247, "loss_bbox": 0.04802, "loss": 1.57643, "time": 0.11675}
+ {"mode": "train", "epoch": 1, "iter": 100, "lr": 0.00596, "memory": 3991, "data_time": 0.00565, "loss_rpn_cls": 0.10294, "loss_rpn_bbox": 0.0304, "loss_cls": 0.22905, "acc": 95.9438, "loss_bbox": 0.15879, "loss": 0.52118, "time": 0.112}
+ {"mode": "train", "epoch": 1, "iter": 150, "lr": 0.00896, "memory": 3992, "data_time": 0.00552, "loss_rpn_cls": 0.06976, "loss_rpn_bbox": 0.03024, "loss_cls": 0.23172, "acc": 95.49007, "loss_bbox": 0.17311, "loss": 0.50484, "time": 0.11235}
+ {"mode": "train", "epoch": 1, "iter": 200, "lr": 0.01196, "memory": 3995, "data_time": 0.00534, "loss_rpn_cls": 0.05468, "loss_rpn_bbox": 0.02682, "loss_cls": 0.22995, "acc": 95.1844, "loss_bbox": 0.17774, "loss": 0.48919, "time": 0.11386}
+ {"mode": "train", "epoch": 1, "iter": 250, "lr": 0.01496, "memory": 3995, "data_time": 0.00556, "loss_rpn_cls": 0.03784, "loss_rpn_bbox": 0.02613, "loss_cls": 0.25751, "acc": 94.17487, "loss_bbox": 0.18684, "loss": 0.50832, "time": 0.11232}
+ {"mode": "train", "epoch": 1, "iter": 300, "lr": 0.01795, "memory": 3995, "data_time": 0.00554, "loss_rpn_cls": 0.03023, "loss_rpn_bbox": 0.02683, "loss_cls": 0.2374, "acc": 94.24235, "loss_bbox": 0.17474, "loss": 0.4692, "time": 0.11367}
+ {"mode": "train", "epoch": 1, "iter": 350, "lr": 0.02095, "memory": 3995, "data_time": 0.00574, "loss_rpn_cls": 0.03024, "loss_rpn_bbox": 0.02549, "loss_cls": 0.2147, "acc": 94.58972, "loss_bbox": 0.16195, "loss": 0.43238, "time": 0.11257}
+ {"mode": "train", "epoch": 1, "iter": 400, "lr": 0.02395, "memory": 3995, "data_time": 0.00575, "loss_rpn_cls": 0.02962, "loss_rpn_bbox": 0.02628, "loss_cls": 0.20662, "acc": 94.50053, "loss_bbox": 0.16455, "loss": 0.42707, "time": 0.1129}
+ {"mode": "train", "epoch": 1, "iter": 450, "lr": 0.02694, "memory": 3995, "data_time": 0.00554, "loss_rpn_cls": 0.02807, "loss_rpn_bbox": 0.02585, "loss_cls": 0.19557, "acc": 94.61617, "loss_bbox": 0.16118, "loss": 0.41067, "time": 0.11173}
+ {"mode": "train", "epoch": 1, "iter": 500, "lr": 0.02994, "memory": 3995, "data_time": 0.00564, "loss_rpn_cls": 0.0275, "loss_rpn_bbox": 0.02652, "loss_cls": 0.19759, "acc": 94.51387, "loss_bbox": 0.16369, "loss": 0.4153, "time": 0.11064}
+ {"mode": "train", "epoch": 1, "iter": 550, "lr": 0.03, "memory": 3995, "data_time": 0.00571, "loss_rpn_cls": 0.02621, "loss_rpn_bbox": 0.0237, "loss_cls": 0.19653, "acc": 94.49793, "loss_bbox": 0.16558, "loss": 0.41202, "time": 0.11319}
+ {"mode": "train", "epoch": 1, "iter": 600, "lr": 0.03, "memory": 3995, "data_time": 0.00568, "loss_rpn_cls": 0.02619, "loss_rpn_bbox": 0.02437, "loss_cls": 0.19264, "acc": 94.49541, "loss_bbox": 0.16481, "loss": 0.40801, "time": 0.11242}
+ {"mode": "train", "epoch": 1, "iter": 650, "lr": 0.03, "memory": 3995, "data_time": 0.0058, "loss_rpn_cls": 0.02561, "loss_rpn_bbox": 0.02533, "loss_cls": 0.17946, "acc": 94.84409, "loss_bbox": 0.16391, "loss": 0.39431, "time": 0.11165}
+ {"mode": "train", "epoch": 1, "iter": 700, "lr": 0.03, "memory": 3995, "data_time": 0.00556, "loss_rpn_cls": 0.02681, "loss_rpn_bbox": 0.0249, "loss_cls": 0.18208, "acc": 94.75084, "loss_bbox": 0.16257, "loss": 0.39636, "time": 0.11304}
+ {"mode": "train", "epoch": 1, "iter": 750, "lr": 0.03, "memory": 3995, "data_time": 0.00556, "loss_rpn_cls": 0.02305, "loss_rpn_bbox": 0.02381, "loss_cls": 0.17837, "acc": 94.85941, "loss_bbox": 0.1554, "loss": 0.38064, "time": 0.1142}
+ {"mode": "train", "epoch": 1, "iter": 800, "lr": 0.03, "memory": 3995, "data_time": 0.00565, "loss_rpn_cls": 0.02559, "loss_rpn_bbox": 0.02504, "loss_cls": 0.1712, "acc": 95.01142, "loss_bbox": 0.15679, "loss": 0.37862, "time": 0.11286}
+ {"mode": "train", "epoch": 1, "iter": 850, "lr": 0.03, "memory": 3995, "data_time": 0.00603, "loss_rpn_cls": 0.02292, "loss_rpn_bbox": 0.02453, "loss_cls": 0.17492, "acc": 94.82509, "loss_bbox": 0.16082, "loss": 0.38319, "time": 0.11357}
+ {"mode": "train", "epoch": 1, "iter": 900, "lr": 0.03, "memory": 3995, "data_time": 0.00601, "loss_rpn_cls": 0.02541, "loss_rpn_bbox": 0.02451, "loss_cls": 0.16358, "acc": 94.99489, "loss_bbox": 0.1574, "loss": 0.3709, "time": 0.11277}
+ {"mode": "train", "epoch": 1, "iter": 950, "lr": 0.03, "memory": 3995, "data_time": 0.00568, "loss_rpn_cls": 0.02353, "loss_rpn_bbox": 0.02362, "loss_cls": 0.16549, "acc": 94.90694, "loss_bbox": 0.16323, "loss": 0.37588, "time": 0.1138}
+ {"mode": "train", "epoch": 1, "iter": 1000, "lr": 0.03, "memory": 3995, "data_time": 0.00566, "loss_rpn_cls": 0.02596, "loss_rpn_bbox": 0.02283, "loss_cls": 0.15184, "acc": 95.35421, "loss_bbox": 0.14761, "loss": 0.34824, "time": 0.11104}
+ {"mode": "train", "epoch": 1, "iter": 1050, "lr": 0.03, "memory": 3995, "data_time": 0.00624, "loss_rpn_cls": 0.02226, "loss_rpn_bbox": 0.02232, "loss_cls": 0.15488, "acc": 95.17919, "loss_bbox": 0.15263, "loss": 0.35209, "time": 0.11326}
+ {"mode": "train", "epoch": 1, "iter": 1100, "lr": 0.03, "memory": 3995, "data_time": 0.0057, "loss_rpn_cls": 0.02119, "loss_rpn_bbox": 0.02169, "loss_cls": 0.15774, "acc": 94.94709, "loss_bbox": 0.15923, "loss": 0.35986, "time": 0.11268}
+ {"mode": "train", "epoch": 1, "iter": 1150, "lr": 0.03, "memory": 3995, "data_time": 0.006, "loss_rpn_cls": 0.0218, "loss_rpn_bbox": 0.02127, "loss_cls": 0.1472, "acc": 95.34287, "loss_bbox": 0.14878, "loss": 0.33906, "time": 0.11321}
+ {"mode": "train", "epoch": 1, "iter": 1200, "lr": 0.03, "memory": 3995, "data_time": 0.00615, "loss_rpn_cls": 0.02182, "loss_rpn_bbox": 0.02347, "loss_cls": 0.16316, "acc": 94.93938, "loss_bbox": 0.16, "loss": 0.36845, "time": 0.11361}
+ {"mode": "train", "epoch": 1, "iter": 1250, "lr": 0.03, "memory": 3995, "data_time": 0.00581, "loss_rpn_cls": 0.02133, "loss_rpn_bbox": 0.02247, "loss_cls": 0.16057, "acc": 94.90527, "loss_bbox": 0.16563, "loss": 0.36999, "time": 0.11263}
+ {"mode": "train", "epoch": 1, "iter": 1300, "lr": 0.03, "memory": 3995, "data_time": 0.006, "loss_rpn_cls": 0.01998, "loss_rpn_bbox": 0.02045, "loss_cls": 0.14458, "acc": 95.52483, "loss_bbox": 0.14612, "loss": 0.33113, "time": 0.11225}
+ {"mode": "train", "epoch": 1, "iter": 1350, "lr": 0.03, "memory": 3995, "data_time": 0.0057, "loss_rpn_cls": 0.02182, "loss_rpn_bbox": 0.02188, "loss_cls": 0.15621, "acc": 95.08521, "loss_bbox": 0.15699, "loss": 0.35689, "time": 0.11065}
+ {"mode": "train", "epoch": 1, "iter": 1400, "lr": 0.03, "memory": 3995, "data_time": 0.00579, "loss_rpn_cls": 0.02226, "loss_rpn_bbox": 0.02149, "loss_cls": 0.1555, "acc": 95.15674, "loss_bbox": 0.15612, "loss": 0.35537, "time": 0.11239}
+ {"mode": "train", "epoch": 1, "iter": 1450, "lr": 0.03, "memory": 3995, "data_time": 0.00565, "loss_rpn_cls": 0.02105, "loss_rpn_bbox": 0.02309, "loss_cls": 0.15521, "acc": 95.05029, "loss_bbox": 0.15922, "loss": 0.35857, "time": 0.11099}
+ {"mode": "train", "epoch": 1, "iter": 1500, "lr": 0.03, "memory": 3995, "data_time": 0.00585, "loss_rpn_cls": 0.02237, "loss_rpn_bbox": 0.02267, "loss_cls": 0.16152, "acc": 94.90433, "loss_bbox": 0.16257, "loss": 0.36914, "time": 0.11216}
+ {"mode": "train", "epoch": 1, "iter": 1550, "lr": 0.03, "memory": 3995, "data_time": 0.00575, "loss_rpn_cls": 0.02116, "loss_rpn_bbox": 0.0225, "loss_cls": 0.15811, "acc": 95.14237, "loss_bbox": 0.15299, "loss": 0.35477, "time": 0.11156}
+ {"mode": "train", "epoch": 1, "iter": 1600, "lr": 0.03, "memory": 3995, "data_time": 0.00588, "loss_rpn_cls": 0.02113, "loss_rpn_bbox": 0.02233, "loss_cls": 0.15422, "acc": 95.05477, "loss_bbox": 0.16012, "loss": 0.3578, "time": 0.1111}
+ {"mode": "train", "epoch": 1, "iter": 1650, "lr": 0.03, "memory": 3995, "data_time": 0.00571, "loss_rpn_cls": 0.01862, "loss_rpn_bbox": 0.02049, "loss_cls": 0.14403, "acc": 95.29903, "loss_bbox": 0.1535, "loss": 0.33663, "time": 0.11174}
+ {"mode": "train", "epoch": 1, "iter": 1700, "lr": 0.03, "memory": 3995, "data_time": 0.00608, "loss_rpn_cls": 0.0189, "loss_rpn_bbox": 0.02025, "loss_cls": 0.15246, "acc": 95.10426, "loss_bbox": 0.16041, "loss": 0.35202, "time": 0.11328}
+ {"mode": "train", "epoch": 1, "iter": 1750, "lr": 0.03, "memory": 3995, "data_time": 0.00556, "loss_rpn_cls": 0.02053, "loss_rpn_bbox": 0.02232, "loss_cls": 0.14866, "acc": 95.23583, "loss_bbox": 0.15446, "loss": 0.34597, "time": 0.11099}
+ {"mode": "train", "epoch": 1, "iter": 1800, "lr": 0.03, "memory": 3995, "data_time": 0.0059, "loss_rpn_cls": 0.01839, "loss_rpn_bbox": 0.02141, "loss_cls": 0.1544, "acc": 95.08892, "loss_bbox": 0.15747, "loss": 0.35167, "time": 0.11367}
+ {"mode": "train", "epoch": 1, "iter": 1850, "lr": 0.03, "memory": 3995, "data_time": 0.00594, "loss_rpn_cls": 0.01763, "loss_rpn_bbox": 0.02074, "loss_cls": 0.14871, "acc": 95.24131, "loss_bbox": 0.15688, "loss": 0.34397, "time": 0.11305}
+ {"mode": "train", "epoch": 1, "iter": 1900, "lr": 0.03, "memory": 3995, "data_time": 0.00583, "loss_rpn_cls": 0.02196, "loss_rpn_bbox": 0.02072, "loss_cls": 0.15315, "acc": 95.14866, "loss_bbox": 0.15627, "loss": 0.35211, "time": 0.11176}
+ {"mode": "train", "epoch": 1, "iter": 1950, "lr": 0.03, "memory": 3995, "data_time": 0.00593, "loss_rpn_cls": 0.02119, "loss_rpn_bbox": 0.02369, "loss_cls": 0.15557, "acc": 94.98127, "loss_bbox": 0.15799, "loss": 0.35844, "time": 0.11139}
+ {"mode": "train", "epoch": 1, "iter": 2000, "lr": 0.03, "memory": 3995, "data_time": 0.00587, "loss_rpn_cls": 0.01852, "loss_rpn_bbox": 0.02064, "loss_cls": 0.15419, "acc": 95.07802, "loss_bbox": 0.15635, "loss": 0.3497, "time": 0.1139}
+ {"mode": "train", "epoch": 1, "iter": 2050, "lr": 0.03, "memory": 3995, "data_time": 0.00602, "loss_rpn_cls": 0.01764, "loss_rpn_bbox": 0.02038, "loss_cls": 0.15086, "acc": 94.989, "loss_bbox": 0.16541, "loss": 0.35429, "time": 0.11379}
+ {"mode": "train", "epoch": 1, "iter": 2100, "lr": 0.03, "memory": 3995, "data_time": 0.00586, "loss_rpn_cls": 0.01823, "loss_rpn_bbox": 0.02101, "loss_cls": 0.14495, "acc": 95.17257, "loss_bbox": 0.15929, "loss": 0.34349, "time": 0.11219}
+ {"mode": "train", "epoch": 1, "iter": 2150, "lr": 0.03, "memory": 3995, "data_time": 0.0058, "loss_rpn_cls": 0.01883, "loss_rpn_bbox": 0.01963, "loss_cls": 0.13767, "acc": 95.39026, "loss_bbox": 0.15031, "loss": 0.32644, "time": 0.11393}
+ {"mode": "train", "epoch": 1, "iter": 2200, "lr": 0.03, "memory": 3995, "data_time": 0.00616, "loss_rpn_cls": 0.01979, "loss_rpn_bbox": 0.02195, "loss_cls": 0.14059, "acc": 95.48267, "loss_bbox": 0.15105, "loss": 0.33339, "time": 0.11346}
+ {"mode": "train", "epoch": 1, "iter": 2250, "lr": 0.03, "memory": 3995, "data_time": 0.00609, "loss_rpn_cls": 0.01784, "loss_rpn_bbox": 0.02077, "loss_cls": 0.13327, "acc": 95.58228, "loss_bbox": 0.14601, "loss": 0.31788, "time": 0.11329}
+ {"mode": "train", "epoch": 1, "iter": 2300, "lr": 0.03, "memory": 3995, "data_time": 0.00661, "loss_rpn_cls": 0.01818, "loss_rpn_bbox": 0.02095, "loss_cls": 0.14542, "acc": 95.18523, "loss_bbox": 0.15631, "loss": 0.34086, "time": 0.11317}
+ {"mode": "train", "epoch": 1, "iter": 2350, "lr": 0.03, "memory": 3995, "data_time": 0.00557, "loss_rpn_cls": 0.01844, "loss_rpn_bbox": 0.02101, "loss_cls": 0.14542, "acc": 95.1355, "loss_bbox": 0.15289, "loss": 0.33777, "time": 0.11121}
+ {"mode": "train", "epoch": 1, "iter": 2400, "lr": 0.03, "memory": 3995, "data_time": 0.00593, "loss_rpn_cls": 0.01792, "loss_rpn_bbox": 0.02135, "loss_cls": 0.14588, "acc": 95.16064, "loss_bbox": 0.1564, "loss": 0.34155, "time": 0.11223}
+ {"mode": "train", "epoch": 1, "iter": 2450, "lr": 0.03, "memory": 3995, "data_time": 0.00588, "loss_rpn_cls": 0.01755, "loss_rpn_bbox": 0.02053, "loss_cls": 0.14049, "acc": 95.26367, "loss_bbox": 0.15466, "loss": 0.33323, "time": 0.11272}
+ {"mode": "train", "epoch": 1, "iter": 2500, "lr": 0.03, "memory": 3995, "data_time": 0.00609, "loss_rpn_cls": 0.02039, "loss_rpn_bbox": 0.02397, "loss_cls": 0.1549, "acc": 94.93652, "loss_bbox": 0.16426, "loss": 0.36353, "time": 0.11252}
+ {"mode": "train", "epoch": 1, "iter": 2550, "lr": 0.03, "memory": 3995, "data_time": 0.006, "loss_rpn_cls": 0.01623, "loss_rpn_bbox": 0.02098, "loss_cls": 0.14247, "acc": 95.25298, "loss_bbox": 0.15651, "loss": 0.3362, "time": 0.1134}
+ {"mode": "train", "epoch": 1, "iter": 2600, "lr": 0.03, "memory": 3995, "data_time": 0.00582, "loss_rpn_cls": 0.01884, "loss_rpn_bbox": 0.02109, "loss_cls": 0.13547, "acc": 95.46997, "loss_bbox": 0.15315, "loss": 0.32856, "time": 0.11149}
+ {"mode": "train", "epoch": 1, "iter": 2650, "lr": 0.03, "memory": 3995, "data_time": 0.00586, "loss_rpn_cls": 0.01967, "loss_rpn_bbox": 0.02079, "loss_cls": 0.14561, "acc": 95.08887, "loss_bbox": 0.15858, "loss": 0.34464, "time": 0.11136}
+ {"mode": "train", "epoch": 1, "iter": 2700, "lr": 0.03, "memory": 3995, "data_time": 0.00583, "loss_rpn_cls": 0.01698, "loss_rpn_bbox": 0.02067, "loss_cls": 0.13336, "acc": 95.5565, "loss_bbox": 0.14898, "loss": 0.31999, "time": 0.1122}
+ {"mode": "train", "epoch": 1, "iter": 2750, "lr": 0.03, "memory": 3995, "data_time": 0.0059, "loss_rpn_cls": 0.0192, "loss_rpn_bbox": 0.01979, "loss_cls": 0.13104, "acc": 95.75586, "loss_bbox": 0.13352, "loss": 0.30356, "time": 0.11293}
+ {"mode": "train", "epoch": 1, "iter": 2800, "lr": 0.03, "memory": 3995, "data_time": 0.00607, "loss_rpn_cls": 0.01678, "loss_rpn_bbox": 0.02024, "loss_cls": 0.13716, "acc": 95.48662, "loss_bbox": 0.14714, "loss": 0.32131, "time": 0.11507}
+ {"mode": "train", "epoch": 1, "iter": 2850, "lr": 0.03, "memory": 3995, "data_time": 0.00595, "loss_rpn_cls": 0.01666, "loss_rpn_bbox": 0.02099, "loss_cls": 0.13786, "acc": 95.4589, "loss_bbox": 0.14836, "loss": 0.32387, "time": 0.11199}
+ {"mode": "train", "epoch": 1, "iter": 2900, "lr": 0.03, "memory": 3995, "data_time": 0.00581, "loss_rpn_cls": 0.0188, "loss_rpn_bbox": 0.02122, "loss_cls": 0.13358, "acc": 95.51888, "loss_bbox": 0.15122, "loss": 0.32482, "time": 0.11167}
+ {"mode": "train", "epoch": 1, "iter": 2950, "lr": 0.03, "memory": 3995, "data_time": 0.00573, "loss_rpn_cls": 0.01875, "loss_rpn_bbox": 0.02166, "loss_cls": 0.13258, "acc": 95.51099, "loss_bbox": 0.15158, "loss": 0.32456, "time": 0.11163}
+ {"mode": "train", "epoch": 1, "iter": 3000, "lr": 0.03, "memory": 3995, "data_time": 0.0059, "loss_rpn_cls": 0.01689, "loss_rpn_bbox": 0.02006, "loss_cls": 0.13288, "acc": 95.58953, "loss_bbox": 0.14761, "loss": 0.31744, "time": 0.11247}
+ {"mode": "train", "epoch": 1, "iter": 3050, "lr": 0.03, "memory": 3995, "data_time": 0.00594, "loss_rpn_cls": 0.01656, "loss_rpn_bbox": 0.02146, "loss_cls": 0.14181, "acc": 95.24055, "loss_bbox": 0.15667, "loss": 0.33651, "time": 0.11175}
+ {"mode": "train", "epoch": 1, "iter": 3100, "lr": 0.03, "memory": 3995, "data_time": 0.00569, "loss_rpn_cls": 0.01451, "loss_rpn_bbox": 0.01844, "loss_cls": 0.1265, "acc": 95.6736, "loss_bbox": 0.14161, "loss": 0.30105, "time": 0.11376}
+ {"mode": "train", "epoch": 1, "iter": 3150, "lr": 0.03, "memory": 3995, "data_time": 0.00577, "loss_rpn_cls": 0.0153, "loss_rpn_bbox": 0.02112, "loss_cls": 0.12232, "acc": 95.82123, "loss_bbox": 0.14632, "loss": 0.30506, "time": 0.11255}
+ {"mode": "train", "epoch": 1, "iter": 3200, "lr": 0.03, "memory": 3995, "data_time": 0.00589, "loss_rpn_cls": 0.0158, "loss_rpn_bbox": 0.01947, "loss_cls": 0.13158, "acc": 95.5488, "loss_bbox": 0.1518, "loss": 0.31865, "time": 0.11265}
+ {"mode": "train", "epoch": 1, "iter": 3250, "lr": 0.03, "memory": 3995, "data_time": 0.0059, "loss_rpn_cls": 0.01715, "loss_rpn_bbox": 0.02001, "loss_cls": 0.13336, "acc": 95.46193, "loss_bbox": 0.14775, "loss": 0.31828, "time": 0.11518}
+ {"mode": "train", "epoch": 1, "iter": 3300, "lr": 0.03, "memory": 3995, "data_time": 0.00591, "loss_rpn_cls": 0.01779, "loss_rpn_bbox": 0.02071, "loss_cls": 0.14669, "acc": 95.10522, "loss_bbox": 0.15644, "loss": 0.34164, "time": 0.11122}
+ {"mode": "train", "epoch": 1, "iter": 3350, "lr": 0.03, "memory": 3995, "data_time": 0.00583, "loss_rpn_cls": 0.01714, "loss_rpn_bbox": 0.0215, "loss_cls": 0.1389, "acc": 95.42118, "loss_bbox": 0.15361, "loss": 0.33115, "time": 0.11296}
+ {"mode": "train", "epoch": 1, "iter": 3400, "lr": 0.03, "memory": 3995, "data_time": 0.00586, "loss_rpn_cls": 0.01734, "loss_rpn_bbox": 0.02038, "loss_cls": 0.13277, "acc": 95.51876, "loss_bbox": 0.15011, "loss": 0.32061, "time": 0.11062}
+ {"mode": "train", "epoch": 1, "iter": 3450, "lr": 0.03, "memory": 3995, "data_time": 0.00589, "loss_rpn_cls": 0.01602, "loss_rpn_bbox": 0.01807, "loss_cls": 0.13039, "acc": 95.6438, "loss_bbox": 0.14447, "loss": 0.30895, "time": 0.11282}
+ {"mode": "train", "epoch": 1, "iter": 3500, "lr": 0.03, "memory": 3995, "data_time": 0.00582, "loss_rpn_cls": 0.01734, "loss_rpn_bbox": 0.01959, "loss_cls": 0.12871, "acc": 95.61638, "loss_bbox": 0.14639, "loss": 0.31202, "time": 0.11425}
+ {"mode": "train", "epoch": 1, "iter": 3550, "lr": 0.03, "memory": 3995, "data_time": 0.00586, "loss_rpn_cls": 0.01554, "loss_rpn_bbox": 0.01853, "loss_cls": 0.12347, "acc": 95.9135, "loss_bbox": 0.13484, "loss": 0.29239, "time": 0.112}
+ {"mode": "train", "epoch": 1, "iter": 3600, "lr": 0.03, "memory": 3995, "data_time": 0.00592, "loss_rpn_cls": 0.01497, "loss_rpn_bbox": 0.01877, "loss_cls": 0.12396, "acc": 95.76997, "loss_bbox": 0.1442, "loss": 0.3019, "time": 0.11181}
+ {"mode": "train", "epoch": 1, "iter": 3650, "lr": 0.03, "memory": 3995, "data_time": 0.00574, "loss_rpn_cls": 0.01611, "loss_rpn_bbox": 0.01965, "loss_cls": 0.1312, "acc": 95.55712, "loss_bbox": 0.14665, "loss": 0.3136, "time": 0.11111}
+ {"mode": "train", "epoch": 1, "iter": 3700, "lr": 0.03, "memory": 3995, "data_time": 0.00614, "loss_rpn_cls": 0.0147, "loss_rpn_bbox": 0.01981, "loss_cls": 0.13301, "acc": 95.42896, "loss_bbox": 0.15353, "loss": 0.32106, "time": 0.11316}
+ {"mode": "train", "epoch": 1, "iter": 3750, "lr": 0.03, "memory": 3995, "data_time": 0.00592, "loss_rpn_cls": 0.0153, "loss_rpn_bbox": 0.019, "loss_cls": 0.1274, "acc": 95.60289, "loss_bbox": 0.14549, "loss": 0.3072, "time": 0.11147}
+ {"mode": "train", "epoch": 1, "iter": 3800, "lr": 0.03, "memory": 3995, "data_time": 0.00618, "loss_rpn_cls": 0.01585, "loss_rpn_bbox": 0.02116, "loss_cls": 0.13253, "acc": 95.46723, "loss_bbox": 0.15026, "loss": 0.31981, "time": 0.11502}
+ {"mode": "train", "epoch": 1, "iter": 3850, "lr": 0.03, "memory": 3995, "data_time": 0.00661, "loss_rpn_cls": 0.0162, "loss_rpn_bbox": 0.02001, "loss_cls": 0.1276, "acc": 95.71683, "loss_bbox": 0.14368, "loss": 0.30749, "time": 0.1145}
+ {"mode": "train", "epoch": 1, "iter": 3900, "lr": 0.03, "memory": 3995, "data_time": 0.00629, "loss_rpn_cls": 0.01685, "loss_rpn_bbox": 0.02061, "loss_cls": 0.13027, "acc": 95.47534, "loss_bbox": 0.1519, "loss": 0.31962, "time": 0.11462}
+ {"mode": "train", "epoch": 1, "iter": 3950, "lr": 0.03, "memory": 3995, "data_time": 0.00586, "loss_rpn_cls": 0.01695, "loss_rpn_bbox": 0.02055, "loss_cls": 0.13005, "acc": 95.51831, "loss_bbox": 0.1515, "loss": 0.31904, "time": 0.11211}
+ {"mode": "train", "epoch": 1, "iter": 4000, "lr": 0.03, "memory": 3995, "data_time": 0.00584, "loss_rpn_cls": 0.0167, "loss_rpn_bbox": 0.0202, "loss_cls": 0.12733, "acc": 95.7033, "loss_bbox": 0.147, "loss": 0.31123, "time": 0.11247}
+ {"mode": "train", "epoch": 1, "iter": 4050, "lr": 0.03, "memory": 3995, "data_time": 0.00589, "loss_rpn_cls": 0.01509, "loss_rpn_bbox": 0.02133, "loss_cls": 0.1277, "acc": 95.6313, "loss_bbox": 0.14736, "loss": 0.31149, "time": 0.11125}
+ {"mode": "train", "epoch": 1, "iter": 4100, "lr": 0.03, "memory": 3995, "data_time": 0.00606, "loss_rpn_cls": 0.01527, "loss_rpn_bbox": 0.02054, "loss_cls": 0.13643, "acc": 95.37872, "loss_bbox": 0.15197, "loss": 0.32421, "time": 0.11279}
+ {"mode": "train", "epoch": 1, "iter": 4150, "lr": 0.03, "memory": 3995, "data_time": 0.00615, "loss_rpn_cls": 0.01663, "loss_rpn_bbox": 0.01969, "loss_cls": 0.11733, "acc": 95.92139, "loss_bbox": 0.13731, "loss": 0.29096, "time": 0.11452}
+ {"mode": "train", "epoch": 1, "iter": 4200, "lr": 0.03, "memory": 3995, "data_time": 0.00585, "loss_rpn_cls": 0.0134, "loss_rpn_bbox": 0.01886, "loss_cls": 0.11585, "acc": 96.07697, "loss_bbox": 0.13511, "loss": 0.28322, "time": 0.11103}
+ {"mode": "train", "epoch": 1, "iter": 4250, "lr": 0.03, "memory": 3995, "data_time": 0.00604, "loss_rpn_cls": 0.01408, "loss_rpn_bbox": 0.0194, "loss_cls": 0.11955, "acc": 95.7892, "loss_bbox": 0.14035, "loss": 0.29339, "time": 0.11347}
+ {"mode": "train", "epoch": 1, "iter": 4300, "lr": 0.03, "memory": 3995, "data_time": 0.00616, "loss_rpn_cls": 0.01399, "loss_rpn_bbox": 0.01942, "loss_cls": 0.12427, "acc": 95.68457, "loss_bbox": 0.14125, "loss": 0.29892, "time": 0.11339}
+ {"mode": "train", "epoch": 1, "iter": 4350, "lr": 0.03, "memory": 3995, "data_time": 0.00578, "loss_rpn_cls": 0.01491, "loss_rpn_bbox": 0.01865, "loss_cls": 0.11547, "acc": 95.93968, "loss_bbox": 0.13746, "loss": 0.28649, "time": 0.11049}
+ {"mode": "train", "epoch": 1, "iter": 4400, "lr": 0.03, "memory": 3995, "data_time": 0.00601, "loss_rpn_cls": 0.01435, "loss_rpn_bbox": 0.01974, "loss_cls": 0.12664, "acc": 95.68365, "loss_bbox": 0.13863, "loss": 0.29936, "time": 0.11255}
+ {"mode": "train", "epoch": 1, "iter": 4450, "lr": 0.03, "memory": 3995, "data_time": 0.00559, "loss_rpn_cls": 0.01318, "loss_rpn_bbox": 0.01758, "loss_cls": 0.11504, "acc": 96.06677, "loss_bbox": 0.13407, "loss": 0.27987, "time": 0.11334}
+ {"mode": "train", "epoch": 1, "iter": 4500, "lr": 0.03, "memory": 3995, "data_time": 0.00595, "loss_rpn_cls": 0.01342, "loss_rpn_bbox": 0.01907, "loss_cls": 0.12267, "acc": 95.80985, "loss_bbox": 0.14088, "loss": 0.29604, "time": 0.11256}
+ {"mode": "train", "epoch": 1, "iter": 4550, "lr": 0.03, "memory": 3995, "data_time": 0.0059, "loss_rpn_cls": 0.01318, "loss_rpn_bbox": 0.01897, "loss_cls": 0.1186, "acc": 95.795, "loss_bbox": 0.14276, "loss": 0.29351, "time": 0.11085}
+ {"mode": "train", "epoch": 1, "iter": 4600, "lr": 0.03, "memory": 3995, "data_time": 0.00586, "loss_rpn_cls": 0.01495, "loss_rpn_bbox": 0.01961, "loss_cls": 0.12388, "acc": 95.78857, "loss_bbox": 0.14132, "loss": 0.29976, "time": 0.11333}
+ {"mode": "train", "epoch": 1, "iter": 4650, "lr": 0.03, "memory": 3995, "data_time": 0.00639, "loss_rpn_cls": 0.01471, "loss_rpn_bbox": 0.02033, "loss_cls": 0.12695, "acc": 95.57019, "loss_bbox": 0.14868, "loss": 0.31067, "time": 0.1126}
+ {"mode": "train", "epoch": 1, "iter": 4700, "lr": 0.03, "memory": 3995, "data_time": 0.00626, "loss_rpn_cls": 0.01545, "loss_rpn_bbox": 0.0211, "loss_cls": 0.12371, "acc": 95.68498, "loss_bbox": 0.14619, "loss": 0.30645, "time": 0.11288}
+ {"mode": "train", "epoch": 1, "iter": 4750, "lr": 0.03, "memory": 3995, "data_time": 0.0057, "loss_rpn_cls": 0.01529, "loss_rpn_bbox": 0.01975, "loss_cls": 0.12865, "acc": 95.60235, "loss_bbox": 0.14556, "loss": 0.30925, "time": 0.1107}
+ {"mode": "train", "epoch": 1, "iter": 4800, "lr": 0.03, "memory": 3995, "data_time": 0.00604, "loss_rpn_cls": 0.01454, "loss_rpn_bbox": 0.01915, "loss_cls": 0.12333, "acc": 95.6651, "loss_bbox": 0.14638, "loss": 0.3034, "time": 0.11266}
+ {"mode": "train", "epoch": 1, "iter": 4850, "lr": 0.03, "memory": 3995, "data_time": 0.00611, "loss_rpn_cls": 0.01387, "loss_rpn_bbox": 0.01822, "loss_cls": 0.12018, "acc": 95.92751, "loss_bbox": 0.13742, "loss": 0.28969, "time": 0.11255}
+ {"mode": "train", "epoch": 1, "iter": 4900, "lr": 0.03, "memory": 3995, "data_time": 0.00627, "loss_rpn_cls": 0.01509, "loss_rpn_bbox": 0.02014, "loss_cls": 0.12157, "acc": 95.72143, "loss_bbox": 0.14711, "loss": 0.3039, "time": 0.11619}
+ {"mode": "train", "epoch": 1, "iter": 4950, "lr": 0.03, "memory": 3995, "data_time": 0.00583, "loss_rpn_cls": 0.01509, "loss_rpn_bbox": 0.0203, "loss_cls": 0.12276, "acc": 95.71746, "loss_bbox": 0.14537, "loss": 0.30353, "time": 0.11454}
+ {"mode": "train", "epoch": 1, "iter": 5000, "lr": 0.03, "memory": 3995, "data_time": 0.0063, "loss_rpn_cls": 0.0147, "loss_rpn_bbox": 0.01998, "loss_cls": 0.12018, "acc": 95.89231, "loss_bbox": 0.13663, "loss": 0.29149, "time": 0.11137}
+ {"mode": "train", "epoch": 1, "iter": 5050, "lr": 0.03, "memory": 3995, "data_time": 0.00606, "loss_rpn_cls": 0.01672, "loss_rpn_bbox": 0.02209, "loss_cls": 0.13009, "acc": 95.40796, "loss_bbox": 0.15582, "loss": 0.32472, "time": 0.11408}
+ {"mode": "train", "epoch": 1, "iter": 5100, "lr": 0.03, "memory": 3995, "data_time": 0.00571, "loss_rpn_cls": 0.01506, "loss_rpn_bbox": 0.02068, "loss_cls": 0.12411, "acc": 95.68228, "loss_bbox": 0.14316, "loss": 0.30301, "time": 0.11427}
+ {"mode": "train", "epoch": 1, "iter": 5150, "lr": 0.03, "memory": 3995, "data_time": 0.00592, "loss_rpn_cls": 0.01549, "loss_rpn_bbox": 0.02017, "loss_cls": 0.12849, "acc": 95.49834, "loss_bbox": 0.14867, "loss": 0.31282, "time": 0.11298}
+ {"mode": "train", "epoch": 1, "iter": 5200, "lr": 0.03, "memory": 3995, "data_time": 0.00628, "loss_rpn_cls": 0.0136, "loss_rpn_bbox": 0.02032, "loss_cls": 0.12288, "acc": 95.65445, "loss_bbox": 0.14672, "loss": 0.30351, "time": 0.11305}
+ {"mode": "train", "epoch": 1, "iter": 5250, "lr": 0.03, "memory": 3995, "data_time": 0.00562, "loss_rpn_cls": 0.0137, "loss_rpn_bbox": 0.01951, "loss_cls": 0.11884, "acc": 95.77036, "loss_bbox": 0.14615, "loss": 0.2982, "time": 0.11126}
+ {"mode": "train", "epoch": 1, "iter": 5300, "lr": 0.03, "memory": 3995, "data_time": 0.00576, "loss_rpn_cls": 0.01479, "loss_rpn_bbox": 0.01865, "loss_cls": 0.1166, "acc": 95.88662, "loss_bbox": 0.1374, "loss": 0.28745, "time": 0.111}
+ {"mode": "train", "epoch": 1, "iter": 5350, "lr": 0.03, "memory": 3995, "data_time": 0.00574, "loss_rpn_cls": 0.01445, "loss_rpn_bbox": 0.02002, "loss_cls": 0.11387, "acc": 95.94926, "loss_bbox": 0.13753, "loss": 0.28587, "time": 0.11213}
+ {"mode": "train", "epoch": 1, "iter": 5400, "lr": 0.03, "memory": 3995, "data_time": 0.00595, "loss_rpn_cls": 0.0152, "loss_rpn_bbox": 0.01997, "loss_cls": 0.1153, "acc": 95.95431, "loss_bbox": 0.14423, "loss": 0.2947, "time": 0.1149}
+ {"mode": "train", "epoch": 1, "iter": 5450, "lr": 0.03, "memory": 3995, "data_time": 0.00589, "loss_rpn_cls": 0.0134, "loss_rpn_bbox": 0.01848, "loss_cls": 0.12329, "acc": 95.62272, "loss_bbox": 0.14627, "loss": 0.30145, "time": 0.11341}
+ {"mode": "train", "epoch": 1, "iter": 5500, "lr": 0.03, "memory": 3995, "data_time": 0.00573, "loss_rpn_cls": 0.01472, "loss_rpn_bbox": 0.02123, "loss_cls": 0.12226, "acc": 95.76346, "loss_bbox": 0.14509, "loss": 0.30331, "time": 0.11431}
+ {"mode": "train", "epoch": 1, "iter": 5550, "lr": 0.03, "memory": 3995, "data_time": 0.00561, "loss_rpn_cls": 0.0151, "loss_rpn_bbox": 0.01939, "loss_cls": 0.11866, "acc": 95.90704, "loss_bbox": 0.13869, "loss": 0.29184, "time": 0.11163}
+ {"mode": "train", "epoch": 1, "iter": 5600, "lr": 0.03, "memory": 3995, "data_time": 0.00601, "loss_rpn_cls": 0.01253, "loss_rpn_bbox": 0.01907, "loss_cls": 0.11391, "acc": 95.94889, "loss_bbox": 0.13402, "loss": 0.27953, "time": 0.11456}
+ {"mode": "train", "epoch": 1, "iter": 5650, "lr": 0.03, "memory": 3995, "data_time": 0.00584, "loss_rpn_cls": 0.01225, "loss_rpn_bbox": 0.01826, "loss_cls": 0.11009, "acc": 96.08372, "loss_bbox": 0.14075, "loss": 0.28135, "time": 0.11223}
+ {"mode": "train", "epoch": 1, "iter": 5700, "lr": 0.03, "memory": 3995, "data_time": 0.00616, "loss_rpn_cls": 0.01449, "loss_rpn_bbox": 0.01897, "loss_cls": 0.11826, "acc": 95.89343, "loss_bbox": 0.14034, "loss": 0.29206, "time": 0.11263}
+ {"mode": "train", "epoch": 1, "iter": 5750, "lr": 0.03, "memory": 3995, "data_time": 0.00579, "loss_rpn_cls": 0.016, "loss_rpn_bbox": 0.01967, "loss_cls": 0.12497, "acc": 95.65385, "loss_bbox": 0.14751, "loss": 0.30814, "time": 0.11398}
+ {"mode": "train", "epoch": 1, "iter": 5800, "lr": 0.03, "memory": 3995, "data_time": 0.00573, "loss_rpn_cls": 0.01318, "loss_rpn_bbox": 0.01837, "loss_cls": 0.11391, "acc": 95.87925, "loss_bbox": 0.14173, "loss": 0.28719, "time": 0.11359}
+ {"mode": "train", "epoch": 1, "iter": 5850, "lr": 0.03, "memory": 3995, "data_time": 0.00596, "loss_rpn_cls": 0.01302, "loss_rpn_bbox": 0.01798, "loss_cls": 0.11946, "acc": 95.91666, "loss_bbox": 0.137, "loss": 0.28746, "time": 0.11147}
+ {"mode": "train", "epoch": 1, "iter": 5900, "lr": 0.03, "memory": 3995, "data_time": 0.00587, "loss_rpn_cls": 0.013, "loss_rpn_bbox": 0.01902, "loss_cls": 0.11336, "acc": 96.01307, "loss_bbox": 0.13826, "loss": 0.28364, "time": 0.1148}
+ {"mode": "train", "epoch": 1, "iter": 5950, "lr": 0.03, "memory": 3995, "data_time": 0.00596, "loss_rpn_cls": 0.01329, "loss_rpn_bbox": 0.01967, "loss_cls": 0.11398, "acc": 95.88134, "loss_bbox": 0.13869, "loss": 0.28562, "time": 0.11265}
+ {"mode": "train", "epoch": 1, "iter": 6000, "lr": 0.03, "memory": 3995, "data_time": 0.00574, "loss_rpn_cls": 0.01211, "loss_rpn_bbox": 0.01905, "loss_cls": 0.11495, "acc": 95.93848, "loss_bbox": 0.13856, "loss": 0.28467, "time": 0.11194}
+ {"mode": "train", "epoch": 1, "iter": 6050, "lr": 0.03, "memory": 3995, "data_time": 0.00593, "loss_rpn_cls": 0.01495, "loss_rpn_bbox": 0.01934, "loss_cls": 0.11784, "acc": 95.89795, "loss_bbox": 0.14284, "loss": 0.29497, "time": 0.11304}
+ {"mode": "train", "epoch": 1, "iter": 6100, "lr": 0.03, "memory": 3995, "data_time": 0.0058, "loss_rpn_cls": 0.01217, "loss_rpn_bbox": 0.0192, "loss_cls": 0.11223, "acc": 96.10008, "loss_bbox": 0.13199, "loss": 0.2756, "time": 0.11505}
+ {"mode": "train", "epoch": 1, "iter": 6150, "lr": 0.03, "memory": 3995, "data_time": 0.00571, "loss_rpn_cls": 0.0123, "loss_rpn_bbox": 0.01969, "loss_cls": 0.11693, "acc": 95.85812, "loss_bbox": 0.14091, "loss": 0.28984, "time": 0.11214}
+ {"mode": "train", "epoch": 1, "iter": 6200, "lr": 0.03, "memory": 3995, "data_time": 0.0059, "loss_rpn_cls": 0.01208, "loss_rpn_bbox": 0.01952, "loss_cls": 0.11188, "acc": 96.01107, "loss_bbox": 0.13723, "loss": 0.2807, "time": 0.11155}
+ {"mode": "train", "epoch": 1, "iter": 6250, "lr": 0.03, "memory": 3995, "data_time": 0.00589, "loss_rpn_cls": 0.01204, "loss_rpn_bbox": 0.01896, "loss_cls": 0.11208, "acc": 96.07434, "loss_bbox": 0.1347, "loss": 0.27778, "time": 0.11198}
+ {"mode": "train", "epoch": 1, "iter": 6300, "lr": 0.03, "memory": 3995, "data_time": 0.00573, "loss_rpn_cls": 0.01187, "loss_rpn_bbox": 0.01767, "loss_cls": 0.10275, "acc": 96.30457, "loss_bbox": 0.12716, "loss": 0.25945, "time": 0.11175}
+ {"mode": "train", "epoch": 1, "iter": 6350, "lr": 0.03, "memory": 3995, "data_time": 0.00616, "loss_rpn_cls": 0.01301, "loss_rpn_bbox": 0.02005, "loss_cls": 0.11505, "acc": 95.91158, "loss_bbox": 0.13773, "loss": 0.28584, "time": 0.11397}
+ {"mode": "train", "epoch": 1, "iter": 6400, "lr": 0.03, "memory": 3995, "data_time": 0.00611, "loss_rpn_cls": 0.01203, "loss_rpn_bbox": 0.01743, "loss_cls": 0.10464, "acc": 96.30615, "loss_bbox": 0.12959, "loss": 0.26369, "time": 0.11351}
+ {"mode": "train", "epoch": 1, "iter": 6450, "lr": 0.03, "memory": 3995, "data_time": 0.00581, "loss_rpn_cls": 0.01319, "loss_rpn_bbox": 0.01853, "loss_cls": 0.11438, "acc": 95.87359, "loss_bbox": 0.14267, "loss": 0.28877, "time": 0.11291}
+ {"mode": "train", "epoch": 1, "iter": 6500, "lr": 0.03, "memory": 3995, "data_time": 0.00571, "loss_rpn_cls": 0.01356, "loss_rpn_bbox": 0.01919, "loss_cls": 0.12081, "acc": 95.71672, "loss_bbox": 0.14575, "loss": 0.29932, "time": 0.11221}
+ {"mode": "train", "epoch": 1, "iter": 6550, "lr": 0.03, "memory": 3995, "data_time": 0.00592, "loss_rpn_cls": 0.01302, "loss_rpn_bbox": 0.01942, "loss_cls": 0.11041, "acc": 96.08888, "loss_bbox": 0.13223, "loss": 0.27508, "time": 0.11367}
+ {"mode": "train", "epoch": 1, "iter": 6600, "lr": 0.03, "memory": 3995, "data_time": 0.00542, "loss_rpn_cls": 0.01253, "loss_rpn_bbox": 0.01934, "loss_cls": 0.10712, "acc": 96.20015, "loss_bbox": 0.13376, "loss": 0.27275, "time": 0.11006}
+ {"mode": "train", "epoch": 1, "iter": 6650, "lr": 0.03, "memory": 3995, "data_time": 0.00596, "loss_rpn_cls": 0.01392, "loss_rpn_bbox": 0.01864, "loss_cls": 0.11497, "acc": 95.92511, "loss_bbox": 0.13749, "loss": 0.28501, "time": 0.11243}
+ {"mode": "train", "epoch": 1, "iter": 6700, "lr": 0.03, "memory": 3995, "data_time": 0.0057, "loss_rpn_cls": 0.01379, "loss_rpn_bbox": 0.01927, "loss_cls": 0.106, "acc": 96.29372, "loss_bbox": 0.1269, "loss": 0.26596, "time": 0.11327}
+ {"mode": "train", "epoch": 1, "iter": 6750, "lr": 0.03, "memory": 3995, "data_time": 0.00594, "loss_rpn_cls": 0.01477, "loss_rpn_bbox": 0.0208, "loss_cls": 0.11846, "acc": 95.85744, "loss_bbox": 0.14275, "loss": 0.29679, "time": 0.11283}
+ {"mode": "train", "epoch": 1, "iter": 6800, "lr": 0.03, "memory": 3995, "data_time": 0.00594, "loss_rpn_cls": 0.0122, "loss_rpn_bbox": 0.01805, "loss_cls": 0.1064, "acc": 96.19126, "loss_bbox": 0.13343, "loss": 0.27008, "time": 0.11434}
+ {"mode": "train", "epoch": 1, "iter": 6850, "lr": 0.03, "memory": 3995, "data_time": 0.00586, "loss_rpn_cls": 0.01227, "loss_rpn_bbox": 0.01858, "loss_cls": 0.11187, "acc": 96.04183, "loss_bbox": 0.13384, "loss": 0.27656, "time": 0.11404}
+ {"mode": "train", "epoch": 1, "iter": 6900, "lr": 0.03, "memory": 3995, "data_time": 0.006, "loss_rpn_cls": 0.01201, "loss_rpn_bbox": 0.01965, "loss_cls": 0.11556, "acc": 95.82464, "loss_bbox": 0.14822, "loss": 0.29544, "time": 0.11304}
+ {"mode": "train", "epoch": 1, "iter": 6950, "lr": 0.03, "memory": 3995, "data_time": 0.00588, "loss_rpn_cls": 0.0129, "loss_rpn_bbox": 0.01957, "loss_cls": 0.11387, "acc": 95.97333, "loss_bbox": 0.13807, "loss": 0.28442, "time": 0.11233}
+ {"mode": "train", "epoch": 1, "iter": 7000, "lr": 0.03, "memory": 3995, "data_time": 0.00606, "loss_rpn_cls": 0.01314, "loss_rpn_bbox": 0.01983, "loss_cls": 0.11578, "acc": 95.94185, "loss_bbox": 0.13706, "loss": 0.28581, "time": 0.11488}
+ {"mode": "train", "epoch": 1, "iter": 7050, "lr": 0.03, "memory": 3995, "data_time": 0.00616, "loss_rpn_cls": 0.01196, "loss_rpn_bbox": 0.01717, "loss_cls": 0.11732, "acc": 95.8434, "loss_bbox": 0.13927, "loss": 0.28572, "time": 0.11411}
+ {"mode": "train", "epoch": 1, "iter": 7100, "lr": 0.03, "memory": 3995, "data_time": 0.00588, "loss_rpn_cls": 0.01257, "loss_rpn_bbox": 0.0173, "loss_cls": 0.10936, "acc": 96.15309, "loss_bbox": 0.13275, "loss": 0.27197, "time": 0.11223}
+ {"mode": "train", "epoch": 1, "iter": 7150, "lr": 0.03, "memory": 3995, "data_time": 0.00568, "loss_rpn_cls": 0.0127, "loss_rpn_bbox": 0.01972, "loss_cls": 0.11639, "acc": 95.92398, "loss_bbox": 0.13866, "loss": 0.28748, "time": 0.11339}
+ {"mode": "train", "epoch": 1, "iter": 7200, "lr": 0.03, "memory": 3995, "data_time": 0.00604, "loss_rpn_cls": 0.01227, "loss_rpn_bbox": 0.01816, "loss_cls": 0.11266, "acc": 96.01514, "loss_bbox": 0.13595, "loss": 0.27904, "time": 0.11231}
+ {"mode": "train", "epoch": 1, "iter": 7250, "lr": 0.03, "memory": 3995, "data_time": 0.00582, "loss_rpn_cls": 0.01202, "loss_rpn_bbox": 0.01811, "loss_cls": 0.11436, "acc": 95.90405, "loss_bbox": 0.13996, "loss": 0.28446, "time": 0.10941}
+ {"mode": "train", "epoch": 1, "iter": 7300, "lr": 0.03, "memory": 3995, "data_time": 0.00609, "loss_rpn_cls": 0.01183, "loss_rpn_bbox": 0.01804, "loss_cls": 0.11021, "acc": 96.00136, "loss_bbox": 0.14049, "loss": 0.28056, "time": 0.11244}
+ {"mode": "train", "epoch": 1, "iter": 7350, "lr": 0.03, "memory": 3995, "data_time": 0.00562, "loss_rpn_cls": 0.01185, "loss_rpn_bbox": 0.01901, "loss_cls": 0.10815, "acc": 96.12046, "loss_bbox": 0.13334, "loss": 0.27236, "time": 0.11322}
+ {"mode": "train", "epoch": 1, "iter": 7400, "lr": 0.03, "memory": 3995, "data_time": 0.00572, "loss_rpn_cls": 0.01184, "loss_rpn_bbox": 0.01896, "loss_cls": 0.10858, "acc": 96.01173, "loss_bbox": 0.13723, "loss": 0.27662, "time": 0.11281}
+ {"mode": "train", "epoch": 1, "iter": 7450, "lr": 0.03, "memory": 3995, "data_time": 0.0059, "loss_rpn_cls": 0.01434, "loss_rpn_bbox": 0.01916, "loss_cls": 0.10515, "acc": 96.27126, "loss_bbox": 0.12992, "loss": 0.26858, "time": 0.11425}
+ {"mode": "train", "epoch": 1, "iter": 7500, "lr": 0.03, "memory": 3995, "data_time": 0.006, "loss_rpn_cls": 0.01221, "loss_rpn_bbox": 0.01845, "loss_cls": 0.10933, "acc": 96.16033, "loss_bbox": 0.13132, "loss": 0.27132, "time": 0.11419}
+ {"mode": "train", "epoch": 1, "iter": 7550, "lr": 0.03, "memory": 3995, "data_time": 0.00602, "loss_rpn_cls": 0.01175, "loss_rpn_bbox": 0.01805, "loss_cls": 0.10322, "acc": 96.27255, "loss_bbox": 0.1302, "loss": 0.26322, "time": 0.11239}
+ {"mode": "train", "epoch": 1, "iter": 7600, "lr": 0.03, "memory": 3995, "data_time": 0.00586, "loss_rpn_cls": 0.01103, "loss_rpn_bbox": 0.01846, "loss_cls": 0.1059, "acc": 96.17031, "loss_bbox": 0.13127, "loss": 0.26667, "time": 0.1118}
+ {"mode": "train", "epoch": 1, "iter": 7650, "lr": 0.03, "memory": 3995, "data_time": 0.0063, "loss_rpn_cls": 0.01223, "loss_rpn_bbox": 0.01823, "loss_cls": 0.10785, "acc": 96.11645, "loss_bbox": 0.13455, "loss": 0.27285, "time": 0.11043}
+ {"mode": "train", "epoch": 1, "iter": 7700, "lr": 0.03, "memory": 3995, "data_time": 0.0058, "loss_rpn_cls": 0.01272, "loss_rpn_bbox": 0.01673, "loss_cls": 0.10477, "acc": 96.22598, "loss_bbox": 0.12399, "loss": 0.25822, "time": 0.11208}
+ {"mode": "train", "epoch": 1, "iter": 7750, "lr": 0.03, "memory": 3995, "data_time": 0.00618, "loss_rpn_cls": 0.01222, "loss_rpn_bbox": 0.01832, "loss_cls": 0.10734, "acc": 96.08925, "loss_bbox": 0.13804, "loss": 0.27593, "time": 0.11258}
+ {"mode": "train", "epoch": 1, "iter": 7800, "lr": 0.03, "memory": 3995, "data_time": 0.0059, "loss_rpn_cls": 0.01067, "loss_rpn_bbox": 0.01746, "loss_cls": 0.10213, "acc": 96.30039, "loss_bbox": 0.12825, "loss": 0.25851, "time": 0.11212}
+ {"mode": "train", "epoch": 1, "iter": 7850, "lr": 0.03, "memory": 3995, "data_time": 0.00616, "loss_rpn_cls": 0.01214, "loss_rpn_bbox": 0.0204, "loss_cls": 0.1057, "acc": 96.11499, "loss_bbox": 0.13419, "loss": 0.27243, "time": 0.11211}
+ {"mode": "train", "epoch": 1, "iter": 7900, "lr": 0.03, "memory": 3995, "data_time": 0.00579, "loss_rpn_cls": 0.01377, "loss_rpn_bbox": 0.0168, "loss_cls": 0.10892, "acc": 96.14355, "loss_bbox": 0.13155, "loss": 0.27105, "time": 0.11114}
+ {"mode": "train", "epoch": 1, "iter": 7950, "lr": 0.03, "memory": 3995, "data_time": 0.00605, "loss_rpn_cls": 0.01165, "loss_rpn_bbox": 0.01799, "loss_cls": 0.1095, "acc": 96.08301, "loss_bbox": 0.13066, "loss": 0.26981, "time": 0.11397}
+ {"mode": "train", "epoch": 1, "iter": 8000, "lr": 0.03, "memory": 3995, "data_time": 0.00598, "loss_rpn_cls": 0.01149, "loss_rpn_bbox": 0.01904, "loss_cls": 0.10709, "acc": 96.07349, "loss_bbox": 0.13479, "loss": 0.27242, "time": 0.11201}
+ {"mode": "train", "epoch": 1, "iter": 8050, "lr": 0.03, "memory": 3995, "data_time": 0.00601, "loss_rpn_cls": 0.01289, "loss_rpn_bbox": 0.02151, "loss_cls": 0.11397, "acc": 95.89264, "loss_bbox": 0.13908, "loss": 0.28745, "time": 0.11324}
+ {"mode": "train", "epoch": 1, "iter": 8100, "lr": 0.03, "memory": 3995, "data_time": 0.0059, "loss_rpn_cls": 0.01272, "loss_rpn_bbox": 0.01834, "loss_cls": 0.10714, "acc": 96.17448, "loss_bbox": 0.13317, "loss": 0.27137, "time": 0.11302}
+ {"mode": "train", "epoch": 1, "iter": 8150, "lr": 0.03, "memory": 3995, "data_time": 0.00603, "loss_rpn_cls": 0.01217, "loss_rpn_bbox": 0.01947, "loss_cls": 0.10914, "acc": 96.14951, "loss_bbox": 0.1303, "loss": 0.27108, "time": 0.1148}
+ {"mode": "train", "epoch": 1, "iter": 8200, "lr": 0.03, "memory": 3995, "data_time": 0.00558, "loss_rpn_cls": 0.01245, "loss_rpn_bbox": 0.01853, "loss_cls": 0.10688, "acc": 96.13852, "loss_bbox": 0.13402, "loss": 0.27188, "time": 0.11182}
+ {"mode": "train", "epoch": 1, "iter": 8250, "lr": 0.03, "memory": 3995, "data_time": 0.00576, "loss_rpn_cls": 0.01371, "loss_rpn_bbox": 0.01865, "loss_cls": 0.11947, "acc": 95.84909, "loss_bbox": 0.14203, "loss": 0.29386, "time": 0.1115}
+ {"mode": "train", "epoch": 1, "iter": 8300, "lr": 0.03, "memory": 3995, "data_time": 0.00631, "loss_rpn_cls": 0.01238, "loss_rpn_bbox": 0.0199, "loss_cls": 0.10934, "acc": 96.04741, "loss_bbox": 0.13768, "loss": 0.27929, "time": 0.11339}
+ {"mode": "train", "epoch": 1, "iter": 8350, "lr": 0.03, "memory": 3995, "data_time": 0.00597, "loss_rpn_cls": 0.01141, "loss_rpn_bbox": 0.01706, "loss_cls": 0.09907, "acc": 96.40811, "loss_bbox": 0.12185, "loss": 0.24938, "time": 0.1154}
+ {"mode": "train", "epoch": 1, "iter": 8400, "lr": 0.03, "memory": 3995, "data_time": 0.00608, "loss_rpn_cls": 0.01143, "loss_rpn_bbox": 0.01834, "loss_cls": 0.1031, "acc": 96.26259, "loss_bbox": 0.12995, "loss": 0.26282, "time": 0.11332}
+ {"mode": "train", "epoch": 1, "iter": 8450, "lr": 0.03, "memory": 3995, "data_time": 0.00612, "loss_rpn_cls": 0.0108, "loss_rpn_bbox": 0.01958, "loss_cls": 0.10462, "acc": 96.21404, "loss_bbox": 0.13354, "loss": 0.26854, "time": 0.11148}
+ {"mode": "train", "epoch": 1, "iter": 8500, "lr": 0.03, "memory": 3995, "data_time": 0.00605, "loss_rpn_cls": 0.01162, "loss_rpn_bbox": 0.0184, "loss_cls": 0.10976, "acc": 95.94873, "loss_bbox": 0.13856, "loss": 0.27835, "time": 0.11322}
+ {"mode": "train", "epoch": 1, "iter": 8550, "lr": 0.03, "memory": 3995, "data_time": 0.00583, "loss_rpn_cls": 0.01082, "loss_rpn_bbox": 0.01811, "loss_cls": 0.1046, "acc": 96.24151, "loss_bbox": 0.12974, "loss": 0.26327, "time": 0.11185}
+ {"mode": "train", "epoch": 1, "iter": 8600, "lr": 0.03, "memory": 3995, "data_time": 0.0059, "loss_rpn_cls": 0.01083, "loss_rpn_bbox": 0.01819, "loss_cls": 0.1029, "acc": 96.27243, "loss_bbox": 0.12992, "loss": 0.26185, "time": 0.11198}
+ {"mode": "train", "epoch": 1, "iter": 8650, "lr": 0.03, "memory": 3995, "data_time": 0.00618, "loss_rpn_cls": 0.01051, "loss_rpn_bbox": 0.01671, "loss_cls": 0.10008, "acc": 96.3317, "loss_bbox": 0.12492, "loss": 0.25222, "time": 0.11481}
+ {"mode": "train", "epoch": 1, "iter": 8700, "lr": 0.03, "memory": 3995, "data_time": 0.00592, "loss_rpn_cls": 0.01385, "loss_rpn_bbox": 0.02009, "loss_cls": 0.10871, "acc": 96.14798, "loss_bbox": 0.13146, "loss": 0.27411, "time": 0.11231}
+ {"mode": "train", "epoch": 1, "iter": 8750, "lr": 0.03, "memory": 3995, "data_time": 0.00578, "loss_rpn_cls": 0.01349, "loss_rpn_bbox": 0.01906, "loss_cls": 0.11118, "acc": 96.13435, "loss_bbox": 0.13629, "loss": 0.28003, "time": 0.11199}
+ {"mode": "train", "epoch": 1, "iter": 8800, "lr": 0.03, "memory": 3995, "data_time": 0.0062, "loss_rpn_cls": 0.01106, "loss_rpn_bbox": 0.01762, "loss_cls": 0.10428, "acc": 96.24785, "loss_bbox": 0.13087, "loss": 0.26383, "time": 0.113}
+ {"mode": "train", "epoch": 1, "iter": 8850, "lr": 0.03, "memory": 3995, "data_time": 0.00664, "loss_rpn_cls": 0.01105, "loss_rpn_bbox": 0.01747, "loss_cls": 0.10203, "acc": 96.37115, "loss_bbox": 0.12801, "loss": 0.25856, "time": 0.11447}
+ {"mode": "train", "epoch": 1, "iter": 8900, "lr": 0.03, "memory": 3995, "data_time": 0.00602, "loss_rpn_cls": 0.00977, "loss_rpn_bbox": 0.01723, "loss_cls": 0.10355, "acc": 96.26266, "loss_bbox": 0.12724, "loss": 0.25779, "time": 0.11419}
+ {"mode": "train", "epoch": 1, "iter": 8950, "lr": 0.03, "memory": 3995, "data_time": 0.00557, "loss_rpn_cls": 0.01388, "loss_rpn_bbox": 0.01818, "loss_cls": 0.10997, "acc": 96.08882, "loss_bbox": 0.13225, "loss": 0.27428, "time": 0.11252}
+ {"mode": "train", "epoch": 1, "iter": 9000, "lr": 0.03, "memory": 3995, "data_time": 0.00582, "loss_rpn_cls": 0.01254, "loss_rpn_bbox": 0.01872, "loss_cls": 0.11267, "acc": 95.96868, "loss_bbox": 0.13527, "loss": 0.2792, "time": 0.11271}
+ {"mode": "train", "epoch": 1, "iter": 9050, "lr": 0.003, "memory": 3995, "data_time": 0.00611, "loss_rpn_cls": 0.01013, "loss_rpn_bbox": 0.01747, "loss_cls": 0.10022, "acc": 96.29622, "loss_bbox": 0.13109, "loss": 0.2589, "time": 0.11175}
+ {"mode": "train", "epoch": 1, "iter": 9100, "lr": 0.003, "memory": 3995, "data_time": 0.00582, "loss_rpn_cls": 0.00957, "loss_rpn_bbox": 0.01772, "loss_cls": 0.09508, "acc": 96.53051, "loss_bbox": 0.12571, "loss": 0.24808, "time": 0.11243}
+ {"mode": "train", "epoch": 1, "iter": 9150, "lr": 0.003, "memory": 3995, "data_time": 0.00638, "loss_rpn_cls": 0.01038, "loss_rpn_bbox": 0.01719, "loss_cls": 0.09175, "acc": 96.65638, "loss_bbox": 0.12077, "loss": 0.24009, "time": 0.11431}
+ {"mode": "train", "epoch": 1, "iter": 9200, "lr": 0.003, "memory": 3995, "data_time": 0.00585, "loss_rpn_cls": 0.00968, "loss_rpn_bbox": 0.0165, "loss_cls": 0.09303, "acc": 96.57205, "loss_bbox": 0.12519, "loss": 0.2444, "time": 0.11365}
+ {"mode": "train", "epoch": 1, "iter": 9250, "lr": 0.003, "memory": 3995, "data_time": 0.00617, "loss_rpn_cls": 0.0092, "loss_rpn_bbox": 0.01691, "loss_cls": 0.09337, "acc": 96.51763, "loss_bbox": 0.12768, "loss": 0.24715, "time": 0.1118}
+ {"mode": "train", "epoch": 1, "iter": 9300, "lr": 0.003, "memory": 3995, "data_time": 0.00607, "loss_rpn_cls": 0.00918, "loss_rpn_bbox": 0.01539, "loss_cls": 0.09126, "acc": 96.6136, "loss_bbox": 0.12141, "loss": 0.23723, "time": 0.11456}
+ {"mode": "train", "epoch": 1, "iter": 9350, "lr": 0.003, "memory": 3995, "data_time": 0.00585, "loss_rpn_cls": 0.00846, "loss_rpn_bbox": 0.01461, "loss_cls": 0.08149, "acc": 96.97933, "loss_bbox": 0.11291, "loss": 0.21747, "time": 0.11069}
+ {"mode": "train", "epoch": 1, "iter": 9400, "lr": 0.003, "memory": 3995, "data_time": 0.00612, "loss_rpn_cls": 0.00791, "loss_rpn_bbox": 0.01619, "loss_cls": 0.08253, "acc": 96.87909, "loss_bbox": 0.11883, "loss": 0.22545, "time": 0.11443}
+ {"mode": "train", "epoch": 1, "iter": 9450, "lr": 0.003, "memory": 3995, "data_time": 0.0059, "loss_rpn_cls": 0.00972, "loss_rpn_bbox": 0.01666, "loss_cls": 0.08279, "acc": 96.79284, "loss_bbox": 0.11762, "loss": 0.22679, "time": 0.11498}
+ {"mode": "train", "epoch": 1, "iter": 9500, "lr": 0.003, "memory": 3995, "data_time": 0.0057, "loss_rpn_cls": 0.0082, "loss_rpn_bbox": 0.01542, "loss_cls": 0.08434, "acc": 96.77832, "loss_bbox": 0.11894, "loss": 0.22689, "time": 0.11294}
+ {"mode": "train", "epoch": 1, "iter": 9550, "lr": 0.003, "memory": 3995, "data_time": 0.00604, "loss_rpn_cls": 0.0079, "loss_rpn_bbox": 0.01649, "loss_cls": 0.08003, "acc": 96.90973, "loss_bbox": 0.11167, "loss": 0.2161, "time": 0.11396}
+ {"mode": "train", "epoch": 1, "iter": 9600, "lr": 0.003, "memory": 3995, "data_time": 0.00593, "loss_rpn_cls": 0.00848, "loss_rpn_bbox": 0.01733, "loss_cls": 0.08126, "acc": 96.92579, "loss_bbox": 0.11798, "loss": 0.22506, "time": 0.11227}
+ {"mode": "train", "epoch": 1, "iter": 9650, "lr": 0.003, "memory": 3995, "data_time": 0.006, "loss_rpn_cls": 0.00781, "loss_rpn_bbox": 0.01587, "loss_cls": 0.07796, "acc": 96.92668, "loss_bbox": 0.11531, "loss": 0.21696, "time": 0.11071}
+ {"mode": "train", "epoch": 1, "iter": 9700, "lr": 0.003, "memory": 3995, "data_time": 0.00612, "loss_rpn_cls": 0.00812, "loss_rpn_bbox": 0.01638, "loss_cls": 0.08296, "acc": 96.84869, "loss_bbox": 0.11972, "loss": 0.22718, "time": 0.11142}
+ {"mode": "train", "epoch": 1, "iter": 9750, "lr": 0.003, "memory": 3995, "data_time": 0.00598, "loss_rpn_cls": 0.00759, "loss_rpn_bbox": 0.01685, "loss_cls": 0.07658, "acc": 97.06909, "loss_bbox": 0.10464, "loss": 0.20565, "time": 0.11315}
+ {"mode": "train", "epoch": 1, "iter": 9800, "lr": 0.003, "memory": 3995, "data_time": 0.00596, "loss_rpn_cls": 0.00859, "loss_rpn_bbox": 0.01593, "loss_cls": 0.07961, "acc": 96.9885, "loss_bbox": 0.11143, "loss": 0.21557, "time": 0.11344}
+ {"mode": "train", "epoch": 1, "iter": 9850, "lr": 0.003, "memory": 3995, "data_time": 0.00586, "loss_rpn_cls": 0.00709, "loss_rpn_bbox": 0.01595, "loss_cls": 0.08076, "acc": 96.90101, "loss_bbox": 0.11537, "loss": 0.21916, "time": 0.11385}
+ {"mode": "train", "epoch": 1, "iter": 9900, "lr": 0.003, "memory": 3995, "data_time": 0.0059, "loss_rpn_cls": 0.00824, "loss_rpn_bbox": 0.01686, "loss_cls": 0.08498, "acc": 96.71205, "loss_bbox": 0.12216, "loss": 0.23224, "time": 0.11409}
+ {"mode": "train", "epoch": 1, "iter": 9950, "lr": 0.003, "memory": 3995, "data_time": 0.00586, "loss_rpn_cls": 0.00821, "loss_rpn_bbox": 0.01657, "loss_cls": 0.08293, "acc": 96.82558, "loss_bbox": 0.11816, "loss": 0.22587, "time": 0.11233}
+ {"mode": "train", "epoch": 1, "iter": 10000, "lr": 0.003, "memory": 3995, "data_time": 0.00588, "loss_rpn_cls": 0.00855, "loss_rpn_bbox": 0.01668, "loss_cls": 0.08424, "acc": 96.81545, "loss_bbox": 0.117, "loss": 0.22648, "time": 0.1114}
+ {"mode": "train", "epoch": 1, "iter": 10050, "lr": 0.003, "memory": 3995, "data_time": 0.00586, "loss_rpn_cls": 0.00884, "loss_rpn_bbox": 0.01745, "loss_cls": 0.0804, "acc": 96.94595, "loss_bbox": 0.11102, "loss": 0.21771, "time": 0.11237}
+ {"mode": "train", "epoch": 1, "iter": 10100, "lr": 0.003, "memory": 3995, "data_time": 0.00592, "loss_rpn_cls": 0.0087, "loss_rpn_bbox": 0.01666, "loss_cls": 0.0832, "acc": 96.84756, "loss_bbox": 0.11982, "loss": 0.22839, "time": 0.11162}
+ {"mode": "train", "epoch": 1, "iter": 10150, "lr": 0.003, "memory": 3995, "data_time": 0.00592, "loss_rpn_cls": 0.00766, "loss_rpn_bbox": 0.01596, "loss_cls": 0.07906, "acc": 96.98943, "loss_bbox": 0.11391, "loss": 0.21659, "time": 0.11506}
+ {"mode": "train", "epoch": 1, "iter": 10200, "lr": 0.003, "memory": 3995, "data_time": 0.0061, "loss_rpn_cls": 0.00852, "loss_rpn_bbox": 0.01685, "loss_cls": 0.07827, "acc": 96.94886, "loss_bbox": 0.11137, "loss": 0.21502, "time": 0.11282}
+ {"mode": "train", "epoch": 1, "iter": 10250, "lr": 0.003, "memory": 3995, "data_time": 0.00642, "loss_rpn_cls": 0.00871, "loss_rpn_bbox": 0.01675, "loss_cls": 0.07876, "acc": 96.99846, "loss_bbox": 0.11244, "loss": 0.21667, "time": 0.11557}
+ {"mode": "train", "epoch": 1, "iter": 10300, "lr": 0.003, "memory": 3995, "data_time": 0.00584, "loss_rpn_cls": 0.0082, "loss_rpn_bbox": 0.01644, "loss_cls": 0.08083, "acc": 97.00887, "loss_bbox": 0.11433, "loss": 0.2198, "time": 0.11137}
+ {"mode": "train", "epoch": 1, "iter": 10350, "lr": 0.003, "memory": 3995, "data_time": 0.00587, "loss_rpn_cls": 0.00825, "loss_rpn_bbox": 0.01613, "loss_cls": 0.08238, "acc": 96.80806, "loss_bbox": 0.11689, "loss": 0.22365, "time": 0.11373}
+ {"mode": "train", "epoch": 1, "iter": 10400, "lr": 0.003, "memory": 3995, "data_time": 0.00592, "loss_rpn_cls": 0.0091, "loss_rpn_bbox": 0.01821, "loss_cls": 0.07943, "acc": 96.92927, "loss_bbox": 0.11578, "loss": 0.22252, "time": 0.1123}
+ {"mode": "train", "epoch": 1, "iter": 10450, "lr": 0.003, "memory": 3995, "data_time": 0.00582, "loss_rpn_cls": 0.00719, "loss_rpn_bbox": 0.01439, "loss_cls": 0.07744, "acc": 97.05878, "loss_bbox": 0.11047, "loss": 0.2095, "time": 0.11399}
+ {"mode": "train", "epoch": 1, "iter": 10500, "lr": 0.003, "memory": 3995, "data_time": 0.00595, "loss_rpn_cls": 0.00778, "loss_rpn_bbox": 0.01671, "loss_cls": 0.07988, "acc": 96.94416, "loss_bbox": 0.11971, "loss": 0.22408, "time": 0.11275}
+ {"mode": "train", "epoch": 1, "iter": 10550, "lr": 0.003, "memory": 3995, "data_time": 0.00645, "loss_rpn_cls": 0.00789, "loss_rpn_bbox": 0.01537, "loss_cls": 0.07649, "acc": 97.03599, "loss_bbox": 0.11098, "loss": 0.21072, "time": 0.11457}
+ {"mode": "train", "epoch": 1, "iter": 10600, "lr": 0.003, "memory": 3995, "data_time": 0.00592, "loss_rpn_cls": 0.00767, "loss_rpn_bbox": 0.01646, "loss_cls": 0.07328, "acc": 97.20435, "loss_bbox": 0.10803, "loss": 0.20544, "time": 0.11298}
+ {"mode": "train", "epoch": 1, "iter": 10650, "lr": 0.003, "memory": 3995, "data_time": 0.00567, "loss_rpn_cls": 0.00767, "loss_rpn_bbox": 0.01473, "loss_cls": 0.07644, "acc": 97.01988, "loss_bbox": 0.11335, "loss": 0.21218, "time": 0.11341}
+ {"mode": "train", "epoch": 1, "iter": 10700, "lr": 0.003, "memory": 3995, "data_time": 0.00569, "loss_rpn_cls": 0.00818, "loss_rpn_bbox": 0.01566, "loss_cls": 0.08111, "acc": 96.91401, "loss_bbox": 0.11509, "loss": 0.22004, "time": 0.11171}
+ {"mode": "train", "epoch": 1, "iter": 10750, "lr": 0.003, "memory": 3995, "data_time": 0.00583, "loss_rpn_cls": 0.00844, "loss_rpn_bbox": 0.01555, "loss_cls": 0.07558, "acc": 97.09872, "loss_bbox": 0.1093, "loss": 0.20887, "time": 0.1147}
+ {"mode": "train", "epoch": 1, "iter": 10800, "lr": 0.003, "memory": 3995, "data_time": 0.0059, "loss_rpn_cls": 0.00749, "loss_rpn_bbox": 0.016, "loss_cls": 0.08087, "acc": 96.86987, "loss_bbox": 0.11349, "loss": 0.21785, "time": 0.11576}
+ {"mode": "train", "epoch": 1, "iter": 10850, "lr": 0.003, "memory": 3995, "data_time": 0.00575, "loss_rpn_cls": 0.00684, "loss_rpn_bbox": 0.01442, "loss_cls": 0.07627, "acc": 97.03856, "loss_bbox": 0.11192, "loss": 0.20945, "time": 0.11175}
+ {"mode": "train", "epoch": 1, "iter": 10900, "lr": 0.003, "memory": 3995, "data_time": 0.0059, "loss_rpn_cls": 0.00818, "loss_rpn_bbox": 0.01592, "loss_cls": 0.07622, "acc": 97.07876, "loss_bbox": 0.11306, "loss": 0.21339, "time": 0.11258}
+ {"mode": "train", "epoch": 1, "iter": 10950, "lr": 0.003, "memory": 3995, "data_time": 0.0057, "loss_rpn_cls": 0.00742, "loss_rpn_bbox": 0.01528, "loss_cls": 0.07832, "acc": 97.00102, "loss_bbox": 0.1126, "loss": 0.21362, "time": 0.11307}
+ {"mode": "train", "epoch": 1, "iter": 11000, "lr": 0.003, "memory": 3995, "data_time": 0.00588, "loss_rpn_cls": 0.00819, "loss_rpn_bbox": 0.01536, "loss_cls": 0.0821, "acc": 96.82437, "loss_bbox": 0.11795, "loss": 0.2236, "time": 0.11113}
+ {"mode": "train", "epoch": 1, "iter": 11050, "lr": 0.0003, "memory": 3995, "data_time": 0.00633, "loss_rpn_cls": 0.00709, "loss_rpn_bbox": 0.0152, "loss_cls": 0.07137, "acc": 97.2684, "loss_bbox": 0.10362, "loss": 0.19728, "time": 0.11428}
+ {"mode": "train", "epoch": 1, "iter": 11100, "lr": 0.0003, "memory": 3995, "data_time": 0.00566, "loss_rpn_cls": 0.00814, "loss_rpn_bbox": 0.01674, "loss_cls": 0.08006, "acc": 96.94915, "loss_bbox": 0.11467, "loss": 0.21961, "time": 0.11128}
+ {"mode": "train", "epoch": 1, "iter": 11150, "lr": 0.0003, "memory": 3995, "data_time": 0.00624, "loss_rpn_cls": 0.00775, "loss_rpn_bbox": 0.01569, "loss_cls": 0.07375, "acc": 97.18479, "loss_bbox": 0.10578, "loss": 0.20298, "time": 0.1134}
+ {"mode": "train", "epoch": 1, "iter": 11200, "lr": 0.0003, "memory": 3995, "data_time": 0.00604, "loss_rpn_cls": 0.00796, "loss_rpn_bbox": 0.01588, "loss_cls": 0.0755, "acc": 97.04548, "loss_bbox": 0.1115, "loss": 0.21083, "time": 0.11201}
+ {"mode": "train", "epoch": 1, "iter": 11250, "lr": 0.0003, "memory": 3995, "data_time": 0.00607, "loss_rpn_cls": 0.0077, "loss_rpn_bbox": 0.0148, "loss_cls": 0.07568, "acc": 97.05821, "loss_bbox": 0.11354, "loss": 0.21173, "time": 0.11247}
+ {"mode": "train", "epoch": 1, "iter": 11300, "lr": 0.0003, "memory": 3995, "data_time": 0.00601, "loss_rpn_cls": 0.00745, "loss_rpn_bbox": 0.01649, "loss_cls": 0.07373, "acc": 97.17293, "loss_bbox": 0.1063, "loss": 0.20396, "time": 0.11268}
+ {"mode": "train", "epoch": 1, "iter": 11350, "lr": 0.0003, "memory": 3995, "data_time": 0.00569, "loss_rpn_cls": 0.00832, "loss_rpn_bbox": 0.0161, "loss_cls": 0.07727, "acc": 97.0578, "loss_bbox": 0.10971, "loss": 0.21141, "time": 0.11305}
+ {"mode": "train", "epoch": 1, "iter": 11400, "lr": 0.0003, "memory": 3995, "data_time": 0.00576, "loss_rpn_cls": 0.00795, "loss_rpn_bbox": 0.01543, "loss_cls": 0.07385, "acc": 97.17054, "loss_bbox": 0.1101, "loss": 0.20734, "time": 0.11322}
+ {"mode": "train", "epoch": 1, "iter": 11450, "lr": 0.0003, "memory": 3995, "data_time": 0.0062, "loss_rpn_cls": 0.00782, "loss_rpn_bbox": 0.01771, "loss_cls": 0.07685, "acc": 97.01687, "loss_bbox": 0.11463, "loss": 0.21701, "time": 0.11287}
+ {"mode": "train", "epoch": 1, "iter": 11500, "lr": 0.0003, "memory": 3995, "data_time": 0.00593, "loss_rpn_cls": 0.0068, "loss_rpn_bbox": 0.01503, "loss_cls": 0.0693, "acc": 97.32531, "loss_bbox": 0.10133, "loss": 0.19245, "time": 0.1117}
+ {"mode": "train", "epoch": 1, "iter": 11550, "lr": 0.0003, "memory": 3995, "data_time": 0.00595, "loss_rpn_cls": 0.00764, "loss_rpn_bbox": 0.01642, "loss_cls": 0.07797, "acc": 97.00856, "loss_bbox": 0.11488, "loss": 0.21692, "time": 0.11095}
+ {"mode": "train", "epoch": 1, "iter": 11600, "lr": 0.0003, "memory": 3995, "data_time": 0.00593, "loss_rpn_cls": 0.00682, "loss_rpn_bbox": 0.01569, "loss_cls": 0.07434, "acc": 97.12411, "loss_bbox": 0.10999, "loss": 0.20683, "time": 0.11169}
+ {"mode": "train", "epoch": 1, "iter": 11650, "lr": 0.0003, "memory": 3995, "data_time": 0.0064, "loss_rpn_cls": 0.00771, "loss_rpn_bbox": 0.01522, "loss_cls": 0.07643, "acc": 97.06462, "loss_bbox": 0.11036, "loss": 0.20973, "time": 0.11242}
+ {"mode": "train", "epoch": 1, "iter": 11700, "lr": 0.0003, "memory": 3995, "data_time": 0.00609, "loss_rpn_cls": 0.00716, "loss_rpn_bbox": 0.01436, "loss_cls": 0.071, "acc": 97.28213, "loss_bbox": 0.10694, "loss": 0.19946, "time": 0.11527}
+ {"mode": "train", "epoch": 1, "iter": 11750, "lr": 0.0003, "memory": 3995, "data_time": 0.00569, "loss_rpn_cls": 0.00787, "loss_rpn_bbox": 0.01678, "loss_cls": 0.07745, "acc": 96.98307, "loss_bbox": 0.11778, "loss": 0.21988, "time": 0.11492}
+ {"mode": "train", "epoch": 1, "iter": 11800, "lr": 0.0003, "memory": 3995, "data_time": 0.00611, "loss_rpn_cls": 0.00691, "loss_rpn_bbox": 0.01579, "loss_cls": 0.07497, "acc": 97.09582, "loss_bbox": 0.10819, "loss": 0.20587, "time": 0.11348}
+ {"mode": "train", "epoch": 1, "iter": 11850, "lr": 0.0003, "memory": 3995, "data_time": 0.00572, "loss_rpn_cls": 0.00789, "loss_rpn_bbox": 0.01631, "loss_cls": 0.0756, "acc": 97.08119, "loss_bbox": 0.11148, "loss": 0.21129, "time": 0.11214}
+ {"mode": "train", "epoch": 1, "iter": 11900, "lr": 0.0003, "memory": 3995, "data_time": 0.00576, "loss_rpn_cls": 0.00665, "loss_rpn_bbox": 0.01629, "loss_cls": 0.07343, "acc": 97.19099, "loss_bbox": 0.10649, "loss": 0.20287, "time": 0.11253}
+ {"mode": "train", "epoch": 1, "iter": 11950, "lr": 0.0003, "memory": 3995, "data_time": 0.00609, "loss_rpn_cls": 0.00792, "loss_rpn_bbox": 0.01633, "loss_cls": 0.07778, "acc": 96.98358, "loss_bbox": 0.11344, "loss": 0.21547, "time": 0.11297}
+ {"mode": "train", "epoch": 1, "iter": 12000, "lr": 0.0003, "memory": 3995, "data_time": 0.00577, "loss_rpn_cls": 0.0072, "loss_rpn_bbox": 0.0156, "loss_cls": 0.07421, "acc": 97.14434, "loss_bbox": 0.1089, "loss": 0.2059, "time": 0.12536}
+ {"mode": "val", "epoch": 1, "iter": 619, "lr": 0.0003, "mAP": 0.82919, "AP50": 0.829}
finetune/finetune_mask-rcnn_12k_voc0712_lr3e-2_wd5e-5/best_mAP_iter_12000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:092caa083e55583761d57ebe9b32117fbba98666429dbe5eee77220d37dacb08
+ size 341585449
finetune/finetune_mask-rcnn_12k_voc0712_lr3e-2_wd5e-5/mask_rcnn_mstrain_12k_voc0712.py ADDED
@@ -0,0 +1,262 @@
+ model = dict(
+ type='MaskRCNN',
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ frozen_stages=1,
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
+ norm_eval=True,
+ style='pytorch',
+ init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
+ neck=dict(
+ type='FPN',
+ in_channels=[256, 512, 1024, 2048],
+ out_channels=256,
+ num_outs=5,
+ norm_cfg=dict(type='SyncBN', requires_grad=True)),
+ rpn_head=dict(
+ type='RPNHead',
+ in_channels=256,
+ feat_channels=256,
+ anchor_generator=dict(
+ type='AnchorGenerator',
+ scales=[8],
+ ratios=[0.5, 1.0, 2.0],
+ strides=[4, 8, 16, 32, 64]),
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ target_means=[0.0, 0.0, 0.0, 0.0],
+ target_stds=[1.0, 1.0, 1.0, 1.0]),
+ loss_cls=dict(
+ type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
+ loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
+ roi_head=dict(
+ type='StandardRoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor',
+ roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
+ out_channels=256,
+ featmap_strides=[4, 8, 16, 32]),
+ bbox_head=dict(
+ type='Shared4Conv1FCBBoxHead',
+ in_channels=256,
+ fc_out_channels=1024,
+ roi_feat_size=7,
+ num_classes=20,
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ target_means=[0.0, 0.0, 0.0, 0.0],
+ target_stds=[0.1, 0.1, 0.2, 0.2]),
+ reg_class_agnostic=False,
+ loss_cls=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+ loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
+ mask_roi_extractor=None,
+ mask_head=None),
+ train_cfg=dict(
+ rpn=dict(
+ assigner=dict(
+ type='MaxIoUAssigner',
+ pos_iou_thr=0.7,
+ neg_iou_thr=0.3,
+ min_pos_iou=0.3,
+ match_low_quality=True,
+ ignore_iof_thr=-1),
+ sampler=dict(
+ type='RandomSampler',
+ num=256,
+ pos_fraction=0.5,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=False),
+ allowed_border=-1,
+ pos_weight=-1,
+ debug=False),
+ rpn_proposal=dict(
+ nms_pre=2000,
+ max_per_img=1000,
+ nms=dict(type='nms', iou_threshold=0.7),
+ min_bbox_size=0),
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssigner',
+ pos_iou_thr=0.5,
+ neg_iou_thr=0.5,
+ min_pos_iou=0.5,
+ match_low_quality=True,
+ ignore_iof_thr=-1),
+ sampler=dict(
+ type='RandomSampler',
+ num=512,
+ pos_fraction=0.25,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ mask_size=28,
+ pos_weight=-1,
+ debug=False)),
+ test_cfg=dict(
+ rpn=dict(
+ nms_pre=1000,
+ max_per_img=1000,
+ nms=dict(type='nms', iou_threshold=0.7),
+ min_bbox_size=0),
+ rcnn=dict(
+ score_thr=0.05,
+ nms=dict(type='nms', iou_threshold=0.5),
+ max_per_img=100,
+ mask_thr_binary=0.5)))
+ dataset_type = 'VOCDataset'
+ data_root = 'data/VOCdevkit/'
+ img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+ train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations', with_bbox=True),
+ dict(
+ type='Resize',
+ img_scale=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),
+ (1333, 608), (1333, 640), (1333, 672), (1333, 704),
+ (1333, 736), (1333, 768), (1333, 800)],
+ multiscale_mode='value',
+ keep_ratio=True),
+ dict(type='RandomFlip', flip_ratio=0.5),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='Pad', size_divisor=32),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+ ]
+ test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(1333, 800),
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='Pad', size_divisor=32),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+ ])
+ ]
+ data = dict(
+ samples_per_gpu=2,
+ workers_per_gpu=2,
+ train=dict(
+ type='VOCDataset',
+ ann_file=[
+ 'data/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt',
+ 'data/VOCdevkit/VOC2012/ImageSets/Main/trainval.txt'
+ ],
+ img_prefix=['data/VOCdevkit/VOC2007/', 'data/VOCdevkit/VOC2012/'],
+ pipeline=[
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations', with_bbox=True),
+ dict(
+ type='Resize',
+ img_scale=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),
+ (1333, 608), (1333, 640), (1333, 672), (1333, 704),
+ (1333, 736), (1333, 768), (1333, 800)],
+ multiscale_mode='value',
+ keep_ratio=True),
+ dict(type='RandomFlip', flip_ratio=0.5),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='Pad', size_divisor=32),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+ ]),
+ val=dict(
+ type='VOCDataset',
+ ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
+ img_prefix='data/VOCdevkit/VOC2007/',
+ pipeline=[
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(1333, 800),
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='Pad', size_divisor=32),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+ ])
+ ]),
+ test=dict(
+ type='VOCDataset',
+ ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
+ img_prefix='data/VOCdevkit/VOC2007/',
+ pipeline=[
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(1333, 800),
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='Pad', size_divisor=32),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+ ])
+ ]))
+ evaluation = dict(interval=12000, metric='mAP', save_best='auto')
+ optimizer = dict(type='SGD', lr=0.03, momentum=0.9, weight_decay=5e-05)
+ optimizer_config = dict(grad_clip=None)
+ lr_config = dict(
+ policy='step',
+ warmup='linear',
+ warmup_iters=500,
+ warmup_ratio=0.001,
+ step=[9000, 11000],
+ by_epoch=False)
+ runner = dict(type='IterBasedRunner', max_iters=12000)
+ checkpoint_config = dict(interval=12000)
+ log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
+ custom_hooks = [
+ dict(type='NumClassCheckHook'),
+ dict(
+ type='MMDetWandbHook',
+ init_kwargs=dict(project='I2B', group='finetune'),
+ interval=50,
+ num_eval_images=0,
+ log_checkpoint=False)
+ ]
+ dist_params = dict(backend='nccl')
+ log_level = 'INFO'
+ load_from = 'pretrain/selfsup_mask-rcnn_mstrain-soft-teacher_sampler-4096_temp0.5/final_model.pth'
+ resume_from = None
+ workflow = [('train', 1)]
+ opencv_num_threads = 0
+ mp_start_method = 'fork'
+ auto_scale_lr = dict(enable=False, base_batch_size=16)
+ custom_imports = None
+ norm_cfg = dict(type='SyncBN', requires_grad=True)
+ work_dir = 'work_dirs/finetune_mask-rcnn__12k_voc0712_lr3e-2_wd5e-5'
+ auto_resume = False
+ gpu_ids = range(0, 8)
finetune/finetune_mask-rcnn_1x_coco_lr3e-2_wd5e-5/20220929_104229.log ADDED
The diff for this file is too large to render. See raw diff
finetune/finetune_mask-rcnn_1x_coco_lr3e-2_wd5e-5/20220929_104229.log.json ADDED
The diff for this file is too large to render. See raw diff
finetune/finetune_mask-rcnn_1x_coco_lr3e-2_wd5e-5/best_bbox_mAP_epoch_12.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f36ebf2356dfc2f612293f95ea49046127d0a7e5680f1dc0d1bf0172fb4cf3ba
+ size 365201337
finetune/finetune_mask-rcnn_1x_coco_lr3e-2_wd5e-5/mask_rcnn_r50_fpn_1x_coco.py ADDED
@@ -0,0 +1,259 @@
+ model = dict(
+ type='MaskRCNN',
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ frozen_stages=1,
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
+ norm_eval=True,
+ style='pytorch',
+ init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
+ neck=dict(
+ type='FPN',
+ in_channels=[256, 512, 1024, 2048],
+ out_channels=256,
+ num_outs=5,
+ norm_cfg=dict(type='SyncBN', requires_grad=True)),
+ rpn_head=dict(
+ type='RPNHead',
+ in_channels=256,
+ feat_channels=256,
+ anchor_generator=dict(
+ type='AnchorGenerator',
+ scales=[8],
+ ratios=[0.5, 1.0, 2.0],
+ strides=[4, 8, 16, 32, 64]),
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ target_means=[0.0, 0.0, 0.0, 0.0],
+ target_stds=[1.0, 1.0, 1.0, 1.0]),
+ loss_cls=dict(
+ type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
+ loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
+ roi_head=dict(
+ type='StandardRoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor',
+ roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
+ out_channels=256,
+ featmap_strides=[4, 8, 16, 32]),
+ bbox_head=dict(
+ type='Shared4Conv1FCBBoxHead',
+ in_channels=256,
+ fc_out_channels=1024,
+ roi_feat_size=7,
+ num_classes=80,
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ target_means=[0.0, 0.0, 0.0, 0.0],
+ target_stds=[0.1, 0.1, 0.2, 0.2]),
+ reg_class_agnostic=False,
+ loss_cls=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+ loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
+ mask_roi_extractor=dict(
+ type='SingleRoIExtractor',
+ roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
+ out_channels=256,
+ featmap_strides=[4, 8, 16, 32]),
+ mask_head=dict(
+ type='FCNMaskHead',
+ num_convs=4,
+ in_channels=256,
+ conv_out_channels=256,
+ num_classes=80,
+ loss_mask=dict(
+ type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
+ train_cfg=dict(
+ rpn=dict(
+ assigner=dict(
+ type='MaxIoUAssigner',
+ pos_iou_thr=0.7,
+ neg_iou_thr=0.3,
+ min_pos_iou=0.3,
+ match_low_quality=True,
+ ignore_iof_thr=-1),
+ sampler=dict(
+ type='RandomSampler',
+ num=256,
+ pos_fraction=0.5,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=False),
+ allowed_border=-1,
+ pos_weight=-1,
+ debug=False),
+ rpn_proposal=dict(
+ nms_pre=2000,
+ max_per_img=1000,
+ nms=dict(type='nms', iou_threshold=0.7),
+ min_bbox_size=0),
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssigner',
+ pos_iou_thr=0.5,
+ neg_iou_thr=0.5,
+ min_pos_iou=0.5,
+ match_low_quality=True,
+ ignore_iof_thr=-1),
+ sampler=dict(
+ type='RandomSampler',
+ num=512,
+ pos_fraction=0.25,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ mask_size=28,
+ pos_weight=-1,
+ debug=False)),
+ test_cfg=dict(
+ rpn=dict(
+ nms_pre=1000,
+ max_per_img=1000,
+ nms=dict(type='nms', iou_threshold=0.7),
+ min_bbox_size=0),
+ rcnn=dict(
+ score_thr=0.05,
+ nms=dict(type='nms', iou_threshold=0.5),
+ max_per_img=100,
+ mask_thr_binary=0.5)))
+ dataset_type = 'CocoDataset'
+ data_root = 'data/coco/'
+ img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+ train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+ dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+ dict(type='RandomFlip', flip_ratio=0.5),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='Pad', size_divisor=32),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])
+ ]
+ test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(1333, 800),
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='Pad', size_divisor=32),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+ ])
+ ]
+ data = dict(
+ samples_per_gpu=2,
+ workers_per_gpu=2,
+ train=dict(
+ type='CocoDataset',
+ ann_file='data/coco/annotations/instances_train2017.json',
+ img_prefix='data/coco/train2017/',
+ pipeline=[
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+ dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+ dict(type='RandomFlip', flip_ratio=0.5),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='Pad', size_divisor=32),
+ dict(type='DefaultFormatBundle'),
+ dict(
+ type='Collect',
+ keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])
+ ]),
+ val=dict(
+ type='CocoDataset',
+ ann_file='data/coco/annotations/instances_val2017.json',
+ img_prefix='data/coco/val2017/',
+ pipeline=[
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(1333, 800),
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='Pad', size_divisor=32),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+ ])
+ ]),
+ test=dict(
+ type='CocoDataset',
+ ann_file='data/coco/annotations/instances_val2017.json',
+ img_prefix='data/coco/val2017/',
+ pipeline=[
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(1333, 800),
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='Pad', size_divisor=32),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+ ])
+ ]))
+ evaluation = dict(metric=['bbox', 'segm'], save_best='auto')
+ optimizer = dict(type='SGD', lr=0.03, momentum=0.9, weight_decay=5e-05)
+ optimizer_config = dict(grad_clip=None)
+ lr_config = dict(
+ policy='step',
+ warmup='linear',
+ warmup_iters=500,
+ warmup_ratio=0.001,
+ step=[8, 11])
+ runner = dict(type='EpochBasedRunner', max_epochs=12)
+ checkpoint_config = dict(interval=1)
+ log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
+ custom_hooks = [
+ dict(type='NumClassCheckHook'),
+ dict(
+ type='MMDetWandbHook',
+ init_kwargs=dict(project='I2B', group='finetune'),
+ interval=50,
+ num_eval_images=0,
+ log_checkpoint=False)
+ ]
+ dist_params = dict(backend='nccl')
+ log_level = 'INFO'
+ load_from = 'work_dirs/selfsup_mask-rcnn_mstrain-soft-teacher_sampler-4096_temp0.5/final_model.pth'
+ resume_from = None
+ workflow = [('train', 1)]
+ opencv_num_threads = 0
+ mp_start_method = 'fork'
+ auto_scale_lr = dict(enable=False, base_batch_size=16)
+ custom_imports = None
+ norm_cfg = dict(type='SyncBN', requires_grad=True)
+ work_dir = 'work_dirs/finetune_mask-rcnn_1x_coco_lr3e-2_wd5e-5'
+ auto_resume = False
+ gpu_ids = range(0, 8)