File size: 9,957 Bytes
6410dbb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
# AMP (bfloat16) optimizer wrapper: AdamW, with weight decay disabled for
# norm/bias/flat params and the position-embedding tables via paramwise_cfg.
optim_wrapper = dict(
    type='AmpOptimWrapper',
    dtype='bfloat16',
    clip_grad=None,  # no gradient clipping
    optimizer=dict(
        _scope_='mmpretrain',
        type='AdamW',
        lr=4e-4,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=0.05),
    paramwise_cfg=dict(
        norm_decay_mult=0.0,
        bias_decay_mult=0.0,
        flat_decay_mult=0.0,
        custom_keys={
            '.absolute_pos_embed': dict(decay_mult=0.0),
            '.relative_position_bias_table': dict(decay_mult=0.0),
        }))
# Single per-iteration cosine decay over the whole run, annealing to 1e-5.
param_scheduler = [
    dict(
        type='CosineAnnealingLR',
        begin=0,
        by_epoch=False,
        eta_min=1e-05),
]
# Epoch-based training: 10 epochs, validate after every epoch.
train_cfg = dict(val_interval=1, max_epochs=10, by_epoch=True)
# Default validation / test loops.
val_cfg = {}
test_cfg = {}
# LR auto-scaling reference: the configured lr corresponds to a total
# batch size of 4096.
auto_scale_lr = dict(base_batch_size=4096)
# Binary image classifier: ConvNeXt-Small backbone (first two stages frozen,
# weights from an IN21k-pretrained, IN1k-finetuned checkpoint) with a linear
# 2-class head.
model = dict(
    type='ImageClassifier',
    backbone=dict(
        type='ConvNeXt',
        arch='small',
        drop_path_rate=0.4,
        frozen_stages=2,  # freeze stem + first two stages
        init_cfg=dict(
            type='Pretrained',
            prefix='backbone',
            checkpoint=
            'https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_in21k-pre_3rdparty_in1k_20221219-aeca4c93.pth')),
    head=dict(
        type='LinearClsHead',
        num_classes=2,
        in_channels=768,  # ConvNeXt-S final feature width
        init_cfg=None,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0)),
    init_cfg=dict(
        type='TruncNormal', layer=['Conv2d', 'Linear'], std=0.02, bias=0.0),
    train_cfg=None)
dataset_type = 'CustomDataset'
# Standard ImageNet RGB statistics; the preprocessor converts BGR -> RGB.
data_preprocessor = dict(
    num_classes=2,
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    to_rgb=True)
# Same statistics in BGR channel order (reversed RGB).
bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]
# Training augmentation: random crop + flip, plus JPEG re-compression and
# Gaussian blur corruptions (p=0.5 each).
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomResizedCrop', scale=224, backend='pillow',
         interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='JPEG', compress_val=65, prob=0.5),
    dict(type='GaussianBlur', radius=1.5, prob=0.5),
    dict(type='PackInputs'),
]
# Deterministic evaluation pipeline: resize short edge to 256, then a
# central 224x224 crop.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='ResizeEdge', scale=256, edge='short', backend='pillow',
         interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]
# Training data: three sources concatenated (two generated-image annotation
# files + the cc1m real-image set), each using the same augmentation
# pipeline. The pipeline is built per-dataset via a comprehension instead of
# being written out three times.
train_dataloader = dict(
    batch_size=256,
    num_workers=10,
    pin_memory=True,
    persistent_workers=True,
    collate_fn=dict(type='default_collate'),
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='ConcatDataset',
        datasets=[
            dict(
                type='CustomDataset',
                data_root=root,
                ann_file=ann,
                pipeline=[
                    dict(type='LoadImageFromFile'),
                    dict(type='RandomResizedCrop', scale=224,
                         backend='pillow', interpolation='bicubic'),
                    dict(type='RandomFlip', prob=0.5,
                         direction='horizontal'),
                    dict(type='JPEG', compress_val=65, prob=0.5),
                    dict(type='GaussianBlur', radius=1.5, prob=0.5),
                    dict(type='PackInputs'),
                ])
            for root, ann in [
                ('/mnt/petrelfs/luzeyu/workspace/fakebench/dataset',
                 '/mnt/petrelfs/luzeyu/workspace/fakebench/dataset/meta/train/IF80w.csv'),
                ('/mnt/petrelfs/luzeyu/workspace/fakebench/dataset',
                 '/mnt/petrelfs/luzeyu/workspace/fakebench/dataset/meta/train/if-dpmsolver++-50-20w.tsv'),
                ('',
                 '/mnt/petrelfs/luzeyu/workspace/fakebench/dataset/meta/train/cc1m.csv'),
            ]
        ]))
# Validation data: one generated-image set + the cc1w real-image set.
# FIX: the dumped config reused the *training* pipeline here
# (RandomResizedCrop / RandomFlip / random JPEG / random GaussianBlur),
# which made validation metrics stochastic and unrepeatable; the file's
# `test_pipeline` (ResizeEdge + CenterCrop) was defined but never used.
# Validation now runs the deterministic evaluation pipeline.
val_dataloader = dict(
    batch_size=256,
    num_workers=10,
    pin_memory=True,
    persistent_workers=True,
    collate_fn=dict(type='default_collate'),
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type='ConcatDataset',
        datasets=[
            dict(
                type='CustomDataset',
                data_root=root,
                ann_file=ann,
                pipeline=[
                    dict(type='LoadImageFromFile'),
                    dict(type='ResizeEdge', scale=256, edge='short',
                         backend='pillow', interpolation='bicubic'),
                    dict(type='CenterCrop', crop_size=224),
                    dict(type='PackInputs'),
                ])
            for root, ann in [
                ('/mnt/petrelfs/luzeyu/workspace/fakebench/dataset',
                 '/mnt/petrelfs/luzeyu/workspace/fakebench/dataset/meta/val/if-dpmsolver++-25-1w.tsv'),
                ('',
                 '/mnt/petrelfs/luzeyu/workspace/fakebench/dataset/meta/val/cc1w.csv'),
            ]
        ]))
# Validation metrics: top-1 accuracy plus per-class precision/recall/F1
# (average=None keeps one score per class).
val_evaluator = [
    dict(type='Accuracy', topk=1),
    dict(type='SingleLabelMetric', average=None),
]
# Test data: same two sources as validation.
# FIX: the dumped config reused the *training* pipeline here
# (RandomResizedCrop / RandomFlip / random JPEG / random GaussianBlur),
# making test results non-deterministic; testing now uses the deterministic
# evaluation pipeline (same transforms as `test_pipeline`).
test_dataloader = dict(
    batch_size=256,
    num_workers=10,
    pin_memory=True,
    persistent_workers=True,
    collate_fn=dict(type='default_collate'),
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type='ConcatDataset',
        datasets=[
            dict(
                type='CustomDataset',
                data_root=root,
                ann_file=ann,
                pipeline=[
                    dict(type='LoadImageFromFile'),
                    dict(type='ResizeEdge', scale=256, edge='short',
                         backend='pillow', interpolation='bicubic'),
                    dict(type='CenterCrop', crop_size=224),
                    dict(type='PackInputs'),
                ])
            for root, ann in [
                ('/mnt/petrelfs/luzeyu/workspace/fakebench/dataset',
                 '/mnt/petrelfs/luzeyu/workspace/fakebench/dataset/meta/val/if-dpmsolver++-25-1w.tsv'),
                ('',
                 '/mnt/petrelfs/luzeyu/workspace/fakebench/dataset/meta/val/cc1w.csv'),
            ]
        ]))
# Test metrics mirror the validation metrics.
test_evaluator = [
    dict(type='Accuracy', topk=1),
    dict(type='SingleLabelMetric', average=None),
]
# Exponential moving average of model weights, updated before the default
# optimizer hooks run.
custom_hooks = [
    dict(type='EMAHook', momentum=0.0001, priority='ABOVE_NORMAL'),
]
default_scope = 'mmpretrain'
# Standard mmengine hook set: iteration timing, logging every 100 iters,
# LR scheduling, per-epoch checkpoints, distributed-sampler seeding and
# prediction visualization.
default_hooks = dict(
    timer=dict(type='IterTimerHook'),
    logger=dict(interval=100, type='LoggerHook'),
    param_scheduler=dict(type='ParamSchedulerHook'),
    checkpoint=dict(interval=1, type='CheckpointHook'),
    sampler_seed=dict(type='DistSamplerSeedHook'),
    visualization=dict(enable=True, type='VisualizationHook'))
# Runtime environment: cuDNN autotuning on, fork-started dataloader workers
# with OpenCV threading disabled, NCCL backend for distributed runs.
env_cfg = dict(
    cudnn_benchmark=True,
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    dist_cfg=dict(backend='nccl'))
vis_backends = [dict(type='LocalVisBackend')]
# Visualizer writes to local files and TensorBoard.
visualizer = dict(
    type='UniversalVisualizer',
    vis_backends=[
        dict(type='LocalVisBackend'),
        dict(type='TensorboardVisBackend'),
    ])
log_level = 'INFO'
load_from = None   # no checkpoint preloaded at launch
resume = False     # do not resume a previous run
# No fixed seed; cuDNN determinism not enforced.
randomness = dict(seed=None, deterministic=False)
launcher = 'slurm'
work_dir = 'workdir/convnext_small_4xb256_IF_1m_lr4e-4_aug_5e-1'