# MMPretrain training config: ConvNeXt V2 (tiny) fine-tuned as a 2-class
# image classifier at 384px, initialized from a local checkpoint, with EMA,
# AMP + gradient accumulation, Mixup/CutMix, and a LinearLR warmup followed
# by cosine annealing.

auto_scale_lr = dict(base_batch_size=96)

# Keep an exponential moving average of model weights during training.
custom_hooks = [
    dict(momentum=0.0001, priority='ABOVE_NORMAL', type='EMAHook'),
]

# ImageNet mean/std normalization; input channels converted to RGB.
data_preprocessor = dict(
    mean=[123.675, 116.28, 103.53],
    num_classes=2,
    std=[58.395, 57.12, 57.375],
    to_rgb=True)

dataset_type = 'CustomDataset'

default_hooks = dict(
    checkpoint=dict(interval=2, type='CheckpointHook'),  # save every 2 epochs
    logger=dict(interval=100, type='LoggerHook'),        # log every 100 iters
    param_scheduler=dict(type='ParamSchedulerHook'),
    sampler_seed=dict(type='DistSamplerSeedHook'),
    timer=dict(type='IterTimerHook'),
    visualization=dict(
        enable=True,
        interval=1,
        out_dir=None,
        type='VisualizationHook',
        wait_time=2))

default_scope = 'mmpretrain'

env_cfg = dict(
    cudnn_benchmark=False,
    dist_cfg=dict(backend='nccl'),
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))

launcher = 'none'
# Initialize from this local checkpoint before training.
load_from = './ConvNeXt_v2-v2_ep90.pth'
log_level = 'INFO'

model = dict(
    backbone=dict(
        arch='tiny',
        drop_path_rate=0.5,
        layer_scale_init_value=0.0,  # layer scale off; GRN used instead (v2)
        type='ConvNeXt',
        use_grn=True),
    head=dict(
        in_channels=768,
        init_cfg=None,
        loss=dict(label_smooth_val=0.2, type='LabelSmoothLoss'),
        num_classes=2,
        type='LinearClsHead'),
    init_cfg=dict(
        bias=0.0,
        layer=[
            'Conv2d',
            'Linear',
        ],
        std=0.02,
        type='TruncNormal'),
    # Batch augmentations applied during training only.
    train_cfg=dict(augments=[
        dict(alpha=0.8, type='Mixup'),
        dict(alpha=1.0, type='CutMix'),
    ]),
    type='ImageClassifier')

optim_wrapper = dict(
    # Gradient accumulation: effective batch = 3 * 32 = 96, matching
    # auto_scale_lr.base_batch_size above.
    accumulative_counts=3,
    clip_grad=None,
    loss_scale='dynamic',
    optimizer=dict(
        betas=(
            0.9,
            0.999,
        ),
        eps=1e-08,
        lr=0.00032,
        type='AdamW',
        weight_decay=0.05),
    paramwise_cfg=dict(
        bias_decay_mult=0.0,
        # NOTE(review): these keys name Swin-style parameters; a ConvNeXt
        # backbone has no such parameters, so they appear to be leftovers
        # from another config — confirm and consider removing.
        custom_keys={
            '.absolute_pos_embed': dict(decay_mult=0.0),
            '.relative_position_bias_table': dict(decay_mult=0.0),
        },
        flat_decay_mult=0.0,
        norm_decay_mult=0.0),
    type='AmpOptimWrapper')

# Linear warmup for the first 2 epochs, then cosine decay down to eta_min.
param_scheduler = [
    dict(
        by_epoch=True,
        convert_to_iter_based=True,
        end=2,
        start_factor=0.001,
        type='LinearLR'),
    dict(begin=2, by_epoch=True, eta_min=8e-05, type='CosineAnnealingLR'),
]

randomness = dict(deterministic=False, seed=None)
resume = False

test_cfg = dict()
test_dataloader = dict(
    batch_size=16,
    collate_fn=dict(type='default_collate'),
    dataset=dict(
        data_root='./testimgs',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(
                backend='pillow',
                interpolation='bicubic',
                scale=384,
                type='Resize'),
            dict(type='PackInputs'),
        ],
        type='CustomDataset'),
    num_workers=5,
    persistent_workers=True,
    pin_memory=True,
    sampler=dict(shuffle=False, type='DefaultSampler'))
test_evaluator = dict(topk=(1, ), type='Accuracy')
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(backend='pillow', interpolation='bicubic', scale=384, type='Resize'),
    dict(type='PackInputs'),
]

train_cfg = dict(by_epoch=True, max_epochs=120, val_interval=1)
train_dataloader = dict(
    batch_size=32,
    collate_fn=dict(type='default_collate'),
    dataset=dict(
        data_root='./procset',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(
                backend='pillow',
                interpolation='bicubic',
                scale=384,
                type='RandomResizedCrop'),
            dict(direction='horizontal', prob=0.5, type='RandomFlip'),
            dict(type='PackInputs'),
        ],
        type='CustomDataset'),
    num_workers=5,
    persistent_workers=True,
    pin_memory=True,
    sampler=dict(shuffle=True, type='DefaultSampler'))
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        backend='pillow',
        interpolation='bicubic',
        scale=384,
        type='RandomResizedCrop'),
    dict(direction='horizontal', prob=0.5, type='RandomFlip'),
    dict(type='PackInputs'),
]

val_cfg = dict()
val_dataloader = dict(
    batch_size=16,
    collate_fn=dict(type='default_collate'),
    dataset=dict(
        data_root='./valset',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(
                backend='pillow',
                interpolation='bicubic',
                scale=384,
                type='Resize'),
            dict(type='PackInputs'),
        ],
        type='CustomDataset'),
    num_workers=5,
    persistent_workers=True,
    pin_memory=True,
    sampler=dict(shuffle=False, type='DefaultSampler'))
val_evaluator = dict(topk=(1, ), type='Accuracy')

vis_backends = [
    dict(type='LocalVisBackend'),
]
visualizer = dict(
    type='UniversalVisualizer',
    vis_backends=[
        dict(type='LocalVisBackend'),
    ])

# Fixed: use a forward slash. The dumped config carried a Windows backslash
# ('./work_dirs\\...'), which on POSIX creates one directory whose name
# literally contains a backslash instead of a nested path.
work_dir = './work_dirs/convnext-v2-tiny_32xb32_in1k-384px'