stefanwolf committed
Commit ee66a83
Parent(s): none
Initial commit
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full changeset.
- .gitignore +179 -0
- configs/_base_/datasets/cifar100_bs16.py +45 -0
- configs/_base_/datasets/cifar10_bs16.py +45 -0
- configs/_base_/datasets/cub_bs8_384.py +51 -0
- configs/_base_/datasets/cub_bs8_448.py +50 -0
- configs/_base_/datasets/fungi_bs16_swin_384.py +93 -0
- configs/_base_/datasets/fungi_bs16_swin_384_class-balanced.py +96 -0
- configs/_base_/datasets/imagenet21k_bs128.py +53 -0
- configs/_base_/datasets/imagenet_bs128_mbv3.py +68 -0
- configs/_base_/datasets/imagenet_bs128_poolformer_medium_224.py +82 -0
- configs/_base_/datasets/imagenet_bs128_poolformer_small_224.py +82 -0
- configs/_base_/datasets/imagenet_bs128_revvit_224.py +85 -0
- configs/_base_/datasets/imagenet_bs128_riformer_medium_384.py +82 -0
- configs/_base_/datasets/imagenet_bs128_riformer_small_384.py +82 -0
- configs/_base_/datasets/imagenet_bs128_vig_224.py +82 -0
- configs/_base_/datasets/imagenet_bs16_eva_196.py +62 -0
- configs/_base_/datasets/imagenet_bs16_eva_336.py +62 -0
- configs/_base_/datasets/imagenet_bs16_eva_560.py +62 -0
- configs/_base_/datasets/imagenet_bs16_pil_bicubic_384.py +55 -0
- configs/_base_/datasets/imagenet_bs256_beitv2.py +48 -0
- configs/_base_/datasets/imagenet_bs256_davit_224.py +82 -0
- configs/_base_/datasets/imagenet_bs256_levit_224.py +82 -0
- configs/_base_/datasets/imagenet_bs256_rsb_a12.py +74 -0
- configs/_base_/datasets/imagenet_bs256_rsb_a3.py +74 -0
- configs/_base_/datasets/imagenet_bs256_simmim_192.py +34 -0
- configs/_base_/datasets/imagenet_bs256_swin_192.py +83 -0
- configs/_base_/datasets/imagenet_bs32.py +53 -0
- configs/_base_/datasets/imagenet_bs32_byol.py +90 -0
- configs/_base_/datasets/imagenet_bs32_mocov2.py +59 -0
- configs/_base_/datasets/imagenet_bs32_pil_bicubic.py +62 -0
- configs/_base_/datasets/imagenet_bs32_pil_resize.py +53 -0
- configs/_base_/datasets/imagenet_bs32_simclr.py +53 -0
- configs/_base_/datasets/imagenet_bs512_mae.py +33 -0
- configs/_base_/datasets/imagenet_bs512_mocov3.py +91 -0
- configs/_base_/datasets/imagenet_bs64.py +53 -0
- configs/_base_/datasets/imagenet_bs64_autoaug.py +61 -0
- configs/_base_/datasets/imagenet_bs64_clip_224.py +72 -0
- configs/_base_/datasets/imagenet_bs64_clip_384.py +72 -0
- configs/_base_/datasets/imagenet_bs64_clip_448.py +73 -0
- configs/_base_/datasets/imagenet_bs64_convmixer_224.py +82 -0
- configs/_base_/datasets/imagenet_bs64_deit3_224.py +82 -0
- configs/_base_/datasets/imagenet_bs64_deit3_384.py +62 -0
- configs/_base_/datasets/imagenet_bs64_edgenext_256.py +82 -0
- configs/_base_/datasets/imagenet_bs64_mixer_224.py +54 -0
- configs/_base_/datasets/imagenet_bs64_pil_resize.py +53 -0
- configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py +70 -0
- configs/_base_/datasets/imagenet_bs64_swin_224.py +82 -0
- configs/_base_/datasets/imagenet_bs64_swin_256.py +82 -0
- configs/_base_/datasets/imagenet_bs64_swin_384.py +56 -0
- configs/_base_/datasets/imagenet_bs64_t2t_224.py +82 -0
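
The files above are `_base_` dataset configs in the MMPretrain/MMEngine style; an experiment config normally pulls one of them in through `_base_` inheritance and overrides individual keys. A minimal sketch under that assumption (only the dataset base below comes from this commit; the other base paths and the override are illustrative placeholders, not files in this repository):

# Hypothetical downstream config, e.g. configs/my_experiment/swin_fungi.py.
_base_ = [
    '../_base_/datasets/fungi_bs16_swin_384.py',  # dataset base from this commit
    '../_base_/models/swin_transformer_v2/base_384.py',  # assumed model base
    '../_base_/schedules/imagenet_bs1024_adamw_swin.py',  # assumed schedule base
    '../_base_/default_runtime.py',  # assumed runtime base
]

# Keys inherited from the dataset base can be overridden in place,
# e.g. a larger per-GPU training batch size:
train_dataloader = dict(batch_size=32)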
.gitignore
ADDED
@@ -0,0 +1,179 @@
data
work_dirs

# Created by https://www.toptal.com/developers/gitignore/api/python
# Edit at https://www.toptal.com/developers/gitignore?templates=python

### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

### Python Patch ###
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
poetry.toml

# ruff
.ruff_cache/

# LSP config files
pyrightconfig.json

# End of https://www.toptal.com/developers/gitignore/api/python
configs/_base_/datasets/cifar100_bs16.py
ADDED
@@ -0,0 +1,45 @@
# dataset settings
dataset_type = 'CIFAR100'
data_preprocessor = dict(
    num_classes=100,
    # RGB format normalization parameters
    mean=[129.304, 124.070, 112.434],
    std=[68.170, 65.392, 70.418],
    # loaded images are already RGB format
    to_rgb=False)

train_pipeline = [
    dict(type='RandomCrop', crop_size=32, padding=4),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=16,
    num_workers=2,
    dataset=dict(
        type=dataset_type,
        data_prefix='data/cifar100',
        test_mode=False,
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=16,
    num_workers=2,
    dataset=dict(
        type=dataset_type,
        data_prefix='data/cifar100/',
        test_mode=True,
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, ))

test_dataloader = val_dataloader
test_evaluator = val_evaluator
configs/_base_/datasets/cifar10_bs16.py
ADDED
@@ -0,0 +1,45 @@
# dataset settings
dataset_type = 'CIFAR10'
data_preprocessor = dict(
    num_classes=10,
    # RGB format normalization parameters
    mean=[125.307, 122.961, 113.8575],
    std=[51.5865, 50.847, 51.255],
    # loaded images are already RGB format
    to_rgb=False)

train_pipeline = [
    dict(type='RandomCrop', crop_size=32, padding=4),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=16,
    num_workers=2,
    dataset=dict(
        type=dataset_type,
        data_prefix='data/cifar10',
        test_mode=False,
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=16,
    num_workers=2,
    dataset=dict(
        type=dataset_type,
        data_prefix='data/cifar10/',
        test_mode=True,
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, ))

test_dataloader = val_dataloader
test_evaluator = val_evaluator
configs/_base_/datasets/cub_bs8_384.py
ADDED
@@ -0,0 +1,51 @@
# dataset settings
dataset_type = 'CUB'
data_preprocessor = dict(
    num_classes=200,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='Resize', scale=510),
    dict(type='RandomCrop', crop_size=384),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='Resize', scale=510),
    dict(type='CenterCrop', crop_size=384),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=8,
    num_workers=2,
    dataset=dict(
        type=dataset_type,
        data_root='data/CUB_200_2011',
        test_mode=False,
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=8,
    num_workers=2,
    dataset=dict(
        type=dataset_type,
        data_root='data/CUB_200_2011',
        test_mode=True,
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, ))

test_dataloader = val_dataloader
test_evaluator = val_evaluator
configs/_base_/datasets/cub_bs8_448.py
ADDED
@@ -0,0 +1,50 @@
# dataset settings
dataset_type = 'CUB'
data_preprocessor = dict(
    num_classes=200,
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='Resize', scale=600),
    dict(type='RandomCrop', crop_size=448),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='Resize', scale=600),
    dict(type='CenterCrop', crop_size=448),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=8,
    num_workers=2,
    dataset=dict(
        type=dataset_type,
        data_root='data/CUB_200_2011',
        test_mode=False,
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=8,
    num_workers=2,
    dataset=dict(
        type=dataset_type,
        data_root='data/CUB_200_2011',
        test_mode=True,
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, ))

test_dataloader = val_dataloader
test_evaluator = val_evaluator
configs/_base_/datasets/fungi_bs16_swin_384.py
ADDED
@@ -0,0 +1,93 @@
_base_ = ['./pipelines/rand_aug.py']

# dataset settings
dataset_type = 'Fungi'
data_preprocessor = dict(
    num_classes=1604,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFileFungi'),
    dict(
        type='RandomResizedCrop',
        scale=384,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',
        num_policies=2,
        total_level=10,
        magnitude_level=9,
        magnitude_std=0.5,
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=bgr_mean,
        fill_std=bgr_std),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFileFungi'),
    dict(
        type='ResizeEdge',
        scale=438,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=384),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=16,
    num_workers=8,
    dataset=dict(
        type=dataset_type,
        data_root='data/fungi2023/',
        ann_file='FungiCLEF2023_train_metadata_PRODUCTION.csv',
        data_prefix='DF20/',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=64,
    num_workers=8,
    dataset=dict(
        type=dataset_type,
        data_root='data/fungi2023/',
        ann_file='FungiCLEF2023_val_metadata_PRODUCTION.csv',
        data_prefix='DF21/',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='SingleLabelMetric', items=['precision', 'recall', 'f1-score'])

test_dataloader = dict(
    batch_size=64,
    num_workers=8,
    dataset=dict(
        type=dataset_type,
        data_root='data/fungi2023/',
        ann_file='FungiCLEF2023_val_metadata_PRODUCTION.csv',
        data_prefix='DF21/',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
test_evaluator = val_evaluator
configs/_base_/datasets/fungi_bs16_swin_384_class-balanced.py
ADDED
@@ -0,0 +1,96 @@
_base_ = ['./pipelines/rand_aug.py']

# dataset settings
dataset_type = 'Fungi'
data_preprocessor = dict(
    num_classes=1604,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFileFungi'),
    dict(
        type='RandomResizedCrop',
        scale=384,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',
        num_policies=2,
        total_level=10,
        magnitude_level=9,
        magnitude_std=0.5,
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=bgr_mean,
        fill_std=bgr_std),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFileFungi'),
    dict(
        type='ResizeEdge',
        scale=438,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=384),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=16,
    num_workers=8,
    dataset=dict(
        type='ClassBalancedDataset',
        oversample_thr=1e-2,
        dataset=dict(
            type=dataset_type,
            data_root='data/fungi2023/',
            ann_file='FungiCLEF2023_train_metadata_PRODUCTION.csv',
            data_prefix='DF20/',
            pipeline=train_pipeline)),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=64,
    num_workers=8,
    dataset=dict(
        type=dataset_type,
        data_root='data/fungi2023/',
        ann_file='FungiCLEF2023_val_metadata_PRODUCTION.csv',
        data_prefix='DF21/',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='SingleLabelMetric', items=['precision', 'recall', 'f1-score'])

test_dataloader = dict(
    batch_size=64,
    num_workers=8,
    dataset=dict(
        type=dataset_type,
        data_root='data/fungi2023/',
        ann_file='FungiCLEF2023_val_metadata_PRODUCTION.csv',
        data_prefix='DF21/',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
test_evaluator = val_evaluator
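
The class-balanced variant above differs from fungi_bs16_swin_384.py only in that the training set is wrapped in a ClassBalancedDataset with oversample_thr=1e-2. The sketch below illustrates the repeat-factor idea behind that threshold for a single-label dataset (an illustration of the concept, not the wrapper's actual implementation): classes seen in less than roughly 1% of the training images are repeated so their effective frequency moves toward the threshold.

# Illustrative sketch of repeat-factor oversampling, assuming the
# r(c) = max(1, sqrt(thr / f(c))) rule; helper names are hypothetical.
from collections import Counter
from math import sqrt

def repeat_factors(labels, oversample_thr=1e-2):
    """Per-sample repeat factors for a list of single-label annotations."""
    counts = Counter(labels)
    num_samples = len(labels)
    # Category frequency f(c) and repeat factor r(c) = max(1, sqrt(thr / f(c))).
    cat_repeat = {
        c: max(1.0, sqrt(oversample_thr / (n / num_samples)))
        for c, n in counts.items()
    }
    return [cat_repeat[label] for label in labels]

# Example: a class appearing once in 1000 samples is repeated about 3.2 times,
# while the common class keeps a repeat factor of 1.
print(repeat_factors([0] * 999 + [1])[-1])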
configs/_base_/datasets/imagenet21k_bs128.py
ADDED
@@ -0,0 +1,53 @@
# dataset settings
dataset_type = 'ImageNet21k'
data_preprocessor = dict(
    num_classes=21842,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomResizedCrop', scale=224),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='ResizeEdge', scale=256, edge='short'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=128,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet21k',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=128,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet21k',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
configs/_base_/datasets/imagenet_bs128_mbv3.py
ADDED
@@ -0,0 +1,68 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomResizedCrop', scale=224, backend='pillow'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='AutoAugment',
        policies='imagenet',
        hparams=dict(pad_val=[round(x) for x in bgr_mean])),
    dict(
        type='RandomErasing',
        erase_prob=0.2,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=bgr_mean,
        fill_std=bgr_std),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='ResizeEdge', scale=256, edge='short', backend='pillow'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=128,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=128,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
configs/_base_/datasets/imagenet_bs128_poolformer_medium_224.py
ADDED
@@ -0,0 +1,82 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=224,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',
        num_policies=2,
        total_level=10,
        magnitude_level=9,
        magnitude_std=0.5,
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=bgr_mean,
        fill_std=bgr_std),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=236,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=128,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=128,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
configs/_base_/datasets/imagenet_bs128_poolformer_small_224.py
ADDED
@@ -0,0 +1,82 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=224,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',
        num_policies=2,
        total_level=10,
        magnitude_level=9,
        magnitude_std=0.5,
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=bgr_mean,
        fill_std=bgr_std),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=248,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=128,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=128,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
configs/_base_/datasets/imagenet_bs128_revvit_224.py
ADDED
@@ -0,0 +1,85 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=224,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',
        num_policies=2,
        total_level=10,
        magnitude_level=7,
        magnitude_std=0.5,
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',  # should be 'pixel', but currently not supported
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=bgr_mean,
        fill_std=bgr_std),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=256,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=256,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
    persistent_workers=True,
)

val_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
    persistent_workers=True,
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
configs/_base_/datasets/imagenet_bs128_riformer_medium_384.py
ADDED
@@ -0,0 +1,82 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=384,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',
        num_policies=2,
        total_level=10,
        magnitude_level=9,
        magnitude_std=0.5,
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=bgr_mean,
        fill_std=bgr_std),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=404,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=384),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=128,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=16,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
configs/_base_/datasets/imagenet_bs128_riformer_small_384.py
ADDED
@@ -0,0 +1,82 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=384,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',
        num_policies=2,
        total_level=10,
        magnitude_level=9,
        magnitude_std=0.5,
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=bgr_mean,
        fill_std=bgr_std),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=426,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=384),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=128,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=32,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
configs/_base_/datasets/imagenet_bs128_vig_224.py
ADDED
@@ -0,0 +1,82 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[127.5, 127.5, 127.5],
    std=[127.5, 127.5, 127.5],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=224,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',
        num_policies=2,
        total_level=10,
        magnitude_level=9,
        magnitude_std=0.5,
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=bgr_mean,
        fill_std=bgr_std),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=248,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=128,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=128,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
configs/_base_/datasets/imagenet_bs16_eva_196.py
ADDED
@@ -0,0 +1,62 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255],
    std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255],
    # convert image from BGR to RGB
    to_rgb=True,
)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=196,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=196,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=196),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=16,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=16,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
configs/_base_/datasets/imagenet_bs16_eva_336.py
ADDED
@@ -0,0 +1,62 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255],
    std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255],
    # convert image from BGR to RGB
    to_rgb=True,
)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=336,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=336,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=336),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=16,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=16,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
configs/_base_/datasets/imagenet_bs16_eva_560.py
ADDED
@@ -0,0 +1,62 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255],
    std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255],
    # convert image from BGR to RGB
    to_rgb=True,
)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=560,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=560,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=560),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=16,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=16,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
configs/_base_/datasets/imagenet_bs16_pil_bicubic_384.py
ADDED
@@ -0,0 +1,55 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=384,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='Resize', scale=384, backend='pillow', interpolation='bicubic'),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=16,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=16,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
configs/_base_/datasets/imagenet_bs256_beitv2.py
ADDED
@@ -0,0 +1,48 @@
# dataset settings
dataset_type = 'ImageNet'
data_root = 'data/imagenet/'
data_preprocessor = dict(
    type='TwoNormDataPreprocessor',
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    second_mean=[127.5, 127.5, 127.5],
    second_std=[127.5, 127.5, 127.5],
    to_rgb=True)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ColorJitter',
        brightness=0.4,
        contrast=0.4,
        saturation=0.4,
        hue=0.),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandomResizedCropAndInterpolationWithTwoPic',
        size=224,
        second_size=224,
        interpolation='bicubic',
        second_interpolation='bicubic',
        scale=(0.2, 1.0)),
    dict(
        type='BEiTMaskGenerator',
        input_size=(14, 14),
        num_masking_patches=75,
        max_num_patches=75,
        min_num_patches=16),
    dict(type='PackInputs')
]

train_dataloader = dict(
    batch_size=256,
    num_workers=8,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    collate_fn=dict(type='default_collate'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='meta/train.txt',
        data_prefix=dict(img_path='train/'),
        pipeline=train_pipeline))
configs/_base_/datasets/imagenet_bs256_davit_224.py
ADDED
@@ -0,0 +1,82 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=224,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',
        num_policies=2,
        total_level=10,
        magnitude_level=9,
        magnitude_std=0.5,
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=bgr_mean,
        fill_std=bgr_std),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=236,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
configs/_base_/datasets/imagenet_bs256_levit_224.py
ADDED
@@ -0,0 +1,82 @@
dataset_type = 'ImageNet'

data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=224,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',
        num_policies=2,
        total_level=10,
        magnitude_level=9,
        magnitude_std=0.5,
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=bgr_mean,
        fill_std=bgr_std),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=256,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]

# The original commit pointed both dataloaders at a local Windows path
# (r'E:\imagenet') and reused the val split for training; restored to the
# standard layout used by the sibling configs.
train_dataloader = dict(
    batch_size=256,
    num_workers=4,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=256,
    num_workers=4,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator

configs/_base_/datasets/imagenet_bs256_rsb_a12.py
ADDED
@@ -0,0 +1,74 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=224,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',
        num_policies=2,
        total_level=10,
        magnitude_level=7,
        magnitude_std=0.5,
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=236,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs')
]

train_dataloader = dict(
    batch_size=256,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=256,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator

configs/_base_/datasets/imagenet_bs256_rsb_a3.py
ADDED
@@ -0,0 +1,74 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=224,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',
        num_policies=2,
        total_level=10,
        magnitude_level=6,
        magnitude_std=0.5,
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=236,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs')
]

train_dataloader = dict(
    batch_size=256,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=256,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator

configs/_base_/datasets/imagenet_bs256_simmim_192.py
ADDED
@@ -0,0 +1,34 @@
# dataset settings
dataset_type = 'ImageNet'
data_root = 'data/imagenet/'
data_preprocessor = dict(
    type='SelfSupDataPreprocessor',
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    to_rgb=True)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomResizedCrop', scale=192, crop_ratio_range=(0.67, 1.0)),
    dict(type='RandomFlip', prob=0.5),
    dict(
        type='SimMIMMaskGenerator',
        input_size=192,
        mask_patch_size=32,
        model_patch_size=4,
        mask_ratio=0.6),
    dict(type='PackInputs')
]

train_dataloader = dict(
    batch_size=256,
    num_workers=8,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    collate_fn=dict(type='default_collate'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='meta/train.txt',
        data_prefix=dict(img_path='train/'),
        pipeline=train_pipeline))

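For orientation, the mask-generator settings above imply a fixed masking grid: a 192-pixel input with 32-pixel mask patches gives a 6x6 grid of 36 candidate patches, of which a mask_ratio of 0.6 hides roughly 21 per image. A quick sanity check of that arithmetic (illustrative only, not part of the config):

# rough sanity check of the SimMIMMaskGenerator settings above
input_size, mask_patch_size, mask_ratio = 192, 32, 0.6
grid = input_size // mask_patch_size        # 6 mask patches per side
num_patches = grid * grid                   # 36 candidate mask patches
num_masked = int(num_patches * mask_ratio)  # about 21 patches hidden per image
print(grid, num_patches, num_masked)
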
configs/_base_/datasets/imagenet_bs256_swin_192.py
ADDED
@@ -0,0 +1,83 @@
# dataset settings
dataset_type = 'ImageNet'
data_root = 'data/imagenet/'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=192,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',
        num_policies=2,
        total_level=10,
        magnitude_level=9,
        magnitude_std=0.5,
        hparams=dict(pad_val=[104, 116, 124], interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=[103.53, 116.28, 123.675],
        fill_std=[57.375, 57.12, 58.395]),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=219,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=192),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=256,
    num_workers=8,
    collate_fn=dict(type='default_collate'),
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
)

val_dataloader = dict(
    batch_size=64,
    num_workers=5,
    collate_fn=dict(type='default_collate'),
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator

configs/_base_/datasets/imagenet_bs32.py
ADDED
@@ -0,0 +1,53 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomResizedCrop', scale=224),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='ResizeEdge', scale=256, edge='short'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=32,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=32,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator

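This file is the plain ImageNet baseline that model configs typically start from. A minimal sketch of overriding just the batch size and data root in a derived config, assuming the standard `_base_` key-by-key merge behaviour; the derived file path and local data path are hypothetical:

# e.g. configs/my_experiment/resnet50_custom_data.py (hypothetical path)
_base_ = ['../_base_/datasets/imagenet_bs32.py']

# dict values are merged with the base file, so only the changed keys are listed
train_dataloader = dict(
    batch_size=64,
    dataset=dict(data_root='/data/my_imagenet'))  # hypothetical local path
val_dataloader = dict(dataset=dict(data_root='/data/my_imagenet'))
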
configs/_base_/datasets/imagenet_bs32_byol.py
ADDED
@@ -0,0 +1,90 @@
# dataset settings
dataset_type = 'ImageNet'
data_root = 'data/imagenet/'
data_preprocessor = dict(
    type='SelfSupDataPreprocessor',
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    to_rgb=True)

view_pipeline1 = [
    dict(
        type='RandomResizedCrop',
        scale=224,
        interpolation='bicubic',
        backend='pillow'),
    dict(type='RandomFlip', prob=0.5),
    dict(
        type='RandomApply',
        transforms=[
            dict(
                type='ColorJitter',
                brightness=0.4,
                contrast=0.4,
                saturation=0.2,
                hue=0.1)
        ],
        prob=0.8),
    dict(
        type='RandomGrayscale',
        prob=0.2,
        keep_channels=True,
        channel_weights=(0.114, 0.587, 0.2989)),
    dict(
        type='GaussianBlur',
        magnitude_range=(0.1, 2.0),
        magnitude_std='inf',
        prob=1.),
    dict(type='Solarize', thr=128, prob=0.),
]
view_pipeline2 = [
    dict(
        type='RandomResizedCrop',
        scale=224,
        interpolation='bicubic',
        backend='pillow'),
    dict(type='RandomFlip', prob=0.5),
    dict(
        type='RandomApply',
        transforms=[
            dict(
                type='ColorJitter',
                brightness=0.4,
                contrast=0.4,
                saturation=0.2,
                hue=0.1)
        ],
        prob=0.8),
    dict(
        type='RandomGrayscale',
        prob=0.2,
        keep_channels=True,
        channel_weights=(0.114, 0.587, 0.2989)),
    dict(
        type='GaussianBlur',
        magnitude_range=(0.1, 2.0),
        magnitude_std='inf',
        prob=0.1),
    dict(type='Solarize', thr=128, prob=0.2)
]
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiView',
        num_views=[1, 1],
        transforms=[view_pipeline1, view_pipeline2]),
    dict(type='PackInputs')
]

train_dataloader = dict(
    batch_size=32,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    collate_fn=dict(type='default_collate'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='meta/train.txt',
        data_prefix=dict(img_path='train/'),
        pipeline=train_pipeline))

configs/_base_/datasets/imagenet_bs32_mocov2.py
ADDED
@@ -0,0 +1,59 @@
# dataset settings
dataset_type = 'ImageNet'
data_root = 'data/imagenet/'
data_preprocessor = dict(
    type='SelfSupDataPreprocessor',
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    to_rgb=True)

# The difference between mocov2 and mocov1 is the transforms in the pipeline
view_pipeline = [
    dict(
        type='RandomResizedCrop',
        scale=224,
        crop_ratio_range=(0.2, 1.),
        backend='pillow'),
    dict(
        type='RandomApply',
        transforms=[
            dict(
                type='ColorJitter',
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
                hue=0.1)
        ],
        prob=0.8),
    dict(
        type='RandomGrayscale',
        prob=0.2,
        keep_channels=True,
        channel_weights=(0.114, 0.587, 0.2989)),
    dict(
        type='GaussianBlur',
        magnitude_range=(0.1, 2.0),
        magnitude_std='inf',
        prob=0.5),
    dict(type='RandomFlip', prob=0.5),
]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='MultiView', num_views=2, transforms=[view_pipeline]),
    dict(type='PackInputs')
]

train_dataloader = dict(
    batch_size=32,
    num_workers=8,
    drop_last=True,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    collate_fn=dict(type='default_collate'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='meta/train.txt',
        data_prefix=dict(img_path='train/'),
        pipeline=train_pipeline))

configs/_base_/datasets/imagenet_bs32_pil_bicubic.py
ADDED
@@ -0,0 +1,62 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=224,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=256,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=32,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=32,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator

configs/_base_/datasets/imagenet_bs32_pil_resize.py
ADDED
@@ -0,0 +1,53 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomResizedCrop', scale=224, backend='pillow'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='ResizeEdge', scale=256, edge='short', backend='pillow'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=32,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=32,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator

configs/_base_/datasets/imagenet_bs32_simclr.py
ADDED
@@ -0,0 +1,53 @@
# dataset settings
dataset_type = 'ImageNet'
data_root = 'data/imagenet/'
data_preprocessor = dict(
    type='SelfSupDataPreprocessor',
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    to_rgb=True)

view_pipeline = [
    dict(type='RandomResizedCrop', scale=224, backend='pillow'),
    dict(type='RandomFlip', prob=0.5),
    dict(
        type='RandomApply',
        transforms=[
            dict(
                type='ColorJitter',
                brightness=0.8,
                contrast=0.8,
                saturation=0.8,
                hue=0.2)
        ],
        prob=0.8),
    dict(
        type='RandomGrayscale',
        prob=0.2,
        keep_channels=True,
        channel_weights=(0.114, 0.587, 0.2989)),
    dict(
        type='GaussianBlur',
        magnitude_range=(0.1, 2.0),
        magnitude_std='inf',
        prob=0.5),
]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='MultiView', num_views=2, transforms=[view_pipeline]),
    dict(type='PackInputs')
]

train_dataloader = dict(
    batch_size=32,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    collate_fn=dict(type='default_collate'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='meta/train.txt',
        data_prefix=dict(img_path='train/'),
        pipeline=train_pipeline))

configs/_base_/datasets/imagenet_bs512_mae.py
ADDED
@@ -0,0 +1,33 @@
# dataset settings
dataset_type = 'ImageNet'
data_root = 'data/imagenet/'
data_preprocessor = dict(
    type='SelfSupDataPreprocessor',
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    to_rgb=True)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=224,
        crop_ratio_range=(0.2, 1.0),
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackInputs')
]

train_dataloader = dict(
    batch_size=512,
    num_workers=8,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    collate_fn=dict(type='default_collate'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='meta/train.txt',
        data_prefix=dict(img_path='train/'),
        pipeline=train_pipeline))

configs/_base_/datasets/imagenet_bs512_mocov3.py
ADDED
@@ -0,0 +1,91 @@
# dataset settings
dataset_type = 'ImageNet'
data_root = 'data/imagenet/'
data_preprocessor = dict(
    type='SelfSupDataPreprocessor',
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    to_rgb=True)

view_pipeline1 = [
    dict(
        type='RandomResizedCrop',
        scale=224,
        crop_ratio_range=(0.2, 1.),
        backend='pillow'),
    dict(
        type='RandomApply',
        transforms=[
            dict(
                type='ColorJitter',
                brightness=0.4,
                contrast=0.4,
                saturation=0.2,
                hue=0.1)
        ],
        prob=0.8),
    dict(
        type='RandomGrayscale',
        prob=0.2,
        keep_channels=True,
        channel_weights=(0.114, 0.587, 0.2989)),
    dict(
        type='GaussianBlur',
        magnitude_range=(0.1, 2.0),
        magnitude_std='inf',
        prob=1.),
    dict(type='Solarize', thr=128, prob=0.),
    dict(type='RandomFlip', prob=0.5),
]
view_pipeline2 = [
    dict(
        type='RandomResizedCrop',
        scale=224,
        crop_ratio_range=(0.2, 1.),
        backend='pillow'),
    dict(
        type='RandomApply',
        transforms=[
            dict(
                type='ColorJitter',
                brightness=0.4,
                contrast=0.4,
                saturation=0.2,
                hue=0.1)
        ],
        prob=0.8),
    dict(
        type='RandomGrayscale',
        prob=0.2,
        keep_channels=True,
        channel_weights=(0.114, 0.587, 0.2989)),
    dict(
        type='GaussianBlur',
        magnitude_range=(0.1, 2.0),
        magnitude_std='inf',
        prob=0.1),
    dict(type='Solarize', thr=128, prob=0.2),
    dict(type='RandomFlip', prob=0.5),
]
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiView',
        num_views=[1, 1],
        transforms=[view_pipeline1, view_pipeline2]),
    dict(type='PackInputs')
]

train_dataloader = dict(
    batch_size=512,
    num_workers=8,
    persistent_workers=True,
    pin_memory=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    collate_fn=dict(type='default_collate'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='meta/train.txt',
        data_prefix=dict(img_path='train/'),
        pipeline=train_pipeline))

configs/_base_/datasets/imagenet_bs64.py
ADDED
@@ -0,0 +1,53 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomResizedCrop', scale=224),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='ResizeEdge', scale=256, edge='short'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator

configs/_base_/datasets/imagenet_bs64_autoaug.py
ADDED
@@ -0,0 +1,61 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomResizedCrop', scale=224),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='AutoAugment',
        policies='imagenet',
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='ResizeEdge', scale=256, edge='short'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator

configs/_base_/datasets/imagenet_bs64_clip_224.py
ADDED
@@ -0,0 +1,72 @@
# dataset settings
dataset_type = 'ImageNet'
img_norm_cfg = dict(
    mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255],
    std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255],
    to_rgb=True)
image_size = 224
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        size=image_size,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
    # dict(
    #     type='RandAugment',
    #     policies={{_base_.rand_increasing_policies}},
    #     num_policies=2,
    #     total_level=10,
    #     magnitude_level=9,
    #     magnitude_std=0.5,
    #     hparams=dict(
    #         pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
    #         interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=img_norm_cfg['mean'][::-1],
        fill_std=img_norm_cfg['std'][::-1]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_label']),
    dict(type='Collect', keys=['img', 'gt_label'])
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='Resize',
        size=(image_size, -1),
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=image_size),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='Collect', keys=['img'])
]

data = dict(
    samples_per_gpu=64,
    workers_per_gpu=8,
    train=dict(
        type=dataset_type,
        data_prefix='data/imagenet/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline),
    test=dict(
        # replace `data/val` with `data/test` for standard test
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline))

evaluation = dict(interval=10, metric='accuracy')

configs/_base_/datasets/imagenet_bs64_clip_384.py
ADDED
@@ -0,0 +1,72 @@
# dataset settings
dataset_type = 'ImageNet'
img_norm_cfg = dict(
    mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255],
    std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255],
    to_rgb=True)
image_size = 384
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        size=image_size,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
    # dict(
    #     type='RandAugment',
    #     policies={{_base_.rand_increasing_policies}},
    #     num_policies=2,
    #     total_level=10,
    #     magnitude_level=9,
    #     magnitude_std=0.5,
    #     hparams=dict(
    #         pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
    #         interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=img_norm_cfg['mean'][::-1],
        fill_std=img_norm_cfg['std'][::-1]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_label']),
    dict(type='Collect', keys=['img', 'gt_label'])
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='Resize',
        size=(image_size, -1),
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=image_size),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='Collect', keys=['img'])
]

data = dict(
    samples_per_gpu=64,
    workers_per_gpu=8,
    train=dict(
        type=dataset_type,
        data_prefix='data/imagenet/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline),
    test=dict(
        # replace `data/val` with `data/test` for standard test
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline))

evaluation = dict(interval=10, metric='accuracy')

configs/_base_/datasets/imagenet_bs64_clip_448.py
ADDED
@@ -0,0 +1,73 @@
# dataset settings
dataset_type = 'ImageNet'
img_norm_cfg = dict(
    mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255],
    std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255],
    to_rgb=True)
image_size = 448

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        size=image_size,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
    # dict(
    #     type='RandAugment',
    #     policies={{_base_.rand_increasing_policies}},
    #     num_policies=2,
    #     total_level=10,
    #     magnitude_level=9,
    #     magnitude_std=0.5,
    #     hparams=dict(
    #         pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
    #         interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=img_norm_cfg['mean'][::-1],
        fill_std=img_norm_cfg['std'][::-1]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_label']),
    dict(type='Collect', keys=['img', 'gt_label'])
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='Resize',
        size=(image_size, -1),
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=image_size),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='Collect', keys=['img'])
]

data = dict(
    samples_per_gpu=64,
    workers_per_gpu=8,
    train=dict(
        type=dataset_type,
        data_prefix='data/imagenet/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline),
    test=dict(
        # replace `data/val` with `data/test` for standard test
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline))

evaluation = dict(interval=10, metric='accuracy')

configs/_base_/datasets/imagenet_bs64_convmixer_224.py
ADDED
@@ -0,0 +1,82 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=224,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',
        num_policies=2,
        total_level=10,
        magnitude_level=9,
        magnitude_std=0.5,
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=bgr_mean,
        fill_std=bgr_std),
    dict(type='PackInputs')
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=233,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs')
]

train_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator

configs/_base_/datasets/imagenet_bs64_deit3_224.py
ADDED
@@ -0,0 +1,82 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=224,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',
        num_policies=2,
        total_level=10,
        magnitude_level=9,
        magnitude_std=0.5,
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=bgr_mean,
        fill_std=bgr_std),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=224,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator

configs/_base_/datasets/imagenet_bs64_deit3_384.py
ADDED
@@ -0,0 +1,62 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=384,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=384,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=384),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator

configs/_base_/datasets/imagenet_bs64_edgenext_256.py
ADDED
@@ -0,0 +1,82 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=256,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',
        num_policies=2,
        total_level=10,
        magnitude_level=9,
        magnitude_std=0.5,
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=bgr_mean,
        fill_std=bgr_std),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=292,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=256),
    dict(type='PackInputs')
]

train_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator

configs/_base_/datasets/imagenet_bs64_mixer_224.py
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# dataset settings
dataset_type = 'ImageNet'

# Google Research usually uses the normalization setting below.
data_preprocessor = dict(
    num_classes=1000,
    mean=[127.5, 127.5, 127.5],
    std=[127.5, 127.5, 127.5],
    # convert image from BGR to RGB
    to_rgb=True,
)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomResizedCrop', scale=224),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='ResizeEdge', scale=256, edge='short', interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
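Normalizing with mean and std both at 127.5 maps 8-bit pixel values into roughly [-1, 1], the convention used by Mixer-style models from Google Research; a quick check of the endpoints:

# endpoints of the [-1, 1] normalization used by the data_preprocessor above
for value in (0.0, 127.5, 255.0):
    print((value - 127.5) / 127.5)  # -1.0, 0.0, 1.0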
configs/_base_/datasets/imagenet_bs64_pil_resize.py
ADDED
@@ -0,0 +1,53 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomResizedCrop', scale=224, backend='pillow'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='ResizeEdge', scale=256, edge='short', backend='pillow'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
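ResizeEdge with edge='short' and scale=256 rescales the shorter image side to 256 pixels while preserving the aspect ratio, and the 224 centre crop then follows the classic ImageNet evaluation protocol. For example, the rescaled size of a 640x480 input works out as:

# how ResizeEdge(scale=256, edge='short') rescales a 640x480 image
w, h = 640, 480
ratio = 256 / min(w, h)                    # short edge is scaled to 256
print(round(w * ratio), round(h * ratio))  # 341 256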
configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py
ADDED
@@ -0,0 +1,70 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=224,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='AutoAugment',
        policies='imagenet',
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=256,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
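pad_val for AutoAugment is given in BGR order because the pipeline transforms operate on the image as loaded (BGR by default), before the data preprocessor converts it to RGB with to_rgb=True. The reversed, rounded values work out as:

mean = [123.675, 116.28, 103.53]     # RGB order, as in data_preprocessor
bgr_mean = mean[::-1]                # [103.53, 116.28, 123.675]
print([round(x) for x in bgr_mean])  # [104, 116, 124] -> AutoAugment pad_val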
configs/_base_/datasets/imagenet_bs64_swin_224.py
ADDED
@@ -0,0 +1,82 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=224,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',
        num_policies=2,
        total_level=10,
        magnitude_level=9,
        magnitude_std=0.5,
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=bgr_mean,
        fill_std=bgr_std),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=256,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
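These files under configs/_base_/datasets are meant to be consumed through OpenMMLab config inheritance rather than run directly: a model config lists them in _base_ and overrides individual keys in place. A minimal sketch (the model, schedule and runtime base files here are assumed for illustration):

_base_ = [
    '../_base_/datasets/imagenet_bs64_swin_224.py',
    '../_base_/models/swin_tiny.py',                      # assumed model base
    '../_base_/schedules/imagenet_bs1024_adamw_swin.py',  # assumed schedule base
    '../_base_/default_runtime.py',                       # assumed runtime base
]

# inherited dict fields can be overridden selectively, e.g. to shrink the
# per-GPU batch size without restating the whole dataloader config
train_dataloader = dict(batch_size=32)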
configs/_base_/datasets/imagenet_bs64_swin_256.py
ADDED
@@ -0,0 +1,82 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=256,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',
        num_policies=2,
        total_level=10,
        magnitude_level=9,
        magnitude_std=0.5,
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=bgr_mean,
        fill_std=bgr_std),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=292,  # ( 256 / 224 * 256 )
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=256),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
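The 292 short-edge resize keeps the usual 87.5% centre-crop ratio (224 from 256) at the larger 256-pixel crop, as the inline comment indicates:

crop_size = 256
resize_edge = crop_size * 256 / 224  # = 292.57..., truncated to 292 in the config
print(int(resize_edge))              # 292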
configs/_base_/datasets/imagenet_bs64_swin_384.py
ADDED
@@ -0,0 +1,56 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=384,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='Resize', scale=384, backend='pillow', interpolation='bicubic'),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
configs/_base_/datasets/imagenet_bs64_t2t_224.py
ADDED
@@ -0,0 +1,82 @@
# dataset settings
dataset_type = 'ImageNet'
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

bgr_mean = data_preprocessor['mean'][::-1]
bgr_std = data_preprocessor['std'][::-1]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        scale=224,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        policies='timm_increasing',
        num_policies=2,
        total_level=10,
        magnitude_level=9,
        magnitude_std=0.5,
        hparams=dict(
            pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        fill_color=bgr_mean,
        fill_std=bgr_std),
    dict(type='PackInputs'),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='ResizeEdge',
        scale=248,
        edge='short',
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='PackInputs'),
]

train_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/train.txt',
        data_prefix='train',
        pipeline=train_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=True),
)

val_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/val.txt',
        data_prefix='val',
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
val_evaluator = dict(type='Accuracy', topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
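As the trailing comment in each of these files notes, test_dataloader is only an alias of the validation loader. A downstream config with a real held-out split would redefine it explicitly; a sketch with an assumed annotation file and image folder:

test_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        ann_file='meta/test.txt',  # assumed annotation file for the test split
        data_prefix='test',        # assumed test image folder
        pipeline=test_pipeline),
    sampler=dict(type='DefaultSampler', shuffle=False),
)
test_evaluator = val_evaluator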