Spaces: Running on Zero
haotongl committed • 98844c3
Parent(s): c14ea0d
initial version
This view is limited to 50 files because it contains too many changes.
- .gitattributes +1 -35
- .gitignore +187 -0
- LICENSE +201 -0
- README.md +5 -6
- app.py +209 -0
- data/assets/example0_chair.zip +3 -0
- promptda/main.py +9 -0
- promptda/model/blocks.py +303 -0
- promptda/model/config.py +6 -0
- promptda/model/dpt.py +145 -0
- promptda/promptda.py +119 -0
- promptda/utils/depth_utils.py +91 -0
- promptda/utils/io_wrapper.py +98 -0
- promptda/utils/logger.py +75 -0
- promptda/utils/parallel_utils.py +78 -0
- setup.py +10 -0
- torchhub/README.md +3 -0
- torchhub/facebookresearch_dinov2_main/CODE_OF_CONDUCT.md +80 -0
- torchhub/facebookresearch_dinov2_main/CONTRIBUTING.md +31 -0
- torchhub/facebookresearch_dinov2_main/LICENSE +400 -0
- torchhub/facebookresearch_dinov2_main/MODEL_CARD.md +201 -0
- torchhub/facebookresearch_dinov2_main/README.md +277 -0
- torchhub/facebookresearch_dinov2_main/conda.yaml +22 -0
- torchhub/facebookresearch_dinov2_main/dinov2/__init__.py +7 -0
- torchhub/facebookresearch_dinov2_main/dinov2/configs/__init__.py +23 -0
- torchhub/facebookresearch_dinov2_main/dinov2/configs/eval/vitb14_pretrain.yaml +6 -0
- torchhub/facebookresearch_dinov2_main/dinov2/configs/eval/vitg14_pretrain.yaml +7 -0
- torchhub/facebookresearch_dinov2_main/dinov2/configs/eval/vitl14_pretrain.yaml +6 -0
- torchhub/facebookresearch_dinov2_main/dinov2/configs/eval/vits14_pretrain.yaml +6 -0
- torchhub/facebookresearch_dinov2_main/dinov2/configs/ssl_default_config.yaml +115 -0
- torchhub/facebookresearch_dinov2_main/dinov2/configs/train/vitg14.yaml +26 -0
- torchhub/facebookresearch_dinov2_main/dinov2/configs/train/vitl14.yaml +26 -0
- torchhub/facebookresearch_dinov2_main/dinov2/configs/train/vitl16_short.yaml +6 -0
- torchhub/facebookresearch_dinov2_main/dinov2/data/__init__.py +11 -0
- torchhub/facebookresearch_dinov2_main/dinov2/data/adapters.py +29 -0
- torchhub/facebookresearch_dinov2_main/dinov2/data/augmentations.py +119 -0
- torchhub/facebookresearch_dinov2_main/dinov2/data/collate.py +50 -0
- torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/__init__.py +8 -0
- torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/decoders.py +32 -0
- torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/extended.py +39 -0
- torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/image_net.py +291 -0
- torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/image_net_22k.py +303 -0
- torchhub/facebookresearch_dinov2_main/dinov2/data/loaders.py +223 -0
- torchhub/facebookresearch_dinov2_main/dinov2/data/masking.py +87 -0
- torchhub/facebookresearch_dinov2_main/dinov2/data/samplers.py +230 -0
- torchhub/facebookresearch_dinov2_main/dinov2/data/transforms.py +92 -0
- torchhub/facebookresearch_dinov2_main/dinov2/distributed/__init__.py +271 -0
- torchhub/facebookresearch_dinov2_main/dinov2/eval/__init__.py +5 -0
- torchhub/facebookresearch_dinov2_main/dinov2/eval/knn.py +405 -0
- torchhub/facebookresearch_dinov2_main/dinov2/eval/linear.py +626 -0
.gitattributes
CHANGED
@@ -1,35 +1 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+data/assets/example0_chair.zip filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,187 @@
+# delete files larger than 10MiB
+**basicModel_neutral_lbs_10_207_0_v1.0.0.pkl
+
+.vscode
+.hydra
+*.txt
+# All files or folders starting with tmp will be ignored
+tmp*
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# macOS
+.DS_Store/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+# torchsparse
+torchsparse
+
+# tensorboard
+tensorboard
+
+# glove
+glove
+*.jpg
+*.png
+*.mp4
+
+*.ipynb
+*.ply
+3rdparty
LICENSE
ADDED
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
README.md
CHANGED
@@ -1,14 +1,13 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: Promptda
+emoji: 📉
+colorFrom: blue
+colorTo: purple
 sdk: gradio
-sdk_version:
+sdk_version: 4.44.1
 app_file: app.py
 pinned: false
 license: apache-2.0
-short_description: Prompting Depth Anything for 4K Resolution Accurate Metric D
 ---

 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,209 @@
+import os
+import time
+import shutil
+from pathlib import Path
+from typing import Union
+import atexit
+import spaces
+from concurrent.futures import ThreadPoolExecutor
+import open3d as o3d
+import trimesh
+
+import gradio as gr
+from gradio_imageslider import ImageSlider
+import cv2
+import numpy as np
+import click
+import imageio
+from promptda.promptda import PromptDA
+from promptda.utils.io_wrapper import load_image, load_depth
+from promptda.utils.depth_utils import visualize_depth, unproject_depth
+model = PromptDA.from_pretrained('depth-anything/promptda_vitl').to("cuda").eval()
+thread_pool_executor = ThreadPoolExecutor(max_workers=1)
+
+def delete_later(path: Union[str, os.PathLike], delay: int = 300):
+    print(f"Scheduling deletion: {path}")
+    def _delete():
+        try:
+            if os.path.isfile(path):
+                os.remove(path)
+                print(f"Deleted file: {path}")
+            elif os.path.isdir(path):
+                shutil.rmtree(path)
+                print(f"Deleted directory: {path}")
+        except Exception:
+            pass
+    def _wait_and_delete():
+        time.sleep(delay)
+        _delete()
+    thread_pool_executor.submit(_wait_and_delete)
+    atexit.register(_delete)
+
+
+@spaces.GPU
+def run_with_gpu(image, prompt_depth):
+    depth = model.predict(image, prompt_depth)
+    depth = depth[0, 0].detach().cpu().numpy()
+    return depth
+
+def check_is_stray_scanner_app_capture(input_dir):
+    assert os.path.exists(os.path.join(input_dir, 'rgb.mp4')), 'rgb.mp4 not found'
+
+def run(input_file, resolution):
+    # unzip the uploaded capture
+    input_file = input_file.name
+    root_dir = os.path.dirname(input_file)
+    scene_name = input_file.split('/')[-1].split('.')[0]
+    input_dir = os.path.join(root_dir, scene_name)
+    cmd = f'unzip -o {input_file} -d {root_dir}'
+    os.system(cmd)
+    check_is_stray_scanner_app_capture(input_dir)
+
+    # extract rgb frames
+    os.makedirs(os.path.join(input_dir, 'rgb'), exist_ok=True)
+    cmd = f'ffmpeg -i {input_dir}/rgb.mp4 -start_number 0 -frames:v 10 -q:v 2 {input_dir}/rgb/%06d.jpg'
+    os.system(cmd)
+
+    # loading & inference
+    image_path = os.path.join(input_dir, 'rgb', '000000.jpg')
+    image = load_image(image_path)
+    prompt_depth_path = os.path.join(input_dir, 'depth/000000.png')
+    prompt_depth = load_depth(prompt_depth_path)
+    depth = run_with_gpu(image, prompt_depth)
+
+    color = (image[0].permute(1, 2, 0).cpu().numpy() * 255.).astype(np.uint8)
+
+    # visualization files
+    vis_depth, depth_min, depth_max = visualize_depth(depth, ret_minmax=True)
+    vis_prompt_depth = visualize_depth(prompt_depth[0, 0].detach().cpu().numpy(), depth_min=depth_min, depth_max=depth_max)
+    vis_prompt_depth = cv2.resize(vis_prompt_depth, (vis_depth.shape[1], vis_depth.shape[0]), interpolation=cv2.INTER_NEAREST)
+
+    # PLY file
+    ixt_path = os.path.join(input_dir, 'camera_matrix.csv')
+    ixt = np.loadtxt(ixt_path, delimiter=',')
+    orig_max = 1920
+    now_max = max(color.shape[1], color.shape[0])
+    scale = orig_max / now_max
+    ixt[:2] = ixt[:2] / scale
+    pcd = unproject_depth(depth, ixt=ixt, color=color, ret_pcd=True)
+    ply_path = os.path.join(input_dir, 'pointcloud.ply')
+    o3d.io.write_point_cloud(ply_path, pcd)
+
+    glb_path = os.path.join(input_dir, 'pointcloud.glb')
+    scene_3d = trimesh.Scene()
+    glb_colors = np.asarray(pcd.colors).astype(np.float32)
+    glb_colors = np.concatenate([glb_colors, np.ones_like(glb_colors[:, :1])], axis=1)
+    # glb_colors = (np.asarray(pcd.colors) * 255).astype(np.uint8)
+    pcd_data = trimesh.PointCloud(
+        vertices=np.asarray(pcd.points) * np.array([[1, -1, -1]]),
+        colors=glb_colors.astype(np.float64),
+    )
+    scene_3d.add_geometry(pcd_data)
+    scene_3d.export(file_obj=glb_path)
+    # o3d.io.write_point_cloud(glb_path, pcd)
+
+    # depth map with original values (uint16, millimeters)
+    depth_path = os.path.join(input_dir, 'depth.png')
+    output_depth = (depth * 1000).astype(np.uint16)
+    imageio.imwrite(depth_path, output_depth)
+
+    delete_later(Path(input_dir))
+    delete_later(Path(input_file))
+
+    return color, (vis_depth, vis_prompt_depth), Path(glb_path), Path(ply_path).as_posix(), Path(depth_path).as_posix()
+
+DESCRIPTION = """
+# Estimate accurate and high-resolution depth maps from your iPhone capture.
+
+## Requirements:
+1. An iPhone 12 Pro or later Pro model, or an iPad Pro (2020 or later)
+2. Free iOS app: [Stray Scanner App](https://apps.apple.com/us/app/stray-scanner/id1557051662)
+
+## Testing Steps:
+1. Capture a scene with the Stray Scanner app.
+2. Use the iPhone [Files App](https://apps.apple.com/us/app/files/id1232058109) to compress the capture folder into a zip file (long-press the folder to compress) and transfer it to your computer.
+3. Upload the zip file and click "Submit" to get the depth map of the first frame.
+
+Note:
+- Currently, this demo only supports inference for the first frame. If you need to obtain all depth frames, please refer to our [GitHub repo](https://github.com/DepthAnything/PromptDA).
+- The depth map is stored as uint16, with a unit of millimeters.
+"""
+
+@click.command()
+@click.option('--share', is_flag=True, help='Whether to run the app in shared mode.')
+def main(share: bool):
+    with gr.Blocks(theme=gr.themes.Soft()) as demo:
+        gr.Markdown(DESCRIPTION)
+
+        with gr.Row():
+            input_file = gr.File(type="filepath", label="Upload a Stray Scanner app capture zip file")
+            resolution = gr.Dropdown(choices=['756x1008', '1428x1904'], value='756x1008', label="Inference resolution")
+        submit_btn = gr.Button("Submit")
+
+        gr.Examples(examples=[
+            ["data/assets/example0_chair.zip", "756x1008"]
+        ],
+            inputs=[input_file, resolution],
+            # outputs=[output_rgb, output_depths, output_3d_model, output_ply, output_depth_map],
+            label="Examples",
+        )
+
+        with gr.Row():
+            with gr.Column():
+                output_rgb = gr.Image(type="numpy", label="RGB Image")
+            with gr.Column():
+                output_depths = ImageSlider(label="Depth map / prompt depth", position=0.5)
+
+        with gr.Row():
+            with gr.Column():
+                output_3d_model = gr.Model3D(label="3D Viewer", display_mode='solid', clear_color=[1.0, 1.0, 1.0, 1.0])
+            with gr.Column():
+                output_ply = gr.File(type="filepath", label="Download the unprojected point cloud as .ply file")
+                output_depth_map = gr.File(type="filepath", label="Download the depth map as .png file")
+
+        outputs = [
+            output_rgb,
+            output_depths,
+            output_3d_model,
+            output_ply,
+            output_depth_map,
+        ]
+
+        submit_btn.click(run,
+                         inputs=[input_file, resolution],
+                         outputs=outputs)
+
+    demo.launch(share=share, debug=True)
+# def main(share: bool):
+#     gr.Interface(
+#         fn=run,
+#         inputs=[
+#             gr.File(type="filepath", label="Upload a stray scanner app capture zip file"),
+#             gr.Dropdown(choices=['756x1008', '1428x1904'], value='756x1008', label="Inference resolution")
+#         ],
+#         outputs=[
+#             gr.Image(type="numpy", label="RGB Image"),
+#             ImageSlider(label="Depth map / prompt depth", position=0.5),
+#             gr.Model3D(label="3D Viewer", display_mode='solid', clear_color=[1.0, 1.0, 1.0, 1.0]),
+#             gr.File(type="filepath", label="Download the unprojected point cloud as .ply file"),
+#             gr.File(type="filepath", label="Download the depth map as .png file"),
+#         ],
+#         title=None,
+#         description=DESCRIPTION,
+#         clear_btn=None,
+#         allow_flagging="never",
+#         theme=gr.themes.Soft(),
+#         examples=[
+#             ["data/assets/8b98276b0a.zip"]
+#         ]
+#     ).launch(share=True)
+
+
+if __name__ == '__main__':
+    main()
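
The same inference path that the app wires into Gradio can be exercised offline in a few lines. A minimal sketch, assuming the promptda package is installed, a CUDA device is available, and a Stray Scanner capture has already been unzipped into a hypothetical capture/ directory with rgb/000000.jpg and depth/000000.png:

    # Minimal offline sketch of the app's inference path.
    # 'capture/...' paths are placeholders for an unzipped Stray Scanner capture.
    import numpy as np
    import imageio
    from promptda.promptda import PromptDA
    from promptda.utils.io_wrapper import load_image, load_depth

    model = PromptDA.from_pretrained('depth-anything/promptda_vitl').to('cuda').eval()
    image = load_image('capture/rgb/000000.jpg')            # RGB frame
    prompt_depth = load_depth('capture/depth/000000.png')   # low-res LiDAR depth prompt
    depth = model.predict(image, prompt_depth)[0, 0].cpu().numpy()
    imageio.imwrite('depth.png', (depth * 1000).astype(np.uint16))  # uint16 millimeters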
data/assets/example0_chair.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a40632ba3908c9153ee6d0d85eb19b96c1c12d3a657732ce77348b5465dd9bd9
+size 1080751
promptda/main.py
ADDED
@@ -0,0 +1,9 @@
+from promptda.utils.logger import Log
+
+
+def main():
+    pass
+
+
+if __name__ == "__main__":
+    main()
promptda/model/blocks.py
ADDED
@@ -0,0 +1,303 @@
+import torch.nn as nn
+import torch
+import torch.nn.functional as F
+from promptda.utils.logger import Log
+import os
+import numpy as np
+
+
+def _make_fusion_block(features, use_bn, size=None):
+    return FeatureFusionDepthBlock(
+        features,
+        nn.ReLU(False),
+        deconv=False,
+        bn=use_bn,
+        expand=False,
+        align_corners=True,
+        size=size,
+    )
+
+
+def _make_scratch(in_shape, out_shape, groups=1, expand=False):
+    scratch = nn.Module()
+
+    out_shape1 = out_shape
+    out_shape2 = out_shape
+    out_shape3 = out_shape
+    if len(in_shape) >= 4:
+        out_shape4 = out_shape
+
+    if expand:
+        out_shape1 = out_shape
+        out_shape2 = out_shape*2
+        out_shape3 = out_shape*4
+        if len(in_shape) >= 4:
+            out_shape4 = out_shape*8
+
+    scratch.layer1_rn = nn.Conv2d(
+        in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
+    )
+    scratch.layer2_rn = nn.Conv2d(
+        in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
+    )
+    scratch.layer3_rn = nn.Conv2d(
+        in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
+    )
+    if len(in_shape) >= 4:
+        scratch.layer4_rn = nn.Conv2d(
+            in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
+        )
+
+    return scratch
+
+
+class ResidualConvUnit(nn.Module):
+    """Residual convolution module.
+    """
+
+    def __init__(self, features, activation, bn):
+        """Init.
+
+        Args:
+            features (int): number of features
+        """
+        super().__init__()
+
+        self.bn = bn
+
+        self.groups = 1
+
+        self.conv1 = nn.Conv2d(
+            features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
+        )
+
+        self.conv2 = nn.Conv2d(
+            features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
+        )
+
+        if self.bn == True:
+            self.bn1 = nn.BatchNorm2d(features)
+            self.bn2 = nn.BatchNorm2d(features)
+
+        self.activation = activation
+
+        self.skip_add = nn.quantized.FloatFunctional()
+
+    def forward(self, x):
+        """Forward pass.
+
+        Args:
+            x (tensor): input
+
+        Returns:
+            tensor: output
+        """
+
+        out = self.activation(x)
+        out = self.conv1(out)
+        if self.bn == True:
+            out = self.bn1(out)
+
+        out = self.activation(out)
+        out = self.conv2(out)
+        if self.bn == True:
+            out = self.bn2(out)
+
+        if self.groups > 1:
+            out = self.conv_merge(out)
+
+        return self.skip_add.add(out, x)
+
+
+class FeatureFusionBlock(nn.Module):
+    """Feature fusion block.
+    """
+
+    def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, size=None):
+        """Init.
+
+        Args:
+            features (int): number of features
+        """
+        super(FeatureFusionBlock, self).__init__()
+
+        self.deconv = deconv
+        self.align_corners = align_corners
+
+        self.groups = 1
+
+        self.expand = expand
+        out_features = features
+        if self.expand == True:
+            out_features = features//2
+
+        self.out_conv = nn.Conv2d(
+            features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
+
+        self.resConfUnit1 = ResidualConvUnit(features, activation, bn)
+        self.resConfUnit2 = ResidualConvUnit(features, activation, bn)
+
+        self.skip_add = nn.quantized.FloatFunctional()
+
+        self.size = size
+
+    def forward(self, *xs, size=None):
+        """Forward pass.
+
+        Returns:
+            tensor: output
+        """
+        output = xs[0]
+
+        if len(xs) == 2:
+            res = self.resConfUnit1(xs[1])
+            output = self.skip_add.add(output, res)
+
+        output = self.resConfUnit2(output)
+
+        if (size is None) and (self.size is None):
+            modifier = {"scale_factor": 2}
+        elif size is None:
+            modifier = {"size": self.size}
+        else:
+            modifier = {"size": size}
+
+        output = nn.functional.interpolate(
+            output, **modifier, mode="bilinear", align_corners=self.align_corners
+        )
+
+        output = self.out_conv(output)
+
+        return output
+
+
+class FeatureFusionControlBlock(FeatureFusionBlock):
+    """Feature fusion block.
+    """
+
+    def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, size=None):
+        """Init.
+
+        Args:
+            features (int): number of features
+        """
+        super().__init__(features, activation, deconv,
+                         bn, expand, align_corners, size)
+        self.copy_block = FeatureFusionBlock(
+            features, activation, deconv, bn, expand, align_corners, size)
+
+    def forward(self, *xs, size=None):
+        """Forward pass.
+
+        Returns:
+            tensor: output
+        """
+        output = xs[0]
+
+        if len(xs) == 2:
+            res = self.resConfUnit1(xs[1])
+            output = self.skip_add.add(output, res)
+
+        output = self.resConfUnit2(output)
+
+        if (size is None) and (self.size is None):
+            modifier = {"scale_factor": 2}
+        elif size is None:
+            modifier = {"size": self.size}
+        else:
+            modifier = {"size": size}
+
+        output = nn.functional.interpolate(
+            output, **modifier, mode="bilinear", align_corners=self.align_corners
+        )
+
+        output = self.out_conv(output)
+
+        return output
+
+
+def zero_module(module):
+    """
+    Zero out the parameters of a module and return it.
+    """
+    for p in module.parameters():
+        p.detach().zero_()
+    return module
+
+
+class FeatureFusionDepthBlock(nn.Module):
+    """Feature fusion block.
+    """
+
+    def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, size=None):
+        """Init.
+
+        Args:
+            features (int): number of features
+        """
+        super(FeatureFusionDepthBlock, self).__init__()
+
+        self.deconv = deconv
+        self.align_corners = align_corners
+
+        self.groups = 1
+
+        self.expand = expand
+        out_features = features
+        if self.expand == True:
+            out_features = features//2
+
+        self.out_conv = nn.Conv2d(
+            features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
+
+        self.resConfUnit1 = ResidualConvUnit(features, activation, bn)
+        self.resConfUnit2 = ResidualConvUnit(features, activation, bn)
+        self.resConfUnit_depth = nn.Sequential(
+            nn.Conv2d(1, features, kernel_size=3, stride=1,
+                      padding=1, bias=True, groups=1),
+            activation,
+            nn.Conv2d(features, features, kernel_size=3,
+                      stride=1, padding=1, bias=True, groups=1),
+            activation,
+            zero_module(
+                nn.Conv2d(features, features, kernel_size=3,
+                          stride=1, padding=1, bias=True, groups=1)
+            )
+        )
+        self.skip_add = nn.quantized.FloatFunctional()
+        self.size = size
+
+    def forward(self, *xs, prompt_depth=None, size=None):
+        """Forward pass.
+
+        Returns:
+            tensor: output
+        """
+        output = xs[0]
+
+        if len(xs) == 2:
+            res = self.resConfUnit1(xs[1])
+            output = self.skip_add.add(output, res)
+
+        output = self.resConfUnit2(output)
+
+        if prompt_depth is not None:
+            prompt_depth = F.interpolate(
+                prompt_depth, output.shape[2:], mode='bilinear', align_corners=False)
+            res = self.resConfUnit_depth(prompt_depth)
+            output = self.skip_add.add(output, res)
+
+        if (size is None) and (self.size is None):
+            modifier = {"scale_factor": 2}
+        elif size is None:
+            modifier = {"size": self.size}
+        else:
+            modifier = {"size": size}
+
+        output = nn.functional.interpolate(
+            output, **modifier, mode="bilinear", align_corners=self.align_corners
+        )
+
+        output = self.out_conv(output)
+
+        return output
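
Worth noting in the file above: zero_module zero-initializes the last conv of the prompt-depth branch, so at initialization FeatureFusionDepthBlock behaves exactly like a plain fusion block, and the prompt only starts contributing once training moves those weights away from zero (a ControlNet-style trick). A quick sketch of that property, with arbitrarily chosen shapes:

    # Sketch: the zero-initialized prompt branch adds exactly nothing at init.
    import torch
    import torch.nn as nn
    from promptda.model.blocks import FeatureFusionDepthBlock

    block = FeatureFusionDepthBlock(features=32, activation=nn.ReLU(False))
    x = torch.randn(1, 32, 16, 16)
    prompt = torch.rand(1, 1, 64, 64)
    with torch.no_grad():
        out_plain = block(x, size=(32, 32))
        out_prompted = block(x, prompt_depth=prompt, size=(32, 32))
    torch.testing.assert_close(out_plain, out_prompted)  # identical at init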
promptda/model/config.py
ADDED
@@ -0,0 +1,6 @@
+model_configs = {
+    'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384], 'layer_idxs': [2, 5, 8, 11]},
+    'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768], 'layer_idxs': [2, 5, 8, 11]},
+    'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024], 'layer_idxs': [4, 11, 17, 23]},
+    'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536], 'layer_idxs': [9, 19, 29, 39]}
+}
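
For orientation, layer_idxs names the transformer blocks whose outputs are tapped for the DPT head (four taps per backbone), and out_channels gives the per-tap widths of the reassembly convs; for example:

    from promptda.model.config import model_configs

    cfg = model_configs['vitl']    # ViT-L backbone (24 blocks)
    print(cfg['layer_idxs'])       # [4, 11, 17, 23]: four multi-scale feature taps
    print(cfg['out_channels'])     # [256, 512, 1024, 1024]: reassembly conv widths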
promptda/model/dpt.py
ADDED
@@ -0,0 +1,145 @@
+# Copyright (c) 2024, Depth Anything V2
+# https://github.com/DepthAnything/Depth-Anything-V2/blob/main/depth_anything_v2/dpt.py
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from promptda.model.blocks import _make_scratch, _make_fusion_block
+
+
+class DPTHead(nn.Module):
+    def __init__(self,
+                 nclass,
+                 in_channels,
+                 features=256,
+                 out_channels=[256, 512, 1024, 1024],
+                 use_bn=False,
+                 use_clstoken=False,
+                 output_act='sigmoid'):
+        super(DPTHead, self).__init__()
+
+        self.nclass = nclass
+        self.use_clstoken = use_clstoken
+
+        self.projects = nn.ModuleList([
+            nn.Conv2d(
+                in_channels=in_channels,
+                out_channels=out_channel,
+                kernel_size=1,
+                stride=1,
+                padding=0,
+            ) for out_channel in out_channels
+        ])
+
+        self.resize_layers = nn.ModuleList([
+            nn.ConvTranspose2d(
+                in_channels=out_channels[0],
+                out_channels=out_channels[0],
+                kernel_size=4,
+                stride=4,
+                padding=0),
+            nn.ConvTranspose2d(
+                in_channels=out_channels[1],
+                out_channels=out_channels[1],
+                kernel_size=2,
+                stride=2,
+                padding=0),
+            nn.Identity(),
+            nn.Conv2d(
+                in_channels=out_channels[3],
+                out_channels=out_channels[3],
+                kernel_size=3,
+                stride=2,
+                padding=1)
+        ])
+
+        if use_clstoken:
+            self.readout_projects = nn.ModuleList()
+            for _ in range(len(self.projects)):
+                self.readout_projects.append(
+                    nn.Sequential(
+                        nn.Linear(2 * in_channels, in_channels),
+                        nn.GELU()))
+
+        self.scratch = _make_scratch(
+            out_channels,
+            features,
+            groups=1,
+            expand=False,
+        )
+
+        self.scratch.stem_transpose = None
+
+        self.scratch.refinenet1 = _make_fusion_block(
+            features, use_bn)
+        self.scratch.refinenet2 = _make_fusion_block(
+            features, use_bn)
+        self.scratch.refinenet3 = _make_fusion_block(
+            features, use_bn)
+        self.scratch.refinenet4 = _make_fusion_block(
+            features, use_bn)
+
+        head_features_1 = features
+        head_features_2 = 32
+
+        act_func = nn.Sigmoid() if output_act == 'sigmoid' else nn.Identity()
+
+        if nclass > 1:
+            self.scratch.output_conv = nn.Sequential(
+                nn.Conv2d(head_features_1, head_features_1,
+                          kernel_size=3, stride=1, padding=1),
+                nn.ReLU(True),
+                nn.Conv2d(head_features_1, nclass,
+                          kernel_size=1, stride=1, padding=0),
+            )
+        else:
+            self.scratch.output_conv1 = nn.Conv2d(
+                head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1)
+
+            self.scratch.output_conv2 = nn.Sequential(
+                nn.Conv2d(head_features_1 // 2, head_features_2,
+                          kernel_size=3, stride=1, padding=1),
+                nn.ReLU(True),
+                nn.Conv2d(head_features_2, 1, kernel_size=1,
+                          stride=1, padding=0),
+                act_func,
+            )
+
+    def forward(self, out_features, patch_h, patch_w, prompt_depth=None):
+        out = []
+        for i, x in enumerate(out_features):
+            if self.use_clstoken:
+                x, cls_token = x[0], x[1]
+                readout = cls_token.unsqueeze(1).expand_as(x)
+                x = self.readout_projects[i](torch.cat((x, readout), -1))
+            else:
+                x = x[0]
+
+            x = x.permute(0, 2, 1).reshape(
+                (x.shape[0], x.shape[-1], patch_h, patch_w))
+
+            x = self.projects[i](x)
+            x = self.resize_layers[i](x)
+
+            out.append(x)
+
+        layer_1, layer_2, layer_3, layer_4 = out
+
+        layer_1_rn = self.scratch.layer1_rn(layer_1)
+        layer_2_rn = self.scratch.layer2_rn(layer_2)
+        layer_3_rn = self.scratch.layer3_rn(layer_3)
+        layer_4_rn = self.scratch.layer4_rn(layer_4)
+
+        path_4 = self.scratch.refinenet4(
+            layer_4_rn, size=layer_3_rn.shape[2:], prompt_depth=prompt_depth)
+        path_3 = self.scratch.refinenet3(
+            path_4, layer_3_rn, size=layer_2_rn.shape[2:], prompt_depth=prompt_depth)
+        path_2 = self.scratch.refinenet2(
+            path_3, layer_2_rn, size=layer_1_rn.shape[2:], prompt_depth=prompt_depth)
+        path_1 = self.scratch.refinenet1(
+            path_2, layer_1_rn, prompt_depth=prompt_depth)
+        out = self.scratch.output_conv1(path_1)
+        out_feat = F.interpolate(
+            out, (int(patch_h * 14), int(patch_w * 14)),
+            mode="bilinear", align_corners=True)
+        out = self.scratch.output_conv2(out_feat)
+        return out
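
A shape-level sketch of how DPTHead consumes backbone features, with dummy tensors standing in for DINOv2's (tokens, class token) pairs; the numbers follow the vitl config and the 14-pixel patch size, and are otherwise arbitrary:

    # Sketch: dummy ViT-L features through the head (assumed shapes, untrained weights).
    import torch
    from promptda.model.dpt import DPTHead

    patch_h = patch_w = 32               # e.g. a 448x448 input with 14x14 patches
    dim = 1024                           # ViT-L token dimension
    feats = [(torch.randn(1, patch_h * patch_w, dim), torch.randn(1, dim))
             for _ in range(4)]          # one (tokens, cls) pair per tapped layer
    head = DPTHead(nclass=1, in_channels=dim, features=256,
                   out_channels=[256, 512, 1024, 1024])
    prompt = torch.rand(1, 1, 192, 256)  # prompt depth, resized inside each fusion block
    depth = head(feats, patch_h, patch_w, prompt_depth=prompt)
    print(depth.shape)                   # torch.Size([1, 1, 448, 448]), sigmoid output in (0, 1)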
promptda/promptda.py
ADDED
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
import torch.nn as nn
|
3 |
+
from promptda.model.dpt import DPTHead
|
4 |
+
from promptda.model.config import model_configs
|
5 |
+
from promptda.utils.logger import Log
|
6 |
+
import os
|
7 |
+
from pathlib import Path
|
8 |
+
from huggingface_hub import hf_hub_download
|
9 |
+
|
10 |
+
|
11 |
+
class PromptDA(nn.Module):
|
12 |
+
patch_size = 14 # patch size of the pretrained dinov2 model
|
13 |
+
use_bn = False
|
14 |
+
use_clstoken = False
|
15 |
+
output_act = 'sigmoid'
|
16 |
+
|
17 |
+
def __init__(self,
|
18 |
+
encoder='vitl',
|
19 |
+
ckpt_path='data/checkpoints/promptda_vitl.ckpt'):
|
20 |
+
super().__init__()
|
21 |
+
model_config = model_configs[encoder]
|
22 |
+
|
23 |
+
self.encoder = encoder
|
24 |
+
self.model_config = model_config
|
25 |
+
self.pretrained = torch.hub.load(
|
26 |
+
'torchhub/facebookresearch_dinov2_main',
|
27 |
+
'dinov2_{:}14'.format(encoder),
|
28 |
+
source='local',
|
29 |
+
pretrained=False)
|
30 |
+
dim = self.pretrained.blocks[0].attn.qkv.in_features
|
31 |
+
self.depth_head = DPTHead(nclass=1,
|
32 |
+
in_channels=dim,
|
33 |
+
            features=model_config['features'],
            out_channels=model_config['out_channels'],
            use_bn=self.use_bn,
            use_clstoken=self.use_clstoken,
            output_act=self.output_act)

        # mean and std of the pretrained dinov2 model
        self.register_buffer('_mean', torch.tensor(
            [0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer('_std', torch.tensor(
            [0.229, 0.224, 0.225]).view(1, 3, 1, 1))

        self.load_checkpoint(ckpt_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, model_kwargs=None, **hf_kwargs):
        """
        Load a model from a checkpoint file.
        ### Parameters:
        - `pretrained_model_name_or_path`: path to the checkpoint file or repo id.
        - `model_kwargs`: additional keyword arguments to override the parameters in the checkpoint.
        - `hf_kwargs`: additional keyword arguments to pass to the `hf_hub_download` function. Ignored if `pretrained_model_name_or_path` is a local path.
        ### Returns:
        - A new instance of `PromptDA` with the parameters loaded from the checkpoint.
        """
        ckpt_path = None
        if Path(pretrained_model_name_or_path).exists():
            ckpt_path = pretrained_model_name_or_path
        else:
            cached_checkpoint_path = hf_hub_download(
                repo_id=pretrained_model_name_or_path,
                repo_type="model",
                filename="promptda_vitl.ckpt",
                **hf_kwargs
            )
            ckpt_path = cached_checkpoint_path
        # model_config = checkpoint['model_config']
        # if model_kwargs is not None:
        #     model_config.update(model_kwargs)
        if model_kwargs is None:
            model_kwargs = {}
        model_kwargs.update({'ckpt_path': ckpt_path})
        model = cls(**model_kwargs)
        return model

    def load_checkpoint(self, ckpt_path):
        if os.path.exists(ckpt_path):
            Log.info(f'Loading checkpoint from {ckpt_path}')
            checkpoint = torch.load(ckpt_path, map_location='cpu')
            # drop the first 9 characters of each key to strip the
            # training-time module prefix from parameter names
            self.load_state_dict(
                {k[9:]: v for k, v in checkpoint['state_dict'].items()})
        else:
            Log.warn(f'Checkpoint {ckpt_path} not found')

    def forward(self, x, prompt_depth=None):
        assert prompt_depth is not None, 'prompt_depth is required'
        prompt_depth, min_val, max_val = self.normalize(prompt_depth)
        h, w = x.shape[-2:]
        features = self.pretrained.get_intermediate_layers(
            x, self.model_config['layer_idxs'],
            return_class_token=True)
        patch_h, patch_w = h // self.patch_size, w // self.patch_size
        depth = self.depth_head(features, patch_h, patch_w, prompt_depth)
        depth = self.denormalize(depth, min_val, max_val)
        return depth

    @torch.no_grad()
    def predict(self,
                image: torch.Tensor,
                prompt_depth: torch.Tensor):
        return self.forward(image, prompt_depth)

    def normalize(self,
                  prompt_depth: torch.Tensor):
        B, C, H, W = prompt_depth.shape
        # quantiles at 0 and 1 are the per-sample min and max of the prompt depth
        min_val = torch.quantile(
            prompt_depth.reshape(B, -1), 0., dim=1, keepdim=True)[:, :, None, None]
        max_val = torch.quantile(
            prompt_depth.reshape(B, -1), 1., dim=1, keepdim=True)[:, :, None, None]
        prompt_depth = (prompt_depth - min_val) / (max_val - min_val)
        return prompt_depth, min_val, max_val

    def denormalize(self,
                    depth: torch.Tensor,
                    min_val: torch.Tensor,
                    max_val: torch.Tensor):
        return depth * (max_val - min_val) + min_val
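A minimal usage sketch for `from_pretrained` and `predict` above. The class name `PromptDA`, the checkpoint path, and the example file names are illustrative assumptions; `load_image`, `load_depth`, and `save_depth` are the helpers from `promptda/utils/io_wrapper.py` added below:

```python
# Usage sketch (assumed class name and file paths; adjust to your setup).
from promptda.promptda import PromptDA
from promptda.utils.io_wrapper import load_image, load_depth, save_depth

image = load_image('data/example/image.jpg')         # (1, 3, H, W) RGB in [0, 1]
prompt_depth = load_depth('data/example/depth.png')  # (1, 1, h, w) metric depth in meters

model = PromptDA.from_pretrained('checkpoints/promptda_vitl.ckpt')
model = model.to(image.device).eval()                # match the input device

depth = model.predict(image, prompt_depth)           # (1, 1, H, W) metric depth
save_depth(depth, prompt_depth=prompt_depth, image=image)
```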
promptda/utils/depth_utils.py
ADDED
@@ -0,0 +1,91 @@
import numpy as np
import matplotlib
import open3d as o3d


def visualize_depth(depth: np.ndarray,
                    depth_min=None,
                    depth_max=None,
                    percentile=5,
                    ret_minmax=False,
                    cmap='Spectral'):
    if depth_min is None: depth_min = np.percentile(depth, percentile)
    if depth_max is None: depth_max = np.percentile(depth, 100 - percentile)
    if depth_min == depth_max:
        depth_min = depth_min - 1e-6
        depth_max = depth_max + 1e-6
    cm = matplotlib.colormaps[cmap]
    depth = ((depth - depth_min) / (depth_max - depth_min)).clip(0, 1)
    img_colored_np = cm(depth[None], bytes=False)[:, :, :, 0:3]  # values in [0, 1]
    img_colored_np = (img_colored_np[0] * 255.0).astype(np.uint8)
    if ret_minmax:
        return img_colored_np, depth_min, depth_max
    else:
        return img_colored_np


def unproject_depth(depth,
                    ixt,
                    depth_min=0.01,
                    depth_max=None,
                    color=None,
                    ext=None,
                    conf=None,
                    ret_pcd=False,
                    clip_box=None):
    height, width = depth.shape
    x = np.arange(0, width)
    y = np.arange(0, height)
    xx, yy = np.meshgrid(x, y)
    xx = xx.reshape(-1)
    yy = yy.reshape(-1)
    zz = depth.reshape(-1)
    mask = np.ones_like(xx, dtype=np.bool_)
    if depth_min is not None:
        mask &= zz >= depth_min
    if depth_max is not None:
        mask &= zz <= depth_max
    if conf is not None:
        mask &= conf.reshape(-1) == 2
    xx = xx[mask]
    yy = yy[mask]
    zz = zz[mask]
    # back-project pixels: X_cam = z * K^{-1} [u, v, 1]^T
    pcd = np.stack([xx, yy, np.ones_like(xx)], axis=1)
    pcd = pcd * zz[:, None]
    pcd = np.dot(pcd, np.linalg.inv(ixt).T)
    if ext is not None:
        pcd = np.concatenate([pcd, np.ones((pcd.shape[0], 1))], axis=1)
        pcd = np.dot(pcd, np.linalg.inv(ext).T)
    new_mask = np.ones_like(pcd[:, 0]).astype(np.bool_)
    if clip_box is not None:
        # clip_box = (x_max, y_max, z_max, x_min, y_min, z_min); None disables a bound
        assert len(clip_box) == 6
        for i, val in enumerate(clip_box):
            if val is None:
                continue
            if i == 0: new_mask &= (pcd[:, 0] <= val)
            elif i == 1: new_mask &= (pcd[:, 1] <= val)
            elif i == 2: new_mask &= (pcd[:, 2] <= val)
            elif i == 3: new_mask &= (pcd[:, 0] >= val)
            elif i == 4: new_mask &= (pcd[:, 1] >= val)
            elif i == 5: new_mask &= (pcd[:, 2] >= val)
    if color is not None:
        if color.dtype == np.uint8:
            color = color.astype(np.float32) / 255.
        if ret_pcd:
            points = pcd
            pcd = o3d.geometry.PointCloud()
            pcd.points = o3d.utility.Vector3dVector(points[:, :3][new_mask])
            pcd.colors = o3d.utility.Vector3dVector(color.reshape(-1, 3)[mask][new_mask])
        else:
            return pcd[:, :3][new_mask], color.reshape(-1, 3)[mask][new_mask]
    else:
        if ret_pcd:
            points = pcd
            pcd = o3d.geometry.PointCloud()
            pcd.points = o3d.utility.Vector3dVector(points[:, :3][new_mask])
        else:
            return pcd[:, :3][new_mask]
    return pcd


if __name__ == '__main__':
    depth = np.random.rand(100, 100)
    visualize_depth(depth)
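A small sketch of the pinhole unprojection performed by `unproject_depth` above: each pixel `(u, v)` with depth `z` maps to `z * K^{-1} [u, v, 1]^T` in camera space. The intrinsics below are made up for illustration:

```python
# Unprojection sketch with hypothetical intrinsics (fx = fy = 500,
# principal point at the center of a 480x640 depth map).
import numpy as np
from promptda.utils.depth_utils import unproject_depth

depth = np.full((480, 640), 2.0, dtype=np.float32)  # a flat wall 2 m away
ixt = np.array([[500., 0., 320.],
                [0., 500., 240.],
                [0., 0., 1.]])
points = unproject_depth(depth, ixt)   # (N, 3) camera-space points in meters
print(points.shape, points[:, 2].min())  # every z value equals 2.0
```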
promptda/utils/io_wrapper.py
ADDED
@@ -0,0 +1,98 @@
import numpy as np
import imageio
import torch
import os
import matplotlib.pyplot as plt
import cv2

from promptda.utils.logger import Log

DEVICE = 'cuda' if torch.cuda.is_available(
) else 'mps' if torch.backends.mps.is_available() else 'cpu'


def to_tensor_func(arr):
    if arr.ndim == 2:
        arr = arr[:, :, np.newaxis]
    return torch.from_numpy(arr).permute(2, 0, 1).unsqueeze(0).to(DEVICE)


def to_numpy_func(tensor):
    arr = tensor.squeeze(0).permute(1, 2, 0).cpu().numpy()
    if arr.shape[2] == 1:
        arr = arr[:, :, 0]
    return arr


def ensure_multiple_of(x, multiple_of=14):
    return int(x // multiple_of * multiple_of)


def load_image(image_path, to_tensor=True, max_size=1008, multiple_of=14):
    '''
    Load an image from `image_path` and optionally convert it to a tensor.
    The image is resized so that both sides are multiples of `multiple_of`
    (the DINOv2 patch size) and the longer side does not exceed `max_size`.
    '''
    image = np.asarray(imageio.imread(image_path)).astype(np.float32)
    image = image / 255.

    max_size = max_size // multiple_of * multiple_of
    if max(image.shape) > max_size:
        h, w = image.shape[:2]
        scale = max_size / max(h, w)
        tar_h = ensure_multiple_of(h * scale)
        tar_w = ensure_multiple_of(w * scale)
        image = cv2.resize(image, (tar_w, tar_h), interpolation=cv2.INTER_AREA)
    if to_tensor:
        return to_tensor_func(image)
    return image


def load_depth(depth_path, to_tensor=True):
    '''
    Load a depth map from `depth_path` and optionally convert it to a tensor.
    16-bit PNG depth is assumed to be in millimeters and is converted to meters.
    '''
    if depth_path.endswith('.png'):
        depth = np.asarray(imageio.imread(depth_path)).astype(np.float32)
        depth = depth / 1000.
    elif depth_path.endswith('.npz'):
        depth = np.load(depth_path)['depth']
    else:
        raise ValueError(f"Unsupported depth format: {depth_path}")
    if to_tensor:
        return to_tensor_func(depth)
    return depth


def save_depth(depth,
               prompt_depth=None,
               image=None,
               output_path='data/output/depth.png',
               save_vis=True):
    '''
    Save a depth map to `output_path` as a 16-bit PNG in millimeters and,
    optionally, a side-by-side visualization of image, prompt depth and result.
    '''
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    depth = to_numpy_func(depth)
    uint16_depth = (depth * 1000.).astype(np.uint16)
    imageio.imwrite(output_path, uint16_depth)

    if not save_vis:
        return

    output_path = output_path.replace('.png', '_vis.png')
    prompt_depth = to_numpy_func(prompt_depth)
    image = to_numpy_func(image)
    plt.subplot(1, 3, 1)
    plt.imshow(image)
    plt.axis('off')
    plt.subplot(1, 3, 2)
    plt.imshow(prompt_depth)
    plt.axis('off')
    plt.subplot(1, 3, 3)
    plt.imshow(depth)
    plt.axis('off')
    plt.tight_layout()
    plt.savefig(output_path)
    plt.close()
    Log.info(f'Saved depth to {output_path}')
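One consequence of the 16-bit PNG convention used by `save_depth`/`load_depth` above is worth spelling out: depth is stored in millimeters, so values are quantized to 1 mm and limited to about 65.5 m. A round-trip sketch (temporary path chosen for illustration):

```python
import numpy as np
import imageio

depth_m = np.array([[1.2345, 3.0]], dtype=np.float32)   # meters
imageio.imwrite('/tmp/depth.png', (depth_m * 1000.).astype(np.uint16))
restored = np.asarray(imageio.imread('/tmp/depth.png')).astype(np.float32) / 1000.
assert np.allclose(restored, [[1.234, 3.0]])  # truncated to 1 mm resolution
```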
promptda/utils/logger.py
ADDED
@@ -0,0 +1,75 @@
import os


class Log:
    log_on = True  # fast switch
    used_tags = dict()  # to keep track of used tags
    _is_main_cached = None  # cache for the main-process check result

    @staticmethod
    def is_main_process():
        if Log._is_main_cached is not None:
            return Log._is_main_cached
        try:
            from pytorch_lightning.utilities import rank_zero_only
            if rank_zero_only.rank == 0:
                Log._is_main_cached = True
            else:
                Log._is_main_cached = False
        except Exception:
            Log._is_main_cached = True
        return Log._is_main_cached

    @staticmethod
    def _should_log(tag):
        """
        Determine whether a log message should be emitted.
        Conditions: logging is enabled, the current process is the main
        process, and the tag has not exhausted its print budget.
        """
        if not Log.log_on:
            return False
        if not Log.is_main_process():
            return False
        if tag is None:
            return True
        if '__' in tag:
            # a tag of the form 'name__N' prints the message at most N times
            num = int(tag.split('__')[-1])
            tag = tag.split('__')[0]
        else:
            num = 3  # default budget

        if tag not in Log.used_tags:
            Log.used_tags[tag] = num
        Log.used_tags[tag] -= 1
        if Log.used_tags[tag] >= 0:
            return True
        else:
            return False

    @staticmethod
    def info(*args, tag=None):
        """Output INFO level log information."""
        if Log._should_log(tag):
            print("\033[1;32m[INFO]\033[0;0m", *args)

    @staticmethod
    def warn(*args, tag=None):
        """Output WARN level log information."""
        if Log._should_log(tag):
            print("\033[1;35m[WARN]\033[0;0m", *args)

    @staticmethod
    def error(*args, tag=None):
        """Output ERROR level log information (always printed)."""
        print("\033[1;31m[ERROR]\033[0;0m", *args)

    @staticmethod
    def debug(*args, tag=None):
        """Output DEBUG level log information (requires HT_DEBUG=1)."""
        if Log._should_log(tag) and 'HT_DEBUG' in os.environ and os.environ['HT_DEBUG'] == '1':
            print("\033[1;33m[DEBUG]\033[0;0m", *args)
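The `tag__N` convention in `_should_log` above rate-limits repeated messages; a short usage sketch:

```python
from promptda.utils.logger import Log

for step in range(10):
    Log.info(f'processing step {step}', tag='step__2')  # printed only for steps 0 and 1
Log.info('untagged messages are always printed (on the main process)')
```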
promptda/utils/parallel_utils.py
ADDED
@@ -0,0 +1,78 @@
from typing import Callable, List, Dict
from multiprocessing.pool import ThreadPool
from threading import Thread
from functools import partial, wraps
import asyncio

from tqdm import tqdm


def async_call_func(func):
    @wraps(func)
    async def wrapper(*args, **kwargs):
        loop = asyncio.get_event_loop()
        # run_in_executor only forwards positional arguments, so bind the
        # keyword arguments with functools.partial before dispatching the
        # blocking function to a separate thread
        return await loop.run_in_executor(None, partial(func, *args, **kwargs))
    return wrapper


def async_call(fn):
    def wrapper(*args, **kwargs):
        Thread(target=fn, args=args, kwargs=kwargs).start()
    return wrapper


def parallel_execution(*args, action: Callable, num_processes=32, print_progress=False, sequential=False, async_return=False, desc=None, **kwargs):
    # Copied from EasyVolCap
    # Author: Zhen Xu https://github.com/dendenxu
    # NOTE: we expect the first arg / or kwargs to be distributed
    # NOTE: the print_progress arg is reserved
    def get_length(args: List, kwargs: Dict):
        for a in args:
            if isinstance(a, list):
                return len(a)
        for v in kwargs.values():
            if isinstance(v, list):
                return len(v)
        raise NotImplementedError

    def get_action_args(length: int, args: List, kwargs: Dict, i: int):
        action_args = [(arg[i] if isinstance(arg, list) and len(
            arg) == length else arg) for arg in args]
        # TODO: support all types of iterable
        action_kwargs = {key: (kwargs[key][i] if isinstance(kwargs[key], list) and len(
            kwargs[key]) == length else kwargs[key]) for key in kwargs}
        return action_args, action_kwargs

    if not sequential:
        # create a thread pool
        pool = ThreadPool(processes=num_processes)

        # spawn threads
        results = []
        asyncs = []
        length = get_length(args, kwargs)
        for i in range(length):
            action_args, action_kwargs = get_action_args(
                length, args, kwargs, i)
            async_result = pool.apply_async(action, action_args, action_kwargs)
            asyncs.append(async_result)

        # join threads and collect return values
        if not async_return:
            for async_result in tqdm(asyncs, desc=desc, disable=not print_progress):
                # this blocks until the corresponding thread finishes
                results.append(async_result.get())
            pool.close()
            pool.join()
            return results
        else:
            return pool
    else:
        results = []
        length = get_length(args, kwargs)
        for i in tqdm(range(length), desc=desc, disable=not print_progress):
            action_args, action_kwargs = get_action_args(
                length, args, kwargs, i)
            async_result = action(*action_args, **action_kwargs)
            results.append(async_result)
        return results
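A usage sketch for `parallel_execution` above: list-valued arguments are distributed element-wise across threads, while scalar arguments are broadcast to every call (the `resize_one` worker is hypothetical):

```python
from promptda.utils.parallel_utils import parallel_execution

def resize_one(path, scale):  # hypothetical worker
    return (path, scale)

paths = [f'img_{i}.png' for i in range(4)]
results = parallel_execution(paths, scale=0.5, action=resize_one,
                             print_progress=True, desc='resize')
# results == [('img_0.png', 0.5), ('img_1.png', 0.5), ('img_2.png', 0.5), ('img_3.png', 0.5)]
```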
setup.py
ADDED
@@ -0,0 +1,10 @@
from setuptools import setup, find_packages

setup(
    name="promptda",
    version="1.0",
    packages=find_packages(),  # the packages live at the repo root, not under src/
    author="Haotong Lin",
    description="Prompt Depth Anything",
    url="https://github.com/DepthAnything/PromptDA",
)
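With this `setup.py`, the package can be installed in editable mode from the repository root (a standard setuptools workflow, shown here for illustration):

```shell
pip install -e .
python -c "import promptda"
```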
torchhub/README.md
ADDED
@@ -0,0 +1,3 @@
# Local PyTorch Hub

This directory is for loading the DINOv2 encoder locally in case there is no Internet connection.
torchhub/facebookresearch_dinov2_main/CODE_OF_CONDUCT.md
ADDED
@@ -0,0 +1,80 @@
# Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

## Scope

This Code of Conduct applies within all project spaces, and it also applies when an individual is representing the project or its community in public spaces. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

This Code of Conduct also applies outside the project spaces when there is a reasonable belief that an individual's behavior may have a negative impact on the project or its community.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at <opensource-conduct@meta.com>. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq
torchhub/facebookresearch_dinov2_main/CONTRIBUTING.md
ADDED
@@ -0,0 +1,31 @@
# Contributing to DINOv2

We want to make contributing to this project as easy and transparent as possible.

## Pull Requests

We actively welcome your pull requests.

1. Fork the repo and create your branch from `main`.
2. If you've added code that should be tested, add tests.
3. If you've changed APIs, update the documentation.
4. Ensure the test suite passes.
5. Make sure your code lints.
6. If you haven't already, complete the Contributor License Agreement ("CLA").

## Contributor License Agreement ("CLA")

In order to accept your pull request, we need you to submit a CLA. You only need to do this once to work on any of Meta's open source projects.

Complete your CLA here: <https://code.facebook.com/cla>

## Issues

We use GitHub issues to track public bugs. Please ensure your description is clear and has sufficient instructions to be able to reproduce the issue.

Meta has a [bounty program](https://www.facebook.com/whitehat/) for the safe disclosure of security bugs. In those cases, please go through the process outlined on that page and do not file a public issue.

## License

By contributing to DINOv2, you agree that your contributions will be licensed under the LICENSE file in the root directory of this source tree.
torchhub/facebookresearch_dinov2_main/LICENSE
ADDED
@@ -0,0 +1,400 @@
Attribution-NonCommercial 4.0 International

=======================================================================

Creative Commons Corporation ("Creative Commons") is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an "as-is" basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible.

Using Creative Commons Public Licenses

Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses.

Considerations for licensors: Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. More considerations for licensors: wiki.creativecommons.org/Considerations_for_licensors

Considerations for the public: By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor's permission is not necessary for any reason--for example, because of any applicable exception or limitation to copyright--then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. More considerations for the public: wiki.creativecommons.org/Considerations_for_licensees

=======================================================================

Creative Commons Attribution-NonCommercial 4.0 International Public License

By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions.

Section 1 -- Definitions.

a. Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image.

b. Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License.

c. Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights.

d. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements.

e. Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material.

f. Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License.

g. Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license.

h. Licensor means the individual(s) or entity(ies) granting rights under this Public License.

i. NonCommercial means not primarily intended for or directed towards commercial advantage or monetary compensation. For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange.

j. Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them.

k. Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world.

l. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning.

Section 2 -- Scope.

a. License grant.

   1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to:

      a. reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and

      b. produce, reproduce, and Share Adapted Material for NonCommercial purposes only.

   2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions.

   3. Term. The term of this Public License is specified in Section 6(a).

   4. Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material.

   5. Downstream recipients.

      a. Offer from the Licensor -- Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License.

      b. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material.

   6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i).

b. Other rights.

   1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise.

   2. Patent and trademark rights are not licensed under this Public License.

   3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes.

Section 3 -- License Conditions.

Your exercise of the Licensed Rights is expressly made subject to the following conditions.

a. Attribution.

   1. If You Share the Licensed Material (including in modified form), You must:

      a. retain the following if it is supplied by the Licensor with the Licensed Material:

         i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated);

         ii. a copyright notice;

         iii. a notice that refers to this Public License;

         iv. a notice that refers to the disclaimer of warranties;

         v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable;

      b. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and

      c. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License.

   2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information.

   3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable.

   4. If You Share Adapted Material You produce, the Adapter's License You apply must not prevent recipients of the Adapted Material from complying with this Public License.

Section 4 -- Sui Generis Database Rights.

Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material:

a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only;

b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material; and

c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database.

For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights.

Section 5 -- Disclaimer of Warranties and Limitation of Liability.

a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.

b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.

c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability.

Section 6 -- Term and Termination.

a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically.

b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates:

   1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or

   2. upon express reinstatement by the Licensor.

   For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License.

c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License.

d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License.

Section 7 -- Other Terms and Conditions.

a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed.

b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License.

Section 8 -- Interpretation.

a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License.

b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions.

c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor.

d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority.

=======================================================================

Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the "Licensor." The text of the Creative Commons public licenses is dedicated to the public domain under the CC0 Public Domain Dedication. Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at creativecommons.org/policies, Creative Commons does not authorize the use of the trademark "Creative Commons" or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses.

Creative Commons may be contacted at creativecommons.org.
torchhub/facebookresearch_dinov2_main/MODEL_CARD.md
ADDED
@@ -0,0 +1,201 @@
# Model Card for DINOv2-S/B/L/g

These are Vision Transformer models trained following the method described in the paper: "DINOv2: Learning Robust Visual Features without Supervision"

We provide 4 models: 1 ViT-g trained from scratch, and 3 ViT-S/B/L models distilled from the ViT-g.

## Model Details

The model takes an image as input and returns a class token and patch tokens.

The embedding dimension is:
- 384 for ViT-S.
- 768 for ViT-B.
- 1024 for ViT-L.
- 1536 for ViT-g.

The models follow a Transformer architecture, with a patch size of 14.

For a 224x224 image, this results in 1 class token + 256 patch tokens.

The models can accept larger images provided the image shapes are multiples of the patch size (14). If this condition is not met, the model will crop to the closest smaller multiple of the patch size.

### Model Description

- **Developed by:** Meta AI
- **Model type:** Vision Transformer
- **License:** CC-BY-NC

- **Repository:** https://github.com/facebookresearch/dinov2
- **Paper:** https://arxiv.org/abs/2304.07193
- **Demo:** https://dinov2.metademolab.com/

## Uses

The models are vision backbones providing multi-purpose features for downstream tasks.

### Direct Use

The models can be used without fine-tuning, with downstream classifiers as simple as linear layers, to obtain competitive results:
- on depth estimation and semantic segmentation, using linear layers.
- on image classification, using k-NN classifiers on the class token.
- on image classification, with logistic regression classifiers applied on the class token.
- on image classification, with a linear layer applied on the class token and the average of the patch tokens.
- on image retrieval, using nearest neighbors.

### Downstream Use

It is technically possible to perform fine-tuning on the models, for small gains (we measured +2% on ImageNet-1k classification). We recommend keeping this as a very last step and only when necessary, as the features already provide good performance out-of-the-box.

## Bias, Risks, and Limitations

Despite improvements thanks to the training method not using annotations, we still observe significant biases in our models toward rich households from Western countries.

### Recommendations

We expect fine-tuning will increase the biases in the features produced by the model, as they will be tuned to the fine-tuning labels.

## How to Get Started with the Model

Use the code below to get started with the model.

```python
import torch
dinov2_vits14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14')
dinov2_vitb14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitb14')
dinov2_vitl14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitl14')
dinov2_vitg14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitg14')
```

## Training Details

### Training Data

- **Training data:** LVD-142M (see paper)
- **Training regime:** fp16 using PyTorch-FSDP mixed-precision.

### Training Procedure

- **Training objective:**
  - DINO self-distillation loss with multi-crop
  - iBOT masked-image modeling loss
  - KoLeo regularization on [CLS] tokens
- **Architectures:**
  - ViT-S (21M params): patch size 14, embedding dimension 384, 6 heads, MLP FFN
  - ViT-B (86M params): patch size 14, embedding dimension 768, 12 heads, MLP FFN
  - ViT-L (0.3B params): patch size 14, embedding dimension 1024, 16 heads, MLP FFN
  - ViT-g (1.1B params): patch size 14, embedding dimension 1536, 24 heads, SwiGLU FFN
- **Distillation:**
  - Distillation follows the standard DINOv2 pretraining procedure, except that the teacher is a pretrained, frozen ViT-g.

## Evaluation

We refer users to the associated paper for the evaluation protocols.

<table>
<tr><th>model</th><th colspan="3">ImageNet-1k</th><th>NYU-Depth v2</th><th>SUN-RGBD</th><th>ADE20k</th><th>iNaturalist 2018</th><th>Oxford-H</th></tr>
<tr><th rowspan="2">task</th><th>classif. (acc)</th><th>classif. (acc)</th><th>classif. V2 (acc)</th><th>depth (RMSE)</th><th>depth (RMSE)</th><th>segm. (mAP)</th><th>classif. (acc)</th><th>retrieval (mAP)</th></tr>
<tr><th>k-NN</th><th>linear</th><th>linear</th><th>linear<br />4 layers</th><th>NYU-D transfer</th><th>multiscale</th><th>linear</th><th>nearest neighbor</th></tr>
<tr><td>ViT-S/14</td><td align="right">79.0%</td><td align="right">81.1%</td><td align="right">70.8%</td><td align="right">0.417</td><td align="right">0.431</td><td align="right">47.2</td><td align="right">69.5%</td><td align="right">43.2</td></tr>
<tr><td>ViT-B/14</td><td align="right">82.1%</td><td align="right">84.5%</td><td align="right">74.9%</td><td align="right">0.362</td><td align="right">0.400</td><td align="right">51.3</td><td align="right">76.3%</td><td align="right">49.5</td></tr>
<tr><td>ViT-L/14</td><td align="right">83.5%</td><td align="right">86.3%</td><td align="right">77.6%</td><td align="right">0.333</td><td align="right">0.396</td><td align="right">53.1</td><td align="right">79.8%</td><td align="right">54.0</td></tr>
<tr><td>ViT-g/14</td><td align="right">83.5%</td><td align="right">86.5%</td><td align="right">78.4%</td><td align="right">0.298</td><td align="right">0.362</td><td align="right">53.0</td><td align="right">81.6%</td><td align="right">52.3</td></tr>
</table>

## Environmental Impact

- **Hardware Type:** Nvidia A100
- **Hours used:** 22,000 for ViT-g, 4,500 for ViT-S distillation, 5,300 for ViT-B distillation, 8,000 for ViT-L distillation
- **Cloud Provider:** Private infra
- **Compute Region:** USA
- **Carbon Emitted:** 7t CO2eq

#### Hardware

Nvidia A100 GPUs

#### Software

PyTorch 2.0, xFormers 0.0.18

**BibTeX**

```
@misc{oquab2023dinov2,
  title={DINOv2: Learning Robust Visual Features without Supervision},
  author={Oquab, Maxime and Darcet, Timothée and Moutakanni, Theo and Vo, Huy and Szafraniec, Marc and Khalidov, Vasil and Fernandez, Pierre and Haziza, Daniel and Massa, Francisco and El-Nouby, Alaaeldin and Howes, Russell and Huang, Po-Yao and Xu, Hu and Sharma, Vasu and Li, Shang-Wen and Galuba, Wojciech and Rabbat, Mike and Assran, Mido and Ballas, Nicolas and Synnaeve, Gabriel and Misra, Ishan and Jegou, Herve and Mairal, Julien and Labatut, Patrick and Joulin, Armand and Bojanowski, Piotr},
  journal={arXiv:2304.07193},
  year={2023}
}
```
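To make the token arithmetic in the model card above concrete, a quick sketch (an illustration, not part of the original file): an H x W input with patch size 14 yields (H // 14) * (W // 14) patch tokens plus one class token.

```python
H, W, patch = 224, 224, 14
num_patch_tokens = (H // patch) * (W // patch)  # 16 * 16 = 256
num_tokens = num_patch_tokens + 1               # plus the class token -> 257
```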
torchhub/facebookresearch_dinov2_main/README.md
ADDED
@@ -0,0 +1,277 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# DINOv2: Learning Robust Visual Features without Supervision

**[Meta AI Research, FAIR](https://ai.facebook.com/research/)**

Maxime Oquab,
Timothée Darcet,
Théo Moutakanni,
Huy V. Vo,
Marc Szafraniec,
Vasil Khalidov,
Patrick Labatut,
Armand Joulin,
Piotr Bojanowski

[[`Paper`](https://arxiv.org/abs/2304.07193)] [[`Blog`](https://ai.facebook.com/blog/dino-v2-computer-vision-self-supervised-learning/)] [[`Demo`](https://dinov2.metademolab.com)] [[`BibTeX`](#citing-dinov2)]

PyTorch implementation and pretrained models for DINOv2. For details, see the paper: **[DINOv2: Learning Robust Visual Features without Supervision](https://arxiv.org/abs/2304.07193)**.

DINOv2 models produce high-performance visual features that can be directly employed with classifiers as simple as linear layers on a variety of computer vision tasks; these visual features are robust and perform well across domains without any requirement for fine-tuning. The models were pretrained on a dataset of 142 M images without using any labels or annotations.

https://github.com/facebookresearch/dinov2/assets/60359573/f168823e-7922-415a-b429-578badf5c356

<div align="center">
Visualization of the first three principal components of the patch features of all frames, mapped to RGB values.
</div>

## Pretrained models

<table style="margin: auto">
  <tr>
    <th>model</th>
    <th># of<br />params</th>
    <th>ImageNet<br />k-NN</th>
    <th>ImageNet<br />linear</th>
    <th>download</th>
  </tr>
  <tr>
    <td>ViT-S/14 distilled</td>
    <td align="right">21 M</td>
    <td align="right">79.0%</td>
    <td align="right">81.1%</td>
    <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_pretrain.pth">backbone only</a></td>
  </tr>
  <tr>
    <td>ViT-B/14 distilled</td>
    <td align="right">86 M</td>
    <td align="right">82.1%</td>
    <td align="right">84.5%</td>
    <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_pretrain.pth">backbone only</a></td>
  </tr>
  <tr>
    <td>ViT-L/14 distilled</td>
    <td align="right">300 M</td>
    <td align="right">83.5%</td>
    <td align="right">86.3%</td>
    <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_pretrain.pth">backbone only</a></td>
  </tr>
  <tr>
    <td>ViT-g/14</td>
    <td align="right">1,100 M</td>
    <td align="right">83.5%</td>
    <td align="right">86.5%</td>
    <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_pretrain.pth">backbone only</a></td>
  </tr>
</table>

### Pretrained models via PyTorch Hub

Please follow the instructions [here](https://pytorch.org/get-started/locally/) to install PyTorch (the only required dependency for loading the model). Installing PyTorch with CUDA support is strongly recommended.

A corresponding [model card](MODEL_CARD.md) is included in the repository.

```python
import torch

dinov2_vits14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14')
dinov2_vitb14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitb14')
dinov2_vitl14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitl14')
dinov2_vitg14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitg14')
```
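
For reference, here is a minimal usage sketch (not part of the upstream instructions) for extracting global image features with one of these hub models. It assumes torchvision and Pillow are available, uses standard ImageNet normalization, and picks a 224-pixel crop so the side length is a multiple of the 14-pixel patch size; `example.jpg` is a placeholder path.

```python
# Minimal sketch: global (CLS-token) features from a hub-loaded DINOv2 backbone.
import torch
from PIL import Image
from torchvision import transforms

model = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14')
model.eval()

preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),  # 224 = 16 * 14, a multiple of the patch size
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
])

image = preprocess(Image.open('example.jpg').convert('RGB')).unsqueeze(0)
with torch.no_grad():
    features = model(image)  # shape (1, 384) for ViT-S/14
```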

## Installation

The training and evaluation code requires PyTorch 2.0 and [xFormers](https://github.com/facebookresearch/xformers) 0.0.18 as well as a number of other 3rd party packages. Note that the code has only been tested with the specified versions and also expects a Linux environment. To set up all the required dependencies for training and evaluation, please follow the instructions below:

*[conda](https://docs.conda.io/projects/conda/en/latest/user-guide/getting-started.html)* **(Recommended)** - Clone the repository and then create and activate a `dinov2` conda environment using the provided environment definition:

```shell
conda env create -f conda.yaml
conda activate dinov2
```

*[pip](https://pip.pypa.io/en/stable/getting-started/)* - Clone the repository and then use the provided `requirements.txt` to install the dependencies:

```shell
pip install -r requirements.txt
```

## Data preparation

### ImageNet-1k

The root directory of the dataset should hold the following contents:

- `<ROOT>/test/ILSVRC2012_test_00000001.JPEG`
- `<ROOT>/test/[..]`
- `<ROOT>/test/ILSVRC2012_test_00100000.JPEG`
- `<ROOT>/train/n01440764/n01440764_10026.JPEG`
- `<ROOT>/train/[...]`
- `<ROOT>/train/n15075141/n15075141_9993.JPEG`
- `<ROOT>/val/n01440764/ILSVRC2012_val_00000293.JPEG`
- `<ROOT>/val/[...]`
- `<ROOT>/val/n15075141/ILSVRC2012_val_00049174.JPEG`
- `<ROOT>/labels.txt`

The provided dataset implementation expects a few additional metadata files to be present under the extra directory:

- `<EXTRA>/class-ids-TRAIN.npy`
- `<EXTRA>/class-ids-VAL.npy`
- `<EXTRA>/class-names-TRAIN.npy`
- `<EXTRA>/class-names-VAL.npy`
- `<EXTRA>/entries-TEST.npy`
- `<EXTRA>/entries-TRAIN.npy`
- `<EXTRA>/entries-VAL.npy`

These metadata files can be generated (once) with the following lines of Python code:

```python
from dinov2.data.datasets import ImageNet

for split in ImageNet.Split:
    dataset = ImageNet(split=split, root="<ROOT>", extra="<EXTRA>")
    dataset.dump_extra()
```

Note that the root and extra directories do not have to be distinct directories.

### ImageNet-22k

Please adapt the [dataset class](dinov2/data/datasets/image_net_22k.py) to match your local setup.

<br />

:warning: To execute the commands provided in the next sections for training and evaluation, the `dinov2` package should be included in the Python module search path, i.e. simply prefix the command to run with `PYTHONPATH=.`.

## Training

### Fast setup: training DINOv2 ViT-L/16 on ImageNet-1k

Run DINOv2 training on 4 A100-80GB nodes (32 GPUs) in a SLURM cluster environment with submitit:

```shell
python dinov2/run/train/train.py \
    --nodes 4 \
    --config-file dinov2/configs/train/vitl16_short.yaml \
    --output-dir <PATH/TO/OUTPUT/DIR> \
    train.dataset_path=ImageNet:split=TRAIN:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET>
```

Training time is approximately 1 day, and the resulting checkpoint should reach 81.6% on k-NN eval and 82.9% on linear eval.

The training code saves the weights of the teacher in the `eval` folder every 12500 iterations for evaluation.

### Long setup: training DINOv2 ViT-L/14 on ImageNet-22k

Run DINOv2 training on 12 A100-80GB nodes (96 GPUs) in a SLURM cluster environment with submitit:

```shell
python dinov2/run/train/train.py \
    --nodes 12 \
    --config-file dinov2/configs/train/vitl14.yaml \
    --output-dir <PATH/TO/OUTPUT/DIR> \
    train.dataset_path=ImageNet22k:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET>
```

Training time is approximately 3.3 days, and the resulting checkpoint should reach 82.0% on k-NN eval and 84.5% on linear eval.

The training code saves the weights of the teacher in the `eval` folder every 12500 iterations for evaluation.

## Evaluation

The training code regularly saves the teacher weights. In order to evaluate the model, run the following evaluation on a single node:

### k-NN classification on ImageNet-1k

```shell
python dinov2/run/eval/knn.py \
    --config-file <PATH/TO/OUTPUT/DIR>/config.yaml \
    --pretrained-weights <PATH/TO/OUTPUT/DIR>/eval/training_24999/teacher_checkpoint.pth \
    --output-dir <PATH/TO/OUTPUT/DIR>/eval/training_24999/knn \
    --train-dataset ImageNet:split=TRAIN:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET> \
    --val-dataset ImageNet:split=VAL:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET>
```

### Logistic regression classification on ImageNet-1k

```shell
python dinov2/run/eval/log_regression.py \
    --config-file <PATH/TO/OUTPUT/DIR>/config.yaml \
    --pretrained-weights <PATH/TO/OUTPUT/DIR>/eval/training_24999/teacher_checkpoint.pth \
    --output-dir <PATH/TO/OUTPUT/DIR>/eval/training_24999/logreg \
    --train-dataset ImageNet:split=TRAIN:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET> \
    --val-dataset ImageNet:split=VAL:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET>
```

### Linear classification with data augmentation on ImageNet-1k

```shell
python dinov2/run/eval/linear.py \
    --config-file <PATH/TO/OUTPUT/DIR>/config.yaml \
    --pretrained-weights <PATH/TO/OUTPUT/DIR>/eval/training_24999/teacher_checkpoint.pth \
    --output-dir <PATH/TO/OUTPUT/DIR>/eval/training_24999/linear \
    --train-dataset ImageNet:split=TRAIN:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET> \
    --val-dataset ImageNet:split=VAL:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET>
```

We release the weights from evaluating the different models:

<table style="margin: auto">
  <tr>
    <th>model</th>
    <th>ImageNet<br />top-1</th>
    <th>linear evaluation</th>
  </tr>
  <tr>
    <td>ViT-S/14 distilled</td>
    <td align="right">81.1%</td>
    <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_linear_head.pth">linear head weights</a></td>
  </tr>
  <tr>
    <td>ViT-B/14 distilled</td>
    <td align="right">84.5%</td>
    <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_linear_head.pth">linear head weights</a></td>
  </tr>
  <tr>
    <td>ViT-L/14 distilled</td>
    <td align="right">86.3%</td>
    <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_linear_head.pth">linear head weights</a></td>
  </tr>
  <tr>
    <td>ViT-g/14</td>
    <td align="right">86.5%</td>
    <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_linear_head.pth">linear head weights</a></td>
  </tr>
</table>

The performance of the provided pretrained model weights can be evaluated as follows on ImageNet-1k:

```shell
python dinov2/run/eval/linear.py \
    --config-file dinov2/configs/eval/vitg14_pretrain.yaml \
    --pretrained-weights https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_pretrain.pth \
    --train-dataset ImageNet:split=TRAIN:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET> \
    --val-dataset ImageNet:split=VAL:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET>
```

## License

DINOv2 code and model weights are released under the CC-BY-NC 4.0 license. See [LICENSE](LICENSE) for additional details.

## Contributing

See [contributing](CONTRIBUTING.md) and the [code of conduct](CODE_OF_CONDUCT.md).

## Citing DINOv2

If you find this repository useful, please consider giving a star :star: and citation :t-rex::

```
@misc{oquab2023dinov2,
  title={DINOv2: Learning Robust Visual Features without Supervision},
  author={Oquab, Maxime and Darcet, Timothée and Moutakanni, Theo and Vo, Huy V. and Szafraniec, Marc and Khalidov, Vasil and Fernandez, Pierre and Haziza, Daniel and Massa, Francisco and El-Nouby, Alaaeldin and Howes, Russell and Huang, Po-Yao and Xu, Hu and Sharma, Vasu and Li, Shang-Wen and Galuba, Wojciech and Rabbat, Mike and Assran, Mido and Ballas, Nicolas and Synnaeve, Gabriel and Misra, Ishan and Jegou, Herve and Mairal, Julien and Labatut, Patrick and Joulin, Armand and Bojanowski, Piotr},
  journal={arXiv:2304.07193},
  year={2023}
}
```
torchhub/facebookresearch_dinov2_main/conda.yaml
ADDED
@@ -0,0 +1,22 @@
name: dinov2
channels:
  - defaults
  - pytorch
  - nvidia
  - xformers
  - conda-forge
dependencies:
  - python=3.9
  - pytorch::pytorch=2.0.0
  - pytorch::pytorch-cuda=11.7.0
  - pytorch::torchvision=0.15.0
  - omegaconf
  - torchmetrics=0.10.3
  - fvcore
  - iopath
  - xformers::xformers=0.0.18
  - pip
  - pip:
    - git+https://github.com/facebookincubator/submitit
    - --extra-index-url https://pypi.nvidia.com
    - cuml-cu11
torchhub/facebookresearch_dinov2_main/dinov2/__init__.py
ADDED
@@ -0,0 +1,7 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

__version__ = "0.0.1"
torchhub/facebookresearch_dinov2_main/dinov2/configs/__init__.py
ADDED
@@ -0,0 +1,23 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import pathlib

from omegaconf import OmegaConf


def load_config(config_name: str):
    config_filename = config_name + ".yaml"
    return OmegaConf.load(pathlib.Path(__file__).parent.resolve() / config_filename)


dinov2_default_config = load_config("ssl_default_config")


def load_and_merge_config(config_name: str):
    default_config = OmegaConf.create(dinov2_default_config)
    loaded_config = load_config(config_name)
    return OmegaConf.merge(default_config, loaded_config)
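
A brief usage sketch for the helpers above: `load_and_merge_config` layers a named config (relative to this package, matching the yaml files that follow) over `ssl_default_config`. The exact printed values are whatever the merged config holds.

```python
# Sketch: merge the ViT-L/14 eval config over the SSL defaults.
from dinov2.configs import load_and_merge_config

cfg = load_and_merge_config("eval/vitl14_pretrain")
print(cfg.student.arch, cfg.student.patch_size)  # vit_large 14
```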
torchhub/facebookresearch_dinov2_main/dinov2/configs/eval/vitb14_pretrain.yaml
ADDED
@@ -0,0 +1,6 @@
student:
  arch: vit_base
  patch_size: 14
crops:
  global_crops_size: 518 # this is to set up the position embeddings properly
  local_crops_size: 98
torchhub/facebookresearch_dinov2_main/dinov2/configs/eval/vitg14_pretrain.yaml
ADDED
@@ -0,0 +1,7 @@
student:
  arch: vit_giant2
  patch_size: 14
  ffn_layer: swiglufused
crops:
  global_crops_size: 518 # this is to set up the position embeddings properly
  local_crops_size: 98
torchhub/facebookresearch_dinov2_main/dinov2/configs/eval/vitl14_pretrain.yaml
ADDED
@@ -0,0 +1,6 @@
student:
  arch: vit_large
  patch_size: 14
crops:
  global_crops_size: 518 # this is to set up the position embeddings properly
  local_crops_size: 98
torchhub/facebookresearch_dinov2_main/dinov2/configs/eval/vits14_pretrain.yaml
ADDED
@@ -0,0 +1,6 @@
student:
  arch: vit_small
  patch_size: 14
crops:
  global_crops_size: 518 # this is to set up the position embeddings properly
  local_crops_size: 98
torchhub/facebookresearch_dinov2_main/dinov2/configs/ssl_default_config.yaml
ADDED
@@ -0,0 +1,115 @@
MODEL:
  WEIGHTS: ''
compute_precision:
  grad_scaler: true
  teacher:
    backbone:
      sharding_strategy: SHARD_GRAD_OP
      mixed_precision:
        param_dtype: fp16
        reduce_dtype: fp16
        buffer_dtype: fp32
    dino_head:
      sharding_strategy: SHARD_GRAD_OP
      mixed_precision:
        param_dtype: fp16
        reduce_dtype: fp16
        buffer_dtype: fp32
    ibot_head:
      sharding_strategy: SHARD_GRAD_OP
      mixed_precision:
        param_dtype: fp16
        reduce_dtype: fp16
        buffer_dtype: fp32
  student:
    backbone:
      sharding_strategy: SHARD_GRAD_OP
      mixed_precision:
        param_dtype: fp16
        reduce_dtype: fp16
        buffer_dtype: fp32
    dino_head:
      sharding_strategy: SHARD_GRAD_OP
      mixed_precision:
        param_dtype: fp16
        reduce_dtype: fp32
        buffer_dtype: fp32
    ibot_head:
      sharding_strategy: SHARD_GRAD_OP
      mixed_precision:
        param_dtype: fp16
        reduce_dtype: fp32
        buffer_dtype: fp32
dino:
  loss_weight: 1.0
  head_n_prototypes: 65536
  head_bottleneck_dim: 256
  head_nlayers: 3
  head_hidden_dim: 2048
  koleo_loss_weight: 0.1
ibot:
  loss_weight: 1.0
  mask_sample_probability: 0.5
  mask_ratio_min_max:
    - 0.1
    - 0.5
  separate_head: false
  head_n_prototypes: 65536
  head_bottleneck_dim: 256
  head_nlayers: 3
  head_hidden_dim: 2048
train:
  batch_size_per_gpu: 64
  dataset_path: ImageNet:split=TRAIN
  output_dir: .
  saveckp_freq: 20
  seed: 0
  num_workers: 10
  OFFICIAL_EPOCH_LENGTH: 1250
  cache_dataset: true
  centering: "centering" # or "sinkhorn_knopp"
student:
  arch: vit_large
  patch_size: 16
  drop_path_rate: 0.3
  layerscale: 1.0e-05
  drop_path_uniform: true
  pretrained_weights: ''
  ffn_layer: "mlp"
  block_chunks: 0
  qkv_bias: true
  proj_bias: true
  ffn_bias: true
teacher:
  momentum_teacher: 0.992
  final_momentum_teacher: 1
  warmup_teacher_temp: 0.04
  teacher_temp: 0.07
  warmup_teacher_temp_epochs: 30
optim:
  epochs: 100
  weight_decay: 0.04
  weight_decay_end: 0.4
  base_lr: 0.004 # learning rate for a batch size of 1024
  lr: 0. # will be set after applying scaling rule
  warmup_epochs: 10
  min_lr: 1.0e-06
  clip_grad: 3.0
  freeze_last_layer_epochs: 1
  scaling_rule: sqrt_wrt_1024
  patch_embed_lr_mult: 0.2
  layerwise_decay: 0.9
  adamw_beta1: 0.9
  adamw_beta2: 0.999
crops:
  global_crops_scale:
    - 0.32
    - 1.0
  local_crops_number: 8
  local_crops_scale:
    - 0.05
    - 0.32
  global_crops_size: 224
  local_crops_size: 96
evaluation:
  eval_period_iterations: 12500
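
The `optim` section names `scaling_rule: sqrt_wrt_1024` with `base_lr` defined for a batch size of 1024. The exact rule lives in training code not shown here, but the name suggests square-root scaling; a hedged sketch under that assumption:

```python
# Assumed sqrt_wrt_1024 rule: scale base_lr by sqrt(total_batch_size / 1024).
import math

def scaled_lr(base_lr: float, total_batch_size: int) -> float:
    return base_lr * math.sqrt(total_batch_size / 1024)

# e.g. the fast ImageNet-1k setup: 32 GPUs x 64 images/GPU = 2048
print(scaled_lr(0.004, 32 * 64))  # ~0.00566
```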
torchhub/facebookresearch_dinov2_main/dinov2/configs/train/vitg14.yaml
ADDED
@@ -0,0 +1,26 @@
dino:
  head_n_prototypes: 131072
  head_bottleneck_dim: 384
ibot:
  separate_head: true
  head_n_prototypes: 131072
train:
  batch_size_per_gpu: 12
  dataset_path: ImageNet22k
  centering: sinkhorn_knopp
student:
  arch: vit_giant2
  patch_size: 14
  drop_path_rate: 0.4
  ffn_layer: swiglufused
  block_chunks: 4
teacher:
  momentum_teacher: 0.994
optim:
  epochs: 500
  weight_decay_end: 0.2
  base_lr: 2.0e-04 # learning rate for a batch size of 1024
  warmup_epochs: 80
  layerwise_decay: 1.0
crops:
  local_crops_size: 98
torchhub/facebookresearch_dinov2_main/dinov2/configs/train/vitl14.yaml
ADDED
@@ -0,0 +1,26 @@
dino:
  head_n_prototypes: 131072
  head_bottleneck_dim: 384
ibot:
  separate_head: true
  head_n_prototypes: 131072
train:
  batch_size_per_gpu: 32
  dataset_path: ImageNet22k
  centering: sinkhorn_knopp
student:
  arch: vit_large
  patch_size: 14
  drop_path_rate: 0.4
  ffn_layer: swiglufused
  block_chunks: 4
teacher:
  momentum_teacher: 0.994
optim:
  epochs: 500
  weight_decay_end: 0.2
  base_lr: 2.0e-04 # learning rate for a batch size of 1024
  warmup_epochs: 80
  layerwise_decay: 1.0
crops:
  local_crops_size: 98
torchhub/facebookresearch_dinov2_main/dinov2/configs/train/vitl16_short.yaml
ADDED
@@ -0,0 +1,6 @@
# this corresponds to the default config
train:
  dataset_path: ImageNet:split=TRAIN
  batch_size_per_gpu: 64
student:
  block_chunks: 4
torchhub/facebookresearch_dinov2_main/dinov2/data/__init__.py
ADDED
@@ -0,0 +1,11 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from .adapters import DatasetWithEnumeratedTargets
from .loaders import make_data_loader, make_dataset, SamplerType
from .collate import collate_data_and_cast
from .masking import MaskingGenerator
from .augmentations import DataAugmentationDINO
torchhub/facebookresearch_dinov2_main/dinov2/data/adapters.py
ADDED
@@ -0,0 +1,29 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from typing import Any, Tuple

from torch.utils.data import Dataset


class DatasetWithEnumeratedTargets(Dataset):
    def __init__(self, dataset):
        self._dataset = dataset

    def get_image_data(self, index: int) -> bytes:
        return self._dataset.get_image_data(index)

    def get_target(self, index: int) -> Tuple[Any, int]:
        target = self._dataset.get_target(index)
        return (index, target)

    def __getitem__(self, index: int) -> Tuple[Any, Tuple[Any, int]]:
        image, target = self._dataset[index]
        target = index if target is None else target
        return image, (index, target)

    def __len__(self) -> int:
        return len(self._dataset)
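
A short sketch of how the wrapper behaves, using torchvision's `FakeData` as a stand-in dataset:

```python
# Sketch: each target is replaced by an (index, target) pair.
from torchvision.datasets import FakeData
from dinov2.data import DatasetWithEnumeratedTargets

dataset = DatasetWithEnumeratedTargets(FakeData(size=4))
image, (index, target) = dataset[0]
print(index, target)  # 0 and the original target
```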
torchhub/facebookresearch_dinov2_main/dinov2/data/augmentations.py
ADDED
@@ -0,0 +1,119 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import logging

from torchvision import transforms

from .transforms import (
    GaussianBlur,
    make_normalize_transform,
)


logger = logging.getLogger("dinov2")


class DataAugmentationDINO(object):
    def __init__(
        self,
        global_crops_scale,
        local_crops_scale,
        local_crops_number,
        global_crops_size=224,
        local_crops_size=96,
    ):
        self.global_crops_scale = global_crops_scale
        self.local_crops_scale = local_crops_scale
        self.local_crops_number = local_crops_number
        self.global_crops_size = global_crops_size
        self.local_crops_size = local_crops_size

        logger.info("###################################")
        logger.info("Using data augmentation parameters:")
        logger.info(f"global_crops_scale: {global_crops_scale}")
        logger.info(f"local_crops_scale: {local_crops_scale}")
        logger.info(f"local_crops_number: {local_crops_number}")
        logger.info(f"global_crops_size: {global_crops_size}")
        logger.info(f"local_crops_size: {local_crops_size}")
        logger.info("###################################")

        # random resized crop and flip
        self.geometric_augmentation_global = transforms.Compose(
            [
                transforms.RandomResizedCrop(
                    global_crops_size, scale=global_crops_scale, interpolation=transforms.InterpolationMode.BICUBIC
                ),
                transforms.RandomHorizontalFlip(p=0.5),
            ]
        )

        self.geometric_augmentation_local = transforms.Compose(
            [
                transforms.RandomResizedCrop(
                    local_crops_size, scale=local_crops_scale, interpolation=transforms.InterpolationMode.BICUBIC
                ),
                transforms.RandomHorizontalFlip(p=0.5),
            ]
        )

        # color distortions / blurring
        color_jittering = transforms.Compose(
            [
                transforms.RandomApply(
                    [transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1)],
                    p=0.8,
                ),
                transforms.RandomGrayscale(p=0.2),
            ]
        )

        global_transfo1_extra = GaussianBlur(p=1.0)

        global_transfo2_extra = transforms.Compose(
            [
                GaussianBlur(p=0.1),
                transforms.RandomSolarize(threshold=128, p=0.2),
            ]
        )

        local_transfo_extra = GaussianBlur(p=0.5)

        # normalization
        self.normalize = transforms.Compose(
            [
                transforms.ToTensor(),
                make_normalize_transform(),
            ]
        )

        self.global_transfo1 = transforms.Compose([color_jittering, global_transfo1_extra, self.normalize])
        self.global_transfo2 = transforms.Compose([color_jittering, global_transfo2_extra, self.normalize])
        self.local_transfo = transforms.Compose([color_jittering, local_transfo_extra, self.normalize])

    def __call__(self, image):
        output = {}

        # global crops:
        im1_base = self.geometric_augmentation_global(image)
        global_crop_1 = self.global_transfo1(im1_base)

        im2_base = self.geometric_augmentation_global(image)
        global_crop_2 = self.global_transfo2(im2_base)

        output["global_crops"] = [global_crop_1, global_crop_2]

        # global crops for teacher:
        output["global_crops_teacher"] = [global_crop_1, global_crop_2]

        # local crops:
        local_crops = [
            self.local_transfo(self.geometric_augmentation_local(image)) for _ in range(self.local_crops_number)
        ]
        output["local_crops"] = local_crops
        output["offsets"] = ()

        return output
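
A usage sketch with the default crop parameters from `ssl_default_config.yaml`; the blank test image is only a placeholder:

```python
# Sketch: one image in, two global crops and eight local crops out.
from PIL import Image
from dinov2.data import DataAugmentationDINO

augment = DataAugmentationDINO(
    global_crops_scale=(0.32, 1.0),
    local_crops_scale=(0.05, 0.32),
    local_crops_number=8,
)
crops = augment(Image.new("RGB", (256, 256)))
print(len(crops["global_crops"]), len(crops["local_crops"]))  # 2 8
```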
torchhub/facebookresearch_dinov2_main/dinov2/data/collate.py
ADDED
@@ -0,0 +1,50 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch
import random


def collate_data_and_cast(samples_list, mask_ratio_tuple, mask_probability, dtype, n_tokens=None, mask_generator=None):
    # dtype = torch.half # TODO: Remove

    n_global_crops = len(samples_list[0][0]["global_crops"])
    n_local_crops = len(samples_list[0][0]["local_crops"])

    collated_global_crops = torch.stack([s[0]["global_crops"][i] for i in range(n_global_crops) for s in samples_list])

    collated_local_crops = torch.stack([s[0]["local_crops"][i] for i in range(n_local_crops) for s in samples_list])

    B = len(collated_global_crops)
    N = n_tokens
    n_samples_masked = int(B * mask_probability)
    probs = torch.linspace(*mask_ratio_tuple, n_samples_masked + 1)
    upperbound = 0
    masks_list = []
    for i in range(0, n_samples_masked):
        prob_min = probs[i]
        prob_max = probs[i + 1]
        masks_list.append(torch.BoolTensor(mask_generator(int(N * random.uniform(prob_min, prob_max)))))
        upperbound += int(N * prob_max)
    for i in range(n_samples_masked, B):
        masks_list.append(torch.BoolTensor(mask_generator(0)))

    random.shuffle(masks_list)

    collated_masks = torch.stack(masks_list).flatten(1)
    mask_indices_list = collated_masks.flatten().nonzero().flatten()

    masks_weight = (1 / collated_masks.sum(-1).clamp(min=1.0)).unsqueeze(-1).expand_as(collated_masks)[collated_masks]

    return {
        "collated_global_crops": collated_global_crops.to(dtype),
        "collated_local_crops": collated_local_crops.to(dtype),
        "collated_masks": collated_masks,
        "mask_indices_list": mask_indices_list,
        "masks_weight": masks_weight,
        "upperbound": upperbound,
        "n_masked_patches": torch.full((1,), fill_value=mask_indices_list.shape[0], dtype=torch.long),
    }
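
A sketch of how this collate function is fed: each sample is assumed to be an `(augmentation_output, target)` pair, and a flat stand-in mask generator is used here instead of the pipeline's `dinov2.data.MaskingGenerator`:

```python
# Sketch: collating two augmented samples with a stand-in mask generator.
import torch
from dinov2.data import collate_data_and_cast

n_tokens = 16 * 16  # 224x224 crops with patch size 14 -> 16x16 patch grid

def mask_generator(n):
    return [True] * n + [False] * (n_tokens - n)

sample = (
    {
        "global_crops": [torch.zeros(3, 224, 224)] * 2,
        "local_crops": [torch.zeros(3, 96, 96)] * 8,
    },
    0,  # target, unused by the collate function
)
batch = collate_data_and_cast(
    [sample, sample],
    mask_ratio_tuple=(0.1, 0.5),
    mask_probability=0.5,
    dtype=torch.half,
    n_tokens=n_tokens,
    mask_generator=mask_generator,
)
print(batch["collated_global_crops"].shape)  # torch.Size([4, 3, 224, 224])
```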
torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/__init__.py
ADDED
@@ -0,0 +1,8 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from .image_net import ImageNet
from .image_net_22k import ImageNet22k
torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/decoders.py
ADDED
@@ -0,0 +1,32 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from io import BytesIO
from typing import Any

from PIL import Image


class Decoder:
    def decode(self) -> Any:
        raise NotImplementedError


class ImageDataDecoder(Decoder):
    def __init__(self, image_data: bytes) -> None:
        self._image_data = image_data

    def decode(self) -> Image:
        f = BytesIO(self._image_data)
        return Image.open(f).convert(mode="RGB")


class TargetDecoder(Decoder):
    def __init__(self, target: Any):
        self._target = target

    def decode(self) -> Any:
        return self._target
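
A tiny sketch of the intended use (`example.jpg` is a placeholder path):

```python
# Sketch: turn raw bytes into an RGB PIL image.
from dinov2.data.datasets.decoders import ImageDataDecoder

with open("example.jpg", "rb") as f:
    image = ImageDataDecoder(f.read()).decode()
print(image.mode)  # RGB
```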
torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/extended.py
ADDED
@@ -0,0 +1,39 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from typing import Any, Tuple

from torchvision.datasets import VisionDataset

from .decoders import TargetDecoder, ImageDataDecoder


class ExtendedVisionDataset(VisionDataset):
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)  # type: ignore

    def get_image_data(self, index: int) -> bytes:
        raise NotImplementedError

    def get_target(self, index: int) -> Any:
        raise NotImplementedError

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        try:
            image_data = self.get_image_data(index)
            image = ImageDataDecoder(image_data).decode()
        except Exception as e:
            raise RuntimeError(f"can not read image for sample {index}") from e
        target = self.get_target(index)
        target = TargetDecoder(target).decode()

        if self.transforms is not None:
            image, target = self.transforms(image, target)

        return image, target

    def __len__(self) -> int:
        raise NotImplementedError
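
A hypothetical minimal subclass, to illustrate the two abstract hooks (`get_image_data` returning raw bytes, `get_target` returning a label); the class and its behavior are not part of the repo:

```python
# Sketch: a folder of JPEGs exposed through the ExtendedVisionDataset API.
import os
from typing import Any
from dinov2.data.datasets.extended import ExtendedVisionDataset

class FolderOfJpegs(ExtendedVisionDataset):
    def __init__(self, root: str, **kwargs) -> None:
        super().__init__(root, **kwargs)
        self._paths = sorted(
            os.path.join(root, name)
            for name in os.listdir(root)
            if name.lower().endswith((".jpg", ".jpeg"))
        )

    def get_image_data(self, index: int) -> bytes:
        with open(self._paths[index], "rb") as f:
            return f.read()

    def get_target(self, index: int) -> Any:
        return 0  # single dummy class

    def __len__(self) -> int:
        return len(self._paths)
```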
torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/image_net.py
ADDED
@@ -0,0 +1,291 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import csv
from enum import Enum
import logging
import os
from typing import Callable, List, Optional, Tuple, Union

import numpy as np

from .extended import ExtendedVisionDataset


logger = logging.getLogger("dinov2")
_Target = int


class _Split(Enum):
    TRAIN = "train"
    VAL = "val"
    TEST = "test"  # NOTE: torchvision does not support the test split

    @property
    def length(self) -> int:
        split_lengths = {
            _Split.TRAIN: 1_281_167,
            _Split.VAL: 50_000,
            _Split.TEST: 100_000,
        }
        return split_lengths[self]

    def get_dirname(self, class_id: Optional[str] = None) -> str:
        return self.value if class_id is None else os.path.join(self.value, class_id)

    def get_image_relpath(self, actual_index: int, class_id: Optional[str] = None) -> str:
        dirname = self.get_dirname(class_id)
        if self == _Split.TRAIN:
            basename = f"{class_id}_{actual_index}"
        else:  # self in (_Split.VAL, _Split.TEST):
            basename = f"ILSVRC2012_{self.value}_{actual_index:08d}"
        return os.path.join(dirname, basename + ".JPEG")

    def parse_image_relpath(self, image_relpath: str) -> Tuple[str, int]:
        assert self != _Split.TEST
        dirname, filename = os.path.split(image_relpath)
        class_id = os.path.split(dirname)[-1]
        basename, _ = os.path.splitext(filename)
        actual_index = int(basename.split("_")[-1])
        return class_id, actual_index


class ImageNet(ExtendedVisionDataset):
    Target = Union[_Target]
    Split = Union[_Split]

    def __init__(
        self,
        *,
        split: "ImageNet.Split",
        root: str,
        extra: str,
        transforms: Optional[Callable] = None,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
    ) -> None:
        super().__init__(root, transforms, transform, target_transform)
        self._extra_root = extra
        self._split = split

        self._entries = None
        self._class_ids = None
        self._class_names = None

    @property
    def split(self) -> "ImageNet.Split":
        return self._split

    def _get_extra_full_path(self, extra_path: str) -> str:
        return os.path.join(self._extra_root, extra_path)

    def _load_extra(self, extra_path: str) -> np.ndarray:
        extra_full_path = self._get_extra_full_path(extra_path)
        return np.load(extra_full_path, mmap_mode="r")

    def _save_extra(self, extra_array: np.ndarray, extra_path: str) -> None:
        extra_full_path = self._get_extra_full_path(extra_path)
        os.makedirs(self._extra_root, exist_ok=True)
        np.save(extra_full_path, extra_array)

    @property
    def _entries_path(self) -> str:
        return f"entries-{self._split.value.upper()}.npy"

    @property
    def _class_ids_path(self) -> str:
        return f"class-ids-{self._split.value.upper()}.npy"

    @property
    def _class_names_path(self) -> str:
        return f"class-names-{self._split.value.upper()}.npy"

    def _get_entries(self) -> np.ndarray:
        if self._entries is None:
            self._entries = self._load_extra(self._entries_path)
        assert self._entries is not None
        return self._entries

    def _get_class_ids(self) -> np.ndarray:
        if self._split == _Split.TEST:
            assert False, "Class IDs are not available in TEST split"
        if self._class_ids is None:
            self._class_ids = self._load_extra(self._class_ids_path)
        assert self._class_ids is not None
        return self._class_ids

    def _get_class_names(self) -> np.ndarray:
        if self._split == _Split.TEST:
            assert False, "Class names are not available in TEST split"
        if self._class_names is None:
            self._class_names = self._load_extra(self._class_names_path)
        assert self._class_names is not None
        return self._class_names

    def find_class_id(self, class_index: int) -> str:
        class_ids = self._get_class_ids()
        return str(class_ids[class_index])

    def find_class_name(self, class_index: int) -> str:
        class_names = self._get_class_names()
        return str(class_names[class_index])

    def get_image_data(self, index: int) -> bytes:
        entries = self._get_entries()
        actual_index = entries[index]["actual_index"]

        class_id = self.get_class_id(index)

        image_relpath = self.split.get_image_relpath(actual_index, class_id)
        image_full_path = os.path.join(self.root, image_relpath)
        with open(image_full_path, mode="rb") as f:
            image_data = f.read()
        return image_data

    def get_target(self, index: int) -> Optional[Target]:
        entries = self._get_entries()
        class_index = entries[index]["class_index"]
        return None if self.split == _Split.TEST else int(class_index)

    def get_targets(self) -> Optional[np.ndarray]:
        entries = self._get_entries()
        return None if self.split == _Split.TEST else entries["class_index"]

    def get_class_id(self, index: int) -> Optional[str]:
        entries = self._get_entries()
        class_id = entries[index]["class_id"]
        return None if self.split == _Split.TEST else str(class_id)

    def get_class_name(self, index: int) -> Optional[str]:
        entries = self._get_entries()
        class_name = entries[index]["class_name"]
        return None if self.split == _Split.TEST else str(class_name)

    def __len__(self) -> int:
        entries = self._get_entries()
        assert len(entries) == self.split.length
        return len(entries)

    def _load_labels(self, labels_path: str) -> List[Tuple[str, str]]:
        labels_full_path = os.path.join(self.root, labels_path)
        labels = []

        try:
            with open(labels_full_path, "r") as f:
                reader = csv.reader(f)
                for row in reader:
                    class_id, class_name = row
                    labels.append((class_id, class_name))
        except OSError as e:
            raise RuntimeError(f'can not read labels file "{labels_full_path}"') from e

        return labels

    def _dump_entries(self) -> None:
        split = self.split
        if split == ImageNet.Split.TEST:
            dataset = None
            sample_count = split.length
            max_class_id_length, max_class_name_length = 0, 0
        else:
            labels_path = "labels.txt"
            logger.info(f'loading labels from "{labels_path}"')
            labels = self._load_labels(labels_path)

            # NOTE: Using torchvision ImageFolder for consistency
            from torchvision.datasets import ImageFolder

            dataset_root = os.path.join(self.root, split.get_dirname())
            dataset = ImageFolder(dataset_root)
            sample_count = len(dataset)
            max_class_id_length, max_class_name_length = -1, -1
            for sample in dataset.samples:
                _, class_index = sample
                class_id, class_name = labels[class_index]
                max_class_id_length = max(len(class_id), max_class_id_length)
                max_class_name_length = max(len(class_name), max_class_name_length)

        dtype = np.dtype(
            [
                ("actual_index", "<u4"),
                ("class_index", "<u4"),
                ("class_id", f"U{max_class_id_length}"),
                ("class_name", f"U{max_class_name_length}"),
            ]
        )
        entries_array = np.empty(sample_count, dtype=dtype)

        if split == ImageNet.Split.TEST:
            old_percent = -1
            for index in range(sample_count):
                percent = 100 * (index + 1) // sample_count
                if percent > old_percent:
                    logger.info(f"creating entries: {percent}%")
                    old_percent = percent

                actual_index = index + 1
                class_index = np.uint32(-1)
                class_id, class_name = "", ""
                entries_array[index] = (actual_index, class_index, class_id, class_name)
        else:
            class_names = {class_id: class_name for class_id, class_name in labels}

            assert dataset
            old_percent = -1
            for index in range(sample_count):
                percent = 100 * (index + 1) // sample_count
                if percent > old_percent:
                    logger.info(f"creating entries: {percent}%")
                    old_percent = percent

                image_full_path, class_index = dataset.samples[index]
                image_relpath = os.path.relpath(image_full_path, self.root)
                class_id, actual_index = split.parse_image_relpath(image_relpath)
                class_name = class_names[class_id]
                entries_array[index] = (actual_index, class_index, class_id, class_name)

        logger.info(f'saving entries to "{self._entries_path}"')
        self._save_extra(entries_array, self._entries_path)

    def _dump_class_ids_and_names(self) -> None:
        split = self.split
        if split == ImageNet.Split.TEST:
            return

        entries_array = self._load_extra(self._entries_path)

        max_class_id_length, max_class_name_length, max_class_index = -1, -1, -1
        for entry in entries_array:
            class_index, class_id, class_name = (
                entry["class_index"],
                entry["class_id"],
                entry["class_name"],
            )
            max_class_index = max(int(class_index), max_class_index)
            max_class_id_length = max(len(str(class_id)), max_class_id_length)
            max_class_name_length = max(len(str(class_name)), max_class_name_length)

        class_count = max_class_index + 1
        class_ids_array = np.empty(class_count, dtype=f"U{max_class_id_length}")
        class_names_array = np.empty(class_count, dtype=f"U{max_class_name_length}")
        for entry in entries_array:
            class_index, class_id, class_name = (
                entry["class_index"],
                entry["class_id"],
                entry["class_name"],
            )
            class_ids_array[class_index] = class_id
            class_names_array[class_index] = class_name

        logger.info(f'saving class IDs to "{self._class_ids_path}"')
        self._save_extra(class_ids_array, self._class_ids_path)

        logger.info(f'saving class names to "{self._class_names_path}"')
        self._save_extra(class_names_array, self._class_names_path)

    def dump_extra(self) -> None:
        self._dump_entries()
        self._dump_class_ids_and_names()
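
For orientation, the split helpers map entry indices back to the on-disk layout described in the README above; a small sketch:

```python
# Sketch: how _Split builds image paths (matching the README's data layout).
from dinov2.data.datasets import ImageNet

split = ImageNet.Split.VAL
print(split.length)  # 50000
# Internally, split.get_image_relpath(293, "n01440764") yields
# "val/n01440764/ILSVRC2012_val_00000293.JPEG".
```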
torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/image_net_22k.py
ADDED
@@ -0,0 +1,303 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
+
# All rights reserved.
|
3 |
+
#
|
4 |
+
# This source code is licensed under the license found in the
|
5 |
+
# LICENSE file in the root directory of this source tree.
|
6 |
+
|
7 |
+
from dataclasses import dataclass
|
8 |
+
from enum import Enum
|
9 |
+
from functools import lru_cache
|
10 |
+
from gzip import GzipFile
|
11 |
+
from io import BytesIO
|
12 |
+
from mmap import ACCESS_READ, mmap
|
13 |
+
import os
|
14 |
+
from typing import Any, Callable, List, Optional, Set, Tuple
|
15 |
+
import warnings
|
16 |
+
|
17 |
+
import numpy as np
|
18 |
+
|
19 |
+
from .extended import ExtendedVisionDataset
|
20 |
+
|
21 |
+
|
22 |
+
_Labels = int
|
23 |
+
|
24 |
+
_DEFAULT_MMAP_CACHE_SIZE = 16 # Warning: This can exhaust file descriptors
|
25 |
+
|
26 |
+
|
27 |
+
@dataclass
|
28 |
+
class _ClassEntry:
|
29 |
+
block_offset: int
|
30 |
+
maybe_filename: Optional[str] = None
|
31 |
+
|
32 |
+
|
33 |
+
@dataclass
|
34 |
+
class _Entry:
|
35 |
+
class_index: int # noqa: E701
|
36 |
+
start_offset: int
|
37 |
+
end_offset: int
|
38 |
+
filename: str
|
39 |
+
|
40 |
+
|
41 |
+
class _Split(Enum):
|
42 |
+
TRAIN = "train"
|
43 |
+
VAL = "val"
|
44 |
+
|
45 |
+
@property
|
46 |
+
def length(self) -> int:
|
47 |
+
return {
|
48 |
+
_Split.TRAIN: 11_797_647,
|
49 |
+
_Split.VAL: 561_050,
|
50 |
+
}[self]
|
51 |
+
|
52 |
+
def entries_path(self):
|
53 |
+
return f"imagenet21kp_{self.value}.txt"
|
54 |
+
|
55 |
+
|
56 |
+
def _get_tarball_path(class_id: str) -> str:
|
57 |
+
return f"{class_id}.tar"
|
58 |
+
|
59 |
+
|
60 |
+
def _make_mmap_tarball(tarballs_root: str, mmap_cache_size: int):
|
61 |
+
@lru_cache(maxsize=mmap_cache_size)
|
62 |
+
def _mmap_tarball(class_id: str) -> mmap:
|
63 |
+
tarball_path = _get_tarball_path(class_id)
|
64 |
+
tarball_full_path = os.path.join(tarballs_root, tarball_path)
|
65 |
+
with open(tarball_full_path) as f:
|
66 |
+
return mmap(fileno=f.fileno(), length=0, access=ACCESS_READ)
|
67 |
+
|
68 |
+
return _mmap_tarball
|
69 |
+
|
70 |
+
|
71 |
+
class ImageNet22k(ExtendedVisionDataset):
|
72 |
+
_GZIPPED_INDICES: Set[int] = {
|
73 |
+
841_545,
|
74 |
+
1_304_131,
|
75 |
+
2_437_921,
|
76 |
+
2_672_079,
|
77 |
+
2_795_676,
|
78 |
+
2_969_786,
|
79 |
+
6_902_965,
|
80 |
+
6_903_550,
|
81 |
+
6_903_628,
|
82 |
+
7_432_557,
|
83 |
+
7_432_589,
|
84 |
+
7_813_809,
|
85 |
+
8_329_633,
|
86 |
+
10_296_990,
|
87 |
+
10_417_652,
|
88 |
+
10_492_265,
|
89 |
+
10_598_078,
|
90 |
+
10_782_398,
|
91 |
+
10_902_612,
|
92 |
+
11_203_736,
|
93 |
+
11_342_890,
|
94 |
+
11_397_596,
|
95 |
+
11_589_762,
|
96 |
+
11_705_103,
|
97 |
+
12_936_875,
|
98 |
+
13_289_782,
|
99 |
+
}
|
100 |
+
Labels = _Labels
|
101 |
+
|
102 |
+
def __init__(
|
103 |
+
self,
|
104 |
+
*,
|
105 |
+
root: str,
|
106 |
+
extra: str,
|
107 |
+
transforms: Optional[Callable] = None,
|
108 |
+
transform: Optional[Callable] = None,
|
109 |
+
target_transform: Optional[Callable] = None,
|
110 |
+
mmap_cache_size: int = _DEFAULT_MMAP_CACHE_SIZE,
|
111 |
+
) -> None:
|
112 |
+
super().__init__(root, transforms, transform, target_transform)
|
113 |
+
self._extra_root = extra
|
114 |
+
|
115 |
+
entries_path = self._get_entries_path(root)
|
116 |
+
self._entries = self._load_extra(entries_path)
|
117 |
+
|
118 |
+
class_ids_path = self._get_class_ids_path(root)
|
119 |
+
self._class_ids = self._load_extra(class_ids_path)
|
120 |
+
|
121 |
+
self._gzipped_indices = ImageNet22k._GZIPPED_INDICES
|
122 |
+
self._mmap_tarball = _make_mmap_tarball(self._tarballs_root, mmap_cache_size)
|
123 |
+
|
124 |
+
def _get_entries_path(self, root: Optional[str] = None) -> str:
|
125 |
+
return "entries.npy"
|
126 |
+
|
127 |
+
def _get_class_ids_path(self, root: Optional[str] = None) -> str:
|
128 |
+
return "class-ids.npy"
|
129 |
+
|
130 |
+
def _find_class_ids(self, path: str) -> List[str]:
|
131 |
+
class_ids = []
|
132 |
+
|
133 |
+
with os.scandir(path) as entries:
|
134 |
+
for entry in entries:
|
135 |
+
root, ext = os.path.splitext(entry.name)
|
136 |
+
if ext != ".tar":
|
137 |
+
continue
|
138 |
+
class_ids.append(root)
|
139 |
+
|
140 |
+
return sorted(class_ids)
|
141 |
+
|
142 |
+
def _load_entries_class_ids(self, root: Optional[str] = None) -> Tuple[List[_Entry], List[str]]:
|
143 |
+
root = self.get_root(root)
|
144 |
+
entries: List[_Entry] = []
|
145 |
+
class_ids = self._find_class_ids(root)
|
146 |
+
|
147 |
+
for class_index, class_id in enumerate(class_ids):
|
148 |
+
path = os.path.join(root, "blocks", f"{class_id}.log")
|
149 |
+
class_entries = []
|
150 |
+
|
151 |
+
try:
|
152 |
+
with open(path) as f:
|
153 |
+
for line in f:
|
154 |
+
line = line.rstrip()
|
155 |
+
block, filename = line.split(":")
|
156 |
+
block_offset = int(block[6:])
|
157 |
+
filename = filename[1:]
|
158 |
+
|
159 |
+
maybe_filename = None
|
160 |
+
if filename != "** Block of NULs **":
|
161 |
+
maybe_filename = filename
|
162 |
+
_, ext = os.path.splitext(filename)
|
163 |
+
# assert ext == ".JPEG"
|
164 |
+
|
165 |
+
class_entry = _ClassEntry(block_offset, maybe_filename)
|
166 |
+
class_entries.append(class_entry)
|
167 |
+
except OSError as e:
|
168 |
+
raise RuntimeError(f'can not read blocks file "{path}"') from e
|
169 |
+
|
170 |
+
assert class_entries[-1].maybe_filename is None
|
171 |
+
|
172 |
+
for class_entry1, class_entry2 in zip(class_entries, class_entries[1:]):
|
173 |
+
assert class_entry1.block_offset <= class_entry2.block_offset
|
174 |
+
start_offset = 512 * class_entry1.block_offset
|
175 |
+
end_offset = 512 * class_entry2.block_offset
|
176 |
+
assert class_entry1.maybe_filename is not None
|
177 |
+
filename = class_entry1.maybe_filename
|
178 |
+
entry = _Entry(class_index, start_offset, end_offset, filename)
|
179 |
+
# Skip invalid image files (PIL throws UnidentifiedImageError)
|
180 |
+
if filename == "n06470073_47249.JPEG":
|
181 |
+
continue
|
182 |
+
entries.append(entry)
|
183 |
+
|
184 |
+
return entries, class_ids
|
185 |
+
|
186 |
+
def _load_extra(self, extra_path: str) -> np.ndarray:
|
187 |
+
extra_root = self._extra_root
|
188 |
+
extra_full_path = os.path.join(extra_root, extra_path)
|
189 |
+
return np.load(extra_full_path, mmap_mode="r")
|
190 |
+
|
191 |
+
def _save_extra(self, extra_array: np.ndarray, extra_path: str) -> None:
|
192 |
+
extra_root = self._extra_root
|
193 |
+
extra_full_path = os.path.join(extra_root, extra_path)
|
194 |
+
os.makedirs(extra_root, exist_ok=True)
|
195 |
+
np.save(extra_full_path, extra_array)
|
196 |
+
|
197 |
+
@property
|
198 |
+
def _tarballs_root(self) -> str:
|
199 |
+
return self.root
|
200 |
+
|
201 |
+
def find_class_id(self, class_index: int) -> str:
|
202 |
+
return str(self._class_ids[class_index])
|
203 |
+
|
204 |
+
def get_image_data(self, index: int) -> bytes:
|
205 |
+
entry = self._entries[index]
|
206 |
+
class_id = entry["class_id"]
|
207 |
+
class_mmap = self._mmap_tarball(class_id)
|
208 |
+
|
209 |
+
        start_offset, end_offset = entry["start_offset"], entry["end_offset"]
        try:
            mapped_data = class_mmap[start_offset:end_offset]
            data = mapped_data[512:]  # Skip entry header block

            if len(data) >= 2 and tuple(data[:2]) == (0x1F, 0x8B):
                assert index in self._gzipped_indices, f"unexpected gzip header for sample {index}"
                with GzipFile(fileobj=BytesIO(data)) as g:
                    data = g.read()
        except Exception as e:
            raise RuntimeError(f'cannot retrieve image data for sample {index} from "{class_id}" tarball') from e

        return data

    def get_target(self, index: int) -> Any:
        return int(self._entries[index]["class_index"])

    def get_targets(self) -> np.ndarray:
        return self._entries["class_index"]

    def get_class_id(self, index: int) -> str:
        return str(self._entries[index]["class_id"])

    def get_class_ids(self) -> np.ndarray:
        return self._entries["class_id"]

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return super().__getitem__(index)

    def __len__(self) -> int:
        return len(self._entries)

    def _dump_entries(self, *args, **kwargs) -> None:
        entries, class_ids = self._load_entries_class_ids(*args, **kwargs)

        max_class_id_length, max_filename_length, max_class_index = -1, -1, -1
        for entry in entries:
            class_id = class_ids[entry.class_index]
            max_class_index = max(entry.class_index, max_class_index)
            max_class_id_length = max(len(class_id), max_class_id_length)
            max_filename_length = max(len(entry.filename), max_filename_length)

        dtype = np.dtype(
            [
                ("class_index", "<u4"),
                ("class_id", f"U{max_class_id_length}"),
                ("start_offset", "<u4"),
                ("end_offset", "<u4"),
                ("filename", f"U{max_filename_length}"),
            ]
        )
        sample_count = len(entries)
        entries_array = np.empty(sample_count, dtype=dtype)
        for i, entry in enumerate(entries):
            class_index = entry.class_index
            class_id = class_ids[class_index]
            start_offset = entry.start_offset
            end_offset = entry.end_offset
            filename = entry.filename
            entries_array[i] = (
                class_index,
                class_id,
                start_offset,
                end_offset,
                filename,
            )

        entries_path = self._get_entries_path(*args, **kwargs)
        self._save_extra(entries_array, entries_path)

    def _dump_class_ids(self, *args, **kwargs) -> None:
        entries_path = self._get_entries_path(*args, **kwargs)
        entries_array = self._load_extra(entries_path)

        max_class_id_length, max_class_index = -1, -1
        for entry in entries_array:
            class_index, class_id = entry["class_index"], entry["class_id"]
            max_class_index = max(int(class_index), max_class_index)
            max_class_id_length = max(len(str(class_id)), max_class_id_length)

        class_ids_array = np.empty(max_class_index + 1, dtype=f"U{max_class_id_length}")
        for entry in entries_array:
            class_index, class_id = entry["class_index"], entry["class_id"]
            class_ids_array[class_index] = class_id
        class_ids_path = self._get_class_ids_path(*args, **kwargs)
        self._save_extra(class_ids_array, class_ids_path)

    def _dump_extra(self, *args, **kwargs) -> None:
        self._dump_entries(*args, **kwargs)
        self._dump_class_ids(*args, **kwargs)

    def dump_extra(self, root: Optional[str] = None) -> None:
        return self._dump_extra(root)
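The structured entries array is what makes `get_image_data` a pure byte-range lookup into a memory-mapped tarball. A minimal standalone sketch of the same pattern, with made-up values (the dtype mirrors `_dump_entries`; the fake tarball and filenames below are illustrative, nothing is read from a real archive):

import numpy as np

dtype = np.dtype(
    [
        ("class_index", "<u4"),
        ("class_id", "U9"),
        ("start_offset", "<u4"),
        ("end_offset", "<u4"),
        ("filename", "U20"),
    ]
)
entries = np.empty(1, dtype=dtype)
entries[0] = (0, "n01440764", 0, 512 + 3, "n01440764_18.JPEG")

# Stand-in for the mmap'ed class tarball: a 512-byte tar header block, then the payload.
tarball = bytes(512) + b"\xff\xd8\xff"  # JPEG magic bytes as a fake payload

entry = entries[0]
blob = tarball[entry["start_offset"]:entry["end_offset"]]
payload = blob[512:]  # skip the tar entry header block, as in get_image_data
assert payload[:2] == b"\xff\xd8"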
torchhub/facebookresearch_dinov2_main/dinov2/data/loaders.py
ADDED
@@ -0,0 +1,223 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import logging
from enum import Enum
from typing import Any, Callable, List, Optional, TypeVar

import torch
from torch.utils.data import Sampler

from .datasets import ImageNet, ImageNet22k
from .samplers import EpochSampler, InfiniteSampler, ShardedInfiniteSampler


logger = logging.getLogger("dinov2")


class SamplerType(Enum):
    DISTRIBUTED = 0
    EPOCH = 1
    INFINITE = 2
    SHARDED_INFINITE = 3
    SHARDED_INFINITE_NEW = 4


def _make_bool_str(b: bool) -> str:
    return "yes" if b else "no"


def _make_sample_transform(image_transform: Optional[Callable] = None, target_transform: Optional[Callable] = None):
    def transform(sample):
        image, target = sample
        if image_transform is not None:
            image = image_transform(image)
        if target_transform is not None:
            target = target_transform(target)
        return image, target

    return transform


def _parse_dataset_str(dataset_str: str):
    tokens = dataset_str.split(":")

    name = tokens[0]
    kwargs = {}

    for token in tokens[1:]:
        key, value = token.split("=")
        assert key in ("root", "extra", "split")
        kwargs[key] = value

    if name == "ImageNet":
        class_ = ImageNet
        if "split" in kwargs:
            kwargs["split"] = ImageNet.Split[kwargs["split"]]
    elif name == "ImageNet22k":
        class_ = ImageNet22k
    else:
        raise ValueError(f'Unsupported dataset "{name}"')

    return class_, kwargs


def make_dataset(
    *,
    dataset_str: str,
    transform: Optional[Callable] = None,
    target_transform: Optional[Callable] = None,
):
    """
    Creates a dataset with the specified parameters.

    Args:
        dataset_str: A dataset string description (e.g. ImageNet:split=TRAIN).
        transform: A transform to apply to images.
        target_transform: A transform to apply to targets.

    Returns:
        The created dataset.
    """
    logger.info(f'using dataset: "{dataset_str}"')

    class_, kwargs = _parse_dataset_str(dataset_str)
    dataset = class_(transform=transform, target_transform=target_transform, **kwargs)

    logger.info(f"# of dataset samples: {len(dataset):,d}")

    # Aggregated datasets do not expose (yet) these attributes, so add them.
    if not hasattr(dataset, "transform"):
        setattr(dataset, "transform", transform)
    if not hasattr(dataset, "target_transform"):
        setattr(dataset, "target_transform", target_transform)

    return dataset


def _make_sampler(
    *,
    dataset,
    type: Optional[SamplerType] = None,
    shuffle: bool = False,
    seed: int = 0,
    size: int = -1,
    advance: int = 0,
) -> Optional[Sampler]:
    sample_count = len(dataset)

    if type == SamplerType.INFINITE:
        logger.info("sampler: infinite")
        if size > 0:
            raise ValueError("sampler size > 0 is invalid")
        return InfiniteSampler(
            sample_count=sample_count,
            shuffle=shuffle,
            seed=seed,
            advance=advance,
        )
    elif type in (SamplerType.SHARDED_INFINITE, SamplerType.SHARDED_INFINITE_NEW):
        logger.info("sampler: sharded infinite")
        if size > 0:
            raise ValueError("sampler size > 0 is invalid")
        # TODO: Remove support for old shuffling
        use_new_shuffle_tensor_slice = type == SamplerType.SHARDED_INFINITE_NEW
        return ShardedInfiniteSampler(
            sample_count=sample_count,
            shuffle=shuffle,
            seed=seed,
            advance=advance,
            use_new_shuffle_tensor_slice=use_new_shuffle_tensor_slice,
        )
    elif type == SamplerType.EPOCH:
        logger.info("sampler: epoch")
        if advance > 0:
            raise NotImplementedError("sampler advance > 0 is not supported")
        size = size if size > 0 else sample_count
        logger.info(f"# of samples / epoch: {size:,d}")
        return EpochSampler(
            size=size,
            sample_count=sample_count,
            shuffle=shuffle,
            seed=seed,
        )
    elif type == SamplerType.DISTRIBUTED:
        logger.info("sampler: distributed")
        if size > 0:
            raise ValueError("sampler size > 0 is invalid")
        if advance > 0:
            raise ValueError("sampler advance > 0 is invalid")
        return torch.utils.data.DistributedSampler(
            dataset=dataset,
            shuffle=shuffle,
            seed=seed,
            drop_last=False,
        )

    logger.info("sampler: none")
    return None


T = TypeVar("T")


def make_data_loader(
    *,
    dataset,
    batch_size: int,
    num_workers: int,
    shuffle: bool = True,
    seed: int = 0,
    sampler_type: Optional[SamplerType] = SamplerType.INFINITE,
    sampler_size: int = -1,
    sampler_advance: int = 0,
    drop_last: bool = True,
    persistent_workers: bool = False,
    collate_fn: Optional[Callable[[List[T]], Any]] = None,
):
    """
    Creates a data loader with the specified parameters.

    Args:
        dataset: A dataset (third party, LaViDa or WebDataset).
        batch_size: The size of batches to generate.
        num_workers: The number of workers to use.
        shuffle: Whether to shuffle samples.
        seed: The random seed to use.
        sampler_type: Which sampler to use: EPOCH, INFINITE, SHARDED_INFINITE, SHARDED_INFINITE_NEW, DISTRIBUTED or None.
        sampler_size: The number of images per epoch (when applicable) or -1 for the entire dataset.
        sampler_advance: How many samples to skip (when applicable).
        drop_last: Whether the last non-full batch of data should be dropped.
        persistent_workers: Keep the workers' Dataset instances alive after the dataset has been consumed once.
        collate_fn: Function that performs batch collation.
    """

    sampler = _make_sampler(
        dataset=dataset,
        type=sampler_type,
        shuffle=shuffle,
        seed=seed,
        size=sampler_size,
        advance=sampler_advance,
    )

    logger.info("using PyTorch data loader")
    data_loader = torch.utils.data.DataLoader(
        dataset,
        sampler=sampler,
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=True,
        drop_last=drop_last,
        persistent_workers=persistent_workers,
        collate_fn=collate_fn,
    )

    try:
        logger.info(f"# of batches: {len(data_loader):,d}")
    except TypeError:  # data loader has no length
        logger.info("infinite data loader")
    return data_loader
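For orientation, a hedged usage sketch of the factory above: the dataset string packs the class name and its `root`/`extra`/`split` kwargs into one colon-separated token list. The paths below are placeholders and assume the dataset extras have already been dumped:

from dinov2.data import SamplerType, make_data_loader, make_dataset
from dinov2.data.transforms import make_classification_train_transform

dataset = make_dataset(
    dataset_str="ImageNet:split=TRAIN:root=/data/imagenet:extra=/data/imagenet-extra",
    transform=make_classification_train_transform(),
)
loader = make_data_loader(
    dataset=dataset,
    batch_size=64,
    num_workers=8,
    sampler_type=SamplerType.SHARDED_INFINITE,  # never-ending, rank-sharded stream
)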
torchhub/facebookresearch_dinov2_main/dinov2/data/masking.py
ADDED
@@ -0,0 +1,87 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import random
import math
import numpy as np


class MaskingGenerator:
    def __init__(
        self,
        input_size,
        num_masking_patches=None,
        min_num_patches=4,
        max_num_patches=None,
        min_aspect=0.3,
        max_aspect=None,
    ):
        if not isinstance(input_size, tuple):
            input_size = (input_size,) * 2
        self.height, self.width = input_size

        self.num_patches = self.height * self.width
        self.num_masking_patches = num_masking_patches

        self.min_num_patches = min_num_patches
        self.max_num_patches = num_masking_patches if max_num_patches is None else max_num_patches

        max_aspect = max_aspect or 1 / min_aspect
        self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))

    def __repr__(self):
        repr_str = "Generator(%d, %d -> [%d ~ %d], max = %d, %.3f ~ %.3f)" % (
            self.height,
            self.width,
            self.min_num_patches,
            self.max_num_patches,
            self.num_masking_patches,
            self.log_aspect_ratio[0],
            self.log_aspect_ratio[1],
        )
        return repr_str

    def get_shape(self):
        return self.height, self.width

    def _mask(self, mask, max_mask_patches):
        delta = 0
        for _ in range(10):
            target_area = random.uniform(self.min_num_patches, max_mask_patches)
            aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))
            if w < self.width and h < self.height:
                top = random.randint(0, self.height - h)
                left = random.randint(0, self.width - w)

                num_masked = mask[top : top + h, left : left + w].sum()
                # Overlap
                if 0 < h * w - num_masked <= max_mask_patches:
                    for i in range(top, top + h):
                        for j in range(left, left + w):
                            if mask[i, j] == 0:
                                mask[i, j] = 1
                                delta += 1

                if delta > 0:
                    break
        return delta

    def __call__(self, num_masking_patches=0):
        mask = np.zeros(shape=self.get_shape(), dtype=bool)
        mask_count = 0
        while mask_count < num_masking_patches:
            max_mask_patches = num_masking_patches - mask_count
            max_mask_patches = min(max_mask_patches, self.max_num_patches)

            delta = self._mask(mask, max_mask_patches)
            if delta == 0:
                break
            else:
                mask_count += delta

        return mask
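A quick sketch of the generator in isolation: it greedily places random rectangles (up to 10 proposals per `_mask` call) until roughly `num_masking_patches` cells are set, and the overlap check guarantees it never overshoots the target:

from dinov2.data.masking import MaskingGenerator

# 16x16 patch grid (e.g. a 224px image with 14px patches), target ~128 masked patches.
gen = MaskingGenerator(input_size=16, num_masking_patches=128)
mask = gen(num_masking_patches=128)
print(mask.shape, int(mask.sum()))  # (16, 16), at most 128 True cells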
torchhub/facebookresearch_dinov2_main/dinov2/data/samplers.py
ADDED
@@ -0,0 +1,230 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import itertools
from typing import Any, Optional
import warnings

import numpy as np
import torch
from torch.utils.data.sampler import Sampler

import dinov2.distributed as distributed


class EpochSampler(Sampler):
    def __init__(
        self,
        *,
        size: int,
        sample_count: int,
        shuffle: bool = False,
        seed: int = 0,
        start: Optional[int] = None,
        step: Optional[int] = None,
    ):
        self._size = size
        self._sample_count = sample_count
        self._shuffle = shuffle
        self._seed = seed
        self._start = distributed.get_global_rank() if start is None else start
        self._step = distributed.get_global_size() if step is None else step
        self._epoch = 0

    def __iter__(self):
        count = (self._size + self._sample_count - 1) // self._sample_count
        tiled_indices = np.tile(np.arange(self._sample_count), count)
        if self._shuffle:
            seed = self._seed * self._epoch if self._seed != 0 else self._epoch
            rng = np.random.default_rng(seed)
            iterable = rng.choice(tiled_indices, self._size, replace=False)
        else:
            iterable = tiled_indices[: self._size]

        yield from itertools.islice(iterable, self._start, None, self._step)

    def __len__(self):
        return (self._size - self._start + self._step - 1) // self._step

    def set_epoch(self, epoch):
        self._epoch = epoch


def _get_numpy_dtype(size: int) -> Any:
    return np.int32 if size <= 2**31 else np.int64


def _get_torch_dtype(size: int) -> Any:
    return torch.int32 if size <= 2**31 else torch.int64


def _generate_randperm_indices(*, size: int, generator: torch.Generator):
    """Generate the indices of a random permutation."""
    dtype = _get_torch_dtype(size)
    # This is actually matching PyTorch's CPU implementation, see: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/TensorFactories.cpp#L900-L921
    perm = torch.arange(size, dtype=dtype)
    for i in range(size):
        j = torch.randint(i, size, size=(1,), generator=generator).item()

        # Always swap even if no-op
        value = perm[j].item()
        perm[j] = perm[i].item()
        perm[i] = value
        yield value


class InfiniteSampler(Sampler):
    def __init__(
        self,
        *,
        sample_count: int,
        shuffle: bool = False,
        seed: int = 0,
        start: Optional[int] = None,
        step: Optional[int] = None,
        advance: int = 0,
    ):
        self._sample_count = sample_count
        self._seed = seed
        self._shuffle = shuffle
        self._start = distributed.get_global_rank() if start is None else start
        self._step = distributed.get_global_size() if step is None else step
        self._advance = advance

    def __iter__(self):
        if self._shuffle:
            iterator = self._shuffled_iterator()
        else:
            iterator = self._iterator()

        yield from itertools.islice(iterator, self._advance, None)

    def _iterator(self):
        assert not self._shuffle

        while True:
            iterable = range(self._sample_count)
            yield from itertools.islice(iterable, self._start, None, self._step)

    def _shuffled_iterator(self):
        assert self._shuffle

        # Instantiate a generator here (rather than in the ctor) to keep the class
        # picklable (requirement of mp.spawn)
        generator = torch.Generator().manual_seed(self._seed)

        while True:
            iterable = _generate_randperm_indices(size=self._sample_count, generator=generator)
            yield from itertools.islice(iterable, self._start, None, self._step)


# The following function is somewhat equivalent to _new_shuffle_tensor_slice below,
# but avoids a full in-place random permutation generation.
def _shuffle_tensor_slice(
    *, tensor: torch.Tensor, start: int = 0, step: int = 1, generator: torch.Generator
) -> np.ndarray:
    stop = len(tensor)
    count = stop // step
    drop_count = stop - step * count
    if drop_count:
        warnings.warn(f"# of dropped samples: {drop_count}")

    dtype = _get_numpy_dtype(stop)
    result = np.empty(count, dtype=dtype)

    for i in range(count):
        j = torch.randint(0, i + 1, size=(1,), generator=generator).item() if i > 0 else 0

        result[i] = result[j]
        result[j] = tensor[start + i * step].item()

    return result


def _new_shuffle_tensor_slice(
    *, tensor: torch.Tensor, start: int = 0, step: int = 1, generator: torch.Generator
) -> np.ndarray:
    stop = len(tensor)
    count = stop // step
    dtype = torch.int64  # Needed for using randperm result as indices
    drop_count = stop - step * count
    if drop_count:
        warnings.warn(f"# of dropped samples: {drop_count}")
    indices = torch.randperm(count, dtype=dtype, generator=generator)
    return tensor[start::step][indices].numpy()


def _make_seed(seed: int, start: int, iter_count: int) -> int:
    # NOTE: Tried a few variants (including iter_count << 32), this one worked best.
    return seed + start + (iter_count << 24)


class ShardedInfiniteSampler(Sampler):
    def __init__(
        self,
        *,
        sample_count: int,
        shuffle: bool = False,
        seed: int = 0,
        start: Optional[int] = None,
        step: Optional[int] = None,
        advance: int = 0,
        use_new_shuffle_tensor_slice: bool = False,
    ):
        self._sample_count = sample_count
        self._seed = seed
        self._shuffle = shuffle
        self._start = distributed.get_global_rank() if start is None else start
        self._step = distributed.get_global_size() if step is None else step
        self._advance = advance
        self._iter_count = 0
        self._shuffle_tensor_slice_fn = (
            _new_shuffle_tensor_slice if use_new_shuffle_tensor_slice else _shuffle_tensor_slice
        )

    def __iter__(self):
        iter_count = self._advance // self._sample_count
        if iter_count > 0:
            self._advance -= iter_count * self._sample_count
            self._iter_count += iter_count

        if self._shuffle:
            iterator = self._shuffled_iterator()
        else:
            iterator = self._iterator()

        yield from itertools.islice(iterator, self._advance, None)

    def _iterator(self):
        assert not self._shuffle

        while True:
            iterable = range(self._sample_count)
            yield from itertools.islice(iterable, self._start, None, self._step)

    def _shuffled_iterator(self):
        assert self._shuffle

        # Instantiate a generator here (rather than in the ctor) to keep the class
        # picklable (requirement of mp.spawn)
        generator = torch.Generator()

        # Always shuffle everything first
        generator.manual_seed(self._seed)
        dtype = _get_torch_dtype(self._sample_count)
        perm = torch.randperm(self._sample_count, dtype=dtype, generator=generator)

        while True:
            # Re-seed on each iteration to allow skipping whole permutations
            seed = _make_seed(self._seed, self._start, self._iter_count)
            generator.manual_seed(seed)

            iterable = self._shuffle_tensor_slice_fn(
                tensor=perm, start=self._start, step=self._step, generator=generator
            )
            yield from iterable
            self._iter_count += 1
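To see the sharding contract, a small sketch that fixes `start`/`step` by hand instead of reading the distributed rank (single process, so the `dinov2.distributed` helpers are never consulted): each "rank" draws a disjoint slice of the same seed-0 permutation.

import itertools
from dinov2.data.samplers import ShardedInfiniteSampler

shards = [
    ShardedInfiniteSampler(sample_count=10, shuffle=True, seed=0, start=rank, step=2)
    for rank in range(2)
]
# First 5 indices per shard = one pass over each rank's half of the permutation.
epochs = [list(itertools.islice(iter(s), 5)) for s in shards]
assert not set(epochs[0]) & set(epochs[1])  # the two ranks never see the same index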
torchhub/facebookresearch_dinov2_main/dinov2/data/transforms.py
ADDED
@@ -0,0 +1,92 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from typing import Sequence

import torch
from torchvision import transforms


class GaussianBlur(transforms.RandomApply):
    """
    Apply Gaussian Blur to the PIL image.
    """

    def __init__(self, *, p: float = 0.5, radius_min: float = 0.1, radius_max: float = 2.0):
        # NOTE: torchvision is applying 1 - probability to return the original image
        keep_p = 1 - p
        transform = transforms.GaussianBlur(kernel_size=9, sigma=(radius_min, radius_max))
        super().__init__(transforms=[transform], p=keep_p)


class MaybeToTensor(transforms.ToTensor):
    """
    Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor, or keep as is if already a tensor.
    """

    def __call__(self, pic):
        """
        Args:
            pic (PIL Image, numpy.ndarray or torch.tensor): Image to be converted to tensor.
        Returns:
            Tensor: Converted image.
        """
        if isinstance(pic, torch.Tensor):
            return pic
        return super().__call__(pic)


# Use timm's names
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)


def make_normalize_transform(
    mean: Sequence[float] = IMAGENET_DEFAULT_MEAN,
    std: Sequence[float] = IMAGENET_DEFAULT_STD,
) -> transforms.Normalize:
    return transforms.Normalize(mean=mean, std=std)


# This roughly matches torchvision's preset for classification training:
# https://github.com/pytorch/vision/blob/main/references/classification/presets.py#L6-L44
def make_classification_train_transform(
    *,
    crop_size: int = 224,
    interpolation=transforms.InterpolationMode.BICUBIC,
    hflip_prob: float = 0.5,
    mean: Sequence[float] = IMAGENET_DEFAULT_MEAN,
    std: Sequence[float] = IMAGENET_DEFAULT_STD,
):
    transforms_list = [transforms.RandomResizedCrop(crop_size, interpolation=interpolation)]
    if hflip_prob > 0.0:
        transforms_list.append(transforms.RandomHorizontalFlip(hflip_prob))
    transforms_list.extend(
        [
            MaybeToTensor(),
            make_normalize_transform(mean=mean, std=std),
        ]
    )
    return transforms.Compose(transforms_list)


# This matches (roughly) torchvision's preset for classification evaluation:
# https://github.com/pytorch/vision/blob/main/references/classification/presets.py#L47-L69
def make_classification_eval_transform(
    *,
    resize_size: int = 256,
    interpolation=transforms.InterpolationMode.BICUBIC,
    crop_size: int = 224,
    mean: Sequence[float] = IMAGENET_DEFAULT_MEAN,
    std: Sequence[float] = IMAGENET_DEFAULT_STD,
) -> transforms.Compose:
    transforms_list = [
        transforms.Resize(resize_size, interpolation=interpolation),
        transforms.CenterCrop(crop_size),
        MaybeToTensor(),
        make_normalize_transform(mean=mean, std=std),
    ]
    return transforms.Compose(transforms_list)
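A usage sketch of the eval preset above; the result is a plain torchvision `Compose`, so any PIL image works (the blank image below is just a stand-in for a real photo):

from PIL import Image
from dinov2.data.transforms import make_classification_eval_transform

transform = make_classification_eval_transform()  # resize 256, center-crop 224, normalize
image = Image.new("RGB", (640, 480))
tensor = transform(image)
print(tensor.shape)  # torch.Size([3, 224, 224])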
torchhub/facebookresearch_dinov2_main/dinov2/distributed/__init__.py
ADDED
@@ -0,0 +1,271 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import os
import random
import re
import socket
from typing import Dict, List

import torch
import torch.distributed as dist

_LOCAL_RANK = -1
_LOCAL_WORLD_SIZE = -1


def is_enabled() -> bool:
    """
    Returns:
        True if distributed training is enabled
    """
    return dist.is_available() and dist.is_initialized()


def get_global_size() -> int:
    """
    Returns:
        The number of processes in the process group
    """
    return dist.get_world_size() if is_enabled() else 1


def get_global_rank() -> int:
    """
    Returns:
        The rank of the current process within the global process group.
    """
    return dist.get_rank() if is_enabled() else 0


def get_local_rank() -> int:
    """
    Returns:
        The rank of the current process within the local (per-machine) process group.
    """
    if not is_enabled():
        return 0
    assert 0 <= _LOCAL_RANK < _LOCAL_WORLD_SIZE
    return _LOCAL_RANK


def get_local_size() -> int:
    """
    Returns:
        The size of the per-machine process group,
        i.e. the number of processes per machine.
    """
    if not is_enabled():
        return 1
    assert 0 <= _LOCAL_RANK < _LOCAL_WORLD_SIZE
    return _LOCAL_WORLD_SIZE


def is_main_process() -> bool:
    """
    Returns:
        True if the current process is the main one.
    """
    return get_global_rank() == 0


def _restrict_print_to_main_process() -> None:
    """
    This function disables printing when not in the main process
    """
    import builtins as __builtin__

    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        force = kwargs.pop("force", False)
        if is_main_process() or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print


def _get_master_port(seed: int = 0) -> int:
    MIN_MASTER_PORT, MAX_MASTER_PORT = (20_000, 60_000)

    master_port_str = os.environ.get("MASTER_PORT")
    if master_port_str is None:
        rng = random.Random(seed)
        return rng.randint(MIN_MASTER_PORT, MAX_MASTER_PORT)

    return int(master_port_str)


def _get_available_port() -> int:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        # A "" host address means INADDR_ANY i.e. binding to all interfaces.
        # Note this is not compatible with IPv6.
        s.bind(("", 0))
        port = s.getsockname()[1]
        return port


_TORCH_DISTRIBUTED_ENV_VARS = (
    "MASTER_ADDR",
    "MASTER_PORT",
    "RANK",
    "WORLD_SIZE",
    "LOCAL_RANK",
    "LOCAL_WORLD_SIZE",
)


def _collect_env_vars() -> Dict[str, str]:
    return {env_var: os.environ[env_var] for env_var in _TORCH_DISTRIBUTED_ENV_VARS if env_var in os.environ}


def _is_slurm_job_process() -> bool:
    return "SLURM_JOB_ID" in os.environ


def _parse_slurm_node_list(s: str) -> List[str]:
    nodes = []
    # Extract "hostname", "hostname[1-2,3,4-5]," substrings
    p = re.compile(r"(([^\[]+)(?:\[([^\]]+)\])?),?")
    for m in p.finditer(s):
        prefix, suffixes = s[m.start(2) : m.end(2)], s[m.start(3) : m.end(3)]
        for suffix in suffixes.split(","):
            span = suffix.split("-")
            if len(span) == 1:
                nodes.append(prefix + suffix)
            else:
                width = len(span[0])
                start, end = int(span[0]), int(span[1]) + 1
                nodes.extend([prefix + f"{i:0{width}}" for i in range(start, end)])
    return nodes


def _check_env_variable(key: str, new_value: str):
    # Only check for difference with preset environment variables
    if key in os.environ and os.environ[key] != new_value:
        raise RuntimeError(f"Cannot export environment variables as {key} is already set")


class _TorchDistributedEnvironment:
    def __init__(self):
        self.master_addr = "127.0.0.1"
        self.master_port = 0
        self.rank = -1
        self.world_size = -1
        self.local_rank = -1
        self.local_world_size = -1

        if _is_slurm_job_process():
            return self._set_from_slurm_env()

        env_vars = _collect_env_vars()
        if not env_vars:
            # Environment is not set
            pass
        elif len(env_vars) == len(_TORCH_DISTRIBUTED_ENV_VARS):
            # Environment is fully set
            return self._set_from_preset_env()
        else:
            # Environment is partially set
            collected_env_vars = ", ".join(env_vars.keys())
            raise RuntimeError(f"Partially set environment: {collected_env_vars}")

        if torch.cuda.device_count() > 0:
            return self._set_from_local()

        raise RuntimeError("Can't initialize PyTorch distributed environment")

    # Slurm job created with sbatch, submitit, etc...
    def _set_from_slurm_env(self):
        # logger.info("Initialization from Slurm environment")
        job_id = int(os.environ["SLURM_JOB_ID"])
        node_count = int(os.environ["SLURM_JOB_NUM_NODES"])
        nodes = _parse_slurm_node_list(os.environ["SLURM_JOB_NODELIST"])
        assert len(nodes) == node_count

        self.master_addr = nodes[0]
        self.master_port = _get_master_port(seed=job_id)
        self.rank = int(os.environ["SLURM_PROCID"])
        self.world_size = int(os.environ["SLURM_NTASKS"])
        assert self.rank < self.world_size
        self.local_rank = int(os.environ["SLURM_LOCALID"])
        self.local_world_size = self.world_size // node_count
        assert self.local_rank < self.local_world_size

    # Single node job with preset environment (i.e. torchrun)
    def _set_from_preset_env(self):
        # logger.info("Initialization from preset environment")
        self.master_addr = os.environ["MASTER_ADDR"]
        self.master_port = os.environ["MASTER_PORT"]
        self.rank = int(os.environ["RANK"])
        self.world_size = int(os.environ["WORLD_SIZE"])
        assert self.rank < self.world_size
        self.local_rank = int(os.environ["LOCAL_RANK"])
        self.local_world_size = int(os.environ["LOCAL_WORLD_SIZE"])
        assert self.local_rank < self.local_world_size

    # Single node and GPU job (i.e. local script run)
    def _set_from_local(self):
        # logger.info("Initialization from local")
        self.master_addr = "127.0.0.1"
        self.master_port = _get_available_port()
        self.rank = 0
        self.world_size = 1
        self.local_rank = 0
        self.local_world_size = 1

    def export(self, *, overwrite: bool) -> "_TorchDistributedEnvironment":
        # See the "Environment variable initialization" section from
        # https://pytorch.org/docs/stable/distributed.html for the complete list of
        # environment variables required for the env:// initialization method.
        env_vars = {
            "MASTER_ADDR": self.master_addr,
            "MASTER_PORT": str(self.master_port),
            "RANK": str(self.rank),
            "WORLD_SIZE": str(self.world_size),
            "LOCAL_RANK": str(self.local_rank),
            "LOCAL_WORLD_SIZE": str(self.local_world_size),
        }
        if not overwrite:
            for k, v in env_vars.items():
                _check_env_variable(k, v)

        os.environ.update(env_vars)
        return self


def enable(*, set_cuda_current_device: bool = True, overwrite: bool = False, allow_nccl_timeout: bool = False):
    """Enable distributed mode

    Args:
        set_cuda_current_device: If True, call torch.cuda.set_device() to set the
            current PyTorch CUDA device to the one matching the local rank.
        overwrite: If True, overwrites already set variables. Else fails.
        allow_nccl_timeout: If True, sets NCCL_ASYNC_ERROR_HANDLING so the
            torch.distributed timeout is honored with the NCCL backend.
    """

    global _LOCAL_RANK, _LOCAL_WORLD_SIZE
    if _LOCAL_RANK >= 0 or _LOCAL_WORLD_SIZE >= 0:
        raise RuntimeError("Distributed mode has already been enabled")
    torch_env = _TorchDistributedEnvironment()
    torch_env.export(overwrite=overwrite)

    if set_cuda_current_device:
        torch.cuda.set_device(torch_env.local_rank)

    if allow_nccl_timeout:
        # This allows using the torch.distributed timeout with the NCCL backend
        key, value = "NCCL_ASYNC_ERROR_HANDLING", "1"
        if not overwrite:
            _check_env_variable(key, value)
        os.environ[key] = value

    dist.init_process_group(backend="nccl")
    dist.barrier()

    # Finalize setup
    _LOCAL_RANK = torch_env.local_rank
    _LOCAL_WORLD_SIZE = torch_env.local_world_size
    _restrict_print_to_main_process()
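The Slurm node-list parsing is the only subtle piece above; for illustration, the bracket expansion behaves like this (a private helper, imported here purely for the demo):

from dinov2.distributed import _parse_slurm_node_list

print(_parse_slurm_node_list("node[001-003,007],gpuhost"))
# -> ['node001', 'node002', 'node003', 'node007', 'gpuhost']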
torchhub/facebookresearch_dinov2_main/dinov2/eval/__init__.py
ADDED
@@ -0,0 +1,5 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
torchhub/facebookresearch_dinov2_main/dinov2/eval/knn.py
ADDED
@@ -0,0 +1,405 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import argparse
from functools import partial
import json
import logging
import os
import sys
from typing import List, Optional

import torch
from torch.nn.functional import one_hot, softmax

import dinov2.distributed as distributed
from dinov2.data import SamplerType, make_data_loader, make_dataset
from dinov2.data.transforms import make_classification_eval_transform
from dinov2.eval.metrics import AccuracyAveraging, build_topk_accuracy_metric
from dinov2.eval.setup import get_args_parser as get_setup_args_parser
from dinov2.eval.setup import setup_and_build_model
from dinov2.eval.utils import ModelWithNormalize, evaluate, extract_features


logger = logging.getLogger("dinov2")


def get_args_parser(
    description: Optional[str] = None,
    parents: Optional[List[argparse.ArgumentParser]] = None,
    add_help: bool = True,
):
    parents = parents or []
    setup_args_parser = get_setup_args_parser(parents=parents, add_help=False)
    parents = [setup_args_parser]
    parser = argparse.ArgumentParser(
        description=description,
        parents=parents,
        add_help=add_help,
    )
    parser.add_argument(
        "--train-dataset",
        dest="train_dataset_str",
        type=str,
        help="Training dataset",
    )
    parser.add_argument(
        "--val-dataset",
        dest="val_dataset_str",
        type=str,
        help="Validation dataset",
    )
    parser.add_argument(
        "--nb_knn",
        nargs="+",
        type=int,
        help="Number of NN to use. 20 usually works best.",
    )
    parser.add_argument(
        "--temperature",
        type=float,
        help="Temperature used in the voting coefficient",
    )
    parser.add_argument(
        "--gather-on-cpu",
        action="store_true",
        help="Whether to gather the train features on cpu, slower "
        "but useful to avoid OOM for large datasets (e.g. ImageNet22k).",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        help="Batch size.",
    )
    parser.add_argument(
        "--n-per-class-list",
        nargs="+",
        type=int,
        help="Number to take per class",
    )
    parser.add_argument(
        "--n-tries",
        type=int,
        help="Number of tries",
    )
    parser.set_defaults(
        train_dataset_str="ImageNet:split=TRAIN",
        val_dataset_str="ImageNet:split=VAL",
        nb_knn=[10, 20, 100, 200],
        temperature=0.07,
        batch_size=256,
        n_per_class_list=[-1],
        n_tries=1,
    )
    return parser


class KnnModule(torch.nn.Module):
    """
    Gets knn of test features from all processes on a chunk of the train features

    Each rank gets a chunk of the train features as well as a chunk of the test features.
    In `compute_neighbors`, for each rank one after the other, its chunk of test features
    is sent to all devices, partial knns are computed with each chunk of train features
    then collated back on the original device.
    """

    def __init__(self, train_features, train_labels, nb_knn, T, device, num_classes=1000):
        super().__init__()

        self.global_rank = distributed.get_global_rank()
        self.global_size = distributed.get_global_size()

        self.device = device
        self.train_features_rank_T = train_features.chunk(self.global_size)[self.global_rank].T.to(self.device)
        self.candidates = train_labels.chunk(self.global_size)[self.global_rank].view(1, -1).to(self.device)

        self.nb_knn = nb_knn
        self.max_k = max(self.nb_knn)
        self.T = T
        self.num_classes = num_classes

    def _get_knn_sims_and_labels(self, similarity, train_labels):
        topk_sims, indices = similarity.topk(self.max_k, largest=True, sorted=True)
        neighbors_labels = torch.gather(train_labels, 1, indices)
        return topk_sims, neighbors_labels

    def _similarity_for_rank(self, features_rank, source_rank):
        # Send the features from `source_rank` to all ranks
        broadcast_shape = torch.tensor(features_rank.shape).to(self.device)
        torch.distributed.broadcast(broadcast_shape, source_rank)

        broadcasted = features_rank
        if self.global_rank != source_rank:
            broadcasted = torch.zeros(*broadcast_shape, dtype=features_rank.dtype, device=self.device)
        torch.distributed.broadcast(broadcasted, source_rank)

        # Compute the neighbors for `source_rank` among `train_features_rank_T`
        similarity_rank = torch.mm(broadcasted, self.train_features_rank_T)
        candidate_labels = self.candidates.expand(len(similarity_rank), -1)
        return self._get_knn_sims_and_labels(similarity_rank, candidate_labels)

    def _gather_all_knn_for_rank(self, topk_sims, neighbors_labels, target_rank):
        # Gather all neighbors for `target_rank`
        topk_sims_rank = retrieved_rank = None
        if self.global_rank == target_rank:
            topk_sims_rank = [torch.zeros_like(topk_sims) for _ in range(self.global_size)]
            retrieved_rank = [torch.zeros_like(neighbors_labels) for _ in range(self.global_size)]

        torch.distributed.gather(topk_sims, topk_sims_rank, dst=target_rank)
        torch.distributed.gather(neighbors_labels, retrieved_rank, dst=target_rank)

        if self.global_rank == target_rank:
            # Perform a second top-k on the k * global_size retrieved neighbors
            topk_sims_rank = torch.cat(topk_sims_rank, dim=1)
            retrieved_rank = torch.cat(retrieved_rank, dim=1)
            results = self._get_knn_sims_and_labels(topk_sims_rank, retrieved_rank)
            return results
        return None

    def compute_neighbors(self, features_rank):
        for rank in range(self.global_size):
            topk_sims, neighbors_labels = self._similarity_for_rank(features_rank, rank)
            results = self._gather_all_knn_for_rank(topk_sims, neighbors_labels, rank)
            if results is not None:
                topk_sims_rank, neighbors_labels_rank = results
        return topk_sims_rank, neighbors_labels_rank

    def forward(self, features_rank):
        """
        Compute the results on all values of `self.nb_knn` neighbors from the full `self.max_k`
        """
        assert all(k <= self.max_k for k in self.nb_knn)

        topk_sims, neighbors_labels = self.compute_neighbors(features_rank)
        batch_size = neighbors_labels.shape[0]
        topk_sims_transform = softmax(topk_sims / self.T, 1)
        matmul = torch.mul(
            one_hot(neighbors_labels, num_classes=self.num_classes),
            topk_sims_transform.view(batch_size, -1, 1),
        )
        probas_for_k = {k: torch.sum(matmul[:, :k, :], 1) for k in self.nb_knn}
        return probas_for_k


class DictKeysModule(torch.nn.Module):
    def __init__(self, keys):
        super().__init__()
        self.keys = keys

    def forward(self, features_dict, targets):
        for k in self.keys:
            features_dict = features_dict[k]
        return {"preds": features_dict, "target": targets}


def create_module_dict(*, module, n_per_class_list, n_tries, nb_knn, train_features, train_labels):
    modules = {}
    mapping = create_class_indices_mapping(train_labels)
    for npc in n_per_class_list:
        if npc < 0:  # Only one try needed when using the full data
            full_module = module(
                train_features=train_features,
                train_labels=train_labels,
                nb_knn=nb_knn,
            )
            modules["full"] = ModuleDictWithForward({"1": full_module})
            continue
        all_tries = {}
        for t in range(n_tries):
            final_indices = filter_train(mapping, npc, seed=t)
            k_list = list(set(nb_knn + [npc]))
            k_list = sorted([el for el in k_list if el <= npc])
            all_tries[str(t)] = module(
                train_features=train_features[final_indices],
                train_labels=train_labels[final_indices],
                nb_knn=k_list,
            )
        modules[f"{npc} per class"] = ModuleDictWithForward(all_tries)

    return ModuleDictWithForward(modules)


def filter_train(mapping, n_per_class, seed):
    torch.manual_seed(seed)
    final_indices = []
    for k in mapping.keys():
        index = torch.randperm(len(mapping[k]))[:n_per_class]
        final_indices.append(mapping[k][index])
    return torch.cat(final_indices).squeeze()


def create_class_indices_mapping(labels):
    unique_labels, inverse = torch.unique(labels, return_inverse=True)
    mapping = {unique_labels[i]: (inverse == i).nonzero() for i in range(len(unique_labels))}
    return mapping


class ModuleDictWithForward(torch.nn.ModuleDict):
    def forward(self, *args, **kwargs):
        return {k: module(*args, **kwargs) for k, module in self._modules.items()}


def eval_knn(
    model,
    train_dataset,
    val_dataset,
    accuracy_averaging,
    nb_knn,
    temperature,
    batch_size,
    num_workers,
    gather_on_cpu,
    n_per_class_list=[-1],
    n_tries=1,
):
    model = ModelWithNormalize(model)

    logger.info("Extracting features for train set...")
    train_features, train_labels = extract_features(
        model, train_dataset, batch_size, num_workers, gather_on_cpu=gather_on_cpu
    )
    logger.info(f"Train features created, shape {train_features.shape}.")

    val_dataloader = make_data_loader(
        dataset=val_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        sampler_type=SamplerType.DISTRIBUTED,
        drop_last=False,
        shuffle=False,
        persistent_workers=True,
    )
    num_classes = train_labels.max() + 1
    metric_collection = build_topk_accuracy_metric(accuracy_averaging, num_classes=num_classes)

    device = torch.cuda.current_device()
    partial_module = partial(KnnModule, T=temperature, device=device, num_classes=num_classes)
    knn_module_dict = create_module_dict(
        module=partial_module,
        n_per_class_list=n_per_class_list,
        n_tries=n_tries,
        nb_knn=nb_knn,
        train_features=train_features,
        train_labels=train_labels,
    )
    postprocessors, metrics = {}, {}
    for n_per_class, knn_module in knn_module_dict.items():
        for t, knn_try in knn_module.items():
            postprocessors = {
                **postprocessors,
                **{(n_per_class, t, k): DictKeysModule([n_per_class, t, k]) for k in knn_try.nb_knn},
            }
            metrics = {**metrics, **{(n_per_class, t, k): metric_collection.clone() for k in knn_try.nb_knn}}
    model_with_knn = torch.nn.Sequential(model, knn_module_dict)

    # ============ evaluation ... ============
    logger.info("Start the k-NN classification.")
    _, results_dict = evaluate(model_with_knn, val_dataloader, postprocessors, metrics, device)

    # Averaging the results over the n tries for each value of n_per_class
    for n_per_class, knn_module in knn_module_dict.items():
        first_try = list(knn_module.keys())[0]
        k_list = knn_module[first_try].nb_knn
        for k in k_list:
            keys = results_dict[(n_per_class, first_try, k)].keys()  # keys are e.g. `top-1` and `top-5`
            results_dict[(n_per_class, k)] = {
                key: torch.mean(torch.stack([results_dict[(n_per_class, t, k)][key] for t in knn_module.keys()]))
                for key in keys
            }
            for t in knn_module.keys():
                del results_dict[(n_per_class, t, k)]

    return results_dict


def eval_knn_with_model(
    model,
    output_dir,
    train_dataset_str="ImageNet:split=TRAIN",
    val_dataset_str="ImageNet:split=VAL",
    nb_knn=(10, 20, 100, 200),
    temperature=0.07,
    autocast_dtype=torch.float,
    accuracy_averaging=AccuracyAveraging.MEAN_ACCURACY,
    transform=None,
    gather_on_cpu=False,
    batch_size=256,
    num_workers=5,
    n_per_class_list=[-1],
    n_tries=1,
):
    transform = transform or make_classification_eval_transform()

    train_dataset = make_dataset(
        dataset_str=train_dataset_str,
        transform=transform,
    )
    val_dataset = make_dataset(
        dataset_str=val_dataset_str,
        transform=transform,
    )

    with torch.cuda.amp.autocast(dtype=autocast_dtype):
        results_dict_knn = eval_knn(
            model=model,
            train_dataset=train_dataset,
            val_dataset=val_dataset,
            accuracy_averaging=accuracy_averaging,
            nb_knn=nb_knn,
            temperature=temperature,
            batch_size=batch_size,
            num_workers=num_workers,
            gather_on_cpu=gather_on_cpu,
            n_per_class_list=n_per_class_list,
            n_tries=n_tries,
        )

    results_dict = {}
    if distributed.is_main_process():
        for knn_ in results_dict_knn.keys():
            top1 = results_dict_knn[knn_]["top-1"].item() * 100.0
            top5 = results_dict_knn[knn_]["top-5"].item() * 100.0
            results_dict[f"{knn_} Top 1"] = top1
            results_dict[f"{knn_} Top 5"] = top5
            logger.info(f"{knn_} classifier result: Top1: {top1:.2f} Top5: {top5:.2f}")

        metrics_file_path = os.path.join(output_dir, "results_eval_knn.json")
        with open(metrics_file_path, "a") as f:
            for k, v in results_dict.items():
                f.write(json.dumps({k: v}) + "\n")

    if distributed.is_enabled():
        torch.distributed.barrier()
    return results_dict


def main(args):
    model, autocast_dtype = setup_and_build_model(args)
    eval_knn_with_model(
        model=model,
        output_dir=args.output_dir,
        train_dataset_str=args.train_dataset_str,
        val_dataset_str=args.val_dataset_str,
        nb_knn=args.nb_knn,
        temperature=args.temperature,
        autocast_dtype=autocast_dtype,
        accuracy_averaging=AccuracyAveraging.MEAN_ACCURACY,
        transform=None,
        gather_on_cpu=args.gather_on_cpu,
        batch_size=args.batch_size,
        num_workers=5,
        n_per_class_list=args.n_per_class_list,
        n_tries=args.n_tries,
    )
    return 0


if __name__ == "__main__":
    description = "DINOv2 k-NN evaluation"
    args_parser = get_args_parser(description=description)
    args = args_parser.parse_args()
    sys.exit(main(args))
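Stripped of the broadcast/gather plumbing, `KnnModule.forward` is a temperature-softmaxed similarity vote. A single-process sketch of the same arithmetic on random features (shapes and values are illustrative):

import torch
from torch.nn.functional import normalize, one_hot, softmax

num_classes, k, T = 5, 3, 0.07
test_feats = normalize(torch.randn(4, 16), dim=1)
train_feats = normalize(torch.randn(100, 16), dim=1)
train_labels = torch.randint(0, num_classes, (100,))

sims = test_feats @ train_feats.T                      # cosine similarities
topk_sims, indices = sims.topk(k, largest=True)        # k nearest neighbors per query
neighbor_labels = train_labels[indices]                # (4, k)
weights = softmax(topk_sims / T, dim=1)                # temperature-scaled vote weights
probas = (one_hot(neighbor_labels, num_classes) * weights.unsqueeze(-1)).sum(dim=1)
pred = probas.argmax(dim=1)                            # weighted-vote prediction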
torchhub/facebookresearch_dinov2_main/dinov2/eval/linear.py
ADDED
@@ -0,0 +1,626 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import argparse
from functools import partial
import json
import logging
import os
import sys
from typing import List, Optional

import numpy as np
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer

from dinov2.data import SamplerType, make_data_loader, make_dataset
from dinov2.data.transforms import make_classification_eval_transform, make_classification_train_transform
import dinov2.distributed as distributed
from dinov2.eval.metrics import MetricType, build_metric
from dinov2.eval.setup import get_args_parser as get_setup_args_parser
from dinov2.eval.setup import setup_and_build_model
from dinov2.eval.utils import ModelWithIntermediateLayers, evaluate
from dinov2.logging import MetricLogger


logger = logging.getLogger("dinov2")


def get_args_parser(
    description: Optional[str] = None,
    parents: Optional[List[argparse.ArgumentParser]] = None,
    add_help: bool = True,
):
    parents = parents or []
    setup_args_parser = get_setup_args_parser(parents=parents, add_help=False)
    parents = [setup_args_parser]
    parser = argparse.ArgumentParser(
        description=description,
        parents=parents,
        add_help=add_help,
    )
    parser.add_argument(
        "--train-dataset",
        dest="train_dataset_str",
        type=str,
        help="Training dataset",
    )
    parser.add_argument(
        "--val-dataset",
        dest="val_dataset_str",
        type=str,
        help="Validation dataset",
    )
    parser.add_argument(
        "--test-datasets",
        dest="test_dataset_strs",
        type=str,
        nargs="+",
        help="Test datasets, none to reuse the validation dataset",
    )
    parser.add_argument(
        "--epochs",
        type=int,
        help="Number of training epochs",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        help="Batch Size (per GPU)",
    )
    parser.add_argument(
        "--num-workers",
        type=int,
        help="Number of workers",
    )
    parser.add_argument(
        "--epoch-length",
        type=int,
        help="Length of an epoch in number of iterations",
    )
    parser.add_argument(
        "--save-checkpoint-frequency",
        type=int,
        help="Number of epochs between two named checkpoint saves.",
    )
    parser.add_argument(
        "--eval-period-iterations",
        type=int,
        help="Number of iterations between two evaluations.",
    )
    parser.add_argument(
        "--learning-rates",
        nargs="+",
        type=float,
        help="Learning rates to grid search.",
    )
    parser.add_argument(
        "--no-resume",
        action="store_true",
        help="Whether to not resume from existing checkpoints",
    )
    parser.add_argument(
        "--val-metric-type",
        type=MetricType,
        choices=list(MetricType),
        help="Validation metric",
    )
    parser.add_argument(
        "--test-metric-types",
        type=MetricType,
        choices=list(MetricType),
        nargs="+",
        help="Evaluation metric",
    )
    parser.add_argument(
        "--classifier-fpath",
        type=str,
        help="Path to a file containing pretrained linear classifiers",
    )
    parser.add_argument(
        "--val-class-mapping-fpath",
        type=str,
        help="Path to a file containing a mapping to adjust classifier outputs",
    )
    parser.add_argument(
        "--test-class-mapping-fpaths",
        nargs="+",
        type=str,
        help="Path to a file containing a mapping to adjust classifier outputs",
    )
    parser.set_defaults(
        train_dataset_str="ImageNet:split=TRAIN",
        val_dataset_str="ImageNet:split=VAL",
        test_dataset_strs=None,
        epochs=10,
        batch_size=128,
        num_workers=8,
        epoch_length=1250,
        save_checkpoint_frequency=20,
        eval_period_iterations=1250,
        learning_rates=[1e-5, 2e-5, 5e-5, 1e-4, 2e-4, 5e-4, 1e-3, 2e-3, 5e-3, 1e-2, 2e-2, 5e-2, 0.1],
        val_metric_type=MetricType.MEAN_ACCURACY,
        test_metric_types=None,
        classifier_fpath=None,
        val_class_mapping_fpath=None,
        test_class_mapping_fpaths=[None],
    )
    return parser


def has_ddp_wrapper(m: nn.Module) -> bool:
    return isinstance(m, DistributedDataParallel)


def remove_ddp_wrapper(m: nn.Module) -> nn.Module:
    return m.module if has_ddp_wrapper(m) else m


def _pad_and_collate(batch):
    maxlen = max(len(targets) for image, targets in batch)
    padded_batch = [
        (image, np.pad(targets, (0, maxlen - len(targets)), constant_values=-1)) for image, targets in batch
    ]
    return torch.utils.data.default_collate(padded_batch)


def create_linear_input(x_tokens_list, use_n_blocks, use_avgpool):
    intermediate_output = x_tokens_list[-use_n_blocks:]
    output = torch.cat([class_token for _, class_token in intermediate_output], dim=-1)
    if use_avgpool:
        output = torch.cat(
            (
                output,
                torch.mean(intermediate_output[-1][0], dim=1),  # patch tokens
            ),
            dim=-1,
        )
        output = output.reshape(output.shape[0], -1)
    return output.float()
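
# Note on feature dimensionality (editorial comment, not in the upstream file):
# for a backbone with embedding dimension D, create_linear_input returns
# out_dim = use_n_blocks * D, plus an extra D when use_avgpool is True, because
# the average-pooled patch tokens of the last block are concatenated to the
# stacked CLS tokens. E.g. ViT-S/14 (D = 384) with use_n_blocks=4 and
# use_avgpool=True yields 4 * 384 + 384 = 1920 input features for the head.
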
class LinearClassifier(nn.Module):
    """Linear layer to train on top of frozen features"""

    def __init__(self, out_dim, use_n_blocks, use_avgpool, num_classes=1000):
        super().__init__()
        self.out_dim = out_dim
        self.use_n_blocks = use_n_blocks
        self.use_avgpool = use_avgpool
        self.num_classes = num_classes
        self.linear = nn.Linear(out_dim, num_classes)
        self.linear.weight.data.normal_(mean=0.0, std=0.01)
        self.linear.bias.data.zero_()

    def forward(self, x_tokens_list):
        output = create_linear_input(x_tokens_list, self.use_n_blocks, self.use_avgpool)
        return self.linear(output)


class AllClassifiers(nn.Module):
    def __init__(self, classifiers_dict):
        super().__init__()
        self.classifiers_dict = nn.ModuleDict()
        self.classifiers_dict.update(classifiers_dict)

    def forward(self, inputs):
        return {k: v.forward(inputs) for k, v in self.classifiers_dict.items()}

    def __len__(self):
        return len(self.classifiers_dict)


class LinearPostprocessor(nn.Module):
    def __init__(self, linear_classifier, class_mapping=None):
        super().__init__()
        self.linear_classifier = linear_classifier
        self.register_buffer("class_mapping", None if class_mapping is None else torch.LongTensor(class_mapping))

    def forward(self, samples, targets):
        preds = self.linear_classifier(samples)
        return {
            "preds": preds[:, self.class_mapping] if self.class_mapping is not None else preds,
            "target": targets,
        }


def scale_lr(learning_rates, batch_size):
    return learning_rates * (batch_size * distributed.get_global_size()) / 256.0


def setup_linear_classifiers(sample_output, n_last_blocks_list, learning_rates, batch_size, num_classes=1000):
    linear_classifiers_dict = nn.ModuleDict()
    optim_param_groups = []
    for n in n_last_blocks_list:
        for avgpool in [False, True]:
            for _lr in learning_rates:
                lr = scale_lr(_lr, batch_size)
                out_dim = create_linear_input(sample_output, use_n_blocks=n, use_avgpool=avgpool).shape[1]
                linear_classifier = LinearClassifier(
                    out_dim, use_n_blocks=n, use_avgpool=avgpool, num_classes=num_classes
                )
                linear_classifier = linear_classifier.cuda()
                linear_classifiers_dict[
                    f"classifier_{n}_blocks_avgpool_{avgpool}_lr_{lr:.5f}".replace(".", "_")
                ] = linear_classifier
                optim_param_groups.append({"params": linear_classifier.parameters(), "lr": lr})

    linear_classifiers = AllClassifiers(linear_classifiers_dict)
    if distributed.is_enabled():
        linear_classifiers = nn.parallel.DistributedDataParallel(linear_classifiers)

    return linear_classifiers, optim_param_groups
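
# Editorial note (not in the upstream file): setup_linear_classifiers builds one
# linear head per (n_blocks, avgpool, lr) combination, so a single run trains the
# whole grid jointly on the same frozen features. With the defaults used in
# run_eval_linear below (n_last_blocks_list = [1, 4], avgpool in {False, True},
# and the 13 learning rates from the parser), that is 2 * 2 * 13 = 52 heads.
# Each requested lr is first rescaled by the linear scaling rule
# lr * (batch_size * world_size) / 256 implemented in scale_lr above.
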
@torch.no_grad()
def evaluate_linear_classifiers(
    feature_model,
    linear_classifiers,
    data_loader,
    metric_type,
    metrics_file_path,
    training_num_classes,
    iteration,
    prefixstring="",
    class_mapping=None,
    best_classifier_on_val=None,
):
    logger.info("running validation!")

    num_classes = len(class_mapping) if class_mapping is not None else training_num_classes
    metric = build_metric(metric_type, num_classes=num_classes)
    postprocessors = {k: LinearPostprocessor(v, class_mapping) for k, v in linear_classifiers.classifiers_dict.items()}
    metrics = {k: metric.clone() for k in linear_classifiers.classifiers_dict}

    _, results_dict_temp = evaluate(
        feature_model,
        data_loader,
        postprocessors,
        metrics,
        torch.cuda.current_device(),
    )

    logger.info("")
    results_dict = {}
    max_accuracy = 0
    best_classifier = ""
    for i, (classifier_string, metric) in enumerate(results_dict_temp.items()):
        logger.info(f"{prefixstring} -- Classifier: {classifier_string} * {metric}")
        if (
            best_classifier_on_val is None and metric["top-1"].item() > max_accuracy
        ) or classifier_string == best_classifier_on_val:
            max_accuracy = metric["top-1"].item()
            best_classifier = classifier_string

    results_dict["best_classifier"] = {"name": best_classifier, "accuracy": max_accuracy}

    logger.info(f"best classifier: {results_dict['best_classifier']}")

    if distributed.is_main_process():
        with open(metrics_file_path, "a") as f:
            f.write(f"iter: {iteration}\n")
            for k, v in results_dict.items():
                f.write(json.dumps({k: v}) + "\n")
            f.write("\n")

    return results_dict


def eval_linear(
    *,
    feature_model,
    linear_classifiers,
    train_data_loader,
    val_data_loader,
    metrics_file_path,
    optimizer,
    scheduler,
    output_dir,
    max_iter,
    checkpoint_period,  # In number of iter, creates a new file every period
    running_checkpoint_period,  # Period to update main checkpoint file
    eval_period,
    metric_type,
    training_num_classes,
    resume=True,
    classifier_fpath=None,
    val_class_mapping=None,
):
    checkpointer = Checkpointer(linear_classifiers, output_dir, optimizer=optimizer, scheduler=scheduler)
    start_iter = checkpointer.resume_or_load(classifier_fpath or "", resume=resume).get("iteration", -1) + 1

    periodic_checkpointer = PeriodicCheckpointer(checkpointer, checkpoint_period, max_iter=max_iter)
    iteration = start_iter
    logger.info("Starting training from iteration {}".format(start_iter))
    metric_logger = MetricLogger(delimiter="  ")
    header = "Training"

    for data, labels in metric_logger.log_every(
        train_data_loader,
        10,
        header,
        max_iter,
        start_iter,
    ):
        data = data.cuda(non_blocking=True)
        labels = labels.cuda(non_blocking=True)

        features = feature_model(data)
        outputs = linear_classifiers(features)

        losses = {f"loss_{k}": nn.CrossEntropyLoss()(v, labels) for k, v in outputs.items()}
        loss = sum(losses.values())

        # compute the gradients
        optimizer.zero_grad()
        loss.backward()

        # step
        optimizer.step()
        scheduler.step()

        # log
        if iteration % 10 == 0:
            torch.cuda.synchronize()
            metric_logger.update(loss=loss.item())
            metric_logger.update(lr=optimizer.param_groups[0]["lr"])
            print("lr", optimizer.param_groups[0]["lr"])

        if iteration - start_iter > 5:
            if iteration % running_checkpoint_period == 0:
                torch.cuda.synchronize()
                if distributed.is_main_process():
                    logger.info("Checkpointing running_checkpoint")
                    periodic_checkpointer.save("running_checkpoint_linear_eval", iteration=iteration)
                torch.cuda.synchronize()
        periodic_checkpointer.step(iteration)

        if eval_period > 0 and (iteration + 1) % eval_period == 0 and iteration != max_iter - 1:
            _ = evaluate_linear_classifiers(
                feature_model=feature_model,
                linear_classifiers=remove_ddp_wrapper(linear_classifiers),
                data_loader=val_data_loader,
                metrics_file_path=metrics_file_path,
                prefixstring=f"ITER: {iteration}",
                metric_type=metric_type,
                training_num_classes=training_num_classes,
                iteration=iteration,
                class_mapping=val_class_mapping,
            )
            torch.cuda.synchronize()

        iteration = iteration + 1

    val_results_dict = evaluate_linear_classifiers(
        feature_model=feature_model,
        linear_classifiers=remove_ddp_wrapper(linear_classifiers),
        data_loader=val_data_loader,
        metrics_file_path=metrics_file_path,
        metric_type=metric_type,
        training_num_classes=training_num_classes,
        iteration=iteration,
        class_mapping=val_class_mapping,
    )
    return val_results_dict, feature_model, linear_classifiers, iteration


def make_eval_data_loader(test_dataset_str, batch_size, num_workers, metric_type):
    test_dataset = make_dataset(
        dataset_str=test_dataset_str,
        transform=make_classification_eval_transform(),
    )
    test_data_loader = make_data_loader(
        dataset=test_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        sampler_type=SamplerType.DISTRIBUTED,
        drop_last=False,
        shuffle=False,
        persistent_workers=False,
        collate_fn=_pad_and_collate if metric_type == MetricType.IMAGENET_REAL_ACCURACY else None,
    )
    return test_data_loader


def test_on_datasets(
    feature_model,
    linear_classifiers,
    test_dataset_strs,
    batch_size,
    num_workers,
    test_metric_types,
    metrics_file_path,
    training_num_classes,
    iteration,
    best_classifier_on_val,
    prefixstring="",
    test_class_mappings=[None],
):
    results_dict = {}
    for test_dataset_str, class_mapping, metric_type in zip(test_dataset_strs, test_class_mappings, test_metric_types):
        logger.info(f"Testing on {test_dataset_str}")
        test_data_loader = make_eval_data_loader(test_dataset_str, batch_size, num_workers, metric_type)
        dataset_results_dict = evaluate_linear_classifiers(
            feature_model,
            remove_ddp_wrapper(linear_classifiers),
            test_data_loader,
            metric_type,
            metrics_file_path,
            training_num_classes,
            iteration,
            prefixstring="",
            class_mapping=class_mapping,
            best_classifier_on_val=best_classifier_on_val,
        )
        results_dict[f"{test_dataset_str}_accuracy"] = 100.0 * dataset_results_dict["best_classifier"]["accuracy"]
    return results_dict


def run_eval_linear(
    model,
    output_dir,
    train_dataset_str,
    val_dataset_str,
    batch_size,
    epochs,
    epoch_length,
    num_workers,
    save_checkpoint_frequency,
    eval_period_iterations,
    learning_rates,
    autocast_dtype,
    test_dataset_strs=None,
    resume=True,
    classifier_fpath=None,
    val_class_mapping_fpath=None,
    test_class_mapping_fpaths=[None],
    val_metric_type=MetricType.MEAN_ACCURACY,
    test_metric_types=None,
):
    seed = 0

    if test_dataset_strs is None:
        test_dataset_strs = [val_dataset_str]
    if test_metric_types is None:
        test_metric_types = [val_metric_type] * len(test_dataset_strs)
    else:
        assert len(test_metric_types) == len(test_dataset_strs)
    assert len(test_dataset_strs) == len(test_class_mapping_fpaths)

    train_transform = make_classification_train_transform()
    train_dataset = make_dataset(
        dataset_str=train_dataset_str,
        transform=train_transform,
    )
    training_num_classes = len(torch.unique(torch.Tensor(train_dataset.get_targets().astype(int))))
    sampler_type = SamplerType.SHARDED_INFINITE
    # sampler_type = SamplerType.INFINITE

    n_last_blocks_list = [1, 4]
    n_last_blocks = max(n_last_blocks_list)
    autocast_ctx = partial(torch.cuda.amp.autocast, enabled=True, dtype=autocast_dtype)
    feature_model = ModelWithIntermediateLayers(model, n_last_blocks, autocast_ctx)
    sample_output = feature_model(train_dataset[0][0].unsqueeze(0).cuda())

    linear_classifiers, optim_param_groups = setup_linear_classifiers(
        sample_output,
        n_last_blocks_list,
        learning_rates,
        batch_size,
        training_num_classes,
    )

    optimizer = torch.optim.SGD(optim_param_groups, momentum=0.9, weight_decay=0)
    max_iter = epochs * epoch_length
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, max_iter, eta_min=0)
    checkpointer = Checkpointer(linear_classifiers, output_dir, optimizer=optimizer, scheduler=scheduler)
    start_iter = checkpointer.resume_or_load(classifier_fpath or "", resume=resume).get("iteration", -1) + 1
    train_data_loader = make_data_loader(
        dataset=train_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=True,
        seed=seed,
        sampler_type=sampler_type,
        sampler_advance=start_iter,
        drop_last=True,
        persistent_workers=True,
    )
    val_data_loader = make_eval_data_loader(val_dataset_str, batch_size, num_workers, val_metric_type)

    checkpoint_period = save_checkpoint_frequency * epoch_length

    if val_class_mapping_fpath is not None:
        logger.info(f"Using class mapping from {val_class_mapping_fpath}")
        val_class_mapping = np.load(val_class_mapping_fpath)
    else:
        val_class_mapping = None

    test_class_mappings = []
    for class_mapping_fpath in test_class_mapping_fpaths:
        if class_mapping_fpath is not None and class_mapping_fpath != "None":
            logger.info(f"Using class mapping from {class_mapping_fpath}")
            class_mapping = np.load(class_mapping_fpath)
        else:
            class_mapping = None
        test_class_mappings.append(class_mapping)

    metrics_file_path = os.path.join(output_dir, "results_eval_linear.json")
    val_results_dict, feature_model, linear_classifiers, iteration = eval_linear(
        feature_model=feature_model,
        linear_classifiers=linear_classifiers,
        train_data_loader=train_data_loader,
        val_data_loader=val_data_loader,
        metrics_file_path=metrics_file_path,
        optimizer=optimizer,
        scheduler=scheduler,
        output_dir=output_dir,
        max_iter=max_iter,
        checkpoint_period=checkpoint_period,
        running_checkpoint_period=epoch_length,
        eval_period=eval_period_iterations,
        metric_type=val_metric_type,
        training_num_classes=training_num_classes,
        resume=resume,
        val_class_mapping=val_class_mapping,
        classifier_fpath=classifier_fpath,
    )
    results_dict = {}
    if len(test_dataset_strs) > 1 or test_dataset_strs[0] != val_dataset_str:
        results_dict = test_on_datasets(
            feature_model,
            linear_classifiers,
            test_dataset_strs,
            batch_size,
            0,  # num_workers,
            test_metric_types,
            metrics_file_path,
            training_num_classes,
            iteration,
            val_results_dict["best_classifier"]["name"],
            prefixstring="",
            test_class_mappings=test_class_mappings,
        )
    results_dict["best_classifier"] = val_results_dict["best_classifier"]["name"]
    results_dict[f"{val_dataset_str}_accuracy"] = 100.0 * val_results_dict["best_classifier"]["accuracy"]
    logger.info("Test Results Dict " + str(results_dict))

    return results_dict


def main(args):
    model, autocast_dtype = setup_and_build_model(args)
    run_eval_linear(
        model=model,
        output_dir=args.output_dir,
        train_dataset_str=args.train_dataset_str,
        val_dataset_str=args.val_dataset_str,
        test_dataset_strs=args.test_dataset_strs,
        batch_size=args.batch_size,
        epochs=args.epochs,
        epoch_length=args.epoch_length,
        num_workers=args.num_workers,
        save_checkpoint_frequency=args.save_checkpoint_frequency,
        eval_period_iterations=args.eval_period_iterations,
        learning_rates=args.learning_rates,
        autocast_dtype=autocast_dtype,
        resume=not args.no_resume,
        classifier_fpath=args.classifier_fpath,
        val_metric_type=args.val_metric_type,
        test_metric_types=args.test_metric_types,
        val_class_mapping_fpath=args.val_class_mapping_fpath,
        test_class_mapping_fpaths=args.test_class_mapping_fpaths,
    )
    return 0


if __name__ == "__main__":
    description = "DINOv2 linear evaluation"
    args_parser = get_args_parser(description=description)
    args = args_parser.parse_args()
    sys.exit(main(args))
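
As with the k-NN script above, a minimal programmatic sketch (editor's addition, not part of this commit). The hub entrypoint, paths, and dataset strings are illustrative placeholders; the remaining values simply mirror the parser defaults above, except for a deliberately smaller learning-rate grid.

import torch

from dinov2.eval.linear import run_eval_linear
from dinov2.eval.metrics import MetricType

model = torch.hub.load("facebookresearch/dinov2", "dinov2_vits14").cuda().eval()

results = run_eval_linear(
    model=model,
    output_dir="./linear_eval",                # placeholder output path
    train_dataset_str="ImageNet:split=TRAIN",  # placeholder dataset spec
    val_dataset_str="ImageNet:split=VAL",
    batch_size=128,
    epochs=10,
    epoch_length=1250,
    num_workers=8,
    save_checkpoint_frequency=20,
    eval_period_iterations=1250,
    learning_rates=[1e-4, 1e-3, 1e-2],         # smaller grid than the default 13-point one
    autocast_dtype=torch.float16,
    val_metric_type=MetricType.MEAN_ACCURACY,
)
print(results["best_classifier"])
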