Ishan Misra committed

Commit d5d3b04
0 Parent(s)

Initial commit

This view is limited to 50 files because the commit contains too many changes.

Files changed (50)
  1. .gitignore +62 -0
  2. .gitmodules +6 -0
  3. CODE_OF_CONDUCT.md +5 -0
  4. CONTRIBUTING.md +39 -0
  5. LICENSE +202 -0
  6. README.md +107 -0
  7. configs/Base-C2_L_R5021k_640b64_4x.yaml +82 -0
  8. configs/Base-DeformDETR_L_R50_4x.yaml +59 -0
  9. configs/Base_OVCOCO_C4_1x.yaml +31 -0
  10. configs/BoxSup-C2_LCOCO_CLIP_SwinB_896b32_4x.yaml +19 -0
  11. configs/BoxSup-C2_L_CLIP_R5021k_640b64_4x.yaml +4 -0
  12. configs/BoxSup-C2_L_CLIP_SwinB_896b32_4x.yaml +17 -0
  13. configs/BoxSup-C2_Lbase_CLIP_R5021k_640b64_4x.yaml +6 -0
  14. configs/BoxSup-C2_Lbase_CLIP_SwinB_896b32_4x.yaml +19 -0
  15. configs/BoxSup-DeformDETR_L_R50_2x.yaml +3 -0
  16. configs/BoxSup-DeformDETR_L_R50_4x.yaml +1 -0
  17. configs/BoxSup_OVCOCO_CLIP_R50_1x.yaml +1 -0
  18. configs/Detic_DeformDETR_LI_R50_4x_ft4x.yaml +22 -0
  19. configs/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml +43 -0
  20. configs/Detic_LI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml +43 -0
  21. configs/Detic_LI_CLIP_R5021k_640b64_4x_ft4x_max-size.yaml +27 -0
  22. configs/Detic_LI_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml +33 -0
  23. configs/Detic_LbaseCCcapimg_CLIP_R5021k_640b64_4x_ft4x_max-size.yaml +30 -0
  24. configs/Detic_LbaseCCimg_CLIP_R5021k_640b64_4x_ft4x_max-size.yaml +27 -0
  25. configs/Detic_LbaseI_CLIP_R5021k_640b64_4x_ft4x_max-size.yaml +27 -0
  26. configs/Detic_LbaseI_CLIP_R5021k_640b64_4x_ft4x_predicted.yaml +27 -0
  27. configs/Detic_LbaseI_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml +33 -0
  28. configs/Detic_OVCOCO_CLIP_R50_1x_caption.yaml +33 -0
  29. configs/Detic_OVCOCO_CLIP_R50_1x_max-size.yaml +30 -0
  30. configs/Detic_OVCOCO_CLIP_R50_1x_max-size_caption.yaml +35 -0
  31. datasets/README.md +207 -0
  32. datasets/metadata/Objects365_names_fix.csv +365 -0
  33. datasets/metadata/coco_clip_a+cname.npy +0 -0
  34. datasets/metadata/lvis_v1_clip_a+cname.npy +0 -0
  35. datasets/metadata/lvis_v1_train_cat_info.json +0 -0
  36. datasets/metadata/o365_clip_a+cnamefix.npy +0 -0
  37. datasets/metadata/oid_clip_a+cname.npy +0 -0
  38. demo.py +204 -0
  39. detic/__init__.py +19 -0
  40. detic/config.py +131 -0
  41. detic/custom_solver.py +78 -0
  42. detic/data/custom_build_augmentation.py +51 -0
  43. detic/data/custom_dataset_dataloader.py +331 -0
  44. detic/data/custom_dataset_mapper.py +280 -0
  45. detic/data/datasets/cc.py +23 -0
  46. detic/data/datasets/coco_zeroshot.py +121 -0
  47. detic/data/datasets/imagenet.py +41 -0
  48. detic/data/datasets/lvis_22k_categories.py +0 -0
  49. detic/data/datasets/lvis_v1.py +155 -0
  50. detic/data/datasets/objects365.py +770 -0
.gitignore ADDED
@@ -0,0 +1,62 @@
third_party/detectron2
./models
configs-experimental
experiments
# output dir
index.html
data/*
slurm/
slurm
slurm-output
slurm-output/
output
instant_test_output
inference_test_output


*.png
*.diff
*.jpg
!/projects/DensePose/doc/images/*.jpg

# compilation and distribution
__pycache__
_ext
*.pyc
*.pyd
*.so
*.dll
*.egg-info/
build/
dist/
wheels/

# pytorch/python/numpy formats
*.pth
*.pkl
*.ts
model_ts*.txt

# ipython/jupyter notebooks
*.ipynb
**/.ipynb_checkpoints/

# Editor temporaries
*.swn
*.swo
*.swp
*~

# editor settings
.idea
.vscode
_darcs

# project dirs
/detectron2/model_zoo/configs
/datasets/*
!/datasets/*.*
!/datasets/metadata
/projects/*/datasets
/models
/snippet
.gitmodules ADDED
@@ -0,0 +1,6 @@
[submodule "third_party/Deformable-DETR"]
    path = third_party/Deformable-DETR
    url = https://github.com/fundamentalvision/Deformable-DETR.git
[submodule "third_party/CenterNet2"]
    path = third_party/CenterNet2
    url = https://github.com/xingyizhou/CenterNet2.git
CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,5 @@
# Code of Conduct

Facebook has adopted a Code of Conduct that we expect project participants to adhere to.
Please read the [full text](https://code.fb.com/codeofconduct/)
so that you can understand what actions will and will not be tolerated.
CONTRIBUTING.md ADDED
@@ -0,0 +1,39 @@
# Contributing to Detic
We want to make contributing to this project as easy and transparent as
possible.

## Our Development Process
Minor changes and improvements will be released on an ongoing basis. Larger changes (e.g., changesets implementing a new paper) will be released on a more periodic basis.

## Pull Requests
We actively welcome your pull requests.

1. Fork the repo and create your branch from `main`.
2. If you've added code that should be tested, add tests.
3. If you've changed APIs, update the documentation.
4. Ensure the test suite passes.
5. Make sure your code lints.
6. If you haven't already, complete the Contributor License Agreement ("CLA").

## Contributor License Agreement ("CLA")
In order to accept your pull request, we need you to submit a CLA. You only need
to do this once to work on any of Facebook's open source projects.

Complete your CLA here: <https://code.facebook.com/cla>

## Issues
We use GitHub issues to track public bugs. Please ensure your description is
clear and has sufficient instructions to be able to reproduce the issue.

Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe
disclosure of security bugs. In those cases, please go through the process
outlined on that page and do not file a public issue.

## Coding Style
* 4 spaces for indentation rather than tabs
* 80 character line length
* PEP8 formatting following [Black](https://black.readthedocs.io/en/stable/)

## License
By contributing to Detic, you agree that your contributions will be licensed
under the LICENSE file in the root directory of this source tree.
LICENSE ADDED
@@ -0,0 +1,202 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
README.md ADDED
@@ -0,0 +1,107 @@
# Detecting Twenty-thousand Classes using Image-level Supervision

**Detic**: A **Det**ector with **i**mage **c**lasses that can use image-level labels to easily train detectors.

<p align="center"> <img src='docs/teaser.jpeg' align="center" height="300px"> </p>

> [**Detecting Twenty-thousand Classes using Image-level Supervision**](http://arxiv.org/abs/xxxx.xxxxx),
> Xingyi Zhou, Rohit Girdhar, Armand Joulin, Philipp Kr&auml;henb&uuml;hl, Ishan Misra,
> *arXiv technical report ([arXiv xxxx.xxxxx](http://arxiv.org/abs/xxxx.xxxxx))*


## Features

- Detects **any** class given class names (using [CLIP](https://github.com/openai/CLIP)).

- We train the detector on the ImageNet-21K dataset with 21K classes.

- Cross-dataset generalization to OpenImages and Objects365 **without finetuning**.

- State-of-the-art results on Open-vocabulary LVIS and Open-vocabulary COCO.

- Works for DETR-style detectors.


## Installation

See [installation instructions](docs/INSTALL.md).

## Demo

We use the default detectron2 [demo interface](https://github.com/facebookresearch/detectron2/blob/main/GETTING_STARTED.md).
For example, to run our [21K model](docs/MODEL_ZOO.md#cross-dataset-evaluation) on a [messy desk image](https://web.eecs.umich.edu/~fouhey/fun/desk/desk.jpg) (image credit [David Fouhey](https://web.eecs.umich.edu/~fouhey)) with the LVIS vocabulary, run

~~~
mkdir models
wget https://dl.fbaipublicfiles.com/detic/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.pth -O models/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.pth
wget https://web.eecs.umich.edu/~fouhey/fun/desk/desk.jpg
python demo.py --config-file configs/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml --input desk.jpg --output out.jpg --vocabulary lvis --opts MODEL.WEIGHTS models/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.pth
~~~
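
For programmatic use, below is a minimal sketch of what `demo.py` roughly sets up. The import paths (`centernet.config.add_centernet_config` from the CenterNet2 submodule, `detic.config.add_detic_config` from `detic/config.py`) and the registration side effect of `import detic` are assumptions about this repo's layout; `demo.py` additionally handles vocabulary selection and visualization on top of this.

~~~python
# Hedged sketch of programmatic inference with a Detic config; not demo.py itself.
import cv2
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

import detic  # assumed: registers CustomRCNN and other custom components
from centernet.config import add_centernet_config  # assumed: third_party/CenterNet2
from detic.config import add_detic_config          # assumed: detic/config.py

cfg = get_cfg()
add_centernet_config(cfg)  # adds the CENTERNET keys used by the configs
add_detic_config(cfg)      # adds Detic-specific keys (FP16, WITH_IMAGE_LABELS, ...)
cfg.merge_from_file("configs/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml")
cfg.MODEL.WEIGHTS = "models/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.pth"

predictor = DefaultPredictor(cfg)            # standard detectron2 predictor
outputs = predictor(cv2.imread("desk.jpg"))  # BGR array in, "instances" out
print(len(outputs["instances"]), "detections")
~~~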

If set up correctly, running `demo.py` should produce output like:

<p align="center"> <img src='docs/example_output_lvis.jpeg' align="center" height="450px"> </p>

The same model can run with other vocabularies (COCO, OpenImages, or Objects365), or with a **custom vocabulary**. For example:

~~~
python demo.py --config-file configs/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml --input desk.jpg --output out2.jpg --vocabulary custom --custom_vocabulary headphone,webcam,paper,coffe --confidence-threshold 0.3 --opts MODEL.WEIGHTS models/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.pth
~~~

The output should look like:

<p align="center"> <img src='docs/example_output_custom.jpeg' align="center" height="450px"> </p>

Note that `headphone`, `paper`, and `coffe` (typo intended) are **not** LVIS classes. Despite the misspelled class name, our detector still produces a reasonable detection for `coffe`.
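
Under the hood, a custom vocabulary works because the classification head is just a matrix of CLIP text embeddings, one row per class name, so swapping vocabularies never changes the detector weights. Below is a minimal sketch of that idea using the [CLIP](https://github.com/openai/CLIP) package; the repo's `tools/dump_clip_features.py` is the actual implementation, and the `"a {name}"` prompt format here is an assumption.

~~~python
# Build zero-shot classifier weights from arbitrary class names with CLIP.
import clip
import torch

model, _ = clip.load("ViT-B/32")
names = ["headphone", "webcam", "paper", "coffe"]  # misspelling kept on purpose
tokens = clip.tokenize(["a " + n for n in names])  # prompt format is an assumption
with torch.no_grad():
    emb = model.encode_text(tokens).float()
emb = emb / emb.norm(dim=1, keepdim=True)  # L2-normalized rows act as classifier weights
print(emb.shape)  # torch.Size([4, 512]) for ViT-B/32
~~~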

## Benchmark evaluation and training

Please first [prepare datasets](datasets/README.md), then check our [MODEL ZOO](docs/MODEL_ZOO.md) to reproduce the results in our paper. We highlight key results below:

- Open-vocabulary LVIS

|                | mask mAP | mask mAP_novel |
|----------------|----------|----------------|
| Box-Supervised | 30.2     | 16.4           |
| Detic          | 32.4     | 24.9           |

- Standard LVIS

|                | Detector/Backbone   | mask mAP | mask mAP_rare |
|----------------|---------------------|----------|---------------|
| Box-Supervised | CenterNet2-ResNet50 | 31.5     | 25.6          |
| Detic          | CenterNet2-ResNet50 | 33.2     | 29.7          |
| Box-Supervised | CenterNet2-SwinB    | 40.7     | 35.9          |
| Detic          | CenterNet2-SwinB    | 41.7     | 41.7          |

|                | Detector/Backbone       | box mAP | box mAP_rare |
|----------------|-------------------------|---------|--------------|
| Box-Supervised | DeformableDETR-ResNet50 | 31.7    | 21.4         |
| Detic          | DeformableDETR-ResNet50 | 32.5    | 26.2         |

- Cross-dataset generalization

|                | Backbone | Objects365 box mAP | OpenImages box mAP50 |
|----------------|----------|--------------------|----------------------|
| Box-Supervised | SwinB    | 19.1               | 46.2                 |
| Detic          | SwinB    | 21.4               | 55.2                 |


## License

The majority of Detic is licensed under the [Apache 2.0 license](LICENSE); portions of the project are available under separate license terms: SWIN-Transformer, CLIP, and the TensorFlow Object Detection API are licensed under the MIT license; UniDet is licensed under the Apache 2.0 license; and the LVIS API is licensed under a [custom license](https://github.com/lvis-dataset/lvis-api/blob/master/LICENSE). If you later add other third-party code, please keep this license information updated, and let us know if that component is licensed under something other than CC-BY-NC, MIT, or CC0.

## Ethical Considerations
Detic's wide range of detection capabilities may introduce challenges similar to those of many other visual recognition and open-set recognition methods.
As the user can define arbitrary detection classes, class design and semantics may impact the model output.

## Citation

If you find this project useful for your research, please use the following BibTeX entry.

    @inproceedings{zhou2021detecting,
      title={Detecting Twenty-thousand Classes using Image-level Supervision},
      author={Zhou, Xingyi and Girdhar, Rohit and Joulin, Armand and Kr{\"a}henb{\"u}hl, Philipp and Misra, Ishan},
      booktitle={arXiv preprint arXiv:xxxx.xxxxx},
      year={2021}
    }
configs/Base-C2_L_R5021k_640b64_4x.yaml ADDED
@@ -0,0 +1,82 @@
MODEL:
  META_ARCHITECTURE: "CustomRCNN"
  MASK_ON: True
  PROPOSAL_GENERATOR:
    NAME: "CenterNet"
  WEIGHTS: "models/resnet50_miil_21k.pkl"
  BACKBONE:
    NAME: build_p67_timm_fpn_backbone
  TIMM:
    BASE_NAME: resnet50_in21k
  FPN:
    IN_FEATURES: ["layer3", "layer4", "layer5"]
  PIXEL_MEAN: [123.675, 116.280, 103.530]
  PIXEL_STD: [58.395, 57.12, 57.375]
  ROI_HEADS:
    NAME: DeticCascadeROIHeads
    IN_FEATURES: ["p3", "p4", "p5"]
    IOU_THRESHOLDS: [0.6]
    NUM_CLASSES: 1203
    SCORE_THRESH_TEST: 0.02
    NMS_THRESH_TEST: 0.5
  ROI_BOX_CASCADE_HEAD:
    IOUS: [0.6, 0.7, 0.8]
  ROI_BOX_HEAD:
    NAME: "FastRCNNConvFCHead"
    NUM_FC: 2
    POOLER_RESOLUTION: 7
    CLS_AGNOSTIC_BBOX_REG: True
    MULT_PROPOSAL_SCORE: True
    USE_SIGMOID_CE: True
    USE_FED_LOSS: True
  ROI_MASK_HEAD:
    NAME: "MaskRCNNConvUpsampleHead"
    NUM_CONV: 4
    POOLER_RESOLUTION: 14
    CLS_AGNOSTIC_MASK: True
  CENTERNET:
    NUM_CLASSES: 1203
    REG_WEIGHT: 1.
    NOT_NORM_REG: True
    ONLY_PROPOSAL: True
    WITH_AGN_HM: True
    INFERENCE_TH: 0.0001
    PRE_NMS_TOPK_TRAIN: 4000
    POST_NMS_TOPK_TRAIN: 2000
    PRE_NMS_TOPK_TEST: 1000
    POST_NMS_TOPK_TEST: 256
    NMS_TH_TRAIN: 0.9
    NMS_TH_TEST: 0.9
    POS_WEIGHT: 0.5
    NEG_WEIGHT: 0.5
    IGNORE_HIGH_FP: 0.85
DATASETS:
  TRAIN: ("lvis_v1_train",)
  TEST: ("lvis_v1_val",)
DATALOADER:
  SAMPLER_TRAIN: "RepeatFactorTrainingSampler"
  REPEAT_THRESHOLD: 0.001
  NUM_WORKERS: 8
TEST:
  DETECTIONS_PER_IMAGE: 300
SOLVER:
  LR_SCHEDULER_NAME: "WarmupCosineLR"
  CHECKPOINT_PERIOD: 1000000000
  WARMUP_ITERS: 10000
  WARMUP_FACTOR: 0.0001
  USE_CUSTOM_SOLVER: True
  OPTIMIZER: "ADAMW"
  MAX_ITER: 90000
  IMS_PER_BATCH: 64
  BASE_LR: 0.0002
  CLIP_GRADIENTS:
    ENABLED: True
INPUT:
  FORMAT: RGB
  CUSTOM_AUG: EfficientDetResizeCrop
  TRAIN_SIZE: 640
OUTPUT_DIR: "./output/Detic/auto"
EVAL_PROPOSAL_AR: False
VERSION: 2
FP16: True
configs/Base-DeformDETR_L_R50_4x.yaml ADDED
@@ -0,0 +1,59 @@
MODEL:
  META_ARCHITECTURE: "DeformableDetr"
  WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl"
  PIXEL_MEAN: [123.675, 116.280, 103.530]
  PIXEL_STD: [58.395, 57.120, 57.375]
  MASK_ON: False
  RESNETS:
    DEPTH: 50
    STRIDE_IN_1X1: False
    OUT_FEATURES: ["res3", "res4", "res5"]
  DETR:
    CLS_WEIGHT: 2.0
    GIOU_WEIGHT: 2.0
    L1_WEIGHT: 5.0
    NUM_OBJECT_QUERIES: 300
    DIM_FEEDFORWARD: 1024
    WITH_BOX_REFINE: True
    TWO_STAGE: True
    NUM_CLASSES: 1203
    USE_FED_LOSS: True
DATASETS:
  TRAIN: ("lvis_v1_train",)
  TEST: ("lvis_v1_val",)
SOLVER:
  CHECKPOINT_PERIOD: 10000000
  USE_CUSTOM_SOLVER: True
  IMS_PER_BATCH: 32
  BASE_LR: 0.0002
  STEPS: (150000,)
  MAX_ITER: 180000
  WARMUP_FACTOR: 1.0
  WARMUP_ITERS: 10
  WEIGHT_DECAY: 0.0001
  OPTIMIZER: "ADAMW"
  BACKBONE_MULTIPLIER: 0.1
  CLIP_GRADIENTS:
    ENABLED: True
    CLIP_TYPE: "full_model"
    CLIP_VALUE: 0.01
    NORM_TYPE: 2.0
  CUSTOM_MULTIPLIER: 0.1
  CUSTOM_MULTIPLIER_NAME: ['reference_points', 'sampling_offsets']
INPUT:
  FORMAT: "RGB"
  MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800)
  CROP:
    ENABLED: True
    TYPE: "absolute_range"
    SIZE: (384, 600)
  CUSTOM_AUG: "DETR"
TEST:
  DETECTIONS_PER_IMAGE: 300
DATALOADER:
  FILTER_EMPTY_ANNOTATIONS: False
  NUM_WORKERS: 4
  SAMPLER_TRAIN: "RepeatFactorTrainingSampler"
  REPEAT_THRESHOLD: 0.001
OUTPUT_DIR: "output/Detic/auto"
VERSION: 2
configs/Base_OVCOCO_C4_1x.yaml ADDED
@@ -0,0 +1,31 @@
MODEL:
  META_ARCHITECTURE: "CustomRCNN"
  RPN:
    PRE_NMS_TOPK_TEST: 6000
    POST_NMS_TOPK_TEST: 1000
  ROI_HEADS:
    NAME: "CustomRes5ROIHeads"
  WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
  RESNETS:
    DEPTH: 50
  ROI_BOX_HEAD:
    CLS_AGNOSTIC_BBOX_REG: True
    USE_SIGMOID_CE: True
    USE_ZEROSHOT_CLS: True
    ZEROSHOT_WEIGHT_PATH: 'datasets/metadata/coco_clip_a+cname.npy'
    IGNORE_ZERO_CATS: True
    CAT_FREQ_PATH: 'datasets/coco/zero-shot/instances_train2017_seen_2_oriorder_cat_info.json'
DATASETS:
  TRAIN: ("coco_zeroshot_train_oriorder",)
  TEST: ("coco_generalized_zeroshot_val",)
SOLVER:
  IMS_PER_BATCH: 16
  BASE_LR: 0.02
  STEPS: (60000, 80000)
  MAX_ITER: 90000
  CHECKPOINT_PERIOD: 1000000000
INPUT:
  MIN_SIZE_TRAIN: (800,)
VERSION: 2
OUTPUT_DIR: output/Detic-COCO/auto
FP16: True
configs/BoxSup-C2_LCOCO_CLIP_SwinB_896b32_4x.yaml ADDED
@@ -0,0 +1,19 @@
_BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
MODEL:
  ROI_BOX_HEAD:
    USE_ZEROSHOT_CLS: True
  WEIGHTS: "models/swin_base_patch4_window7_224_22k.pkl"
  BACKBONE:
    NAME: build_swintransformer_fpn_backbone
  SWIN:
    SIZE: B-22k
  FPN:
    IN_FEATURES: ["swin1", "swin2", "swin3"]
SOLVER:
  MAX_ITER: 180000
  IMS_PER_BATCH: 32
  BASE_LR: 0.0001
INPUT:
  TRAIN_SIZE: 896
DATASETS:
  TRAIN: ("lvis_v1_train+coco",)
configs/BoxSup-C2_L_CLIP_R5021k_640b64_4x.yaml ADDED
@@ -0,0 +1,4 @@
_BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
MODEL:
  ROI_BOX_HEAD:
    USE_ZEROSHOT_CLS: True
configs/BoxSup-C2_L_CLIP_SwinB_896b32_4x.yaml ADDED
@@ -0,0 +1,17 @@
_BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
MODEL:
  ROI_BOX_HEAD:
    USE_ZEROSHOT_CLS: True
  WEIGHTS: "models/swin_base_patch4_window7_224_22k.pkl"
  BACKBONE:
    NAME: build_swintransformer_fpn_backbone
  SWIN:
    SIZE: B-22k
  FPN:
    IN_FEATURES: ["swin1", "swin2", "swin3"]
SOLVER:
  MAX_ITER: 180000
  IMS_PER_BATCH: 32
  BASE_LR: 0.0001
INPUT:
  TRAIN_SIZE: 896
configs/BoxSup-C2_Lbase_CLIP_R5021k_640b64_4x.yaml ADDED
@@ -0,0 +1,6 @@
_BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
MODEL:
  ROI_BOX_HEAD:
    USE_ZEROSHOT_CLS: True
DATASETS:
  TRAIN: ("lvis_v1_train_norare",)
configs/BoxSup-C2_Lbase_CLIP_SwinB_896b32_4x.yaml ADDED
@@ -0,0 +1,19 @@
_BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
MODEL:
  ROI_BOX_HEAD:
    USE_ZEROSHOT_CLS: True
  WEIGHTS: "models/swin_base_patch4_window7_224_22k.pkl"
  BACKBONE:
    NAME: build_swintransformer_fpn_backbone
  SWIN:
    SIZE: B-22k
  FPN:
    IN_FEATURES: ["swin1", "swin2", "swin3"]
SOLVER:
  MAX_ITER: 180000
  IMS_PER_BATCH: 32
  BASE_LR: 0.0001
INPUT:
  TRAIN_SIZE: 896
DATASETS:
  TRAIN: ("lvis_v1_train_norare",)
configs/BoxSup-DeformDETR_L_R50_2x.yaml ADDED
@@ -0,0 +1,3 @@
_BASE_: "Base-DeformDETR_L_R50_4x.yaml"
SOLVER:
  IMS_PER_BATCH: 16
configs/BoxSup-DeformDETR_L_R50_4x.yaml ADDED
@@ -0,0 +1 @@
_BASE_: "Base-DeformDETR_L_R50_4x.yaml"
configs/BoxSup_OVCOCO_CLIP_R50_1x.yaml ADDED
@@ -0,0 +1 @@
_BASE_: "Base_OVCOCO_C4_1x.yaml"
configs/Detic_DeformDETR_LI_R50_4x_ft4x.yaml ADDED
@@ -0,0 +1,22 @@
_BASE_: "Base-DeformDETR_L_R50_4x.yaml"
MODEL:
  WEIGHTS: "models/BoxSup-DeformDETR_L_R50_4x.pth"
INPUT:
  CUSTOM_AUG: ResizeShortestEdge
  MIN_SIZE_TRAIN_SAMPLING: range
  MIN_SIZE_TRAIN: [480, 800]
DATASETS:
  TRAIN: ("lvis_v1_train","imagenet_lvis_v1")
  TEST: ("lvis_v1_val",)
DATALOADER:
  SAMPLER_TRAIN: "MultiDatasetSampler"
  DATASET_RATIO: [1, 4]
  USE_DIFF_BS_SIZE: True
  DATASET_BS: [4, 16]
  USE_RFS: [True, False]
  DATASET_MIN_SIZES: [[480, 800], [240, 400]]
  DATASET_MAX_SIZES: [1333, 667]
  FILTER_EMPTY_ANNOTATIONS: False
  MULTI_DATASET_GROUPING: True
  DATASET_ANN: ['box', 'image']
WITH_IMAGE_LABELS: True
configs/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml ADDED
@@ -0,0 +1,43 @@
_BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
MODEL:
  WEIGHTS: "models/BoxSup-C2_LCOCO_CLIP_SwinB_896b32_4x.pth"
  DYNAMIC_CLASSIFIER: True
  ROI_BOX_HEAD:
    USE_ZEROSHOT_CLS: True
    IMAGE_LABEL_LOSS: 'max_size'
    ZEROSHOT_WEIGHT_PATH: 'datasets/metadata/lvis-21k_clip_a+cname.npy'
    USE_FED_LOSS: False # Federated loss is enabled when DYNAMIC_CLASSIFIER is on
  ROI_HEADS:
    NUM_CLASSES: 22047
  BACKBONE:
    NAME: build_swintransformer_fpn_backbone
  SWIN:
    SIZE: B-22k
  FPN:
    IN_FEATURES: ["swin1", "swin2", "swin3"]
  RESET_CLS_TESTS: True
  TEST_CLASSIFIERS: ("datasets/metadata/oid_clip_a+cname.npy","datasets/metadata/o365_clip_a+cnamefix.npy")
  TEST_NUM_CLASSES: [500, 365]
SOLVER:
  MAX_ITER: 180000
  IMS_PER_BATCH: 32
  BASE_LR: 0.0001
  WARMUP_ITERS: 1000
  WARMUP_FACTOR: 0.001
DATASETS:
  TRAIN: ("lvis_v1_train+coco","imagenet_lvis-22k")
  TEST: ('oid_val_expanded', 'objects365_v2_val')
DATALOADER:
  SAMPLER_TRAIN: "MultiDatasetSampler"
  DATASET_RATIO: [1, 16]
  USE_DIFF_BS_SIZE: True
  DATASET_BS: [4, 16]
  DATASET_INPUT_SIZE: [896, 448]
  USE_RFS: [True, False]
  DATASET_INPUT_SCALE: [[0.1, 2.0], [0.5, 1.5]]
  FILTER_EMPTY_ANNOTATIONS: False
  MULTI_DATASET_GROUPING: True
  DATASET_ANN: ['box', 'image']
  NUM_WORKERS: 4
  USE_TAR_DATASET: True
WITH_IMAGE_LABELS: True
configs/Detic_LI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml ADDED
@@ -0,0 +1,43 @@
_BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
MODEL:
  WEIGHTS: "models/BoxSup-C2_L_CLIP_SwinB_896b32_4x.pth"
  DYNAMIC_CLASSIFIER: True
  ROI_BOX_HEAD:
    USE_ZEROSHOT_CLS: True
    IMAGE_LABEL_LOSS: 'max_size'
    ZEROSHOT_WEIGHT_PATH: 'datasets/metadata/lvis-21k_clip_a+cname.npy'
    USE_FED_LOSS: False # Federated loss is enabled when DYNAMIC_CLASSIFIER is on
  ROI_HEADS:
    NUM_CLASSES: 22047
  BACKBONE:
    NAME: build_swintransformer_fpn_backbone
  SWIN:
    SIZE: B-22k
  FPN:
    IN_FEATURES: ["swin1", "swin2", "swin3"]
  RESET_CLS_TESTS: True
  TEST_CLASSIFIERS: ("datasets/metadata/oid_clip_a+cname.npy","datasets/metadata/o365_clip_a+cnamefix.npy")
  TEST_NUM_CLASSES: [500, 365]
SOLVER:
  MAX_ITER: 180000
  IMS_PER_BATCH: 32
  BASE_LR: 0.0001
  WARMUP_ITERS: 1000
  WARMUP_FACTOR: 0.001
DATASETS:
  TRAIN: ("lvis_v1_train","imagenet_lvis-22k")
  TEST: ('oid_val_expanded', 'objects365_v2_val')
DATALOADER:
  SAMPLER_TRAIN: "MultiDatasetSampler"
  DATASET_RATIO: [1, 16]
  USE_DIFF_BS_SIZE: True
  DATASET_BS: [4, 16]
  DATASET_INPUT_SIZE: [896, 448]
  USE_RFS: [True, False]
  DATASET_INPUT_SCALE: [[0.1, 2.0], [0.5, 1.5]]
  FILTER_EMPTY_ANNOTATIONS: False
  MULTI_DATASET_GROUPING: True
  DATASET_ANN: ['box', 'image']
  NUM_WORKERS: 4
  USE_TAR_DATASET: True
WITH_IMAGE_LABELS: True
configs/Detic_LI_CLIP_R5021k_640b64_4x_ft4x_max-size.yaml ADDED
@@ -0,0 +1,27 @@
_BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
MODEL:
  ROI_BOX_HEAD:
    USE_ZEROSHOT_CLS: True
    IMAGE_LABEL_LOSS: 'max_size'
  WEIGHTS: "models/BoxSup-C2_L_CLIP_R5021k_640b64_4x.pth"
SOLVER:
  MAX_ITER: 90000
  IMS_PER_BATCH: 64
  BASE_LR: 0.0002
  WARMUP_ITERS: 1000
  WARMUP_FACTOR: 0.001
DATASETS:
  TRAIN: ("lvis_v1_train","imagenet_lvis_v1")
DATALOADER:
  SAMPLER_TRAIN: "MultiDatasetSampler"
  DATASET_RATIO: [1, 4]
  USE_DIFF_BS_SIZE: True
  DATASET_BS: [8, 32]
  DATASET_INPUT_SIZE: [640, 320]
  USE_RFS: [True, False]
  DATASET_INPUT_SCALE: [[0.1, 2.0], [0.5, 1.5]]
  FILTER_EMPTY_ANNOTATIONS: False
  MULTI_DATASET_GROUPING: True
  DATASET_ANN: ['box', 'image']
  NUM_WORKERS: 8
WITH_IMAGE_LABELS: True
configs/Detic_LI_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml ADDED
@@ -0,0 +1,33 @@
_BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
MODEL:
  ROI_BOX_HEAD:
    USE_ZEROSHOT_CLS: True
    IMAGE_LABEL_LOSS: 'max_size'
  BACKBONE:
    NAME: build_swintransformer_fpn_backbone
  SWIN:
    SIZE: B-22k
  FPN:
    IN_FEATURES: ["swin1", "swin2", "swin3"]
  WEIGHTS: "models/BoxSup-C2_L_CLIP_SwinB_896b32_4x.pth"
SOLVER:
  MAX_ITER: 180000
  IMS_PER_BATCH: 32
  BASE_LR: 0.0001
  WARMUP_ITERS: 1000
  WARMUP_FACTOR: 0.001
DATASETS:
  TRAIN: ("lvis_v1_train","imagenet_lvis_v1")
DATALOADER:
  SAMPLER_TRAIN: "MultiDatasetSampler"
  DATASET_RATIO: [1, 4]
  USE_DIFF_BS_SIZE: True
  DATASET_BS: [4, 16]
  DATASET_INPUT_SIZE: [896, 448]
  USE_RFS: [True, False]
  DATASET_INPUT_SCALE: [[0.1, 2.0], [0.5, 1.5]]
  FILTER_EMPTY_ANNOTATIONS: False
  MULTI_DATASET_GROUPING: True
  DATASET_ANN: ['box', 'image']
  NUM_WORKERS: 8
WITH_IMAGE_LABELS: True
configs/Detic_LbaseCCcapimg_CLIP_R5021k_640b64_4x_ft4x_max-size.yaml ADDED
@@ -0,0 +1,30 @@
_BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
MODEL:
  WITH_CAPTION: True
  SYNC_CAPTION_BATCH: True
  ROI_BOX_HEAD:
    ADD_IMAGE_BOX: True # caption loss is added to the image-box
    USE_ZEROSHOT_CLS: True
    IMAGE_LABEL_LOSS: 'max_size'
  WEIGHTS: "models/BoxSup-C2_Lbase_CLIP_R5021k_640b64_4x.pth"
SOLVER:
  MAX_ITER: 90000
  IMS_PER_BATCH: 64
  BASE_LR: 0.0002
  WARMUP_ITERS: 1000
  WARMUP_FACTOR: 0.001
DATASETS:
  TRAIN: ("lvis_v1_train_norare","cc3m_v1_train_tags")
DATALOADER:
  SAMPLER_TRAIN: "MultiDatasetSampler"
  DATASET_RATIO: [1, 4]
  USE_DIFF_BS_SIZE: True
  DATASET_BS: [8, 32]
  DATASET_INPUT_SIZE: [640, 320]
  USE_RFS: [True, False]
  DATASET_INPUT_SCALE: [[0.1, 2.0], [0.5, 1.5]]
  FILTER_EMPTY_ANNOTATIONS: False
  MULTI_DATASET_GROUPING: True
  DATASET_ANN: ['box', 'captiontag']
  NUM_WORKERS: 8
WITH_IMAGE_LABELS: True
configs/Detic_LbaseCCimg_CLIP_R5021k_640b64_4x_ft4x_max-size.yaml ADDED
@@ -0,0 +1,27 @@
_BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
MODEL:
  ROI_BOX_HEAD:
    USE_ZEROSHOT_CLS: True
    IMAGE_LABEL_LOSS: 'max_size'
  WEIGHTS: "models/BoxSup-C2_Lbase_CLIP_R5021k_640b64_4x.pth"
SOLVER:
  MAX_ITER: 90000
  IMS_PER_BATCH: 64
  BASE_LR: 0.0002
  WARMUP_ITERS: 1000
  WARMUP_FACTOR: 0.001
DATASETS:
  TRAIN: ("lvis_v1_train_norare","cc3m_v1_train_tags")
DATALOADER:
  SAMPLER_TRAIN: "MultiDatasetSampler"
  DATASET_RATIO: [1, 4]
  USE_DIFF_BS_SIZE: True
  DATASET_BS: [8, 32]
  DATASET_INPUT_SIZE: [640, 320]
  USE_RFS: [True, False]
  DATASET_INPUT_SCALE: [[0.1, 2.0], [0.5, 1.5]]
  FILTER_EMPTY_ANNOTATIONS: False
  MULTI_DATASET_GROUPING: True
  DATASET_ANN: ['box', 'image']
  NUM_WORKERS: 8
WITH_IMAGE_LABELS: True
configs/Detic_LbaseI_CLIP_R5021k_640b64_4x_ft4x_max-size.yaml ADDED
@@ -0,0 +1,27 @@
_BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
MODEL:
  ROI_BOX_HEAD:
    USE_ZEROSHOT_CLS: True
    IMAGE_LABEL_LOSS: 'max_size'
  WEIGHTS: "models/BoxSup-C2_Lbase_CLIP_R5021k_640b64_4x.pth"
SOLVER:
  MAX_ITER: 90000
  IMS_PER_BATCH: 64
  BASE_LR: 0.0002
  WARMUP_ITERS: 1000
  WARMUP_FACTOR: 0.001
DATASETS:
  TRAIN: ("lvis_v1_train_norare","imagenet_lvis_v1")
DATALOADER:
  SAMPLER_TRAIN: "MultiDatasetSampler"
  DATASET_RATIO: [1, 4]
  USE_DIFF_BS_SIZE: True
  DATASET_BS: [8, 32]
  DATASET_INPUT_SIZE: [640, 320]
  USE_RFS: [True, False]
  DATASET_INPUT_SCALE: [[0.1, 2.0], [0.5, 1.5]]
  FILTER_EMPTY_ANNOTATIONS: False
  MULTI_DATASET_GROUPING: True
  DATASET_ANN: ['box', 'image']
  NUM_WORKERS: 8
WITH_IMAGE_LABELS: True
configs/Detic_LbaseI_CLIP_R5021k_640b64_4x_ft4x_predicted.yaml ADDED
@@ -0,0 +1,27 @@
_BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
MODEL:
  ROI_BOX_HEAD:
    USE_ZEROSHOT_CLS: True
    IMAGE_LABEL_LOSS: 'max_score'
  WEIGHTS: "models/BoxSup-C2_Lbase_CLIP_R5021k_640b64_4x.pth"
SOLVER:
  MAX_ITER: 90000
  IMS_PER_BATCH: 64
  BASE_LR: 0.0002
  WARMUP_ITERS: 1000
  WARMUP_FACTOR: 0.001
DATASETS:
  TRAIN: ("lvis_v1_train_norare","imagenet_lvis_v1")
DATALOADER:
  SAMPLER_TRAIN: "MultiDatasetSampler"
  DATASET_RATIO: [1, 4]
  USE_DIFF_BS_SIZE: True
  DATASET_BS: [8, 32]
  DATASET_INPUT_SIZE: [640, 320]
  USE_RFS: [True, False]
  DATASET_INPUT_SCALE: [[0.1, 2.0], [0.5, 1.5]]
  FILTER_EMPTY_ANNOTATIONS: False
  MULTI_DATASET_GROUPING: True
  DATASET_ANN: ['box', 'image']
  NUM_WORKERS: 8
WITH_IMAGE_LABELS: True
configs/Detic_LbaseI_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml ADDED
@@ -0,0 +1,33 @@
_BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
MODEL:
  ROI_BOX_HEAD:
    USE_ZEROSHOT_CLS: True
    IMAGE_LABEL_LOSS: 'max_size'
  BACKBONE:
    NAME: build_swintransformer_fpn_backbone
  SWIN:
    SIZE: B-22k
  FPN:
    IN_FEATURES: ["swin1", "swin2", "swin3"]
  WEIGHTS: "models/BoxSup-C2_Lbase_CLIP_SwinB_896b32_4x.pth"
SOLVER:
  MAX_ITER: 180000
  IMS_PER_BATCH: 32
  BASE_LR: 0.0001
  WARMUP_ITERS: 1000
  WARMUP_FACTOR: 0.001
DATASETS:
  TRAIN: ("lvis_v1_train_norare","imagenet_lvis_v1")
DATALOADER:
  SAMPLER_TRAIN: "MultiDatasetSampler"
  DATASET_RATIO: [1, 4]
  USE_DIFF_BS_SIZE: True
  DATASET_BS: [4, 16]
  DATASET_INPUT_SIZE: [896, 448]
  USE_RFS: [True, False]
  DATASET_INPUT_SCALE: [[0.1, 2.0], [0.5, 1.5]]
  FILTER_EMPTY_ANNOTATIONS: False
  MULTI_DATASET_GROUPING: True
  DATASET_ANN: ['box', 'image']
  NUM_WORKERS: 8
WITH_IMAGE_LABELS: True
configs/Detic_OVCOCO_CLIP_R50_1x_caption.yaml ADDED
@@ -0,0 +1,33 @@
_BASE_: "Base_OVCOCO_C4_1x.yaml"
MODEL:
  WEIGHTS: "models/BoxSup_OVCOCO_CLIP_R50_1x.pth"
  WITH_CAPTION: True
  SYNC_CAPTION_BATCH: True
  ROI_BOX_HEAD:
    WS_NUM_PROPS: 1
    ADD_IMAGE_BOX: True
    NEG_CAP_WEIGHT: 1.0
SOLVER:
  IMS_PER_BATCH: 16
  BASE_LR: 0.02
  STEPS: (60000, 80000)
  MAX_ITER: 90000
DATASETS:
  TRAIN: ("coco_zeroshot_train_oriorder", "coco_caption_train_tags")
INPUT:
  CUSTOM_AUG: ResizeShortestEdge
  MIN_SIZE_TRAIN_SAMPLING: range
  MIN_SIZE_TRAIN: (800, 800)
DATALOADER:
  SAMPLER_TRAIN: "MultiDatasetSampler"
  DATASET_RATIO: [1, 4]
  USE_DIFF_BS_SIZE: True
  DATASET_BS: [2, 8]
  USE_RFS: [False, False]
  DATASET_MIN_SIZES: [[800, 800], [400, 400]]
  DATASET_MAX_SIZES: [1333, 667]
  FILTER_EMPTY_ANNOTATIONS: False
  MULTI_DATASET_GROUPING: True
  DATASET_ANN: ['box', 'caption']
  NUM_WORKERS: 8
WITH_IMAGE_LABELS: True
configs/Detic_OVCOCO_CLIP_R50_1x_max-size.yaml ADDED
@@ -0,0 +1,30 @@
_BASE_: "Base_OVCOCO_C4_1x.yaml"
MODEL:
  WEIGHTS: "models/BoxSup_OVCOCO_CLIP_R50_1x.pth"
  ROI_BOX_HEAD:
    WS_NUM_PROPS: 32
    IMAGE_LABEL_LOSS: 'max_size'
SOLVER:
  IMS_PER_BATCH: 16
  BASE_LR: 0.02
  STEPS: (60000, 80000)
  MAX_ITER: 90000
DATASETS:
  TRAIN: ("coco_zeroshot_train_oriorder", "coco_caption_train_tags")
INPUT:
  CUSTOM_AUG: ResizeShortestEdge
  MIN_SIZE_TRAIN_SAMPLING: range
  MIN_SIZE_TRAIN: (800, 800)
DATALOADER:
  SAMPLER_TRAIN: "MultiDatasetSampler"
  DATASET_RATIO: [1, 4]
  USE_DIFF_BS_SIZE: True
  DATASET_BS: [2, 8]
  USE_RFS: [False, False]
  DATASET_MIN_SIZES: [[800, 800], [400, 400]]
  DATASET_MAX_SIZES: [1333, 667]
  FILTER_EMPTY_ANNOTATIONS: False
  MULTI_DATASET_GROUPING: True
  DATASET_ANN: ['box', 'image']
  NUM_WORKERS: 8
WITH_IMAGE_LABELS: True
configs/Detic_OVCOCO_CLIP_R50_1x_max-size_caption.yaml ADDED
@@ -0,0 +1,35 @@
_BASE_: "Base_OVCOCO_C4_1x.yaml"
MODEL:
  WEIGHTS: "models/BoxSup_OVCOCO_CLIP_R50_1x.pth"
  WITH_CAPTION: True
  SYNC_CAPTION_BATCH: True
  ROI_BOX_HEAD:
    WS_NUM_PROPS: 32
    ADD_IMAGE_BOX: True # caption loss is added to the image-box
    IMAGE_LABEL_LOSS: 'max_size'
    NEG_CAP_WEIGHT: 1.0
SOLVER:
  IMS_PER_BATCH: 16
  BASE_LR: 0.02
  STEPS: (60000, 80000)
  MAX_ITER: 90000
DATASETS:
  TRAIN: ("coco_zeroshot_train_oriorder", "coco_caption_train_tags")
INPUT:
  CUSTOM_AUG: ResizeShortestEdge
  MIN_SIZE_TRAIN_SAMPLING: range
  MIN_SIZE_TRAIN: (800, 800)
DATALOADER:
  SAMPLER_TRAIN: "MultiDatasetSampler"
  DATASET_RATIO: [1, 4]
  USE_DIFF_BS_SIZE: True
  DATASET_BS: [2, 8]
  USE_RFS: [False, False]
  DATASET_MIN_SIZES: [[800, 800], [400, 400]]
  DATASET_MAX_SIZES: [1333, 667]
  FILTER_EMPTY_ANNOTATIONS: False
  MULTI_DATASET_GROUPING: True
  DATASET_ANN: ['box', 'captiontag']
  NUM_WORKERS: 8
WITH_IMAGE_LABELS: True
datasets/README.md ADDED
@@ -0,0 +1,207 @@
# Prepare datasets for Detic

The basic training of our model uses [LVIS](https://www.lvisdataset.org/) (which uses [COCO](https://cocodataset.org/) images) and [ImageNet-21K](https://www.image-net.org/download.php).
Some models are trained on [Conceptual Captions (CC3M)](https://ai.google.com/research/ConceptualCaptions/).
Optionally, we use [Objects365](https://www.objects365.org/) and [OpenImages (Challenge 2019 version)](https://storage.googleapis.com/openimages/web/challenge2019.html) for cross-dataset evaluation.
Before processing, please download the (selected) datasets from the official websites and place or symlink them under `$Detic_ROOT/datasets/`.

```
$Detic_ROOT/datasets/
  metadata/
  lvis/
  coco/
  imagenet/
  cc3m/
  objects365/
  oid/
```
`metadata/` is our preprocessed meta-data (included in the repo). See the [Metadata](#metadata) section below for details.
Please follow the instructions below to pre-process the individual datasets.

### COCO and LVIS

First, download the COCO and LVIS data and place them as follows:

```
lvis/
  lvis_v1_train.json
  lvis_v1_val.json
coco/
  train2017/
  val2017/
  annotations/
    captions_train2017.json
    instances_train2017.json
    instances_val2017.json
```

Next, prepare the open-vocabulary LVIS training set using

```
python tools/remove_lvis_rare.py --ann datasets/lvis/lvis_v1_train.json
```

This will generate `datasets/lvis/lvis_v1_train_norare.json`.

### ImageNet-21K

The ImageNet-21K folder should look like:
```
imagenet/
  ImageNet-21K/
    n01593028.tar
    n01593282.tar
    ...
```

We first unzip the classes that overlap with LVIS (we work directly with the .tar files for the remaining classes) and convert them into the LVIS annotation format.

~~~
mkdir imagenet/annotations
python tools/unzip_imagenet_lvis.py --dst_path datasets/imagenet/ImageNet-LVIS
python tools/create_imagenetlvis_json.py --imagenet_path datasets/imagenet/ImageNet-LVIS --out_path datasets/imagenet/annotations/imagenet_lvis_image_info.json
~~~
This creates `datasets/imagenet/annotations/imagenet_lvis_image_info.json`.
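
For orientation, the sketch below shows the rough shape of an entry in this file. The field names, in particular `pos_category_ids` for the image-level labels, are our reading of `detic/data/datasets/imagenet.py` and should be treated as illustrative rather than authoritative.

~~~python
# Illustrative (assumed) structure of imagenet_lvis_image_info.json.
image_info = {
    "images": [
        {
            "id": 0,
            "file_name": "n01593028_1234.JPEG",  # hypothetical file name
            "pos_category_ids": [118],           # LVIS category ids present (assumed field)
        },
    ],
    "categories": [
        {"id": 118, "name": "bird"},             # LVIS-style category entry
    ],
}
~~~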

[Optional] To train with all the 21K classes, run

~~~
python tools/get_imagenet_21k_full_tar_json.py
python tools/create_lvis_21k.py
~~~
This creates `datasets/imagenet/annotations/imagenet-21k_image_info_lvis-21k.json` and `datasets/lvis/lvis_v1_train_lvis-21k.json` (with the combined LVIS and ImageNet-21K classes in `categories`).

[Optional] To train on combined LVIS and COCO, run

~~~
python tools/merge_lvis_coco.py
~~~
This creates `datasets/lvis/lvis_v1_train+coco_mask.json`.

### Conceptual Captions

Download the dataset from [this page](https://ai.google.com/research/ConceptualCaptions/download) and place it as:
```
cc3m/
  GCC-training.tsv
```

Run the following commands to download the images and convert the annotations to the LVIS format (note: downloading the images takes a long time).

~~~
python tools/download_cc.py --ann datasets/cc3m/GCC-training.tsv --save_image_path datasets/cc3m/training/ --out_path datasets/cc3m/train_image_info.json
python tools/get_cc_tags.py
~~~

This creates `datasets/cc3m/train_image_info_tags.json`.
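
Conceptually, the tagging step turns each caption into LVIS image-level labels by matching class names (LVIS provides a `synonyms` list per category) against the caption text. Below is a simplified sketch of that idea; the matching rules in the actual `tools/get_cc_tags.py` may well differ.

~~~python
# Naive caption-to-tags matching (illustrative only).
def caption_to_tags(caption, categories):
    """categories: LVIS-style dicts with 'id' and 'synonyms'."""
    text = " " + caption.lower() + " "
    tags = []
    for cat in categories:
        if any(" " + s.replace("_", " ") + " " in text for s in cat["synonyms"]):
            tags.append(cat["id"])
    return tags

cats = [{"id": 1, "synonyms": ["dog"]}, {"id": 2, "synonyms": ["cat"]}]
print(caption_to_tags("A dog runs on the beach", cats))  # -> [1]
~~~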

### Objects365
Download Objects365 (v2) from the website. We only need the validation set in this project:
```
objects365/
  annotations/
    zhiyuan_objv2_val.json
  val/
    images/
      v1/
        patch0/
        ...
        patch15/
      v2/
        patch16/
        ...
        patch49/
```

The original annotations contain typos in some class names; we first fix them for our later use of language embeddings.

```
python tools/fix_o365_names.py --ann datasets/objects365/annotations/zhiyuan_objv2_val.json
```
This creates `datasets/objects365/zhiyuan_objv2_val_fixname.json`.

To train on Objects365, download the training images and use the command above. Note that some images in the training annotations do not exist; we filter them out using
~~~
python tools/fix_0365_path.py
~~~
This creates `datasets/objects365/zhiyuan_objv2_train_fixname_fixmiss.json`.

### OpenImages

We followed the instructions in [UniDet](https://github.com/xingyizhou/UniDet/blob/master/projects/UniDet/unidet_docs/DATASETS.md#openimages) to convert the metadata for OpenImages.

The converted folder should look like

```
oid/
  annotations/
    oid_challenge_2019_train_bbox.json
    oid_challenge_2019_val_expanded.json
  images/
    0/
    1/
    2/
    ...
```

### Open-vocabulary COCO

We first follow [OVR-CNN](https://github.com/alirezazareian/ovr-cnn/blob/master/ipynb/003.ipynb) to create the open-vocabulary COCO split. The converted files should look like

```
coco/
  zero-shot/
    instances_train2017_seen_2.json
    instances_val2017_all_2.json
```

We further pre-process the annotation format for easier evaluation:

```
python tools/get_coco_zeroshot_oriorder.py --data_path datasets/coco/zero-shot/instances_train2017_seen_2.json
python tools/get_coco_zeroshot_oriorder.py --data_path datasets/coco/zero-shot/instances_val2017_all_2.json
```

Next, we preprocess the COCO caption data:

```
python tools/get_cc_tags.py --cc_ann datasets/coco/annotations/captions_train2017.json --out_path datasets/coco/captions_train2017_tags_allcaps.json --allcaps --convert_caption
```
This creates `datasets/coco/captions_train2017_tags_allcaps.json`.

### Metadata

```
metadata/
  lvis_v1_train_cat_info.json
  coco_clip_a+cname.npy
  lvis_v1_clip_a+cname.npy
  o365_clip_a+cnamefix.npy
  oid_clip_a+cname.npy
  imagenet_lvis_wnid.txt
  Objects365_names_fix.csv
```

`lvis_v1_train_cat_info.json` is used by the Federated loss.
It is created by
~~~
python tools/get_lvis_cat_info.py --ann datasets/lvis/lvis_v1_train.json
~~~
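
The statistics in that file boil down to per-category annotation counts over the training json. Below is a short sketch of the computation; the exact output fields of `tools/get_lvis_cat_info.py` are assumptions here.

~~~python
# Count images and instances per category in an LVIS-style annotation file.
import json
from collections import defaultdict

data = json.load(open("datasets/lvis/lvis_v1_train.json"))
image_ids = defaultdict(set)
instances = defaultdict(int)
for ann in data["annotations"]:
    image_ids[ann["category_id"]].add(ann["image_id"])
    instances[ann["category_id"]] += 1

cat_info = []
for cat in data["categories"]:
    cat = dict(cat, image_count=len(image_ids[cat["id"]]),
               instance_count=instances[cat["id"]])  # field names are assumptions
    cat_info.append(cat)

json.dump(cat_info, open("datasets/metadata/lvis_v1_train_cat_info.json", "w"))
~~~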

`*_clip_a+cname.npy` are the pre-computed CLIP embeddings for each dataset.
They are created by (taking LVIS as an example)
~~~
python tools/dump_clip_features.py --ann datasets/lvis/lvis_v1_val.json --out_path metadata/lvis_v1_clip_a+cname.npy
~~~
Note that we do not include the 21K class embeddings due to their large file size.
To create them, run
~~~
python tools/dump_clip_features.py --ann datasets/lvis/lvis_v1_val_lvis-21k.json --out_path datasets/metadata/lvis-21k_clip_a+cname.npy
~~~

`imagenet_lvis_wnid.txt` is the list of matched classes between ImageNet-21K and LVIS.

`Objects365_names_fix.csv` is our manual fix of the Objects365 class names.
datasets/metadata/Objects365_names_fix.csv ADDED
@@ -0,0 +1,365 @@
1,Person,Person
2,Sneakers,Sneakers
3,Chair,Chair
4,Other Shoes,Other Shoes
5,Hat,Hat
6,Car,Car
7,Lamp,Lamp
8,Glasses,Glasses
9,Bottle,Bottle
10,Desk,Desk
11,Cup,Cup
12,Street Lights,Street Lights
13,Cabinet/shelf,Cabinet/shelf
14,Handbag/Satchel,Handbag/Satchel
15,Bracelet,Bracelet
16,Plate,Plate
17,Picture/Frame,Picture/Frame
18,Helmet,Helmet
19,Book,Book
20,Gloves,Gloves
21,Storage box,Storage box
22,Boat,Boat
23,Leather Shoes,Leather Shoes
24,Flower,Flower
25,Bench,Bench
26,Potted Plant,Potted Plant
27,Bowl/Basin,Bowl/Basin
28,Flag,Flag
29,Pillow,Pillow
30,Boots,Boots
31,Vase,Vase
32,Microphone,Microphone
33,Necklace,Necklace
34,Ring,Ring
35,SUV,SUV
36,Wine Glass,Wine Glass
37,Belt,Belt
38,Moniter/TV,Monitor/TV
39,Backpack,Backpack
40,Umbrella,Umbrella
41,Traffic Light,Traffic Light
42,Speaker,Speaker
43,Watch,Watch
44,Tie,Tie
45,Trash bin Can,Trash bin Can
46,Slippers,Slippers
47,Bicycle,Bicycle
48,Stool,Stool
49,Barrel/bucket,Barrel/bucket
50,Van,Van
51,Couch,Couch
52,Sandals,Sandals
53,Bakset,Basket
54,Drum,Drum
55,Pen/Pencil,Pen/Pencil
56,Bus,Bus
57,Wild Bird,Wild Bird
58,High Heels,High Heels
59,Motorcycle,Motorcycle
60,Guitar,Guitar
61,Carpet,Carpet
62,Cell Phone,Cell Phone
63,Bread,Bread
64,Camera,Camera
65,Canned,Canned
66,Truck,Truck
67,Traffic cone,Traffic cone
68,Cymbal,Cymbal
69,Lifesaver,Lifesaver
70,Towel,Towel
71,Stuffed Toy,Stuffed Toy
72,Candle,Candle
73,Sailboat,Sailboat
74,Laptop,Laptop
75,Awning,Awning
76,Bed,Bed
77,Faucet,Faucet
78,Tent,Tent
79,Horse,Horse
80,Mirror,Mirror
81,Power outlet,Power outlet
82,Sink,Sink
83,Apple,Apple
84,Air Conditioner,Air Conditioner
85,Knife,Knife
86,Hockey Stick,Hockey Stick
87,Paddle,Paddle
88,Pickup Truck,Pickup Truck
89,Fork,Fork
90,Traffic Sign,Traffic Sign
91,Ballon,Ballon
92,Tripod,Tripod
93,Dog,Dog
94,Spoon,Spoon
95,Clock,Clock
96,Pot,Pot
97,Cow,Cow
98,Cake,Cake
99,Dinning Table,Dining Table
100,Sheep,Sheep
101,Hanger,Hanger
102,Blackboard/Whiteboard,Blackboard/Whiteboard
103,Napkin,Napkin
104,Other Fish,Other Fish
105,Orange/Tangerine,Orange/Tangerine
106,Toiletry,Toiletry
107,Keyboard,Keyboard
108,Tomato,Tomato
109,Lantern,Lantern
110,Machinery Vehicle,Machinery Vehicle
111,Fan,Fan
112,Green Vegetables,Green Vegetables
113,Banana,Banana
114,Baseball Glove,Baseball Glove
115,Airplane,Airplane
116,Mouse,Mouse
117,Train,Train
118,Pumpkin,Pumpkin
119,Soccer,Soccer
120,Skiboard,Skiboard
121,Luggage,Luggage
122,Nightstand,Nightstand
123,Tea pot,Teapot
124,Telephone,Telephone
125,Trolley,Trolley
126,Head Phone,Head Phone
127,Sports Car,Sports Car
128,Stop Sign,Stop Sign
129,Dessert,Dessert
130,Scooter,Scooter
131,Stroller,Stroller
132,Crane,Crane
133,Remote,Remote
134,Refrigerator,Refrigerator
135,Oven,Oven
136,Lemon,Lemon
137,Duck,Duck
138,Baseball Bat,Baseball Bat
139,Surveillance Camera,Surveillance Camera
140,Cat,Cat
141,Jug,Jug
142,Broccoli,Broccoli
143,Piano,Piano
144,Pizza,Pizza
145,Elephant,Elephant
146,Skateboard,Skateboard
147,Surfboard,Surfboard
148,Gun,Gun
149,Skating and Skiing shoes,Skating and Skiing shoes
150,Gas stove,Gas stove
151,Donut,Donut
152,Bow Tie,Bow Tie
153,Carrot,Carrot
154,Toilet,Toilet
155,Kite,Kite
156,Strawberry,Strawberry
157,Other Balls,Other Balls
158,Shovel,Shovel
159,Pepper,Pepper
160,Computer Box,Computer Box
161,Toilet Paper,Toilet Paper
162,Cleaning Products,Cleaning Products
163,Chopsticks,Chopsticks
164,Microwave,Microwave
165,Pigeon,Pigeon
166,Baseball,Baseball
167,Cutting/chopping Board,Cutting/chopping Board
168,Coffee Table,Coffee Table
169,Side Table,Side Table
170,Scissors,Scissors
171,Marker,Marker
172,Pie,Pie
173,Ladder,Ladder
174,Snowboard,Snowboard
175,Cookies,Cookies
176,Radiator,Radiator
177,Fire Hydrant,Fire Hydrant
178,Basketball,Basketball
179,Zebra,Zebra
180,Grape,Grape
181,Giraffe,Giraffe
182,Potato,Potato
183,Sausage,Sausage
184,Tricycle,Tricycle
185,Violin,Violin
186,Egg,Egg
187,Fire Extinguisher,Fire Extinguisher
188,Candy,Candy
189,Fire Truck,Fire Truck
190,Billards,Billards
191,Converter,Converter
192,Bathtub,Bathtub
193,Wheelchair,Wheelchair
194,Golf Club,Golf Club
195,Briefcase,Briefcase
196,Cucumber,Cucumber
197,Cigar/Cigarette,Cigar/Cigarette
198,Paint Brush,Paint Brush
199,Pear,Pear
200,Heavy Truck,Heavy Truck
201,Hamburger,Hamburger
202,Extractor,Extractor
203,Extention Cord,Extension Cord
204,Tong,Tong
205,Tennis Racket,Tennis Racket
206,Folder,Folder
207,American Football,American Football
208,earphone,earphone
209,Mask,Mask
210,Kettle,Kettle
211,Tennis,Tennis
212,Ship,Ship
213,Swing,Swing
214,Coffee Machine,Coffee Machine
215,Slide,Slide
216,Carriage,Carriage
217,Onion,Onion
218,Green beans,Green beans
219,Projector,Projector
220,Frisbee,Frisbee
221,Washing Machine/Drying Machine,Washing Machine/Drying Machine
222,Chicken,Chicken
223
+ 223,Printer,Printer
224
+ 224,Watermelon,Watermelon
225
+ 225,Saxophone,Saxophone
226
+ 226,Tissue,Tissue
227
+ 227,Toothbrush,Toothbrush
228
+ 228,Ice cream,Ice cream
229
+ 229,Hotair ballon,Hot air balloon
230
+ 230,Cello,Cello
231
+ 231,French Fries,French Fries
232
+ 232,Scale,Scale
233
+ 233,Trophy,Trophy
234
+ 234,Cabbage,Cabbage
235
+ 235,Hot dog,Hot dog
236
+ 236,Blender,Blender
237
+ 237,Peach,Peach
238
+ 238,Rice,Rice
239
+ 239,Wallet/Purse,Wallet/Purse
240
+ 240,Volleyball,Volleyball
241
+ 241,Deer,Deer
242
+ 242,Goose,Goose
243
+ 243,Tape,Tape
244
+ 244,Tablet,Tablet
245
+ 245,Cosmetics,Cosmetics
246
+ 246,Trumpet,Trumpet
247
+ 247,Pineapple,Pineapple
248
+ 248,Golf Ball,Golf Ball
249
+ 249,Ambulance,Ambulance
250
+ 250,Parking meter,Parking meter
251
+ 251,Mango,Mango
252
+ 252,Key,Key
253
+ 253,Hurdle,Hurdle
254
+ 254,Fishing Rod,Fishing Rod
255
+ 255,Medal,Medal
256
+ 256,Flute,Flute
257
+ 257,Brush,Brush
258
+ 258,Penguin,Penguin
259
+ 259,Megaphone,Megaphone
260
+ 260,Corn,Corn
261
+ 261,Lettuce,Lettuce
262
+ 262,Garlic,Garlic
263
+ 263,Swan,Swan
264
+ 264,Helicopter,Helicopter
265
+ 265,Green Onion,Green Onion
266
+ 266,Sandwich,Sandwich
267
+ 267,Nuts,Nuts
268
+ 268,Speed Limit Sign,Speed Limit Sign
269
+ 269,Induction Cooker,Induction Cooker
270
+ 270,Broom,Broom
271
+ 271,Trombone,Trombone
272
+ 272,Plum,Plum
273
+ 273,Rickshaw,Rickshaw
274
+ 274,Goldfish,Goldfish
275
+ 275,Kiwi fruit,Kiwi fruit
276
+ 276,Router/modem,Router/modem
277
+ 277,Poker Card,Poker Card
278
+ 278,Toaster,Toaster
279
+ 279,Shrimp,Shrimp
280
+ 280,Sushi,Sushi
281
+ 281,Cheese,Cheese
282
+ 282,Notepaper,Notepaper
283
+ 283,Cherry,Cherry
284
+ 284,Pliers,Pliers
285
+ 285,CD,CD
286
+ 286,Pasta,Pasta
287
+ 287,Hammer,Hammer
288
+ 288,Cue,Cue
289
+ 289,Avocado,Avocado
290
+ 290,Hamimelon,Hami melon
291
+ 291,Flask,Flask
292
+ 292,Mushroon,Mushroom
293
+ 293,Screwdriver,Screwdriver
294
+ 294,Soap,Soap
295
+ 295,Recorder,Recorder
296
+ 296,Bear,Bear
297
+ 297,Eggplant,Eggplant
298
+ 298,Board Eraser,Board Eraser
299
+ 299,Coconut,Coconut
300
+ 300,Tape Measur/ Ruler,Tape Measure/ Ruler
301
+ 301,Pig,Pig
302
+ 302,Showerhead,Showerhead
303
+ 303,Globe,Globe
304
+ 304,Chips,Chips
305
+ 305,Steak,Steak
306
+ 306,Crosswalk Sign,Crosswalk Sign
307
+ 307,Stapler,Stapler
308
+ 308,Campel,Camel
309
+ 309,Formula 1,Formula 1
310
+ 310,Pomegranate,Pomegranate
311
+ 311,Dishwasher,Dishwasher
312
+ 312,Crab,Crab
313
+ 313,Hoverboard,Hoverboard
314
+ 314,Meat ball,Meatball
315
+ 315,Rice Cooker,Rice Cooker
316
+ 316,Tuba,Tuba
317
+ 317,Calculator,Calculator
318
+ 318,Papaya,Papaya
319
+ 319,Antelope,Antelope
320
+ 320,Parrot,Parrot
321
+ 321,Seal,Seal
322
+ 322,Buttefly,Butterfly
323
+ 323,Dumbbell,Dumbbell
324
+ 324,Donkey,Donkey
325
+ 325,Lion,Lion
326
+ 326,Urinal,Urinal
327
+ 327,Dolphin,Dolphin
328
+ 328,Electric Drill,Electric Drill
329
+ 329,Hair Dryer,Hair Dryer
330
+ 330,Egg tart,Egg tart
331
+ 331,Jellyfish,Jellyfish
332
+ 332,Treadmill,Treadmill
333
+ 333,Lighter,Lighter
334
+ 334,Grapefruit,Grapefruit
335
+ 335,Game board,Game board
336
+ 336,Mop,Mop
337
+ 337,Radish,Radish
338
+ 338,Baozi,Baozi
339
+ 339,Target,Target
340
+ 340,French,French
341
+ 341,Spring Rolls,Spring Rolls
342
+ 342,Monkey,Monkey
343
+ 343,Rabbit,Rabbit
344
+ 344,Pencil Case,Pencil Case
345
+ 345,Yak,Yak
346
+ 346,Red Cabbage,Red Cabbage
347
+ 347,Binoculars,Binoculars
348
+ 348,Asparagus,Asparagus
349
+ 349,Barbell,Barbell
350
+ 350,Scallop,Scallop
351
+ 351,Noddles,Noddles
352
+ 352,Comb,Comb
353
+ 353,Dumpling,Dumpling
354
+ 354,Oyster,Oyster
355
+ 355,Table Teniis paddle,Table Tennis paddle
356
+ 356,Cosmetics Brush/Eyeliner Pencil,Cosmetics Brush/Eyeliner Pencil
357
+ 357,Chainsaw,Chainsaw
358
+ 358,Eraser,Eraser
359
+ 359,Lobster,Lobster
360
+ 360,Durian,Durian
361
+ 361,Okra,Okra
362
+ 362,Lipstick,Lipstick
363
+ 363,Cosmetics Mirror,Cosmetics Mirror
364
+ 364,Curling,Curling
365
+ 365,Table Tennis,Table Tennis
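Each row is `category id, original Objects365 name, fixed name`; for most categories the two names agree, and only misspellings (e.g. `Bakset`, `Moniter/TV`) are corrected. A minimal sketch for reading the mapping:

~~~
import csv

# Build {original name: fixed name} from the rows above.
with open('datasets/metadata/Objects365_names_fix.csv') as f:
    name_fix = {orig: fixed for _, orig, fixed in csv.reader(f)}

print(name_fix['Bakset'])      # -> 'Basket'
print(name_fix['Moniter/TV'])  # -> 'Monitor/TV'
~~~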
datasets/metadata/coco_clip_a+cname.npy ADDED
Binary file (82 kB).
datasets/metadata/lvis_v1_clip_a+cname.npy ADDED
Binary file (1.23 MB).
datasets/metadata/lvis_v1_train_cat_info.json ADDED
The diff for this file is too large to render.
datasets/metadata/o365_clip_a+cnamefix.npy ADDED
Binary file (374 kB).
datasets/metadata/oid_clip_a+cname.npy ADDED
Binary file (512 kB).
demo.py ADDED
@@ -0,0 +1,204 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ import argparse
+ import glob
+ import multiprocessing as mp
+ import numpy as np
+ import os
+ import tempfile
+ import time
+ import warnings
+ import cv2
+ import tqdm
+ import sys
+
+ from detectron2.config import get_cfg
+ from detectron2.data.detection_utils import read_image
+ from detectron2.utils.logger import setup_logger
+
+ sys.path.insert(0, 'third_party/CenterNet2/projects/CenterNet2/')
+ from centernet.config import add_centernet_config
+ from detic.config import add_detic_config
+
+ from detic.predictor import VisualizationDemo
+
+
+ # constants
+ WINDOW_NAME = "Detic"
+
+ def setup_cfg(args):
+     cfg = get_cfg()
+     add_centernet_config(cfg)
+     add_detic_config(cfg)
+     cfg.merge_from_file(args.config_file)
+     cfg.merge_from_list(args.opts)
+     # Set score_threshold for builtin models
+     cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
+     cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
+     cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
+     cfg.MODEL.ROI_BOX_HEAD.ZEROSHOT_WEIGHT_PATH = 'rand' # load later
+     if not args.pred_all_class:
+         cfg.MODEL.ROI_HEADS.ONE_CLASS_PER_PROPOSAL = True
+     cfg.freeze()
+     return cfg
+
+
+ def get_parser():
+     parser = argparse.ArgumentParser(description="Detectron2 demo for builtin configs")
+     parser.add_argument(
+         "--config-file",
+         default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
+         metavar="FILE",
+         help="path to config file",
+     )
+     parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
+     parser.add_argument("--video-input", help="Path to video file.")
+     parser.add_argument(
+         "--input",
+         nargs="+",
+         help="A list of space separated input images; "
+         "or a single glob pattern such as 'directory/*.jpg'",
+     )
+     parser.add_argument(
+         "--output",
+         help="A file or directory to save output visualizations. "
+         "If not given, will show output in an OpenCV window.",
+     )
+     parser.add_argument(
+         "--vocabulary",
+         default="lvis",
+         choices=['lvis', 'openimages', 'objects365', 'coco', 'custom'],
+         help="Class vocabulary for the open-vocabulary classifier; 'custom' uses --custom_vocabulary.",
+     )
+     parser.add_argument(
+         "--custom_vocabulary",
+         default="",
+         help="Comma-separated class names, used when --vocabulary is 'custom'.",
+     )
+     parser.add_argument("--pred_all_class", action='store_true')
+     parser.add_argument(
+         "--confidence-threshold",
+         type=float,
+         default=0.5,
+         help="Minimum score for instance predictions to be shown",
+     )
+     parser.add_argument(
+         "--opts",
+         help="Modify config options using the command-line 'KEY VALUE' pairs",
+         default=[],
+         nargs=argparse.REMAINDER,
+     )
+     return parser
+
+
+ def test_opencv_video_format(codec, file_ext):
+     with tempfile.TemporaryDirectory(prefix="video_format_test") as dir:
+         filename = os.path.join(dir, "test_file" + file_ext)
+         writer = cv2.VideoWriter(
+             filename=filename,
+             fourcc=cv2.VideoWriter_fourcc(*codec),
+             fps=float(30),
+             frameSize=(10, 10),
+             isColor=True,
+         )
+         [writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]
+         writer.release()
+         if os.path.isfile(filename):
+             return True
+         return False
+
+
+ if __name__ == "__main__":
+     mp.set_start_method("spawn", force=True)
+     args = get_parser().parse_args()
+     setup_logger(name="fvcore")
+     logger = setup_logger()
+     logger.info("Arguments: " + str(args))
+
+     cfg = setup_cfg(args)
+
+     demo = VisualizationDemo(cfg, args)
+
+     if args.input:
+         if len(args.input) == 1:
+             args.input = glob.glob(os.path.expanduser(args.input[0]))
+             assert args.input, "The input path(s) was not found"
+         for path in tqdm.tqdm(args.input, disable=not args.output):
+             img = read_image(path, format="BGR")
+             start_time = time.time()
+             predictions, visualized_output = demo.run_on_image(img)
+             logger.info(
+                 "{}: {} in {:.2f}s".format(
+                     path,
+                     "detected {} instances".format(len(predictions["instances"]))
+                     if "instances" in predictions
+                     else "finished",
+                     time.time() - start_time,
+                 )
+             )
+
+             if args.output:
+                 if os.path.isdir(args.output):
+                     assert os.path.isdir(args.output), args.output
+                     out_filename = os.path.join(args.output, os.path.basename(path))
+                 else:
+                     assert len(args.input) == 1, "Please specify a directory with args.output"
+                     out_filename = args.output
+                 visualized_output.save(out_filename)
+             else:
+                 cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
+                 cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
+                 if cv2.waitKey(0) == 27:
+                     break  # esc to quit
+     elif args.webcam:
+         assert args.input is None, "Cannot have both --input and --webcam!"
+         assert args.output is None, "output not yet supported with --webcam!"
+         cam = cv2.VideoCapture(0)
+         for vis in tqdm.tqdm(demo.run_on_video(cam)):
+             cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
+             cv2.imshow(WINDOW_NAME, vis)
+             if cv2.waitKey(1) == 27:
+                 break  # esc to quit
+         cam.release()
+         cv2.destroyAllWindows()
+     elif args.video_input:
+         video = cv2.VideoCapture(args.video_input)
+         width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
+         height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
+         frames_per_second = video.get(cv2.CAP_PROP_FPS)
+         num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
+         basename = os.path.basename(args.video_input)
+         codec, file_ext = (
+             ("x264", ".mkv") if test_opencv_video_format("x264", ".mkv") else ("mp4v", ".mp4")
+         )
+         if codec == "mp4v":
+             warnings.warn("x264 codec not available, switching to mp4v")
+         if args.output:
+             if os.path.isdir(args.output):
+                 output_fname = os.path.join(args.output, basename)
+                 output_fname = os.path.splitext(output_fname)[0] + file_ext
+             else:
+                 output_fname = args.output
+             assert not os.path.isfile(output_fname), output_fname
+             output_file = cv2.VideoWriter(
+                 filename=output_fname,
+                 # some installation of opencv may not support x264 (due to its license),
+                 # you can try other format (e.g. MPEG)
+                 fourcc=cv2.VideoWriter_fourcc(*codec),
+                 fps=float(frames_per_second),
+                 frameSize=(width, height),
+                 isColor=True,
+             )
+         assert os.path.isfile(args.video_input)
+         for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):
+             if args.output:
+                 output_file.write(vis_frame)
+             else:
+                 cv2.namedWindow(basename, cv2.WINDOW_NORMAL)
+                 cv2.imshow(basename, vis_frame)
+                 if cv2.waitKey(1) == 27:
+                     break  # esc to quit
+         video.release()
+         if args.output:
+             output_file.release()
+         else:
+             cv2.destroyAllWindows()
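`demo.py` is normally driven from the command line; the sketch below runs the same pipeline programmatically on a single image. The config file is one of those added in this commit, while the image and checkpoint paths are placeholders you would point at your own files:

~~~
import sys
sys.path.insert(0, 'third_party/CenterNet2/projects/CenterNet2/')
from detectron2.data.detection_utils import read_image
from demo import get_parser, setup_cfg
from detic.predictor import VisualizationDemo

args = get_parser().parse_args([
    '--config-file', 'configs/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml',
    '--input', 'desk.jpg',                           # placeholder image
    '--vocabulary', 'lvis',
    '--opts', 'MODEL.WEIGHTS', 'models/detic.pth',   # placeholder checkpoint path
])
cfg = setup_cfg(args)
demo = VisualizationDemo(cfg, args)
predictions, vis = demo.run_on_image(read_image('desk.jpg', format='BGR'))
print(len(predictions['instances']))
~~~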
detic/__init__.py ADDED
@@ -0,0 +1,19 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ from .modeling.meta_arch import custom_rcnn
+ from .modeling.roi_heads import detic_roi_heads
+ from .modeling.roi_heads import res5_roi_heads
+ from .modeling.backbone import swintransformer
+ from .modeling.backbone import timm
+
+
+ from .data.datasets import lvis_v1
+ from .data.datasets import imagenet
+ from .data.datasets import cc
+ from .data.datasets import objects365
+ from .data.datasets import oid
+ from .data.datasets import coco_zeroshot
+
+ try:
+     from .modeling.meta_arch import d2_deformable_detr
+ except:  # Deformable DETR is optional; skip it if its dependencies are missing.
+     pass
detic/config.py ADDED
@@ -0,0 +1,131 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ from detectron2.config import CfgNode as CN
+
+ def add_detic_config(cfg):
+     _C = cfg
+
+     _C.WITH_IMAGE_LABELS = False # Turn on co-training with classification data
+
+     # Open-vocabulary classifier
+     _C.MODEL.ROI_BOX_HEAD.USE_ZEROSHOT_CLS = False # Use fixed classifier for open-vocabulary detection
+     _C.MODEL.ROI_BOX_HEAD.ZEROSHOT_WEIGHT_PATH = 'datasets/metadata/lvis_v1_clip_a+cname.npy'
+     _C.MODEL.ROI_BOX_HEAD.ZEROSHOT_WEIGHT_DIM = 512
+     _C.MODEL.ROI_BOX_HEAD.NORM_WEIGHT = True
+     _C.MODEL.ROI_BOX_HEAD.NORM_TEMP = 50.0
+     _C.MODEL.ROI_BOX_HEAD.IGNORE_ZERO_CATS = False
+     _C.MODEL.ROI_BOX_HEAD.USE_BIAS = 0.0 # >= 0: bias is not used
+
+     _C.MODEL.ROI_BOX_HEAD.MULT_PROPOSAL_SCORE = False # CenterNet2
+     _C.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE = False
+     _C.MODEL.ROI_BOX_HEAD.PRIOR_PROB = 0.01
+     _C.MODEL.ROI_BOX_HEAD.USE_FED_LOSS = False # Federated Loss
+     _C.MODEL.ROI_BOX_HEAD.CAT_FREQ_PATH = \
+         'datasets/metadata/lvis_v1_train_cat_info.json'
+     _C.MODEL.ROI_BOX_HEAD.FED_LOSS_NUM_CAT = 50
+     _C.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT = 0.5
+
+     # Classification data configs
+     _C.MODEL.ROI_BOX_HEAD.IMAGE_LABEL_LOSS = 'max_size' # other options: 'max', 'softmax', 'sum'
+     _C.MODEL.ROI_BOX_HEAD.IMAGE_LOSS_WEIGHT = 0.1
+     _C.MODEL.ROI_BOX_HEAD.IMAGE_BOX_SIZE = 1.0
+     _C.MODEL.ROI_BOX_HEAD.ADD_IMAGE_BOX = False # Used for image-box loss and caption loss
+     _C.MODEL.ROI_BOX_HEAD.WS_NUM_PROPS = 128 # num proposals for image-labeled data
+     _C.MODEL.ROI_BOX_HEAD.WITH_SOFTMAX_PROP = False # Used for WSDDN
+     _C.MODEL.ROI_BOX_HEAD.CAPTION_WEIGHT = 1.0 # Caption loss weight
+     _C.MODEL.ROI_BOX_HEAD.NEG_CAP_WEIGHT = 0.125 # Caption loss hyper-parameter
+     _C.MODEL.ROI_BOX_HEAD.ADD_FEATURE_TO_PROP = False # Used for WSDDN
+     _C.MODEL.ROI_BOX_HEAD.SOFTMAX_WEAK_LOSS = False # Used when USE_SIGMOID_CE is False
+
+     _C.MODEL.ROI_HEADS.MASK_WEIGHT = 1.0
+     _C.MODEL.ROI_HEADS.ONE_CLASS_PER_PROPOSAL = False # For demo only
+
+     # Caption losses
+     _C.MODEL.CAP_BATCH_RATIO = 4 # Ratio between detection data and caption data
+     _C.MODEL.WITH_CAPTION = False
+     _C.MODEL.SYNC_CAPTION_BATCH = False # synchronize across GPUs to enlarge the number of "classes"
+
+     # dynamic class sampling when training with 21K classes
+     _C.MODEL.DYNAMIC_CLASSIFIER = False
+     _C.MODEL.NUM_SAMPLE_CATS = 50
+
+     # Different classifiers in testing, used in cross-dataset evaluation
+     _C.MODEL.RESET_CLS_TESTS = False
+     _C.MODEL.TEST_CLASSIFIERS = []
+     _C.MODEL.TEST_NUM_CLASSES = []
+
+     # Backbones
+     _C.MODEL.SWIN = CN()
+     _C.MODEL.SWIN.SIZE = 'T' # 'T', 'S', 'B'
+     _C.MODEL.SWIN.USE_CHECKPOINT = False
+     _C.MODEL.SWIN.OUT_FEATURES = (1, 2, 3) # FPN stride 8 - 32
+
+     _C.MODEL.TIMM = CN()
+     _C.MODEL.TIMM.BASE_NAME = 'resnet50'
+     _C.MODEL.TIMM.OUT_LEVELS = (3, 4, 5)
+     _C.MODEL.TIMM.NORM = 'FrozenBN'
+     _C.MODEL.TIMM.FREEZE_AT = 0
+     _C.MODEL.DATASET_LOSS_WEIGHT = []
+
+     # Multi-dataset dataloader
+     _C.DATALOADER.DATASET_RATIO = [1, 1] # sample ratio
+     _C.DATALOADER.USE_RFS = [False, False]
+     _C.DATALOADER.MULTI_DATASET_GROUPING = False # Always true when multi-dataset is enabled
+     _C.DATALOADER.DATASET_ANN = ['box', 'box'] # Annotation type of each dataset
+     _C.DATALOADER.USE_DIFF_BS_SIZE = False # Use different batchsize for each dataset
+     _C.DATALOADER.DATASET_BS = [8, 32] # Used when USE_DIFF_BS_SIZE is on
+     _C.DATALOADER.DATASET_INPUT_SIZE = [896, 384] # Used when USE_DIFF_BS_SIZE is on
+     _C.DATALOADER.DATASET_INPUT_SCALE = [(0.1, 2.0), (0.5, 1.5)] # Used when USE_DIFF_BS_SIZE is on
+     _C.DATALOADER.DATASET_MIN_SIZES = [(640, 800), (320, 400)] # Used when USE_DIFF_BS_SIZE is on
+     _C.DATALOADER.DATASET_MAX_SIZES = [1333, 667] # Used when USE_DIFF_BS_SIZE is on
+     _C.DATALOADER.USE_TAR_DATASET = False # For ImageNet-21K: read images directly from tar files without unpacking
+     _C.DATALOADER.TARFILE_PATH = 'datasets/imagenet/metadata-22k/tar_files.npy'
+     _C.DATALOADER.TAR_INDEX_DIR = 'datasets/imagenet/metadata-22k/tarindex_npy'
+
+     _C.SOLVER.USE_CUSTOM_SOLVER = False
+     _C.SOLVER.OPTIMIZER = 'SGD'
+     _C.SOLVER.BACKBONE_MULTIPLIER = 1.0 # Used in DETR
+     _C.SOLVER.CUSTOM_MULTIPLIER = 1.0 # Used in DETR
+     _C.SOLVER.CUSTOM_MULTIPLIER_NAME = [] # Used in DETR
+
+     # Deformable DETR
+     _C.MODEL.DETR = CN()
+     _C.MODEL.DETR.NUM_CLASSES = 80
+     _C.MODEL.DETR.FROZEN_WEIGHTS = '' # For Segmentation
+     _C.MODEL.DETR.GIOU_WEIGHT = 2.0
+     _C.MODEL.DETR.L1_WEIGHT = 5.0
+     _C.MODEL.DETR.DEEP_SUPERVISION = True
+     _C.MODEL.DETR.NO_OBJECT_WEIGHT = 0.1
+     _C.MODEL.DETR.CLS_WEIGHT = 2.0
+     _C.MODEL.DETR.NUM_FEATURE_LEVELS = 4
+     _C.MODEL.DETR.TWO_STAGE = False
+     _C.MODEL.DETR.WITH_BOX_REFINE = False
+     _C.MODEL.DETR.FOCAL_ALPHA = 0.25
+     _C.MODEL.DETR.NHEADS = 8
+     _C.MODEL.DETR.DROPOUT = 0.1
+     _C.MODEL.DETR.DIM_FEEDFORWARD = 2048
+     _C.MODEL.DETR.ENC_LAYERS = 6
+     _C.MODEL.DETR.DEC_LAYERS = 6
+     _C.MODEL.DETR.PRE_NORM = False
+     _C.MODEL.DETR.HIDDEN_DIM = 256
+     _C.MODEL.DETR.NUM_OBJECT_QUERIES = 100
+
+     _C.MODEL.DETR.USE_FED_LOSS = False
+     _C.MODEL.DETR.WEAK_WEIGHT = 0.1
+
+     _C.INPUT.CUSTOM_AUG = ''
+     _C.INPUT.TRAIN_SIZE = 640
+     _C.INPUT.TEST_SIZE = 640
+     _C.INPUT.SCALE_RANGE = (0.1, 2.)
+     # 'default' for fixed short/long edge, 'square' for max size=INPUT.SIZE
+     _C.INPUT.TEST_INPUT_TYPE = 'default'
+
+     _C.FIND_UNUSED_PARAM = True
+     _C.EVAL_PRED_AR = False
+     _C.EVAL_PROPOSAL_AR = False
+     _C.EVAL_CAT_SPEC_AR = False
+     _C.IS_DEBUG = False
+     _C.QUICK_DEBUG = False
+     _C.FP16 = False
+     _C.EVAL_AP_FIX = False
+     _C.GEN_PSEDO_LABELS = False
+     _C.SAVE_DEBUG_PATH = 'output/save_debug/'
detic/custom_solver.py ADDED
@@ -0,0 +1,78 @@
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+ from enum import Enum
+ import itertools
+ from typing import Any, Callable, Dict, Iterable, List, Set, Type, Union
+ import torch
+
+ from detectron2.config import CfgNode
+
+ from detectron2.solver.build import maybe_add_gradient_clipping
+
+ def match_name_keywords(n, name_keywords):
+     out = False
+     for b in name_keywords:
+         if b in n:
+             out = True
+             break
+     return out
+
+ def build_custom_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:
+     """
+     Build an optimizer from config.
+     """
+     params: List[Dict[str, Any]] = []
+     memo: Set[torch.nn.parameter.Parameter] = set()
+     custom_multiplier_name = cfg.SOLVER.CUSTOM_MULTIPLIER_NAME
+     optimizer_type = cfg.SOLVER.OPTIMIZER
+     for key, value in model.named_parameters(recurse=True):
+         if not value.requires_grad:
+             continue
+         # Avoid duplicating parameters
+         if value in memo:
+             continue
+         memo.add(value)
+         lr = cfg.SOLVER.BASE_LR
+         weight_decay = cfg.SOLVER.WEIGHT_DECAY
+         if "backbone" in key:
+             lr = lr * cfg.SOLVER.BACKBONE_MULTIPLIER
+         if match_name_keywords(key, custom_multiplier_name):
+             lr = lr * cfg.SOLVER.CUSTOM_MULTIPLIER
+             print('Custom LR', key, lr)
+         param = {"params": [value], "lr": lr}
+         if optimizer_type != 'ADAMW':
+             param['weight_decay'] = weight_decay
+         params += [param]
+
+     def maybe_add_full_model_gradient_clipping(optim):  # optim: the optimizer class
+         # detectron2 doesn't have full model gradient clipping now
+         clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
+         enable = (
+             cfg.SOLVER.CLIP_GRADIENTS.ENABLED
+             and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
+             and clip_norm_val > 0.0
+         )
+
+         class FullModelGradientClippingOptimizer(optim):
+             def step(self, closure=None):
+                 all_params = itertools.chain(*[x["params"] for x in self.param_groups])
+                 torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
+                 super().step(closure=closure)
+
+         return FullModelGradientClippingOptimizer if enable else optim
+
+
+     if optimizer_type == 'SGD':
+         optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(
+             params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM,
+             nesterov=cfg.SOLVER.NESTEROV
+         )
+     elif optimizer_type == 'ADAMW':
+         optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(
+             params, cfg.SOLVER.BASE_LR,
+             weight_decay=cfg.SOLVER.WEIGHT_DECAY
+         )
+     else:
+         raise NotImplementedError(f"no optimizer type {optimizer_type}")
+     if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
+         optimizer = maybe_add_gradient_clipping(cfg, optimizer)
+     return optimizer
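A sketch of how `build_custom_optimizer` is meant to be called, assuming `cfg` and `model` come from the usual detectron2 setup and `cfg.SOLVER.OPTIMIZER` is `'SGD'` or `'ADAMW'`:

~~~
from detic.custom_solver import build_custom_optimizer

# Per-parameter learning rates are derived from BASE_LR, BACKBONE_MULTIPLIER
# and CUSTOM_MULTIPLIER_NAME, as implemented above.
optimizer = build_custom_optimizer(cfg, model)
~~~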
detic/data/custom_build_augmentation.py ADDED
@@ -0,0 +1,51 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ import logging
+ import numpy as np
+ import pycocotools.mask as mask_util
+ import torch
+ from fvcore.common.file_io import PathManager
+ from PIL import Image
+
+
+ from detectron2.data import transforms as T
+ from .transforms.custom_augmentation_impl import EfficientDetResizeCrop
+
+ def build_custom_augmentation(cfg, is_train, scale=None, size=None, \
+     min_size=None, max_size=None):
+     """
+     Create a list of default :class:`Augmentation` from config.
+     Now it includes resizing and flipping.
+
+     Returns:
+         list[Augmentation]
+     """
+     if cfg.INPUT.CUSTOM_AUG == 'ResizeShortestEdge':
+         if is_train:
+             min_size = cfg.INPUT.MIN_SIZE_TRAIN if min_size is None else min_size
+             max_size = cfg.INPUT.MAX_SIZE_TRAIN if max_size is None else max_size
+             sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
+         else:
+             min_size = cfg.INPUT.MIN_SIZE_TEST
+             max_size = cfg.INPUT.MAX_SIZE_TEST
+             sample_style = "choice"
+         augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)]
+     elif cfg.INPUT.CUSTOM_AUG == 'EfficientDetResizeCrop':
+         if is_train:
+             scale = cfg.INPUT.SCALE_RANGE if scale is None else scale
+             size = cfg.INPUT.TRAIN_SIZE if size is None else size
+         else:
+             scale = (1, 1)
+             size = cfg.INPUT.TEST_SIZE
+         augmentation = [EfficientDetResizeCrop(size, scale)]
+     else:
+         assert 0, cfg.INPUT.CUSTOM_AUG
+
+     if is_train:
+         augmentation.append(T.RandomFlip())
+     return augmentation
+
+
+ build_custom_transform_gen = build_custom_augmentation
+ """
+ Alias for backward-compatibility.
+ """
detic/data/custom_dataset_dataloader.py ADDED
@@ -0,0 +1,331 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ # Part of the code is from https://github.com/xingyizhou/UniDet/blob/master/projects/UniDet/unidet/data/multi_dataset_dataloader.py (Apache-2.0 License)
+ import copy
+ import logging
+ import numpy as np
+ import operator
+ import torch
+ import torch.utils.data
+ import json
+ from detectron2.utils.comm import get_world_size
+ from detectron2.utils.logger import _log_api_usage, log_first_n
+
+ from detectron2.config import configurable
+ from detectron2.data import samplers
+ from torch.utils.data.sampler import BatchSampler, Sampler
+ from detectron2.data.common import DatasetFromList, MapDataset
+ from detectron2.data.dataset_mapper import DatasetMapper
+ from detectron2.data.build import get_detection_dataset_dicts, build_batch_data_loader
+ from detectron2.data.samplers import TrainingSampler, RepeatFactorTrainingSampler
+ from detectron2.data.build import worker_init_reset_seed, print_instances_class_histogram
+ from detectron2.data.build import filter_images_with_only_crowd_annotations
+ from detectron2.data.build import filter_images_with_few_keypoints
+ from detectron2.data.build import check_metadata_consistency
+ from detectron2.data.catalog import MetadataCatalog, DatasetCatalog
+ from detectron2.utils import comm
+ import itertools
+ import math
+ from collections import defaultdict
+ from typing import Optional
+
+
+ def _custom_train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None):
+     sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
+     if 'MultiDataset' in sampler_name:
+         dataset_dicts = get_detection_dataset_dicts_with_source(
+             cfg.DATASETS.TRAIN,
+             filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
+             min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
+             if cfg.MODEL.KEYPOINT_ON else 0,
+             proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
+         )
+     else:
+         dataset_dicts = get_detection_dataset_dicts(
+             cfg.DATASETS.TRAIN,
+             filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
+             min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
+             if cfg.MODEL.KEYPOINT_ON else 0,
+             proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
+         )
+
+     if mapper is None:
+         mapper = DatasetMapper(cfg, True)
+
+     if sampler is not None:
+         pass
+     elif sampler_name == "TrainingSampler":
+         sampler = TrainingSampler(len(dataset))
+     elif sampler_name == "MultiDatasetSampler":
+         sampler = MultiDatasetSampler(
+             dataset_dicts,
+             dataset_ratio = cfg.DATALOADER.DATASET_RATIO,
+             use_rfs = cfg.DATALOADER.USE_RFS,
+             dataset_ann = cfg.DATALOADER.DATASET_ANN,
+             repeat_threshold = cfg.DATALOADER.REPEAT_THRESHOLD,
+         )
+     elif sampler_name == "RepeatFactorTrainingSampler":
+         repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
+             dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
+         )
+         sampler = RepeatFactorTrainingSampler(repeat_factors)
+     else:
+         raise ValueError("Unknown training sampler: {}".format(sampler_name))
+
+     return {
+         "dataset": dataset_dicts,
+         "sampler": sampler,
+         "mapper": mapper,
+         "total_batch_size": cfg.SOLVER.IMS_PER_BATCH,
+         "aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING,
+         "num_workers": cfg.DATALOADER.NUM_WORKERS,
+         'multi_dataset_grouping': cfg.DATALOADER.MULTI_DATASET_GROUPING,
+         'use_diff_bs_size': cfg.DATALOADER.USE_DIFF_BS_SIZE,
+         'dataset_bs': cfg.DATALOADER.DATASET_BS,
+         'num_datasets': len(cfg.DATASETS.TRAIN)
+     }
+
+
+ @configurable(from_config=_custom_train_loader_from_config)
+ def build_custom_train_loader(
+         dataset, *, mapper, sampler,
+         total_batch_size=16,
+         aspect_ratio_grouping=True,
+         num_workers=0,
+         num_datasets=1,
+         multi_dataset_grouping=False,
+         use_diff_bs_size=False,
+         dataset_bs=[]
+     ):
+     """
+     Modified from detectron2.data.build.build_detection_train_loader, but supports
+     different samplers
+     """
+     if isinstance(dataset, list):
+         dataset = DatasetFromList(dataset, copy=False)
+     if mapper is not None:
+         dataset = MapDataset(dataset, mapper)
+     if sampler is None:
+         sampler = TrainingSampler(len(dataset))
+     assert isinstance(sampler, torch.utils.data.sampler.Sampler)
+     if multi_dataset_grouping:
+         return build_multi_dataset_batch_data_loader(
+             use_diff_bs_size,
+             dataset_bs,
+             dataset,
+             sampler,
+             total_batch_size,
+             num_datasets=num_datasets,
+             num_workers=num_workers,
+         )
+     else:
+         return build_batch_data_loader(
+             dataset,
+             sampler,
+             total_batch_size,
+             aspect_ratio_grouping=aspect_ratio_grouping,
+             num_workers=num_workers,
+         )
+
+
+ def build_multi_dataset_batch_data_loader(
+         use_diff_bs_size, dataset_bs,
+         dataset, sampler, total_batch_size, num_datasets, num_workers=0
+     ):
+     """
+     """
+     world_size = get_world_size()
+     assert (
+         total_batch_size > 0 and total_batch_size % world_size == 0
+     ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format(
+         total_batch_size, world_size
+     )
+
+     batch_size = total_batch_size // world_size
+     data_loader = torch.utils.data.DataLoader(
+         dataset,
+         sampler=sampler,
+         num_workers=num_workers,
+         batch_sampler=None,
+         collate_fn=operator.itemgetter(0),  # don't batch, but yield individual elements
+         worker_init_fn=worker_init_reset_seed,
+     )  # yield individual mapped dict
+     if use_diff_bs_size:
+         return DIFFMDAspectRatioGroupedDataset(
+             data_loader, dataset_bs, num_datasets)
+     else:
+         return MDAspectRatioGroupedDataset(
+             data_loader, batch_size, num_datasets)
+
+
+ def get_detection_dataset_dicts_with_source(
+         dataset_names, filter_empty=True, min_keypoints=0, proposal_files=None
+     ):
+     assert len(dataset_names)
+     dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in dataset_names]
+     for dataset_name, dicts in zip(dataset_names, dataset_dicts):
+         assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
+
+     for source_id, (dataset_name, dicts) in \
+         enumerate(zip(dataset_names, dataset_dicts)):
+         assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
+         for d in dicts:
+             d['dataset_source'] = source_id
+
+         if "annotations" in dicts[0]:
+             try:
+                 class_names = MetadataCatalog.get(dataset_name).thing_classes
+                 check_metadata_consistency("thing_classes", dataset_name)
+                 print_instances_class_histogram(dicts, class_names)
+             except AttributeError:  # class names are not available for this dataset
+                 pass
+
+     assert proposal_files is None
+
+     dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
+
+     has_instances = "annotations" in dataset_dicts[0]
+     if filter_empty and has_instances:
+         dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
+     if min_keypoints > 0 and has_instances:
+         dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
+
+     return dataset_dicts
+
+
+ class MultiDatasetSampler(Sampler):
+     def __init__(
+             self,
+             dataset_dicts,
+             dataset_ratio,
+             use_rfs,
+             dataset_ann,
+             repeat_threshold=0.001,
+             seed: Optional[int] = None,
+         ):
+         """
+         """
+         sizes = [0 for _ in range(len(dataset_ratio))]
+         for d in dataset_dicts:
+             sizes[d['dataset_source']] += 1
+         print('dataset sizes', sizes)
+         self.sizes = sizes
+         assert len(dataset_ratio) == len(sizes), \
+             'length of dataset ratio {} should be equal to the number of datasets {}'.format(
+                 len(dataset_ratio), len(sizes)
+             )
+         if seed is None:
+             seed = comm.shared_random_seed()
+         self._seed = int(seed)
+         self._rank = comm.get_rank()
+         self._world_size = comm.get_world_size()
+
+         self.dataset_ids = torch.tensor(
+             [d['dataset_source'] for d in dataset_dicts], dtype=torch.long)
+
+         dataset_weight = [torch.ones(s) * max(sizes) / s * r / sum(dataset_ratio) \
+             for i, (r, s) in enumerate(zip(dataset_ratio, sizes))]
+         dataset_weight = torch.cat(dataset_weight)
+
+         rfs_factors = []
+         st = 0
+         for i, s in enumerate(sizes):
+             if use_rfs[i]:
+                 if dataset_ann[i] == 'box':
+                     rfs_func = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency
+                 else:
+                     rfs_func = repeat_factors_from_tag_frequency
+                 rfs_factor = rfs_func(
+                     dataset_dicts[st: st + s],
+                     repeat_thresh=repeat_threshold)
+                 rfs_factor = rfs_factor * (s / rfs_factor.sum())
+             else:
+                 rfs_factor = torch.ones(s)
+             rfs_factors.append(rfs_factor)
+             st = st + s
+         rfs_factors = torch.cat(rfs_factors)
+
+         self.weights = dataset_weight * rfs_factors
+         self.sample_epoch_size = len(self.weights)
+
+     def __iter__(self):
+         start = self._rank
+         yield from itertools.islice(
+             self._infinite_indices(), start, None, self._world_size)
+
+
+     def _infinite_indices(self):
+         g = torch.Generator()
+         g.manual_seed(self._seed)
+         while True:
+             ids = torch.multinomial(
+                 self.weights, self.sample_epoch_size, generator=g,
+                 replacement=True)
+             nums = [(self.dataset_ids[ids] == i).sum().int().item() \
+                 for i in range(len(self.sizes))]
+             yield from ids
+
+
+ class MDAspectRatioGroupedDataset(torch.utils.data.IterableDataset):
+     def __init__(self, dataset, batch_size, num_datasets):
+         """
+         """
+         self.dataset = dataset
+         self.batch_size = batch_size
+         self._buckets = [[] for _ in range(2 * num_datasets)]
+
+     def __iter__(self):
+         for d in self.dataset:
+             w, h = d["width"], d["height"]
+             aspect_ratio_bucket_id = 0 if w > h else 1
+             bucket_id = d['dataset_source'] * 2 + aspect_ratio_bucket_id
+             bucket = self._buckets[bucket_id]
+             bucket.append(d)
+             if len(bucket) == self.batch_size:
+                 yield bucket[:]
+                 del bucket[:]
+
+
+ class DIFFMDAspectRatioGroupedDataset(torch.utils.data.IterableDataset):
+     def __init__(self, dataset, batch_sizes, num_datasets):
+         """
+         """
+         self.dataset = dataset
+         self.batch_sizes = batch_sizes
+         self._buckets = [[] for _ in range(2 * num_datasets)]
+
+     def __iter__(self):
+         for d in self.dataset:
+             w, h = d["width"], d["height"]
+             aspect_ratio_bucket_id = 0 if w > h else 1
+             bucket_id = d['dataset_source'] * 2 + aspect_ratio_bucket_id
+             bucket = self._buckets[bucket_id]
+             bucket.append(d)
+             if len(bucket) == self.batch_sizes[d['dataset_source']]:
+                 yield bucket[:]
+                 del bucket[:]
+
+
+ def repeat_factors_from_tag_frequency(dataset_dicts, repeat_thresh):
+     """
+     """
+     category_freq = defaultdict(int)
+     for dataset_dict in dataset_dicts:
+         cat_ids = dataset_dict['pos_category_ids']
+         for cat_id in cat_ids:
+             category_freq[cat_id] += 1
+     num_images = len(dataset_dicts)
+     for k, v in category_freq.items():
+         category_freq[k] = v / num_images
+
+     category_rep = {
+         cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq))
+         for cat_id, cat_freq in category_freq.items()
+     }
+
+     rep_factors = []
+     for dataset_dict in dataset_dicts:
+         cat_ids = dataset_dict['pos_category_ids']
+         rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0)
+         rep_factors.append(rep_factor)
+
+     return torch.tensor(rep_factors, dtype=torch.float32)
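The sampling weight of an image in `MultiDatasetSampler` is the product of a per-dataset term (equalize dataset sizes, then apply `DATASET_RATIO`) and the image's repeat factor. An illustrative recomputation of the per-dataset term, with made-up sizes:

~~~
# Hypothetical two-dataset setup: detection data and image-labeled data.
sizes = [100_000, 1_200_000]   # images per dataset
dataset_ratio = [1, 4]         # cfg.DATALOADER.DATASET_RATIO

# Per-dataset weight, matching `dataset_weight` above; RFS (when enabled)
# additionally multiplies each image's weight by its repeat factor.
per_dataset = [max(sizes) / s * r / sum(dataset_ratio)
               for r, s in zip(dataset_ratio, sizes)]
print(per_dataset)
~~~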
detic/data/custom_dataset_mapper.py ADDED
@@ -0,0 +1,280 @@
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+ import copy
+ import logging
+ import numpy as np
+ from typing import List, Optional, Union
+ import torch
+ import pycocotools.mask as mask_util
+
+ from detectron2.config import configurable
+
+ from detectron2.data import detection_utils as utils
+ from detectron2.data.detection_utils import transform_keypoint_annotations
+ from detectron2.data import transforms as T
+ from detectron2.data.dataset_mapper import DatasetMapper
+ from detectron2.structures import Boxes, BoxMode, Instances
+ from detectron2.structures import Keypoints, PolygonMasks, BitMasks
+ from fvcore.transforms.transform import TransformList
+ from .custom_build_augmentation import build_custom_augmentation
+ from .tar_dataset import DiskTarDataset
+
+ __all__ = ["CustomDatasetMapper"]
+
+ class CustomDatasetMapper(DatasetMapper):
+     @configurable
+     def __init__(self, is_train: bool,
+         with_ann_type=False,
+         dataset_ann=[],
+         use_diff_bs_size=False,
+         dataset_augs=[],
+         is_debug=False,
+         use_tar_dataset=False,
+         tarfile_path='',
+         tar_index_dir='',
+         **kwargs):
+         """
+         add image labels
+         """
+         self.with_ann_type = with_ann_type
+         self.dataset_ann = dataset_ann
+         self.use_diff_bs_size = use_diff_bs_size
+         if self.use_diff_bs_size and is_train:
+             self.dataset_augs = [T.AugmentationList(x) for x in dataset_augs]
+         self.is_debug = is_debug
+         self.use_tar_dataset = use_tar_dataset
+         if self.use_tar_dataset:
+             print('Using tar dataset')
+             self.tar_dataset = DiskTarDataset(tarfile_path, tar_index_dir)
+         super().__init__(is_train, **kwargs)
+
+
+     @classmethod
+     def from_config(cls, cfg, is_train: bool = True):
+         ret = super().from_config(cfg, is_train)
+         ret.update({
+             'with_ann_type': cfg.WITH_IMAGE_LABELS,
+             'dataset_ann': cfg.DATALOADER.DATASET_ANN,
+             'use_diff_bs_size': cfg.DATALOADER.USE_DIFF_BS_SIZE,
+             'is_debug': cfg.IS_DEBUG,
+             'use_tar_dataset': cfg.DATALOADER.USE_TAR_DATASET,
+             'tarfile_path': cfg.DATALOADER.TARFILE_PATH,
+             'tar_index_dir': cfg.DATALOADER.TAR_INDEX_DIR,
+         })
+         if ret['use_diff_bs_size'] and is_train:
+             if cfg.INPUT.CUSTOM_AUG == 'EfficientDetResizeCrop':
+                 dataset_scales = cfg.DATALOADER.DATASET_INPUT_SCALE
+                 dataset_sizes = cfg.DATALOADER.DATASET_INPUT_SIZE
+                 ret['dataset_augs'] = [
+                     build_custom_augmentation(cfg, True, scale, size) \
+                         for scale, size in zip(dataset_scales, dataset_sizes)]
+             else:
+                 assert cfg.INPUT.CUSTOM_AUG == 'ResizeShortestEdge'
+                 min_sizes = cfg.DATALOADER.DATASET_MIN_SIZES
+                 max_sizes = cfg.DATALOADER.DATASET_MAX_SIZES
+                 ret['dataset_augs'] = [
+                     build_custom_augmentation(
+                         cfg, True, min_size=mi, max_size=ma) \
+                         for mi, ma in zip(min_sizes, max_sizes)]
+         else:
+             ret['dataset_augs'] = []
+
+         return ret
+
+     def __call__(self, dataset_dict):
+         """
+         include image labels
+         """
+         dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
+         # USER: Write your own image loading if it's not from a file
+         if 'file_name' in dataset_dict:
+             ori_image = utils.read_image(
+                 dataset_dict["file_name"], format=self.image_format)
+         else:
+             ori_image, _, _ = self.tar_dataset[dataset_dict["tar_index"]]
+             ori_image = utils._apply_exif_orientation(ori_image)
+             ori_image = utils.convert_PIL_to_numpy(ori_image, self.image_format)
+         utils.check_image_size(dataset_dict, ori_image)
+
+         # USER: Remove if you don't do semantic/panoptic segmentation.
+         if "sem_seg_file_name" in dataset_dict:
+             sem_seg_gt = utils.read_image(
+                 dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2)
+         else:
+             sem_seg_gt = None
+
+         if self.is_debug:
+             dataset_dict['dataset_source'] = 0
+
+         not_full_labeled = 'dataset_source' in dataset_dict and \
+             self.with_ann_type and \
+             self.dataset_ann[dataset_dict['dataset_source']] != 'box'
+
+         aug_input = T.AugInput(copy.deepcopy(ori_image), sem_seg=sem_seg_gt)
+         if self.use_diff_bs_size and self.is_train:
+             transforms = \
+                 self.dataset_augs[dataset_dict['dataset_source']](aug_input)
+         else:
+             transforms = self.augmentations(aug_input)
+         image, sem_seg_gt = aug_input.image, aug_input.sem_seg
+
+         image_shape = image.shape[:2]  # h, w
+         dataset_dict["image"] = torch.as_tensor(
+             np.ascontiguousarray(image.transpose(2, 0, 1)))
+
+         if sem_seg_gt is not None:
+             dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
+
+         # USER: Remove if you don't use pre-computed proposals.
+         # Most users would not need this feature.
+         if self.proposal_topk is not None:
+             utils.transform_proposals(
+                 dataset_dict, image_shape, transforms,
+                 proposal_topk=self.proposal_topk
+             )
+
+         if not self.is_train:
+             # USER: Modify this if you want to keep them for some reason.
+             dataset_dict.pop("annotations", None)
+             dataset_dict.pop("sem_seg_file_name", None)
+             return dataset_dict
+
+         if "annotations" in dataset_dict:
+             # USER: Modify this if you want to keep them for some reason.
+             for anno in dataset_dict["annotations"]:
+                 if not self.use_instance_mask:
+                     anno.pop("segmentation", None)
+                 if not self.use_keypoint:
+                     anno.pop("keypoints", None)
+
+             # USER: Implement additional transformations if you have other types of data
+             all_annos = [
+                 (utils.transform_instance_annotations(
+                     obj, transforms, image_shape,
+                     keypoint_hflip_indices=self.keypoint_hflip_indices,
+                 ), obj.get("iscrowd", 0))
+                 for obj in dataset_dict.pop("annotations")
+             ]
+             annos = [ann[0] for ann in all_annos if ann[1] == 0]
+             instances = utils.annotations_to_instances(
+                 annos, image_shape, mask_format=self.instance_mask_format
+             )
+
+             del all_annos
+             if self.recompute_boxes:
+                 instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
+             dataset_dict["instances"] = utils.filter_empty_instances(instances)
+         if self.with_ann_type:
+             dataset_dict["pos_category_ids"] = dataset_dict.get(
+                 'pos_category_ids', [])
+             dataset_dict["ann_type"] = \
+                 self.dataset_ann[dataset_dict['dataset_source']]
+         if self.is_debug and (('pos_category_ids' not in dataset_dict) or \
+             (dataset_dict['pos_category_ids'] == [])):
+             dataset_dict['pos_category_ids'] = [x for x in sorted(set(
+                 dataset_dict['instances'].gt_classes.tolist()
+             ))]
+         return dataset_dict
+
+ # DETR augmentation
+ def build_transform_gen(cfg, is_train):
+     """
+     """
+     if is_train:
+         min_size = cfg.INPUT.MIN_SIZE_TRAIN
+         max_size = cfg.INPUT.MAX_SIZE_TRAIN
+         sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
+     else:
+         min_size = cfg.INPUT.MIN_SIZE_TEST
+         max_size = cfg.INPUT.MAX_SIZE_TEST
+         sample_style = "choice"
+     if sample_style == "range":
+         assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size))
+
+     logger = logging.getLogger(__name__)
+     tfm_gens = []
+     if is_train:
+         tfm_gens.append(T.RandomFlip())
+     tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
+     if is_train:
+         logger.info("TransformGens used in training: " + str(tfm_gens))
+     return tfm_gens
+
+
+ class DetrDatasetMapper:
+     """
+     A callable which takes a dataset dict in Detectron2 Dataset format,
+     and map it into a format used by DETR.
+     The callable currently does the following:
+     1. Read the image from "file_name"
+     2. Applies geometric transforms to the image and annotation
+     3. Find and applies suitable cropping to the image and annotation
+     4. Prepare image and annotation to Tensors
+     """
+
+     def __init__(self, cfg, is_train=True):
+         if cfg.INPUT.CROP.ENABLED and is_train:
+             self.crop_gen = [
+                 T.ResizeShortestEdge([400, 500, 600], sample_style="choice"),
+                 T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE),
+             ]
+         else:
+             self.crop_gen = None
+
+         self.mask_on = cfg.MODEL.MASK_ON
+         self.tfm_gens = build_transform_gen(cfg, is_train)
+         logging.getLogger(__name__).info(
+             "Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen))
+         )
+
+         self.img_format = cfg.INPUT.FORMAT
+         self.is_train = is_train
+
+     def __call__(self, dataset_dict):
+         """
+         Args:
+             dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
+         Returns:
+             dict: a format that builtin models in detectron2 accept
+         """
+         dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
+         image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
+         utils.check_image_size(dataset_dict, image)
+
+         if self.crop_gen is None:
+             image, transforms = T.apply_transform_gens(self.tfm_gens, image)
+         else:
+             if np.random.rand() > 0.5:
+                 image, transforms = T.apply_transform_gens(self.tfm_gens, image)
+             else:
+                 image, transforms = T.apply_transform_gens(
+                     self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image
+                 )
+
+         image_shape = image.shape[:2]  # h, w
+
+         # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
+         # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
+         # Therefore it's important to use torch.Tensor.
+         dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
+
+         if not self.is_train:
+             # USER: Modify this if you want to keep them for some reason.
+             dataset_dict.pop("annotations", None)
+             return dataset_dict
+
+         if "annotations" in dataset_dict:
+             # USER: Modify this if you want to keep them for some reason.
+             for anno in dataset_dict["annotations"]:
+                 if not self.mask_on:
+                     anno.pop("segmentation", None)
+                 anno.pop("keypoints", None)
+
+             # USER: Implement additional transformations if you have other types of data
+             annos = [
+                 utils.transform_instance_annotations(obj, transforms, image_shape)
+                 for obj in dataset_dict.pop("annotations")
+                 if obj.get("iscrowd", 0) == 0
+             ]
+             instances = utils.annotations_to_instances(annos, image_shape)
+             dataset_dict["instances"] = utils.filter_empty_instances(instances)
+         return dataset_dict
detic/data/datasets/cc.py ADDED
@@ -0,0 +1,23 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ import logging
+ import os
+
+ from detectron2.data.datasets.builtin_meta import _get_builtin_metadata
+ from detectron2.data.datasets.lvis import get_lvis_instances_meta
+ from .lvis_v1 import custom_register_lvis_instances
+
+ _CUSTOM_SPLITS = {
+     "cc3m_v1_val": ("cc3m/validation/", "cc3m/val_image_info.json"),
+     "cc3m_v1_train": ("cc3m/training/", "cc3m/train_image_info.json"),
+     "cc3m_v1_train_tags": ("cc3m/training/", "cc3m/train_image_info_tags.json"),
+
+ }
+
+ for key, (image_root, json_file) in _CUSTOM_SPLITS.items():
+     custom_register_lvis_instances(
+         key,
+         get_lvis_instances_meta('lvis_v1'),
+         os.path.join("datasets", json_file) if "://" not in json_file else json_file,
+         os.path.join("datasets", image_root),
+     )
+
detic/data/datasets/coco_zeroshot.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import os
3
+
4
+ from detectron2.data.datasets.register_coco import register_coco_instances
5
+ from detectron2.data.datasets.builtin_meta import _get_builtin_metadata
6
+ from .lvis_v1 import custom_register_lvis_instances
7
+
8
+ categories_seen = [
9
+ {'id': 1, 'name': 'person'},
10
+ {'id': 2, 'name': 'bicycle'},
11
+ {'id': 3, 'name': 'car'},
12
+ {'id': 4, 'name': 'motorcycle'},
13
+ {'id': 7, 'name': 'train'},
14
+ {'id': 8, 'name': 'truck'},
15
+ {'id': 9, 'name': 'boat'},
16
+ {'id': 15, 'name': 'bench'},
17
+ {'id': 16, 'name': 'bird'},
18
+ {'id': 19, 'name': 'horse'},
19
+ {'id': 20, 'name': 'sheep'},
20
+ {'id': 23, 'name': 'bear'},
21
+ {'id': 24, 'name': 'zebra'},
22
+ {'id': 25, 'name': 'giraffe'},
23
+ {'id': 27, 'name': 'backpack'},
24
+ {'id': 31, 'name': 'handbag'},
25
+ {'id': 33, 'name': 'suitcase'},
26
+ {'id': 34, 'name': 'frisbee'},
27
+ {'id': 35, 'name': 'skis'},
28
+ {'id': 38, 'name': 'kite'},
29
+     {'id': 42, 'name': 'surfboard'},
+     {'id': 44, 'name': 'bottle'},
+     {'id': 48, 'name': 'fork'},
+     {'id': 50, 'name': 'spoon'},
+     {'id': 51, 'name': 'bowl'},
+     {'id': 52, 'name': 'banana'},
+     {'id': 53, 'name': 'apple'},
+     {'id': 54, 'name': 'sandwich'},
+     {'id': 55, 'name': 'orange'},
+     {'id': 56, 'name': 'broccoli'},
+     {'id': 57, 'name': 'carrot'},
+     {'id': 59, 'name': 'pizza'},
+     {'id': 60, 'name': 'donut'},
+     {'id': 62, 'name': 'chair'},
+     {'id': 65, 'name': 'bed'},
+     {'id': 70, 'name': 'toilet'},
+     {'id': 72, 'name': 'tv'},
+     {'id': 73, 'name': 'laptop'},
+     {'id': 74, 'name': 'mouse'},
+     {'id': 75, 'name': 'remote'},
+     {'id': 78, 'name': 'microwave'},
+     {'id': 79, 'name': 'oven'},
+     {'id': 80, 'name': 'toaster'},
+     {'id': 82, 'name': 'refrigerator'},
+     {'id': 84, 'name': 'book'},
+     {'id': 85, 'name': 'clock'},
+     {'id': 86, 'name': 'vase'},
+     {'id': 90, 'name': 'toothbrush'},
+ ]
+
+ categories_unseen = [
+     {'id': 5, 'name': 'airplane'},
+     {'id': 6, 'name': 'bus'},
+     {'id': 17, 'name': 'cat'},
+     {'id': 18, 'name': 'dog'},
+     {'id': 21, 'name': 'cow'},
+     {'id': 22, 'name': 'elephant'},
+     {'id': 28, 'name': 'umbrella'},
+     {'id': 32, 'name': 'tie'},
+     {'id': 36, 'name': 'snowboard'},
+     {'id': 41, 'name': 'skateboard'},
+     {'id': 47, 'name': 'cup'},
+     {'id': 49, 'name': 'knife'},
+     {'id': 61, 'name': 'cake'},
+     {'id': 63, 'name': 'couch'},
+     {'id': 76, 'name': 'keyboard'},
+     {'id': 81, 'name': 'sink'},
+     {'id': 87, 'name': 'scissors'},
+ ]
+
+
+ def _get_metadata(cat):
+     if cat == 'all':
+         return _get_builtin_metadata('coco')
+     elif cat == 'seen':
+         id_to_name = {x['id']: x['name'] for x in categories_seen}
+     else:
+         assert cat == 'unseen'
+         id_to_name = {x['id']: x['name'] for x in categories_unseen}
+
+     thing_dataset_id_to_contiguous_id = {
+         x: i for i, x in enumerate(sorted(id_to_name))}
+     thing_classes = [id_to_name[k] for k in sorted(id_to_name)]
+     return {
+         "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
+         "thing_classes": thing_classes}
+
+
+ _PREDEFINED_SPLITS_COCO = {
+     "coco_zeroshot_train": ("coco/train2017", "coco/zero-shot/instances_train2017_seen_2.json", 'seen'),
+     "coco_zeroshot_val": ("coco/val2017", "coco/zero-shot/instances_val2017_unseen_2.json", 'unseen'),
+     "coco_not_zeroshot_val": ("coco/val2017", "coco/zero-shot/instances_val2017_seen_2.json", 'seen'),
+     "coco_generalized_zeroshot_val": ("coco/val2017", "coco/zero-shot/instances_val2017_all_2_oriorder.json", 'all'),
+     "coco_zeroshot_train_oriorder": ("coco/train2017", "coco/zero-shot/instances_train2017_seen_2_oriorder.json", 'all'),
+ }
+
+ for key, (image_root, json_file, cat) in _PREDEFINED_SPLITS_COCO.items():
+     register_coco_instances(
+         key,
+         _get_metadata(cat),
+         os.path.join("datasets", json_file) if "://" not in json_file else json_file,
+         os.path.join("datasets", image_root),
+     )
+
+ _CUSTOM_SPLITS_COCO = {
+     "cc3m_coco_train_tags": ("cc3m/training/", "cc3m/coco_train_image_info_tags.json"),
+     "coco_caption_train_tags": ("coco/train2017/", "coco/annotations/captions_train2017_tags_allcaps.json"),
+ }
+
+ for key, (image_root, json_file) in _CUSTOM_SPLITS_COCO.items():
+     custom_register_lvis_instances(
+         key,
+         _get_builtin_metadata('coco'),
+         os.path.join("datasets", json_file) if "://" not in json_file else json_file,
+         os.path.join("datasets", image_root),
+     )
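The loops above only register names with detectron2's catalogs; no file is read until a split is actually requested. A minimal sketch of consuming one of these splits, assuming detectron2 is installed, the jsons and images are laid out under datasets/, and this module has been imported so registration has run:

from detectron2.data import DatasetCatalog, MetadataCatalog

# Metadata is available immediately after registration.
meta = MetadataCatalog.get("coco_zeroshot_train")
print(len(meta.thing_classes))  # the 48 base ("seen") classes of the split

# DatasetCatalog.get() invokes the registered lambda, which parses the json.
dicts = DatasetCatalog.get("coco_zeroshot_train")
print(dicts[0]["file_name"], len(dicts[0]["annotations"]))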
detic/data/datasets/imagenet.py ADDED
@@ -0,0 +1,41 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ import logging
+ import os
+
+ from detectron2.data import DatasetCatalog, MetadataCatalog
+ from detectron2.data.datasets.lvis import get_lvis_instances_meta
+ from .lvis_v1 import custom_load_lvis_json, get_lvis_22k_meta
+
+
+ def custom_register_imagenet_instances(name, metadata, json_file, image_root):
+     """
+     Register an image-labeled (classification-style) split stored in the
+     LVIS json format, evaluated with the "imagenet" evaluator type.
+     """
+     DatasetCatalog.register(name, lambda: custom_load_lvis_json(
+         json_file, image_root, name))
+     MetadataCatalog.get(name).set(
+         json_file=json_file, image_root=image_root,
+         evaluator_type="imagenet", **metadata
+     )
+
+
+ _CUSTOM_SPLITS_IMAGENET = {
+     "imagenet_lvis_v1": ("imagenet/ImageNet-LVIS/", "imagenet/annotations/imagenet_lvis_image_info.json"),
+ }
+
+ for key, (image_root, json_file) in _CUSTOM_SPLITS_IMAGENET.items():
+     custom_register_imagenet_instances(
+         key,
+         get_lvis_instances_meta('lvis_v1'),
+         os.path.join("datasets", json_file) if "://" not in json_file else json_file,
+         os.path.join("datasets", image_root),
+     )
+
+
+ _CUSTOM_SPLITS_IMAGENET_22K = {
+     "imagenet_lvis-22k": ("imagenet/ImageNet-LVIS/", "imagenet/annotations/imagenet-22k_image_info_lvis-22k.json"),
+ }
+
+ for key, (image_root, json_file) in _CUSTOM_SPLITS_IMAGENET_22K.items():
+     custom_register_imagenet_instances(
+         key,
+         get_lvis_22k_meta(),
+         os.path.join("datasets", json_file) if "://" not in json_file else json_file,
+         os.path.join("datasets", image_root),
+     )
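Since custom_register_imagenet_instances is a plain function, additional image-labeled splits can be registered the same way. A sketch with hypothetical names (the json must follow the same image-info format as the splits above; "my_imagenet_subset" and both paths are placeholders, not files shipped with the repo):

from detectron2.data.datasets.lvis import get_lvis_instances_meta
from detic.data.datasets.imagenet import custom_register_imagenet_instances

# Hypothetical split name and paths, for illustration only.
custom_register_imagenet_instances(
    "my_imagenet_subset",
    get_lvis_instances_meta('lvis_v1'),
    "datasets/imagenet/annotations/my_subset_image_info.json",
    "datasets/imagenet/ImageNet-LVIS/",
)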
detic/data/datasets/lvis_22k_categories.py ADDED
The diff for this file is too large to render. See raw diff
detic/data/datasets/lvis_v1.py ADDED
@@ -0,0 +1,155 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ import logging
+ import os
+
+ from fvcore.common.timer import Timer
+ from detectron2.structures import BoxMode
+ from fvcore.common.file_io import PathManager
+ from detectron2.data import DatasetCatalog, MetadataCatalog
+ from detectron2.data.datasets.lvis import get_lvis_instances_meta
+
+ logger = logging.getLogger(__name__)
+
+ __all__ = ["custom_load_lvis_json", "custom_register_lvis_instances"]
+
+
+ def custom_register_lvis_instances(name, metadata, json_file, image_root):
+     """
+     Register a dataset stored in the LVIS json format, evaluated with the
+     "lvis" evaluator type.
+     """
+     DatasetCatalog.register(name, lambda: custom_load_lvis_json(
+         json_file, image_root, name))
+     MetadataCatalog.get(name).set(
+         json_file=json_file, image_root=image_root,
+         evaluator_type="lvis", **metadata
+     )
+
+
+ def custom_load_lvis_json(json_file, image_root, dataset_name=None):
+     '''
+     Load an LVIS-format json. Modifications over the detectron2 loader:
+         use `file_name` when it is present;
+         convert `neg_category_ids` to contiguous 0-based ids;
+         add `pos_category_ids` (image-level labels) when available.
+     '''
+     from lvis import LVIS
+
+     json_file = PathManager.get_local_path(json_file)
+
+     timer = Timer()
+     lvis_api = LVIS(json_file)
+     if timer.seconds() > 1:
+         logger.info("Loading {} takes {:.2f} seconds.".format(
+             json_file, timer.seconds()))
+
+     # Map (possibly sparse) dataset category ids to contiguous 0-based ids.
+     catid2contid = {x['id']: i for i, x in enumerate(
+         sorted(lvis_api.dataset['categories'], key=lambda x: x['id']))}
+     if len(lvis_api.dataset['categories']) == 1203:
+         # Standard LVIS v1: ids are already contiguous and 1-based.
+         for x in lvis_api.dataset['categories']:
+             assert catid2contid[x['id']] == x['id'] - 1
+     img_ids = sorted(lvis_api.imgs.keys())
+     imgs = lvis_api.load_imgs(img_ids)
+     anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
+
+     ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
+     assert len(set(ann_ids)) == len(ann_ids), \
+         "Annotation ids in '{}' are not unique".format(json_file)
+
+     imgs_anns = list(zip(imgs, anns))
+     logger.info("Loaded {} images in the LVIS v1 format from {}".format(
+         len(imgs_anns), json_file))
+
+     dataset_dicts = []
+
+     for (img_dict, anno_dict_list) in imgs_anns:
+         record = {}
+         if "file_name" in img_dict:
+             file_name = img_dict["file_name"]
+             if img_dict["file_name"].startswith("COCO"):
+                 # COCO-style names: keep only the trailing "<12-digit id>.jpg".
+                 file_name = file_name[-16:]
+             record["file_name"] = os.path.join(image_root, file_name)
+         elif 'coco_url' in img_dict:
+             # e.g., http://images.cocodataset.org/train2017/000000391895.jpg
+             file_name = img_dict["coco_url"][30:]
+             record["file_name"] = os.path.join(image_root, file_name)
+         elif 'tar_index' in img_dict:
+             record['tar_index'] = img_dict['tar_index']
+
+         record["height"] = img_dict["height"]
+         record["width"] = img_dict["width"]
+         record["not_exhaustive_category_ids"] = img_dict.get(
+             "not_exhaustive_category_ids", [])
+         record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
+         # NOTE: modified by Xingyi: convert to 0-based
+         record["neg_category_ids"] = [
+             catid2contid[x] for x in record["neg_category_ids"]]
+         if 'pos_category_ids' in img_dict:
+             record['pos_category_ids'] = [
+                 catid2contid[x] for x in img_dict.get("pos_category_ids", [])]
+         if 'captions' in img_dict:
+             record['captions'] = img_dict['captions']
+         if 'caption_features' in img_dict:
+             record['caption_features'] = img_dict['caption_features']
+         image_id = record["image_id"] = img_dict["id"]
+
+         objs = []
+         for anno in anno_dict_list:
+             assert anno["image_id"] == image_id
+             if anno.get('iscrowd', 0) > 0:
+                 continue
+             obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
+             obj["category_id"] = catid2contid[anno['category_id']]
+             if 'segmentation' in anno:
+                 segm = anno["segmentation"]
+                 valid_segm = [poly for poly in segm
+                               if len(poly) % 2 == 0 and len(poly) >= 6]
+                 if len(segm) != len(valid_segm):
+                     logger.warning(
+                         'Annotation contains an invalid polygon with < 3 points')
+                 assert len(segm) > 0
+                 obj["segmentation"] = segm
+             objs.append(obj)
+         record["annotations"] = objs
+         dataset_dicts.append(record)
+
+     return dataset_dicts
+
+
+ _CUSTOM_SPLITS_LVIS = {
+     "lvis_v1_train+coco": ("coco/", "lvis/lvis_v1_train+coco_mask.json"),
+     "lvis_v1_train_norare": ("coco/", "lvis/lvis_v1_train_norare.json"),
+ }
+
+ for key, (image_root, json_file) in _CUSTOM_SPLITS_LVIS.items():
+     custom_register_lvis_instances(
+         key,
+         get_lvis_instances_meta(key),
+         os.path.join("datasets", json_file) if "://" not in json_file else json_file,
+         os.path.join("datasets", image_root),
+     )
+
+
+ def get_lvis_22k_meta():
+     from .lvis_22k_categories import CATEGORIES
+     cat_ids = [k["id"] for k in CATEGORIES]
+     assert min(cat_ids) == 1 and max(cat_ids) == len(
+         cat_ids
+     ), "Category ids are not in [1, #categories], as expected"
+     # Ensure that the category list is sorted by id
+     lvis_categories = sorted(CATEGORIES, key=lambda x: x["id"])
+     thing_classes = [k["name"] for k in lvis_categories]
+     meta = {"thing_classes": thing_classes}
+     return meta
+
+
+ _CUSTOM_SPLITS_LVIS_22K = {
+     "lvis_v1_train_22k": ("coco/", "lvis/lvis_v1_train_lvis-22k.json"),
+ }
+
+ for key, (image_root, json_file) in _CUSTOM_SPLITS_LVIS_22K.items():
+     custom_register_lvis_instances(
+         key,
+         get_lvis_22k_meta(),
+         os.path.join("datasets", json_file) if "://" not in json_file else json_file,
+         os.path.join("datasets", image_root),
+     )
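The id remapping in custom_load_lvis_json is worth pinning down: dataset category ids, which may be sparse, are mapped to contiguous 0-based ids by sort order, and per-image neg_category_ids / pos_category_ids are translated through the same table. A toy, self-contained illustration of that logic:

# Sparse dataset ids 3, 7, 12 become contiguous ids 0, 1, 2.
categories = [{'id': 7}, {'id': 3}, {'id': 12}]
catid2contid = {x['id']: i for i, x in enumerate(
    sorted(categories, key=lambda x: x['id']))}
assert catid2contid == {3: 0, 7: 1, 12: 2}

# Image-level labels stored with dataset ids are translated the same way.
neg_category_ids = [7, 12]
assert [catid2contid[x] for x in neg_category_ids] == [1, 2]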
detic/data/datasets/objects365.py ADDED
@@ -0,0 +1,770 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ from detectron2.data.datasets.register_coco import register_coco_instances
+ import os
+
+ '''
+ The official Objects365 v2 category names contain typos. `categories_v2_fix`
+ below is a manual fix: it matches the official list except for the entries
+ noted here, whose official (misspelled) names were:
+     38: 'Moniter/TV'             53: 'Bakset'
+     99: 'Dinning Table'         123: 'Tea pot'
+    203: 'Extention Cord'        229: 'Hotair ballon'
+    290: 'Hamimelon'             292: 'Mushroon'
+    300: 'Tape Measur/ Ruler'    308: 'Campel'
+    314: 'Meat ball'             322: 'Buttefly'
+    355: 'Table Teniis paddle'
+ '''
377
+ categories_v2_fix = [
378
+ {'id': 1, 'name': 'Person'},
379
+ {'id': 2, 'name': 'Sneakers'},
380
+ {'id': 3, 'name': 'Chair'},
381
+ {'id': 4, 'name': 'Other Shoes'},
382
+ {'id': 5, 'name': 'Hat'},
383
+ {'id': 6, 'name': 'Car'},
384
+ {'id': 7, 'name': 'Lamp'},
385
+ {'id': 8, 'name': 'Glasses'},
386
+ {'id': 9, 'name': 'Bottle'},
387
+ {'id': 10, 'name': 'Desk'},
388
+ {'id': 11, 'name': 'Cup'},
389
+ {'id': 12, 'name': 'Street Lights'},
390
+ {'id': 13, 'name': 'Cabinet/shelf'},
391
+ {'id': 14, 'name': 'Handbag/Satchel'},
392
+ {'id': 15, 'name': 'Bracelet'},
393
+ {'id': 16, 'name': 'Plate'},
394
+ {'id': 17, 'name': 'Picture/Frame'},
395
+ {'id': 18, 'name': 'Helmet'},
396
+ {'id': 19, 'name': 'Book'},
397
+ {'id': 20, 'name': 'Gloves'},
398
+ {'id': 21, 'name': 'Storage box'},
399
+ {'id': 22, 'name': 'Boat'},
400
+ {'id': 23, 'name': 'Leather Shoes'},
401
+ {'id': 24, 'name': 'Flower'},
402
+ {'id': 25, 'name': 'Bench'},
403
+ {'id': 26, 'name': 'Potted Plant'},
404
+ {'id': 27, 'name': 'Bowl/Basin'},
405
+ {'id': 28, 'name': 'Flag'},
406
+ {'id': 29, 'name': 'Pillow'},
407
+ {'id': 30, 'name': 'Boots'},
408
+ {'id': 31, 'name': 'Vase'},
409
+ {'id': 32, 'name': 'Microphone'},
410
+ {'id': 33, 'name': 'Necklace'},
411
+ {'id': 34, 'name': 'Ring'},
412
+ {'id': 35, 'name': 'SUV'},
413
+ {'id': 36, 'name': 'Wine Glass'},
414
+ {'id': 37, 'name': 'Belt'},
415
+ {'id': 38, 'name': 'Monitor/TV'},
416
+ {'id': 39, 'name': 'Backpack'},
417
+ {'id': 40, 'name': 'Umbrella'},
418
+ {'id': 41, 'name': 'Traffic Light'},
419
+ {'id': 42, 'name': 'Speaker'},
420
+ {'id': 43, 'name': 'Watch'},
421
+ {'id': 44, 'name': 'Tie'},
422
+ {'id': 45, 'name': 'Trash bin Can'},
423
+ {'id': 46, 'name': 'Slippers'},
424
+ {'id': 47, 'name': 'Bicycle'},
425
+ {'id': 48, 'name': 'Stool'},
426
+ {'id': 49, 'name': 'Barrel/bucket'},
427
+ {'id': 50, 'name': 'Van'},
428
+ {'id': 51, 'name': 'Couch'},
429
+ {'id': 52, 'name': 'Sandals'},
430
+ {'id': 53, 'name': 'Basket'},
431
+ {'id': 54, 'name': 'Drum'},
432
+ {'id': 55, 'name': 'Pen/Pencil'},
433
+ {'id': 56, 'name': 'Bus'},
434
+ {'id': 57, 'name': 'Wild Bird'},
435
+ {'id': 58, 'name': 'High Heels'},
436
+ {'id': 59, 'name': 'Motorcycle'},
437
+ {'id': 60, 'name': 'Guitar'},
438
+ {'id': 61, 'name': 'Carpet'},
439
+ {'id': 62, 'name': 'Cell Phone'},
440
+ {'id': 63, 'name': 'Bread'},
441
+ {'id': 64, 'name': 'Camera'},
442
+ {'id': 65, 'name': 'Canned'},
443
+ {'id': 66, 'name': 'Truck'},
444
+ {'id': 67, 'name': 'Traffic cone'},
445
+ {'id': 68, 'name': 'Cymbal'},
446
+ {'id': 69, 'name': 'Lifesaver'},
447
+ {'id': 70, 'name': 'Towel'},
448
+ {'id': 71, 'name': 'Stuffed Toy'},
449
+ {'id': 72, 'name': 'Candle'},
450
+ {'id': 73, 'name': 'Sailboat'},
451
+ {'id': 74, 'name': 'Laptop'},
452
+ {'id': 75, 'name': 'Awning'},
453
+ {'id': 76, 'name': 'Bed'},
454
+ {'id': 77, 'name': 'Faucet'},
455
+ {'id': 78, 'name': 'Tent'},
456
+ {'id': 79, 'name': 'Horse'},
457
+ {'id': 80, 'name': 'Mirror'},
458
+ {'id': 81, 'name': 'Power outlet'},
459
+ {'id': 82, 'name': 'Sink'},
460
+ {'id': 83, 'name': 'Apple'},
461
+ {'id': 84, 'name': 'Air Conditioner'},
462
+ {'id': 85, 'name': 'Knife'},
463
+ {'id': 86, 'name': 'Hockey Stick'},
464
+ {'id': 87, 'name': 'Paddle'},
465
+ {'id': 88, 'name': 'Pickup Truck'},
466
+ {'id': 89, 'name': 'Fork'},
467
+ {'id': 90, 'name': 'Traffic Sign'},
468
+ {'id': 91, 'name': 'Ballon'},
469
+ {'id': 92, 'name': 'Tripod'},
470
+ {'id': 93, 'name': 'Dog'},
471
+ {'id': 94, 'name': 'Spoon'},
472
+ {'id': 95, 'name': 'Clock'},
473
+ {'id': 96, 'name': 'Pot'},
474
+ {'id': 97, 'name': 'Cow'},
475
+ {'id': 98, 'name': 'Cake'},
476
+ {'id': 99, 'name': 'Dining Table'},
477
+ {'id': 100, 'name': 'Sheep'},
478
+ {'id': 101, 'name': 'Hanger'},
479
+ {'id': 102, 'name': 'Blackboard/Whiteboard'},
480
+ {'id': 103, 'name': 'Napkin'},
481
+ {'id': 104, 'name': 'Other Fish'},
482
+ {'id': 105, 'name': 'Orange/Tangerine'},
483
+ {'id': 106, 'name': 'Toiletry'},
484
+ {'id': 107, 'name': 'Keyboard'},
485
+ {'id': 108, 'name': 'Tomato'},
486
+ {'id': 109, 'name': 'Lantern'},
487
+ {'id': 110, 'name': 'Machinery Vehicle'},
488
+ {'id': 111, 'name': 'Fan'},
489
+ {'id': 112, 'name': 'Green Vegetables'},
490
+ {'id': 113, 'name': 'Banana'},
491
+ {'id': 114, 'name': 'Baseball Glove'},
492
+ {'id': 115, 'name': 'Airplane'},
493
+ {'id': 116, 'name': 'Mouse'},
494
+ {'id': 117, 'name': 'Train'},
495
+ {'id': 118, 'name': 'Pumpkin'},
496
+ {'id': 119, 'name': 'Soccer'},
497
+ {'id': 120, 'name': 'Skiboard'},
498
+ {'id': 121, 'name': 'Luggage'},
499
+ {'id': 122, 'name': 'Nightstand'},
500
+ {'id': 123, 'name': 'Teapot'},
501
+ {'id': 124, 'name': 'Telephone'},
502
+ {'id': 125, 'name': 'Trolley'},
503
+ {'id': 126, 'name': 'Head Phone'},
504
+ {'id': 127, 'name': 'Sports Car'},
505
+ {'id': 128, 'name': 'Stop Sign'},
506
+ {'id': 129, 'name': 'Dessert'},
507
+ {'id': 130, 'name': 'Scooter'},
508
+ {'id': 131, 'name': 'Stroller'},
509
+ {'id': 132, 'name': 'Crane'},
510
+ {'id': 133, 'name': 'Remote'},
511
+ {'id': 134, 'name': 'Refrigerator'},
512
+ {'id': 135, 'name': 'Oven'},
513
+ {'id': 136, 'name': 'Lemon'},
514
+ {'id': 137, 'name': 'Duck'},
515
+ {'id': 138, 'name': 'Baseball Bat'},
516
+ {'id': 139, 'name': 'Surveillance Camera'},
517
+ {'id': 140, 'name': 'Cat'},
518
+ {'id': 141, 'name': 'Jug'},
519
+ {'id': 142, 'name': 'Broccoli'},
520
+ {'id': 143, 'name': 'Piano'},
521
+ {'id': 144, 'name': 'Pizza'},
522
+ {'id': 145, 'name': 'Elephant'},
523
+ {'id': 146, 'name': 'Skateboard'},
524
+ {'id': 147, 'name': 'Surfboard'},
525
+ {'id': 148, 'name': 'Gun'},
526
+ {'id': 149, 'name': 'Skating and Skiing shoes'},
527
+ {'id': 150, 'name': 'Gas stove'},
528
+ {'id': 151, 'name': 'Donut'},
529
+ {'id': 152, 'name': 'Bow Tie'},
530
+ {'id': 153, 'name': 'Carrot'},
531
+ {'id': 154, 'name': 'Toilet'},
532
+ {'id': 155, 'name': 'Kite'},
533
+ {'id': 156, 'name': 'Strawberry'},
534
+ {'id': 157, 'name': 'Other Balls'},
535
+ {'id': 158, 'name': 'Shovel'},
536
+ {'id': 159, 'name': 'Pepper'},
537
+ {'id': 160, 'name': 'Computer Box'},
538
+ {'id': 161, 'name': 'Toilet Paper'},
539
+ {'id': 162, 'name': 'Cleaning Products'},
540
+ {'id': 163, 'name': 'Chopsticks'},
541
+ {'id': 164, 'name': 'Microwave'},
542
+ {'id': 165, 'name': 'Pigeon'},
543
+ {'id': 166, 'name': 'Baseball'},
544
+ {'id': 167, 'name': 'Cutting/chopping Board'},
545
+ {'id': 168, 'name': 'Coffee Table'},
546
+ {'id': 169, 'name': 'Side Table'},
547
+ {'id': 170, 'name': 'Scissors'},
548
+ {'id': 171, 'name': 'Marker'},
549
+ {'id': 172, 'name': 'Pie'},
550
+ {'id': 173, 'name': 'Ladder'},
551
+ {'id': 174, 'name': 'Snowboard'},
552
+ {'id': 175, 'name': 'Cookies'},
553
+ {'id': 176, 'name': 'Radiator'},
554
+ {'id': 177, 'name': 'Fire Hydrant'},
555
+ {'id': 178, 'name': 'Basketball'},
556
+ {'id': 179, 'name': 'Zebra'},
557
+ {'id': 180, 'name': 'Grape'},
558
+ {'id': 181, 'name': 'Giraffe'},
559
+ {'id': 182, 'name': 'Potato'},
560
+ {'id': 183, 'name': 'Sausage'},
561
+ {'id': 184, 'name': 'Tricycle'},
562
+ {'id': 185, 'name': 'Violin'},
563
+ {'id': 186, 'name': 'Egg'},
564
+ {'id': 187, 'name': 'Fire Extinguisher'},
565
+ {'id': 188, 'name': 'Candy'},
566
+ {'id': 189, 'name': 'Fire Truck'},
567
+ {'id': 190, 'name': 'Billards'},
568
+ {'id': 191, 'name': 'Converter'},
569
+ {'id': 192, 'name': 'Bathtub'},
570
+ {'id': 193, 'name': 'Wheelchair'},
571
+ {'id': 194, 'name': 'Golf Club'},
572
+ {'id': 195, 'name': 'Briefcase'},
573
+ {'id': 196, 'name': 'Cucumber'},
574
+ {'id': 197, 'name': 'Cigar/Cigarette '},
575
+ {'id': 198, 'name': 'Paint Brush'},
576
+ {'id': 199, 'name': 'Pear'},
577
+ {'id': 200, 'name': 'Heavy Truck'},
578
+ {'id': 201, 'name': 'Hamburger'},
579
+ {'id': 202, 'name': 'Extractor'},
580
+ {'id': 203, 'name': 'Extension Cord'},
581
+ {'id': 204, 'name': 'Tong'},
582
+ {'id': 205, 'name': 'Tennis Racket'},
583
+ {'id': 206, 'name': 'Folder'},
584
+ {'id': 207, 'name': 'American Football'},
585
+ {'id': 208, 'name': 'earphone'},
586
+ {'id': 209, 'name': 'Mask'},
587
+ {'id': 210, 'name': 'Kettle'},
588
+ {'id': 211, 'name': 'Tennis'},
589
+ {'id': 212, 'name': 'Ship'},
590
+ {'id': 213, 'name': 'Swing'},
591
+ {'id': 214, 'name': 'Coffee Machine'},
592
+ {'id': 215, 'name': 'Slide'},
593
+ {'id': 216, 'name': 'Carriage'},
594
+ {'id': 217, 'name': 'Onion'},
595
+ {'id': 218, 'name': 'Green beans'},
596
+ {'id': 219, 'name': 'Projector'},
597
+ {'id': 220, 'name': 'Frisbee'},
598
+ {'id': 221, 'name': 'Washing Machine/Drying Machine'},
599
+ {'id': 222, 'name': 'Chicken'},
600
+ {'id': 223, 'name': 'Printer'},
601
+ {'id': 224, 'name': 'Watermelon'},
602
+ {'id': 225, 'name': 'Saxophone'},
603
+ {'id': 226, 'name': 'Tissue'},
604
+ {'id': 227, 'name': 'Toothbrush'},
605
+ {'id': 228, 'name': 'Ice cream'},
606
+ {'id': 229, 'name': 'Hot air balloon'},
607
+ {'id': 230, 'name': 'Cello'},
608
+ {'id': 231, 'name': 'French Fries'},
609
+ {'id': 232, 'name': 'Scale'},
610
+ {'id': 233, 'name': 'Trophy'},
611
+ {'id': 234, 'name': 'Cabbage'},
612
+ {'id': 235, 'name': 'Hot dog'},
613
+ {'id': 236, 'name': 'Blender'},
614
+ {'id': 237, 'name': 'Peach'},
615
+ {'id': 238, 'name': 'Rice'},
616
+ {'id': 239, 'name': 'Wallet/Purse'},
617
+ {'id': 240, 'name': 'Volleyball'},
618
+ {'id': 241, 'name': 'Deer'},
619
+ {'id': 242, 'name': 'Goose'},
620
+ {'id': 243, 'name': 'Tape'},
621
+ {'id': 244, 'name': 'Tablet'},
622
+ {'id': 245, 'name': 'Cosmetics'},
623
+ {'id': 246, 'name': 'Trumpet'},
624
+ {'id': 247, 'name': 'Pineapple'},
625
+ {'id': 248, 'name': 'Golf Ball'},
626
+ {'id': 249, 'name': 'Ambulance'},
627
+ {'id': 250, 'name': 'Parking meter'},
628
+ {'id': 251, 'name': 'Mango'},
629
+ {'id': 252, 'name': 'Key'},
630
+ {'id': 253, 'name': 'Hurdle'},
631
+ {'id': 254, 'name': 'Fishing Rod'},
632
+ {'id': 255, 'name': 'Medal'},
633
+ {'id': 256, 'name': 'Flute'},
634
+ {'id': 257, 'name': 'Brush'},
635
+ {'id': 258, 'name': 'Penguin'},
636
+ {'id': 259, 'name': 'Megaphone'},
637
+ {'id': 260, 'name': 'Corn'},
638
+ {'id': 261, 'name': 'Lettuce'},
639
+ {'id': 262, 'name': 'Garlic'},
640
+ {'id': 263, 'name': 'Swan'},
641
+ {'id': 264, 'name': 'Helicopter'},
642
+ {'id': 265, 'name': 'Green Onion'},
643
+ {'id': 266, 'name': 'Sandwich'},
644
+ {'id': 267, 'name': 'Nuts'},
645
+ {'id': 268, 'name': 'Speed Limit Sign'},
646
+ {'id': 269, 'name': 'Induction Cooker'},
647
+ {'id': 270, 'name': 'Broom'},
648
+ {'id': 271, 'name': 'Trombone'},
649
+ {'id': 272, 'name': 'Plum'},
650
+ {'id': 273, 'name': 'Rickshaw'},
651
+ {'id': 274, 'name': 'Goldfish'},
652
+ {'id': 275, 'name': 'Kiwi fruit'},
653
+ {'id': 276, 'name': 'Router/modem'},
654
+ {'id': 277, 'name': 'Poker Card'},
655
+ {'id': 278, 'name': 'Toaster'},
656
+ {'id': 279, 'name': 'Shrimp'},
657
+ {'id': 280, 'name': 'Sushi'},
658
+ {'id': 281, 'name': 'Cheese'},
659
+ {'id': 282, 'name': 'Notepaper'},
660
+ {'id': 283, 'name': 'Cherry'},
661
+ {'id': 284, 'name': 'Pliers'},
662
+ {'id': 285, 'name': 'CD'},
663
+ {'id': 286, 'name': 'Pasta'},
664
+ {'id': 287, 'name': 'Hammer'},
665
+ {'id': 288, 'name': 'Cue'},
666
+ {'id': 289, 'name': 'Avocado'},
667
+ {'id': 290, 'name': 'Hami melon'},
668
+ {'id': 291, 'name': 'Flask'},
669
+ {'id': 292, 'name': 'Mushroom'},
670
+ {'id': 293, 'name': 'Screwdriver'},
671
+ {'id': 294, 'name': 'Soap'},
672
+ {'id': 295, 'name': 'Recorder'},
673
+ {'id': 296, 'name': 'Bear'},
674
+ {'id': 297, 'name': 'Eggplant'},
675
+ {'id': 298, 'name': 'Board Eraser'},
676
+ {'id': 299, 'name': 'Coconut'},
677
+ {'id': 300, 'name': 'Tape Measure/ Ruler'},
678
+ {'id': 301, 'name': 'Pig'},
679
+ {'id': 302, 'name': 'Showerhead'},
680
+ {'id': 303, 'name': 'Globe'},
681
+ {'id': 304, 'name': 'Chips'},
682
+ {'id': 305, 'name': 'Steak'},
683
+ {'id': 306, 'name': 'Crosswalk Sign'},
684
+ {'id': 307, 'name': 'Stapler'},
685
+ {'id': 308, 'name': 'Camel'},
686
+ {'id': 309, 'name': 'Formula 1 '},
687
+ {'id': 310, 'name': 'Pomegranate'},
688
+ {'id': 311, 'name': 'Dishwasher'},
689
+ {'id': 312, 'name': 'Crab'},
690
+ {'id': 313, 'name': 'Hoverboard'},
691
+ {'id': 314, 'name': 'Meatball'},
692
+ {'id': 315, 'name': 'Rice Cooker'},
693
+ {'id': 316, 'name': 'Tuba'},
694
+ {'id': 317, 'name': 'Calculator'},
695
+ {'id': 318, 'name': 'Papaya'},
696
+ {'id': 319, 'name': 'Antelope'},
697
+ {'id': 320, 'name': 'Parrot'},
698
+ {'id': 321, 'name': 'Seal'},
699
+ {'id': 322, 'name': 'Butterfly'},
700
+ {'id': 323, 'name': 'Dumbbell'},
701
+ {'id': 324, 'name': 'Donkey'},
702
+ {'id': 325, 'name': 'Lion'},
703
+ {'id': 326, 'name': 'Urinal'},
704
+ {'id': 327, 'name': 'Dolphin'},
705
+ {'id': 328, 'name': 'Electric Drill'},
706
+ {'id': 329, 'name': 'Hair Dryer'},
707
+ {'id': 330, 'name': 'Egg tart'},
708
+ {'id': 331, 'name': 'Jellyfish'},
709
+ {'id': 332, 'name': 'Treadmill'},
710
+ {'id': 333, 'name': 'Lighter'},
711
+ {'id': 334, 'name': 'Grapefruit'},
712
+ {'id': 335, 'name': 'Game board'},
713
+ {'id': 336, 'name': 'Mop'},
714
+ {'id': 337, 'name': 'Radish'},
715
+ {'id': 338, 'name': 'Baozi'},
716
+ {'id': 339, 'name': 'Target'},
717
+ {'id': 340, 'name': 'French'},
718
+ {'id': 341, 'name': 'Spring Rolls'},
719
+ {'id': 342, 'name': 'Monkey'},
720
+ {'id': 343, 'name': 'Rabbit'},
721
+ {'id': 344, 'name': 'Pencil Case'},
722
+ {'id': 345, 'name': 'Yak'},
723
+ {'id': 346, 'name': 'Red Cabbage'},
724
+ {'id': 347, 'name': 'Binoculars'},
725
+ {'id': 348, 'name': 'Asparagus'},
726
+ {'id': 349, 'name': 'Barbell'},
727
+ {'id': 350, 'name': 'Scallop'},
728
+ {'id': 351, 'name': 'Noddles'},
729
+ {'id': 352, 'name': 'Comb'},
730
+ {'id': 353, 'name': 'Dumpling'},
731
+ {'id': 354, 'name': 'Oyster'},
732
+ {'id': 355, 'name': 'Table Tennis paddle'},
733
+ {'id': 356, 'name': 'Cosmetics Brush/Eyeliner Pencil'},
734
+ {'id': 357, 'name': 'Chainsaw'},
735
+ {'id': 358, 'name': 'Eraser'},
736
+ {'id': 359, 'name': 'Lobster'},
737
+ {'id': 360, 'name': 'Durian'},
738
+ {'id': 361, 'name': 'Okra'},
739
+ {'id': 362, 'name': 'Lipstick'},
740
+ {'id': 363, 'name': 'Cosmetics Mirror'},
741
+ {'id': 364, 'name': 'Curling'},
742
+ {'id': 365, 'name': 'Table Tennis '},
743
+ ]
744
+
745
+
746
+ def _get_builtin_metadata():
747
+ id_to_name = {x['id']: x['name'] for x in categories_v2_fix}
748
+ thing_dataset_id_to_contiguous_id = {
749
+ x['id']: i for i, x in enumerate(
750
+ sorted(categories_v2_fix, key=lambda x: x['id']))}
751
+ thing_classes = [id_to_name[k] for k in sorted(id_to_name)]
752
+ return {
753
+ "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
754
+ "thing_classes": thing_classes}
755
+
756
+
757
+ _PREDEFINED_SPLITS_OBJECTS365 = {
758
+ "objects365_v2_train": ("objects365/train", "objects365/annotations/zhiyuan_objv2_train_fixname_fixmiss.json"),
759
+ # 80,000 images, 1,240,587 annotations
760
+ "objects365_v2_val": ("objects365/val", "objects365/annotations/zhiyuan_objv2_val_fixname.json"),
761
+ "objects365_v2_val_rare": ("objects365/val", "objects365/annotations/zhiyuan_objv2_val_fixname_rare.json"),
762
+ }
763
+
764
+ for key, (image_root, json_file) in _PREDEFINED_SPLITS_OBJECTS365.items():
765
+ register_coco_instances(
766
+ key,
767
+ _get_builtin_metadata(),
768
+ os.path.join("datasets", json_file) if "://" not in json_file else json_file,
769
+ os.path.join("datasets", image_root),
770
+ )
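Because Objects365 v2 ids are already contiguous in [1, 365], the mapping produced by _get_builtin_metadata() reduces to id -> id - 1, and thing_classes is simply the fixed name list in id order. A quick sanity check, assuming this module has been imported so the splits above are registered:

from detectron2.data import MetadataCatalog

meta = MetadataCatalog.get("objects365_v2_val")
assert len(meta.thing_classes) == 365
assert meta.thing_dataset_id_to_contiguous_id[1] == 0
assert meta.thing_dataset_id_to_contiguous_id[365] == 364
assert meta.thing_classes[37] == 'Monitor/TV'  # fixed name for dataset id 38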