JustinLin610 committed
Commit 1bb90fa
Parent: 9f38a15
.idea/.gitignore ADDED
@@ -0,0 +1,3 @@
+ # Default files to ignore
+ /shelf/
+ /workspace.xml
.idea/ImageBind_zeroshot_demo.iml ADDED
@@ -0,0 +1,12 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <module type="PYTHON_MODULE" version="4">
+   <component name="NewModuleRootManager">
+     <content url="file://$MODULE_DIR$" />
+     <orderEntry type="inheritedJdk" />
+     <orderEntry type="sourceFolder" forTests="false" />
+   </component>
+   <component name="PyDocumentationSettings">
+     <option name="format" value="PLAIN" />
+     <option name="myDocStringFormat" value="Plain" />
+   </component>
+ </module>
.idea/inspectionProfiles/Project_Default.xml ADDED
@@ -0,0 +1,24 @@
+ <component name="InspectionProjectProfileManager">
+   <profile version="1.0">
+     <option name="myName" value="Project Default" />
+     <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
+       <option name="ignoredPackages">
+         <value>
+           <list size="11">
+             <item index="0" class="java.lang.String" itemvalue="iopath" />
+             <item index="1" class="java.lang.String" itemvalue="oss2" />
+             <item index="2" class="java.lang.String" itemvalue="efficientnet_pytorch" />
+             <item index="3" class="java.lang.String" itemvalue="pytorch_lightning" />
+             <item index="4" class="java.lang.String" itemvalue="einops" />
+             <item index="5" class="java.lang.String" itemvalue="timm" />
+             <item index="6" class="java.lang.String" itemvalue="numpy" />
+             <item index="7" class="java.lang.String" itemvalue="pycocotools" />
+             <item index="8" class="java.lang.String" itemvalue="wandb" />
+             <item index="9" class="java.lang.String" itemvalue="ftfy" />
+             <item index="10" class="java.lang.String" itemvalue="tensorboardX" />
+           </list>
+         </value>
+       </option>
+     </inspection_tool>
+   </profile>
+ </component>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
+ <component name="InspectionProjectProfileManager">
+   <settings>
+     <option name="USE_PROJECT_PROFILE" value="false" />
+     <version value="1.0" />
+   </settings>
+ </component>
.idea/misc.xml ADDED
@@ -0,0 +1,4 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.7 (py37)" project-jdk-type="Python SDK" />
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectModuleManager">
+     <modules>
+       <module fileurl="file://$PROJECT_DIR$/.idea/ImageBind_zeroshot_demo.iml" filepath="$PROJECT_DIR$/.idea/ImageBind_zeroshot_demo.iml" />
+     </modules>
+   </component>
+ </project>
.idea/vcs.xml ADDED
@@ -0,0 +1,6 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="VcsDirectoryMappings">
+     <mapping directory="$PROJECT_DIR$" vcs="Git" />
+   </component>
+ </project>
CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,80 @@
+ # Code of Conduct
+
+ ## Our Pledge
+
+ In the interest of fostering an open and welcoming environment, we as
+ contributors and maintainers pledge to make participation in our project and
+ our community a harassment-free experience for everyone, regardless of age, body
+ size, disability, ethnicity, sex characteristics, gender identity and expression,
+ level of experience, education, socio-economic status, nationality, personal
+ appearance, race, religion, or sexual identity and orientation.
+
+ ## Our Standards
+
+ Examples of behavior that contributes to creating a positive environment
+ include:
+
+ * Using welcoming and inclusive language
+ * Being respectful of differing viewpoints and experiences
+ * Gracefully accepting constructive criticism
+ * Focusing on what is best for the community
+ * Showing empathy towards other community members
+
+ Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery and unwelcome sexual attention or
+   advances
+ * Trolling, insulting/derogatory comments, and personal or political attacks
+ * Public or private harassment
+ * Publishing others' private information, such as a physical or electronic
+   address, without explicit permission
+ * Other conduct which could reasonably be considered inappropriate in a
+   professional setting
+
+ ## Our Responsibilities
+
+ Project maintainers are responsible for clarifying the standards of acceptable
+ behavior and are expected to take appropriate and fair corrective action in
+ response to any instances of unacceptable behavior.
+
+ Project maintainers have the right and responsibility to remove, edit, or
+ reject comments, commits, code, wiki edits, issues, and other contributions
+ that are not aligned to this Code of Conduct, or to ban temporarily or
+ permanently any contributor for other behaviors that they deem inappropriate,
+ threatening, offensive, or harmful.
+
+ ## Scope
+
+ This Code of Conduct applies within all project spaces, and it also applies when
+ an individual is representing the project or its community in public spaces.
+ Examples of representing a project or community include using an official
+ project e-mail address, posting via an official social media account, or acting
+ as an appointed representative at an online or offline event. Representation of
+ a project may be further defined and clarified by project maintainers.
+
+ This Code of Conduct also applies outside the project spaces when there is a
+ reasonable belief that an individual's behavior may have a negative impact on
+ the project or its community.
+
+ ## Enforcement
+
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be
+ reported by contacting the project team at <opensource-conduct@fb.com>. All
+ complaints will be reviewed and investigated and will result in a response that
+ is deemed necessary and appropriate to the circumstances. The project team is
+ obligated to maintain confidentiality with regard to the reporter of an incident.
+ Further details of specific enforcement policies may be posted separately.
+
+ Project maintainers who do not follow or enforce the Code of Conduct in good
+ faith may face temporary or permanent repercussions as determined by other
+ members of the project's leadership.
+
+ ## Attribution
+
+ This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+ available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+ [homepage]: https://www.contributor-covenant.org
+
+ For answers to common questions about this code of conduct, see
+ https://www.contributor-covenant.org/faq
CONTRIBUTING.md ADDED
@@ -0,0 +1,31 @@
+ # Contributing to ImageBind
+ We want to make contributing to this project as easy and transparent as
+ possible.
+
+ ## Pull Requests
+ We actively welcome your pull requests.
+
+ 1. Fork the repo and create your branch from `main`.
+ 2. If you've added code that should be tested, add tests.
+ 3. If you've changed APIs, update the documentation.
+ 4. Ensure the test suite passes.
+ 5. Make sure your code lints.
+ 6. If you haven't already, complete the Contributor License Agreement ("CLA").
+
+ ## Contributor License Agreement ("CLA")
+ In order to accept your pull request, we need you to submit a CLA. You only need
+ to do this once to work on any of Meta's open source projects.
+
+ Complete your CLA here: <https://code.facebook.com/cla>
+
+ ## Issues
+ We use GitHub issues to track public bugs. Please ensure your description is
+ clear and has sufficient instructions to be able to reproduce the issue.
+
+ Meta has a [bounty program](https://www.facebook.com/whitehat/) for the safe
+ disclosure of security bugs. In those cases, please go through the process
+ outlined on that page and do not file a public issue.
+
+ ## License
+ By contributing to ImageBind, you agree that your contributions will be licensed
+ under the [LICENSE](LICENSE) file in the root directory of this source tree.
LICENSE ADDED
@@ -0,0 +1,437 @@
+ Attribution-NonCommercial-ShareAlike 4.0 International
+
+ =======================================================================
+
+ Creative Commons Corporation ("Creative Commons") is not a law firm and
+ does not provide legal services or legal advice. Distribution of
+ Creative Commons public licenses does not create a lawyer-client or
+ other relationship. Creative Commons makes its licenses and related
+ information available on an "as-is" basis. Creative Commons gives no
+ warranties regarding its licenses, any material licensed under their
+ terms and conditions, or any related information. Creative Commons
+ disclaims all liability for damages resulting from their use to the
+ fullest extent possible.
+
+ Using Creative Commons Public Licenses
+
+ Creative Commons public licenses provide a standard set of terms and
+ conditions that creators and other rights holders may use to share
+ original works of authorship and other material subject to copyright
+ and certain other rights specified in the public license below. The
+ following considerations are for informational purposes only, are not
+ exhaustive, and do not form part of our licenses.
+
+      Considerations for licensors: Our public licenses are
+      intended for use by those authorized to give the public
+      permission to use material in ways otherwise restricted by
+      copyright and certain other rights. Our licenses are
+      irrevocable. Licensors should read and understand the terms
+      and conditions of the license they choose before applying it.
+      Licensors should also secure all rights necessary before
+      applying our licenses so that the public can reuse the
+      material as expected. Licensors should clearly mark any
+      material not subject to the license. This includes other CC-
+      licensed material, or material used under an exception or
+      limitation to copyright. More considerations for licensors:
+      wiki.creativecommons.org/Considerations_for_licensors
+
+      Considerations for the public: By using one of our public
+      licenses, a licensor grants the public permission to use the
+      licensed material under specified terms and conditions. If
+      the licensor's permission is not necessary for any reason--for
+      example, because of any applicable exception or limitation to
+      copyright--then that use is not regulated by the license. Our
+      licenses grant only permissions under copyright and certain
+      other rights that a licensor has authority to grant. Use of
+      the licensed material may still be restricted for other
+      reasons, including because others have copyright or other
+      rights in the material. A licensor may make special requests,
+      such as asking that all changes be marked or described.
+      Although not required by our licenses, you are encouraged to
+      respect those requests where reasonable. More considerations
+      for the public:
+      wiki.creativecommons.org/Considerations_for_licensees
+
+ =======================================================================
+
+ Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
+ Public License
+
+ By exercising the Licensed Rights (defined below), You accept and agree
+ to be bound by the terms and conditions of this Creative Commons
+ Attribution-NonCommercial-ShareAlike 4.0 International Public License
+ ("Public License"). To the extent this Public License may be
+ interpreted as a contract, You are granted the Licensed Rights in
+ consideration of Your acceptance of these terms and conditions, and the
+ Licensor grants You such rights in consideration of benefits the
+ Licensor receives from making the Licensed Material available under
+ these terms and conditions.
+
+
+ Section 1 -- Definitions.
+
+   a. Adapted Material means material subject to Copyright and Similar
+      Rights that is derived from or based upon the Licensed Material
+      and in which the Licensed Material is translated, altered,
+      arranged, transformed, or otherwise modified in a manner requiring
+      permission under the Copyright and Similar Rights held by the
+      Licensor. For purposes of this Public License, where the Licensed
+      Material is a musical work, performance, or sound recording,
+      Adapted Material is always produced where the Licensed Material is
+      synched in timed relation with a moving image.
+
+   b. Adapter's License means the license You apply to Your Copyright
+      and Similar Rights in Your contributions to Adapted Material in
+      accordance with the terms and conditions of this Public License.
+
+   c. BY-NC-SA Compatible License means a license listed at
+      creativecommons.org/compatiblelicenses, approved by Creative
+      Commons as essentially the equivalent of this Public License.
+
+   d. Copyright and Similar Rights means copyright and/or similar rights
+      closely related to copyright including, without limitation,
+      performance, broadcast, sound recording, and Sui Generis Database
+      Rights, without regard to how the rights are labeled or
+      categorized. For purposes of this Public License, the rights
+      specified in Section 2(b)(1)-(2) are not Copyright and Similar
+      Rights.
+
+   e. Effective Technological Measures means those measures that, in the
+      absence of proper authority, may not be circumvented under laws
+      fulfilling obligations under Article 11 of the WIPO Copyright
+      Treaty adopted on December 20, 1996, and/or similar international
+      agreements.
+
+   f. Exceptions and Limitations means fair use, fair dealing, and/or
+      any other exception or limitation to Copyright and Similar Rights
+      that applies to Your use of the Licensed Material.
+
+   g. License Elements means the license attributes listed in the name
+      of a Creative Commons Public License. The License Elements of this
+      Public License are Attribution, NonCommercial, and ShareAlike.
+
+   h. Licensed Material means the artistic or literary work, database,
+      or other material to which the Licensor applied this Public
+      License.
+
+   i. Licensed Rights means the rights granted to You subject to the
+      terms and conditions of this Public License, which are limited to
+      all Copyright and Similar Rights that apply to Your use of the
+      Licensed Material and that the Licensor has authority to license.
+
+   j. Licensor means the individual(s) or entity(ies) granting rights
+      under this Public License.
+
+   k. NonCommercial means not primarily intended for or directed towards
+      commercial advantage or monetary compensation. For purposes of
+      this Public License, the exchange of the Licensed Material for
+      other material subject to Copyright and Similar Rights by digital
+      file-sharing or similar means is NonCommercial provided there is
+      no payment of monetary compensation in connection with the
+      exchange.
+
+   l. Share means to provide material to the public by any means or
+      process that requires permission under the Licensed Rights, such
+      as reproduction, public display, public performance, distribution,
+      dissemination, communication, or importation, and to make material
+      available to the public including in ways that members of the
+      public may access the material from a place and at a time
+      individually chosen by them.
+
+   m. Sui Generis Database Rights means rights other than copyright
+      resulting from Directive 96/9/EC of the European Parliament and of
+      the Council of 11 March 1996 on the legal protection of databases,
+      as amended and/or succeeded, as well as other essentially
+      equivalent rights anywhere in the world.
+
+   n. You means the individual or entity exercising the Licensed Rights
+      under this Public License. Your has a corresponding meaning.
+
+
+ Section 2 -- Scope.
+
+   a. License grant.
+
+        1. Subject to the terms and conditions of this Public License,
+           the Licensor hereby grants You a worldwide, royalty-free,
+           non-sublicensable, non-exclusive, irrevocable license to
+           exercise the Licensed Rights in the Licensed Material to:
+
+             a. reproduce and Share the Licensed Material, in whole or
+                in part, for NonCommercial purposes only; and
+
+             b. produce, reproduce, and Share Adapted Material for
+                NonCommercial purposes only.
+
+        2. Exceptions and Limitations. For the avoidance of doubt, where
+           Exceptions and Limitations apply to Your use, this Public
+           License does not apply, and You do not need to comply with
+           its terms and conditions.
+
+        3. Term. The term of this Public License is specified in Section
+           6(a).
+
+        4. Media and formats; technical modifications allowed. The
+           Licensor authorizes You to exercise the Licensed Rights in
+           all media and formats whether now known or hereafter created,
+           and to make technical modifications necessary to do so. The
+           Licensor waives and/or agrees not to assert any right or
+           authority to forbid You from making technical modifications
+           necessary to exercise the Licensed Rights, including
+           technical modifications necessary to circumvent Effective
+           Technological Measures. For purposes of this Public License,
+           simply making modifications authorized by this Section 2(a)
+           (4) never produces Adapted Material.
+
+        5. Downstream recipients.
+
+             a. Offer from the Licensor -- Licensed Material. Every
+                recipient of the Licensed Material automatically
+                receives an offer from the Licensor to exercise the
+                Licensed Rights under the terms and conditions of this
+                Public License.
+
+             b. Additional offer from the Licensor -- Adapted Material.
+                Every recipient of Adapted Material from You
+                automatically receives an offer from the Licensor to
+                exercise the Licensed Rights in the Adapted Material
+                under the conditions of the Adapter's License You apply.
+
+             c. No downstream restrictions. You may not offer or impose
+                any additional or different terms or conditions on, or
+                apply any Effective Technological Measures to, the
+                Licensed Material if doing so restricts exercise of the
+                Licensed Rights by any recipient of the Licensed
+                Material.
+
+        6. No endorsement. Nothing in this Public License constitutes or
+           may be construed as permission to assert or imply that You
+           are, or that Your use of the Licensed Material is, connected
+           with, or sponsored, endorsed, or granted official status by,
+           the Licensor or others designated to receive attribution as
+           provided in Section 3(a)(1)(A)(i).
+
+   b. Other rights.
+
+        1. Moral rights, such as the right of integrity, are not
+           licensed under this Public License, nor are publicity,
+           privacy, and/or other similar personality rights; however, to
+           the extent possible, the Licensor waives and/or agrees not to
+           assert any such rights held by the Licensor to the limited
+           extent necessary to allow You to exercise the Licensed
+           Rights, but not otherwise.
+
+        2. Patent and trademark rights are not licensed under this
+           Public License.
+
+        3. To the extent possible, the Licensor waives any right to
+           collect royalties from You for the exercise of the Licensed
+           Rights, whether directly or through a collecting society
+           under any voluntary or waivable statutory or compulsory
+           licensing scheme. In all other cases the Licensor expressly
+           reserves any right to collect such royalties, including when
+           the Licensed Material is used other than for NonCommercial
+           purposes.
+
+
+ Section 3 -- License Conditions.
+
+ Your exercise of the Licensed Rights is expressly made subject to the
+ following conditions.
+
+   a. Attribution.
+
+        1. If You Share the Licensed Material (including in modified
+           form), You must:
+
+             a. retain the following if it is supplied by the Licensor
+                with the Licensed Material:
+
+                  i. identification of the creator(s) of the Licensed
+                     Material and any others designated to receive
+                     attribution, in any reasonable manner requested by
+                     the Licensor (including by pseudonym if
+                     designated);
+
+                 ii. a copyright notice;
+
+                iii. a notice that refers to this Public License;
+
+                 iv. a notice that refers to the disclaimer of
+                     warranties;
+
+                  v. a URI or hyperlink to the Licensed Material to the
+                     extent reasonably practicable;
+
+             b. indicate if You modified the Licensed Material and
+                retain an indication of any previous modifications; and
+
+             c. indicate the Licensed Material is licensed under this
+                Public License, and include the text of, or the URI or
+                hyperlink to, this Public License.
+
+        2. You may satisfy the conditions in Section 3(a)(1) in any
+           reasonable manner based on the medium, means, and context in
+           which You Share the Licensed Material. For example, it may be
+           reasonable to satisfy the conditions by providing a URI or
+           hyperlink to a resource that includes the required
+           information.
+        3. If requested by the Licensor, You must remove any of the
+           information required by Section 3(a)(1)(A) to the extent
+           reasonably practicable.
+
+   b. ShareAlike.
+
+      In addition to the conditions in Section 3(a), if You Share
+      Adapted Material You produce, the following conditions also apply.
+
+        1. The Adapter's License You apply must be a Creative Commons
+           license with the same License Elements, this version or
+           later, or a BY-NC-SA Compatible License.
+
+        2. You must include the text of, or the URI or hyperlink to, the
+           Adapter's License You apply. You may satisfy this condition
+           in any reasonable manner based on the medium, means, and
+           context in which You Share Adapted Material.
+
+        3. You may not offer or impose any additional or different terms
+           or conditions on, or apply any Effective Technological
+           Measures to, Adapted Material that restrict exercise of the
+           rights granted under the Adapter's License You apply.
+
+
+ Section 4 -- Sui Generis Database Rights.
+
+ Where the Licensed Rights include Sui Generis Database Rights that
+ apply to Your use of the Licensed Material:
+
+   a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+      to extract, reuse, reproduce, and Share all or a substantial
+      portion of the contents of the database for NonCommercial purposes
+      only;
+
+   b. if You include all or a substantial portion of the database
+      contents in a database in which You have Sui Generis Database
+      Rights, then the database in which You have Sui Generis Database
+      Rights (but not its individual contents) is Adapted Material,
+      including for purposes of Section 3(b); and
+
+   c. You must comply with the conditions in Section 3(a) if You Share
+      all or a substantial portion of the contents of the database.
+
+ For the avoidance of doubt, this Section 4 supplements and does not
+ replace Your obligations under this Public License where the Licensed
+ Rights include other Copyright and Similar Rights.
+
+
+ Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+   a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+      EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+      AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+      ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+      IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+      WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+      PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+      ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+      KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+      ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+   b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+      TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+      NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+      INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+      COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+      USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+      ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+      DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+      IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+   c. The disclaimer of warranties and limitation of liability provided
+      above shall be interpreted in a manner that, to the extent
+      possible, most closely approximates an absolute disclaimer and
+      waiver of all liability.
+
+
+ Section 6 -- Term and Termination.
+
+   a. This Public License applies for the term of the Copyright and
+      Similar Rights licensed here. However, if You fail to comply with
+      this Public License, then Your rights under this Public License
+      terminate automatically.
+
+   b. Where Your right to use the Licensed Material has terminated under
+      Section 6(a), it reinstates:
+
+        1. automatically as of the date the violation is cured, provided
+           it is cured within 30 days of Your discovery of the
+           violation; or
+
+        2. upon express reinstatement by the Licensor.
+
+      For the avoidance of doubt, this Section 6(b) does not affect any
+      right the Licensor may have to seek remedies for Your violations
+      of this Public License.
+
+   c. For the avoidance of doubt, the Licensor may also offer the
+      Licensed Material under separate terms or conditions or stop
+      distributing the Licensed Material at any time; however, doing so
+      will not terminate this Public License.
+
+   d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+      License.
+
+
+ Section 7 -- Other Terms and Conditions.
+
+   a. The Licensor shall not be bound by any additional or different
+      terms or conditions communicated by You unless expressly agreed.
+
+   b. Any arrangements, understandings, or agreements regarding the
+      Licensed Material not stated herein are separate from and
+      independent of the terms and conditions of this Public License.
+
+
+ Section 8 -- Interpretation.
+
+   a. For the avoidance of doubt, this Public License does not, and
+      shall not be interpreted to, reduce, limit, restrict, or impose
+      conditions on any use of the Licensed Material that could lawfully
+      be made without permission under this Public License.
+
+   b. To the extent possible, if any provision of this Public License is
+      deemed unenforceable, it shall be automatically reformed to the
+      minimum extent necessary to make it enforceable. If the provision
+      cannot be reformed, it shall be severed from this Public License
+      without affecting the enforceability of the remaining terms and
+      conditions.
+
+   c. No term or condition of this Public License will be waived and no
+      failure to comply consented to unless expressly agreed to by the
+      Licensor.
+
+   d. Nothing in this Public License constitutes or may be interpreted
+      as a limitation upon, or waiver of, any privileges and immunities
+      that apply to the Licensor or You, including from the legal
+      processes of any jurisdiction or authority.
+
+ =======================================================================
+
+ Creative Commons is not a party to its public
+ licenses. Notwithstanding, Creative Commons may elect to apply one of
+ its public licenses to material it publishes and in those instances
+ will be considered the “Licensor.” The text of the Creative Commons
+ public licenses is dedicated to the public domain under the CC0 Public
+ Domain Dedication. Except for the limited purpose of indicating that
+ material is shared under a Creative Commons public license or as
+ otherwise permitted by the Creative Commons policies published at
+ creativecommons.org/policies, Creative Commons does not authorize the
+ use of the trademark "Creative Commons" or any other trademark or logo
+ of Creative Commons without its prior written consent including,
+ without limitation, in connection with any unauthorized modifications
+ to any of its public licenses or any other arrangements,
+ understandings, or agreements concerning use of licensed material. For
+ the avoidance of doubt, this paragraph does not form part of the
+ public licenses.
+
+ Creative Commons may be contacted at creativecommons.org.
app.py CHANGED
@@ -1,4 +1,4 @@
- # import data
+ import data
  import torch
  import gradio as gr
  from models import imagebind_model
bpe/bpe_simple_vocab_16e6.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
+ size 1356917
data.py ADDED
@@ -0,0 +1,350 @@
+ #!/usr/bin/env python3
+ # Portions Copyright (c) Meta Platforms, Inc. and affiliates.
+ # All rights reserved.
+
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ import math
+
+ import torch
+ import torch.nn as nn
+ import torchaudio
+ import logging
+
+ from models.multimodal_preprocessors import SimpleTokenizer
+ from PIL import Image
+ from pytorchvideo import transforms as pv_transforms
+ from pytorchvideo.data.clip_sampling import ConstantClipsPerVideoSampler
+ from pytorchvideo.data.encoded_video import EncodedVideo
+
+ from torchvision import transforms
+ from torchvision.transforms._transforms_video import NormalizeVideo
+
+ DEFAULT_AUDIO_FRAME_SHIFT_MS = 10  # in milliseconds
+
+ BPE_PATH = "bpe/bpe_simple_vocab_16e6.txt.gz"
+
+
+ def waveform2melspec(waveform, sample_rate, num_mel_bins, target_length):
+     # Based on https://github.com/YuanGongND/ast/blob/d7d8b4b8e06cdaeb6c843cdb38794c1c7692234c/src/dataloader.py#L102
+     waveform -= waveform.mean()
+     fbank = torchaudio.compliance.kaldi.fbank(
+         waveform,
+         htk_compat=True,
+         sample_frequency=sample_rate,
+         use_energy=False,
+         window_type="hanning",
+         num_mel_bins=num_mel_bins,
+         dither=0.0,
+         frame_length=25,
+         frame_shift=DEFAULT_AUDIO_FRAME_SHIFT_MS,
+     )
+     # Convert to [mel_bins, num_frames] shape
+     fbank = fbank.transpose(0, 1)
+     # Pad to target_length
+     n_frames = fbank.size(1)
+     p = target_length - n_frames
+     # if p is too large (say >20%), log a warning
+     if abs(p) / n_frames > 0.2:
+         logging.warning(
+             "Large gap between audio n_frames(%d) and "
+             "target_length (%d). Is the audio_target_length "
+             "setting correct?",
+             n_frames,
+             target_length,
+         )
+     # cut and pad
+     if p > 0:
+         fbank = torch.nn.functional.pad(fbank, (0, p), mode="constant", value=0)
+     elif p < 0:
+         fbank = fbank[:, 0:target_length]
+     # Convert to [1, mel_bins, num_frames] shape, essentially like a 1
+     # channel image
+     fbank = fbank.unsqueeze(0)
+     return fbank
+
+
+ def get_clip_timepoints(clip_sampler, duration):
+     # Read out all clips in this video
+     all_clips_timepoints = []
+     is_last_clip = False
+     end = 0.0
+     while not is_last_clip:
+         start, end, _, _, is_last_clip = clip_sampler(end, duration, annotation=None)
+         all_clips_timepoints.append((start, end))
+     return all_clips_timepoints
+
+
+ def load_and_transform_vision_data(image_paths, device):
+     if image_paths is None:
+         return None
+
+     image_outputs = []
+     for image_path in image_paths:
+         data_transform = transforms.Compose(
+             [
+                 transforms.Resize(
+                     224, interpolation=transforms.InterpolationMode.BICUBIC
+                 ),
+                 transforms.CenterCrop(224),
+                 transforms.ToTensor(),
+                 transforms.Normalize(
+                     mean=(0.48145466, 0.4578275, 0.40821073),
+                     std=(0.26862954, 0.26130258, 0.27577711),
+                 ),
+             ]
+         )
+         with open(image_path, "rb") as fopen:
+             image = Image.open(fopen).convert("RGB")
+
+         image = data_transform(image).to(device)
+         image_outputs.append(image)
+     return torch.stack(image_outputs, dim=0)
+
+
+ def load_and_transform_text(text, device):
+     if text is None:
+         return None
+     tokenizer = SimpleTokenizer(bpe_path=BPE_PATH)
+     tokens = [tokenizer(t).unsqueeze(0).to(device) for t in text]
+     tokens = torch.cat(tokens, dim=0)
+     return tokens
+
+
+ def load_and_transform_audio_data(
+     audio_paths,
+     device,
+     num_mel_bins=128,
+     target_length=204,
+     sample_rate=16000,
+     clip_duration=2,
+     clips_per_video=3,
+     mean=-4.268,
+     std=9.138,
+ ):
+     if audio_paths is None:
+         return None
+
+     audio_outputs = []
+     clip_sampler = ConstantClipsPerVideoSampler(
+         clip_duration=clip_duration, clips_per_video=clips_per_video
+     )
+
+     for audio_path in audio_paths:
+         waveform, sr = torchaudio.load(audio_path)
+         if sample_rate != sr:
+             waveform = torchaudio.functional.resample(
+                 waveform, orig_freq=sr, new_freq=sample_rate
+             )
+         all_clips_timepoints = get_clip_timepoints(
+             clip_sampler, waveform.size(1) / sample_rate
+         )
+         all_clips = []
+         for clip_timepoints in all_clips_timepoints:
+             waveform_clip = waveform[
+                 :,
+                 int(clip_timepoints[0] * sample_rate) : int(
+                     clip_timepoints[1] * sample_rate
+                 ),
+             ]
+             waveform_melspec = waveform2melspec(
+                 waveform_clip, sample_rate, num_mel_bins, target_length
+             )
+             all_clips.append(waveform_melspec)
+
+         normalize = transforms.Normalize(mean=mean, std=std)
+         all_clips = [normalize(ac).to(device) for ac in all_clips]
+
+         all_clips = torch.stack(all_clips, dim=0)
+         audio_outputs.append(all_clips)
+
+     return torch.stack(audio_outputs, dim=0)
+
+
+ def get_clip_timepoints(clip_sampler, duration):
+     # Read out all clips in this video
+     all_clips_timepoints = []
+     is_last_clip = False
+     end = 0.0
+     while not is_last_clip:
+         start, end, _, _, is_last_clip = clip_sampler(end, duration, annotation=None)
+         all_clips_timepoints.append((start, end))
+     return all_clips_timepoints
+
+
+ def crop_boxes(boxes, x_offset, y_offset):
+     """
+     Perform crop on the bounding boxes given the offsets.
+     Args:
+         boxes (ndarray or None): bounding boxes to perform crop. The dimension
+             is `num boxes` x 4.
+         x_offset (int): cropping offset in the x axis.
+         y_offset (int): cropping offset in the y axis.
+     Returns:
+         cropped_boxes (ndarray or None): the cropped boxes with dimension of
+             `num boxes` x 4.
+     """
+     cropped_boxes = boxes.copy()
+     cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset
+     cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset
+
+     return cropped_boxes
+
+
+ def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None):
+     """
+     Perform uniform spatial sampling on the images and corresponding boxes.
+     Args:
+         images (tensor): images to perform uniform crop. The dimension is
+             `num frames` x `channel` x `height` x `width`.
+         size (int): size of height and width to crop the images.
+         spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width
+             is larger than height. Or 0, 1, or 2 for top, center, and bottom
+             crop if height is larger than width.
+         boxes (ndarray or None): optional. Corresponding boxes to images.
+             Dimension is `num boxes` x 4.
+         scale_size (int): optional. If not None, resize the images to scale_size before
+             performing any crop.
+     Returns:
+         cropped (tensor): images with dimension of
+             `num frames` x `channel` x `size` x `size`.
+         cropped_boxes (ndarray or None): the cropped boxes with dimension of
+             `num boxes` x 4.
+     """
+     assert spatial_idx in [0, 1, 2]
+     ndim = len(images.shape)
+     if ndim == 3:
+         images = images.unsqueeze(0)
+     height = images.shape[2]
+     width = images.shape[3]
+
+     if scale_size is not None:
+         if width <= height:
+             width, height = scale_size, int(height / width * scale_size)
+         else:
+             width, height = int(width / height * scale_size), scale_size
+         images = torch.nn.functional.interpolate(
+             images,
+             size=(height, width),
+             mode="bilinear",
+             align_corners=False,
+         )
+
+     y_offset = int(math.ceil((height - size) / 2))
+     x_offset = int(math.ceil((width - size) / 2))
+
+     if height > width:
+         if spatial_idx == 0:
+             y_offset = 0
+         elif spatial_idx == 2:
+             y_offset = height - size
+     else:
+         if spatial_idx == 0:
+             x_offset = 0
+         elif spatial_idx == 2:
+             x_offset = width - size
+     cropped = images[:, :, y_offset : y_offset + size, x_offset : x_offset + size]
+     cropped_boxes = crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
+     if ndim == 3:
+         cropped = cropped.squeeze(0)
+     return cropped, cropped_boxes
+
+
+ class SpatialCrop(nn.Module):
+     """
+     Convert the video into 3 smaller clips spatially. Must be used after the
+     temporal crops to get spatial crops, and should be used with
+     -2 in the spatial crop at the slowfast augmentation stage (so full
+     frames are passed in here). Will return a larger list with the
+     3x spatial crops as well.
+     """
+
+     def __init__(self, crop_size: int = 224, num_crops: int = 3):
+         super().__init__()
+         self.crop_size = crop_size
+         if num_crops == 3:
+             self.crops_to_ext = [0, 1, 2]
+             self.flipped_crops_to_ext = []
+         elif num_crops == 1:
+             self.crops_to_ext = [1]
+             self.flipped_crops_to_ext = []
+         else:
+             raise NotImplementedError("Nothing else supported yet")
+
+     def forward(self, videos):
+         """
+         Args:
+             videos: A list of C, T, H, W videos.
+         Returns:
+             videos: A list with 3x the number of elements. Each video converted
+                 to C, T, H', W' by spatial cropping.
+         """
+         assert isinstance(videos, list), "Must be a list of videos after temporal crops"
+         assert all([video.ndim == 4 for video in videos]), "Must be (C,T,H,W)"
+         res = []
+         for video in videos:
+             for spatial_idx in self.crops_to_ext:
+                 res.append(uniform_crop(video, self.crop_size, spatial_idx)[0])
+             if not self.flipped_crops_to_ext:
+                 continue
+             flipped_video = transforms.functional.hflip(video)
+             for spatial_idx in self.flipped_crops_to_ext:
+                 res.append(uniform_crop(flipped_video, self.crop_size, spatial_idx)[0])
+         return res
+
+
+ def load_and_transform_video_data(
+     video_paths,
+     device,
+     clip_duration=2,
+     clips_per_video=5,
+     sample_rate=16000,
+ ):
+     if video_paths is None:
+         return None
+
+     video_outputs = []
+     video_transform = transforms.Compose(
+         [
+             pv_transforms.ShortSideScale(224),
+             NormalizeVideo(
+                 mean=(0.48145466, 0.4578275, 0.40821073),
+                 std=(0.26862954, 0.26130258, 0.27577711),
+             ),
+         ]
+     )
+
+     clip_sampler = ConstantClipsPerVideoSampler(
+         clip_duration=clip_duration, clips_per_video=clips_per_video
+     )
+     frame_sampler = pv_transforms.UniformTemporalSubsample(num_samples=clip_duration)
+
+     for video_path in video_paths:
+         video = EncodedVideo.from_path(
+             video_path,
+             decoder="decord",
+             decode_audio=False,
+             **{"sample_rate": sample_rate},
+         )
+
+         all_clips_timepoints = get_clip_timepoints(clip_sampler, video.duration)
+
+         all_video = []
+         for clip_timepoints in all_clips_timepoints:
+             # Read the clip, get frames
+             clip = video.get_clip(clip_timepoints[0], clip_timepoints[1])
+             if clip is None:
+                 raise ValueError("No clip found")
+             video_clip = frame_sampler(clip["video"])
+             video_clip = video_clip / 255.0  # frames are float, scale to [0, 1]
+
+             all_video.append(video_clip)
+
+         all_video = [video_transform(clip) for clip in all_video]
+         all_video = SpatialCrop(224, num_crops=3)(all_video)
+
+         all_video = torch.stack(all_video, dim=0)
+         video_outputs.append(all_video)
+
+     return torch.stack(video_outputs, dim=0).to(device)
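Note: the `load_and_transform_*` helpers above share one convention: each takes a list of inputs (file paths, or raw strings for text) plus a target device, returns `None` for a `None` input, and otherwise returns a tensor with a leading batch dimension. A minimal usage sketch follows; the asset paths are hypothetical placeholders, not files shipped in this commit.

```python
import torch

import data  # the module added in this commit

device = "cuda" if torch.cuda.is_available() else "cpu"

# Vision: list of image paths -> (N, 3, 224, 224) after resize, center-crop, normalize.
images = data.load_and_transform_vision_data(["assets/dog_image.jpg"], device)

# Text: list of strings -> (N, 77) BPE token ids from SimpleTokenizer.
texts = data.load_and_transform_text(["a dog.", "a car."], device)

# Audio: list of wav paths -> (N, clips_per_video, 1, num_mel_bins, target_length)
# mel-spectrogram clips, i.e. (N, 3, 1, 128, 204) with the defaults above.
audio = data.load_and_transform_audio_data(["assets/dog_audio.wav"], device)
```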
model_card.md ADDED
@@ -0,0 +1,94 @@
+ # Model Card for ImageBind
+
+ Multimodal joint embedding model for image/video, text, audio, depth, IMU, and thermal images.
+ Input any of the six modalities and get an embedding of the same size that can be used for cross-modal and multimodal tasks.
+
+ # Model Details
+
+ ## Model Description
+
+ <!-- Provide a longer summary of what this model is/does. -->
+ Multimodal joint embedding model for image/video, text, audio, depth, IMU, and thermal images
+
+ - **Developed by:** Meta AI
+ - **Model type:** Multimodal model
+ - **Language(s) (NLP):** en
+ - **License:** CC BY-NC-SA 4.0
+ - **Resources for more information:**
+   - [GitHub Repo](https://github.com/facebookresearch/ImageBind)
+
+
+ # Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+ This model is intended only for research purposes. It provides a joint embedding space for different modalities -- image/video, text, audio, depth, IMU and thermal images.
+ We hope that these joint embeddings can be used for a variety of different cross-modal research, e.g., cross-modal retrieval and combining embeddings from different modalities.
+
+ ## Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+ <!-- If the user enters content, print that. If not, but they enter a task in the list, use that. If neither, say "more info needed." -->
+
+ This model is *NOT* intended to be used in any real world application -- commercial or otherwise.
+ It may produce harmful associations with different inputs.
+ The model needs to be investigated and likely re-trained on specific data for any such application.
+ The model is expected to work better on web-based visual data since it was trained on such data.
+ The text encoder is likely to work only on English language text because of the underlying training datasets.
+
+ # Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+ Open-domain joint embedding models are prone to producing specific biases, e.g., see the study from [CLIP](https://github.com/openai/CLIP/blob/main/model-card.md#bias-and-fairness).
+ Since our model uses such models as initialization, it will exhibit such biases too.
+ Moreover, for learning joint embeddings for other modalities such as audio, thermal, depth, and IMU we leverage datasets that are relatively small. These joint embeddings are thus limited to the concepts present in the datasets. For example, the thermal datasets we used are limited to outdoor street scenes, while the depth datasets are limited to indoor scenes.
+
+
+
+ # Training Details
+
+ ## Training Data
+
+ <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ ImageBind uses image-paired data for training -- (image, X) where X is one of text, audio, depth, IMU or thermal data.
+ In particular, we initialize and freeze the image and text encoders using an OpenCLIP ViT-H encoder.
+ We train audio embeddings using Audioset, depth embeddings using the SUN RGB-D dataset, IMU using the Ego4D dataset and thermal embeddings using the LLVIP dataset.
+ We provide the exact training data details in the paper.
+
+
+ ## Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+ Please refer to the research paper and GitHub repo for exact details on this.
+
+ # Evaluation
+
+ ## Testing Data, Factors & Metrics
+
+ We evaluate the model on a variety of different classification benchmarks for each modality.
+ The evaluation details are presented in the paper.
+ The model's performance is measured using standard classification metrics such as accuracy and mAP.
+
+ # Citation
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+ ```
+ @inproceedings{girdhar2023imagebind,
+   title={ImageBind: One Embedding Space To Bind Them All},
+   author={Girdhar, Rohit and El-Nouby, Alaaeldin and Liu, Zhuang
+   and Singh, Mannat and Alwala, Kalyan Vasudev and Joulin, Armand and Misra, Ishan},
+   booktitle={CVPR},
+   year={2023}
+ }
+ ```
+
+
+ # Model Card Contact
+
+ Please reach out to the authors at: rgirdhar@meta.com, imisra@meta.com, alaaelnouby@gmail.com
+
+ # How to Get Started with the Model
+
+ Our GitHub repo provides a simple example to extract embeddings from images, audio, etc.
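Note: as a concrete starting point, the snippet below sketches the zero-shot flow implied by the files in this commit: preprocess inputs with `data.py`, pass a dict keyed by `ModalityType` through the model, and compare the resulting embeddings. It assumes the checkpoint-loading helper `imagebind_model.imagebind_huge(pretrained=True)` from the upstream GitHub repo (not shown in this diff), and the asset path is a hypothetical placeholder.

```python
import torch

import data
from models import imagebind_model
from models.imagebind_model import ModalityType

device = "cuda" if torch.cuda.is_available() else "cpu"

# Assumed upstream helper: builds the model and loads the released checkpoint.
model = imagebind_model.imagebind_huge(pretrained=True)
model.eval()
model.to(device)

inputs = {
    ModalityType.TEXT: data.load_and_transform_text(["a dog.", "a car."], device),
    ModalityType.VISION: data.load_and_transform_vision_data(["assets/dog_image.jpg"], device),
}

with torch.no_grad():
    embeddings = model(inputs)

# Embeddings live in one shared space, so cross-modal similarity is a dot
# product; a softmax over vision x text logits gives zero-shot class scores.
print(torch.softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.TEXT].T, dim=-1))
```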
models/__init__.py ADDED
File without changes
models/helpers.py ADDED
@@ -0,0 +1,141 @@
+ #!/usr/bin/env python3
+ # Portions Copyright (c) Meta Platforms, Inc. and affiliates.
+ # All rights reserved.
+
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ import math
+
+ import einops
+ import numpy as np
+ import torch
+
+ import torch.nn as nn
+
+
+ class Normalize(nn.Module):
+     def __init__(self, dim: int) -> None:
+         super().__init__()
+         self.dim = dim
+
+     def forward(self, x):
+         return torch.nn.functional.normalize(x, dim=self.dim, p=2)
+
+
+ class LearnableLogitScaling(nn.Module):
+     def __init__(
+         self,
+         logit_scale_init: float = 1 / 0.07,
+         learnable: bool = True,
+         max_logit_scale: float = 100,
+     ) -> None:
+         super().__init__()
+         self.max_logit_scale = max_logit_scale
+         self.logit_scale_init = logit_scale_init
+         self.learnable = learnable
+         log_logit_scale = torch.ones([]) * np.log(self.logit_scale_init)
+         if learnable:
+             self.log_logit_scale = nn.Parameter(log_logit_scale)
+         else:
+             self.register_buffer("log_logit_scale", log_logit_scale)
+
+     def forward(self, x):
+         return torch.clip(self.log_logit_scale.exp(), max=self.max_logit_scale) * x
+
+     def extra_repr(self):
+         st = f"logit_scale_init={self.logit_scale_init},learnable={self.learnable}, max_logit_scale={self.max_logit_scale}"
+         return st
+
+
+ class EinOpsRearrange(nn.Module):
+     def __init__(self, rearrange_expr: str, **kwargs) -> None:
+         super().__init__()
+         self.rearrange_expr = rearrange_expr
+         self.kwargs = kwargs
+
+     def forward(self, x):
+         assert isinstance(x, torch.Tensor)
+         return einops.rearrange(x, self.rearrange_expr, **self.kwargs)
+
+
+ class VerboseNNModule(nn.Module):
+     """
+     Wrapper around nn.Module that prints registered buffers and parameter names.
+     """
+
+     @staticmethod
+     def get_readable_tensor_repr(name: str, tensor: torch.Tensor) -> str:
+         # `tensor` is actually a (name, tensor) tuple yielded by
+         # named_parameters() / named_buffers(), hence the tensor[1] indexing.
+         st = (
+             "("
+             + name
+             + "): "
+             + "tensor("
+             + str(tuple(tensor[1].shape))
+             + ", requires_grad="
+             + str(tensor[1].requires_grad)
+             + ")\n"
+         )
+         return st
+
+     def extra_repr(self) -> str:
+         named_modules = set()
+         for p in self.named_modules():
+             named_modules.update([p[0]])
+         named_modules = list(named_modules)
+
+         string_repr = ""
+         for p in self.named_parameters():
+             name = p[0].split(".")[0]
+             if name not in named_modules:
+                 string_repr += self.get_readable_tensor_repr(name, p)
+
+         for p in self.named_buffers():
+             name = p[0].split(".")[0]
+             string_repr += self.get_readable_tensor_repr(name, p)
+
+         return string_repr
+
+
+ def cast_if_src_dtype(
+     tensor: torch.Tensor, src_dtype: torch.dtype, tgt_dtype: torch.dtype
+ ):
+     updated = False
+     if tensor.dtype == src_dtype:
+         tensor = tensor.to(dtype=tgt_dtype)
+         updated = True
+     return tensor, updated
+
+
+ class QuickGELU(nn.Module):
+     # From https://github.com/openai/CLIP/blob/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1/clip/model.py#L166
+     def forward(self, x: torch.Tensor):
+         return x * torch.sigmoid(1.702 * x)
+
+
+ class SelectElement(nn.Module):
+     def __init__(self, index) -> None:
+         super().__init__()
+         self.index = index
+
+     def forward(self, x):
+         assert x.ndim >= 3
+         return x[:, self.index, ...]
+
+
+ class SelectEOSAndProject(nn.Module):
+     """
+     Text Pooling used in OpenCLIP
+     """
+
+     def __init__(self, proj: nn.Module) -> None:
+         super().__init__()
+         self.proj = proj
+
+     def forward(self, x, seq_len):
+         assert x.ndim == 3
+         # x is of shape B x L x D
+         # take features from the eot embedding (eot_token is the highest number in each sequence)
+         x = x[torch.arange(x.shape[0]), seq_len]
+         x = self.proj(x)
+         return x
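Note: for orientation, here is a small sketch of the two helpers that implement the contrastive-head math (L2-normalize, then scale by a learned temperature clipped at `max_logit_scale`), composed the way the text post-processor in `imagebind_model.py` composes them; the tensors are illustrative only.

```python
import torch
import torch.nn as nn

from models.helpers import LearnableLogitScaling, Normalize, QuickGELU

# Post-processing head in the style used for the text modality: L2-normalize
# embeddings, then multiply by clip(exp(log_logit_scale), max=100).
head = nn.Sequential(Normalize(dim=-1), LearnableLogitScaling(learnable=True))

x = torch.randn(4, 768)
y = head(x)
print(y.norm(dim=-1))  # each row ~ 1/0.07 = 14.29 at initialization

# QuickGELU is CLIP's cheap GELU approximation: x * sigmoid(1.702 * x).
print(QuickGELU()(torch.tensor([-1.0, 0.0, 1.0])))  # ~[-0.154, 0.000, 0.846]
```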
models/imagebind_model.py ADDED
@@ -0,0 +1,517 @@
+ #!/usr/bin/env python3
+ # Portions Copyright (c) Meta Platforms, Inc. and affiliates.
+ # All rights reserved.
+
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+
+ import os
+ import urllib
+ from functools import partial
+ from types import SimpleNamespace
+
+ import torch
+ import torch.nn as nn
+
+ from models.helpers import (
+     EinOpsRearrange,
+     LearnableLogitScaling,
+     Normalize,
+     SelectElement,
+     SelectEOSAndProject,
+ )
+ from models.multimodal_preprocessors import (
+     AudioPreprocessor,
+     IMUPreprocessor,
+     PadIm2Video,
+     PatchEmbedGeneric,
+     RGBDTPreprocessor,
+     SpatioTemporalPosEmbeddingHelper,
+     TextPreprocessor,
+     ThermalPreprocessor,
+ )
+
+ from models.transformer import MultiheadAttention, SimpleTransformer
+
+
+ ModalityType = SimpleNamespace(
+     VISION="vision",
+     TEXT="text",
+     AUDIO="audio",
+     THERMAL="thermal",
+     DEPTH="depth",
+     IMU="imu",
+ )
+
+
+ class ImageBindModel(nn.Module):
+     def __init__(
+         self,
+         video_frames=2,
+         kernel_size=(2, 14, 14),
+         audio_kernel_size=16,
+         audio_stride=10,
+         out_embed_dim=768,
+         vision_embed_dim=1024,
+         vision_num_blocks=24,
+         vision_num_heads=16,
+         audio_embed_dim=768,
+         audio_num_blocks=12,
+         audio_num_heads=12,
+         audio_num_mel_bins=128,
+         audio_target_len=204,
+         audio_drop_path=0.1,
+         text_embed_dim=768,
+         text_num_blocks=12,
+         text_num_heads=12,
+         depth_embed_dim=384,
+         depth_kernel_size=16,
+         depth_num_blocks=12,
+         depth_num_heads=8,
+         depth_drop_path=0.0,
+         thermal_embed_dim=768,
+         thermal_kernel_size=16,
+         thermal_num_blocks=12,
+         thermal_num_heads=12,
+         thermal_drop_path=0.0,
+         imu_embed_dim=512,
+         imu_kernel_size=8,
+         imu_num_blocks=6,
+         imu_num_heads=8,
+         imu_drop_path=0.7,
+     ):
+         super().__init__()
+
+         self.modality_preprocessors = self._create_modality_preprocessors(
+             video_frames,
+             vision_embed_dim,
+             kernel_size,
+             text_embed_dim,
+             audio_embed_dim,
+             audio_kernel_size,
+             audio_stride,
+             audio_num_mel_bins,
+             audio_target_len,
+             depth_embed_dim,
+             depth_kernel_size,
+             thermal_embed_dim,
+             thermal_kernel_size,
+             imu_embed_dim,
+         )
+
+         self.modality_trunks = self._create_modality_trunks(
+             vision_embed_dim,
+             vision_num_blocks,
+             vision_num_heads,
+             text_embed_dim,
+             text_num_blocks,
+             text_num_heads,
+             audio_embed_dim,
+             audio_num_blocks,
+             audio_num_heads,
+             audio_drop_path,
+             depth_embed_dim,
+             depth_num_blocks,
+             depth_num_heads,
+             depth_drop_path,
+             thermal_embed_dim,
+             thermal_num_blocks,
+             thermal_num_heads,
+             thermal_drop_path,
+             imu_embed_dim,
+             imu_num_blocks,
+             imu_num_heads,
+             imu_drop_path,
+         )
+
+         self.modality_heads = self._create_modality_heads(
+             out_embed_dim,
+             vision_embed_dim,
+             text_embed_dim,
+             audio_embed_dim,
+             depth_embed_dim,
+             thermal_embed_dim,
+             imu_embed_dim,
+         )
+
+         self.modality_postprocessors = self._create_modality_postprocessors(
+             out_embed_dim
+         )
+
+     def _create_modality_preprocessors(
+         self,
+         video_frames=2,
+         vision_embed_dim=1024,
+         kernel_size=(2, 14, 14),
+         text_embed_dim=768,
+         audio_embed_dim=768,
+         audio_kernel_size=16,
+         audio_stride=10,
+         audio_num_mel_bins=128,
+         audio_target_len=204,
+         depth_embed_dim=768,
+         depth_kernel_size=16,
+         thermal_embed_dim=768,
+         thermal_kernel_size=16,
+         imu_embed_dim=512,
+     ):
+         rgbt_stem = PatchEmbedGeneric(
+             proj_stem=[
+                 PadIm2Video(pad_type="repeat", ntimes=2),
+                 nn.Conv3d(
+                     in_channels=3,
+                     kernel_size=kernel_size,
+                     out_channels=vision_embed_dim,
+                     stride=kernel_size,
+                     bias=False,
+                 ),
+             ]
+         )
+         rgbt_preprocessor = RGBDTPreprocessor(
+             img_size=[3, video_frames, 224, 224],
+             num_cls_tokens=1,
+             pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True),
+             rgbt_stem=rgbt_stem,
+             depth_stem=None,
+         )
+
+         text_preprocessor = TextPreprocessor(
+             context_length=77,
+             vocab_size=49408,
+             embed_dim=text_embed_dim,
+             causal_masking=True,
+         )
+
+         audio_stem = PatchEmbedGeneric(
+             proj_stem=[
+                 nn.Conv2d(
+                     in_channels=1,
+                     kernel_size=audio_kernel_size,
+                     stride=audio_stride,
+                     out_channels=audio_embed_dim,
+                     bias=False,
+                 ),
+             ],
+             norm_layer=nn.LayerNorm(normalized_shape=audio_embed_dim),
+         )
+         audio_preprocessor = AudioPreprocessor(
+             img_size=[1, audio_num_mel_bins, audio_target_len],
+             num_cls_tokens=1,
+             pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True),
+             audio_stem=audio_stem,
+         )
+
+         depth_stem = PatchEmbedGeneric(
+             [
+                 nn.Conv2d(
+                     kernel_size=depth_kernel_size,
+                     in_channels=1,
+                     out_channels=depth_embed_dim,
+                     stride=depth_kernel_size,
+                     bias=False,
+                 ),
+             ],
+             norm_layer=nn.LayerNorm(normalized_shape=depth_embed_dim),
+         )
+
+         depth_preprocessor = RGBDTPreprocessor(
+             img_size=[1, 224, 224],
+             num_cls_tokens=1,
+             pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True),
+             rgbt_stem=None,
+             depth_stem=depth_stem,
+         )
+
+         thermal_stem = PatchEmbedGeneric(
+             [
+                 nn.Conv2d(
+                     kernel_size=thermal_kernel_size,
+                     in_channels=1,
+                     out_channels=thermal_embed_dim,
+                     stride=thermal_kernel_size,
+                     bias=False,
+                 ),
+             ],
+             norm_layer=nn.LayerNorm(normalized_shape=thermal_embed_dim),
+         )
+         thermal_preprocessor = ThermalPreprocessor(
+             img_size=[1, 224, 224],
+             num_cls_tokens=1,
+             pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True),
+             thermal_stem=thermal_stem,
243
+ )
244
+
245
+ imu_stem = PatchEmbedGeneric(
246
+ [
247
+ nn.Linear(
248
+ in_features=48,
249
+ out_features=imu_embed_dim,
250
+ bias=False,
251
+ ),
252
+ ],
253
+ norm_layer=nn.LayerNorm(normalized_shape=imu_embed_dim),
254
+ )
255
+
256
+ imu_preprocessor = IMUPreprocessor(
257
+ img_size=[6, 2000],
258
+ num_cls_tokens=1,
259
+ kernel_size=8,
260
+ embed_dim=imu_embed_dim,
261
+ pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True),
262
+ imu_stem=imu_stem,
263
+ )
264
+
265
+ modality_preprocessors = {
266
+ ModalityType.VISION: rgbt_preprocessor,
267
+ ModalityType.TEXT: text_preprocessor,
268
+ ModalityType.AUDIO: audio_preprocessor,
269
+ ModalityType.DEPTH: depth_preprocessor,
270
+ ModalityType.THERMAL: thermal_preprocessor,
271
+ ModalityType.IMU: imu_preprocessor,
272
+ }
273
+
274
+ return nn.ModuleDict(modality_preprocessors)
275
+
276
+ def _create_modality_trunks(
277
+ self,
278
+ vision_embed_dim=1024,
279
+ vision_num_blocks=24,
280
+ vision_num_heads=16,
281
+ text_embed_dim=768,
282
+ text_num_blocks=12,
283
+ text_num_heads=12,
284
+ audio_embed_dim=768,
285
+ audio_num_blocks=12,
286
+ audio_num_heads=12,
287
+ audio_drop_path=0.0,
288
+ depth_embed_dim=768,
289
+ depth_num_blocks=12,
290
+ depth_num_heads=12,
291
+ depth_drop_path=0.0,
292
+ thermal_embed_dim=768,
293
+ thermal_num_blocks=12,
294
+ thermal_num_heads=12,
295
+ thermal_drop_path=0.0,
296
+ imu_embed_dim=512,
297
+ imu_num_blocks=6,
298
+ imu_num_heads=8,
299
+ imu_drop_path=0.7,
300
+ ):
301
+ def instantiate_trunk(
302
+ embed_dim, num_blocks, num_heads, pre_transformer_ln, add_bias_kv, drop_path
303
+ ):
304
+ return SimpleTransformer(
305
+ embed_dim=embed_dim,
306
+ num_blocks=num_blocks,
307
+ ffn_dropout_rate=0.0,
308
+ drop_path_rate=drop_path,
309
+ attn_target=partial(
310
+ MultiheadAttention,
311
+ embed_dim=embed_dim,
312
+ num_heads=num_heads,
313
+ bias=True,
314
+ add_bias_kv=add_bias_kv,
315
+ ),
316
+ pre_transformer_layer=nn.Sequential(
317
+ nn.LayerNorm(embed_dim, eps=1e-6)
318
+ if pre_transformer_ln
319
+ else nn.Identity(),
320
+ EinOpsRearrange("b l d -> l b d"),
321
+ ),
322
+ post_transformer_layer=EinOpsRearrange("l b d -> b l d"),
323
+ )
324
+
325
+ modality_trunks = {}
326
+ modality_trunks[ModalityType.VISION] = instantiate_trunk(
327
+ vision_embed_dim,
328
+ vision_num_blocks,
329
+ vision_num_heads,
330
+ pre_transformer_ln=True,
331
+ add_bias_kv=False,
332
+ drop_path=0.0,
333
+ )
334
+ modality_trunks[ModalityType.TEXT] = instantiate_trunk(
335
+ text_embed_dim,
336
+ text_num_blocks,
337
+ text_num_heads,
338
+ pre_transformer_ln=False,
339
+ add_bias_kv=False,
340
+ drop_path=0.0,
341
+ )
342
+ modality_trunks[ModalityType.AUDIO] = instantiate_trunk(
343
+ audio_embed_dim,
344
+ audio_num_blocks,
345
+ audio_num_heads,
346
+ pre_transformer_ln=False,
347
+ add_bias_kv=True,
348
+ drop_path=audio_drop_path,
349
+ )
350
+ modality_trunks[ModalityType.DEPTH] = instantiate_trunk(
351
+ depth_embed_dim,
352
+ depth_num_blocks,
353
+ depth_num_heads,
354
+ pre_transformer_ln=False,
355
+ add_bias_kv=True,
356
+ drop_path=depth_drop_path,
357
+ )
358
+ modality_trunks[ModalityType.THERMAL] = instantiate_trunk(
359
+ thermal_embed_dim,
360
+ thermal_num_blocks,
361
+ thermal_num_heads,
362
+ pre_transformer_ln=False,
363
+ add_bias_kv=True,
364
+ drop_path=thermal_drop_path,
365
+ )
366
+ modality_trunks[ModalityType.IMU] = instantiate_trunk(
367
+ imu_embed_dim,
368
+ imu_num_blocks,
369
+ imu_num_heads,
370
+ pre_transformer_ln=False,
371
+ add_bias_kv=True,
372
+ drop_path=imu_drop_path,
373
+ )
374
+
375
+ return nn.ModuleDict(modality_trunks)
376
+
377
+ def _create_modality_heads(
378
+ self,
379
+ out_embed_dim,
380
+ vision_embed_dim,
381
+ text_embed_dim,
382
+ audio_embed_dim,
383
+ depth_embed_dim,
384
+ thermal_embed_dim,
385
+ imu_embed_dim,
386
+ ):
387
+ modality_heads = {}
388
+
389
+ modality_heads[ModalityType.VISION] = nn.Sequential(
390
+ nn.LayerNorm(normalized_shape=vision_embed_dim, eps=1e-6),
391
+ SelectElement(index=0),
392
+ nn.Linear(vision_embed_dim, out_embed_dim, bias=False),
393
+ )
394
+
395
+ modality_heads[ModalityType.TEXT] = SelectEOSAndProject(
396
+ proj=nn.Sequential(
397
+ nn.LayerNorm(normalized_shape=text_embed_dim, eps=1e-6),
398
+ nn.Linear(text_embed_dim, out_embed_dim, bias=False),
399
+ )
400
+ )
401
+
402
+ modality_heads[ModalityType.AUDIO] = nn.Sequential(
403
+ nn.LayerNorm(normalized_shape=audio_embed_dim, eps=1e-6),
404
+ SelectElement(index=0),
405
+ nn.Linear(audio_embed_dim, out_embed_dim, bias=False),
406
+ )
407
+
408
+ modality_heads[ModalityType.DEPTH] = nn.Sequential(
409
+ nn.LayerNorm(normalized_shape=depth_embed_dim, eps=1e-6),
410
+ SelectElement(index=0),
411
+ nn.Linear(depth_embed_dim, out_embed_dim, bias=False),
412
+ )
413
+
414
+ modality_heads[ModalityType.THERMAL] = nn.Sequential(
415
+ nn.LayerNorm(normalized_shape=thermal_embed_dim, eps=1e-6),
416
+ SelectElement(index=0),
417
+ nn.Linear(thermal_embed_dim, out_embed_dim, bias=False),
418
+ )
419
+
420
+ modality_heads[ModalityType.IMU] = nn.Sequential(
421
+ nn.LayerNorm(normalized_shape=imu_embed_dim, eps=1e-6),
422
+ SelectElement(index=0),
423
+ nn.Dropout(p=0.5),
424
+ nn.Linear(imu_embed_dim, out_embed_dim, bias=False),
425
+ )
426
+
427
+ return nn.ModuleDict(modality_heads)
428
+
429
+ def _create_modality_postprocessors(self, out_embed_dim):
430
+ modality_postprocessors = {}
431
+
432
+ modality_postprocessors[ModalityType.VISION] = Normalize(dim=-1)
433
+ modality_postprocessors[ModalityType.TEXT] = nn.Sequential(
434
+ Normalize(dim=-1), LearnableLogitScaling(learnable=True)
435
+ )
436
+ modality_postprocessors[ModalityType.AUDIO] = nn.Sequential(
437
+ Normalize(dim=-1),
438
+ LearnableLogitScaling(logit_scale_init=20.0, learnable=False),
439
+ )
440
+ modality_postprocessors[ModalityType.DEPTH] = nn.Sequential(
441
+ Normalize(dim=-1),
442
+ LearnableLogitScaling(logit_scale_init=5.0, learnable=False),
443
+ )
444
+ modality_postprocessors[ModalityType.THERMAL] = nn.Sequential(
445
+ Normalize(dim=-1),
446
+ LearnableLogitScaling(logit_scale_init=10.0, learnable=False),
447
+ )
448
+ modality_postprocessors[ModalityType.IMU] = nn.Sequential(
449
+ Normalize(dim=-1),
450
+ LearnableLogitScaling(logit_scale_init=5.0, learnable=False),
451
+ )
452
+
453
+ return nn.ModuleDict(modality_postprocessors)
454
+
455
+ def forward(self, inputs):
456
+ outputs = {}
457
+ for modality_key, modality_value in inputs.items():
458
+ reduce_list = (
459
+ modality_value.ndim >= 5
460
+ ) # Audio and Video inputs consist of multiple clips
461
+ if reduce_list:
462
+ B, S = modality_value.shape[:2]
463
+ modality_value = modality_value.reshape(
464
+ B * S, *modality_value.shape[2:]
465
+ )
466
+
467
+ if modality_value is not None:
468
+ modality_value = self.modality_preprocessors[modality_key](
469
+ **{modality_key: modality_value}
470
+ )
471
+ trunk_inputs = modality_value["trunk"]
472
+ head_inputs = modality_value["head"]
473
+ modality_value = self.modality_trunks[modality_key](**trunk_inputs)
474
+ modality_value = self.modality_heads[modality_key](
475
+ modality_value, **head_inputs
476
+ )
477
+ modality_value = self.modality_postprocessors[modality_key](
478
+ modality_value
479
+ )
480
+
481
+ if reduce_list:
482
+ modality_value = modality_value.reshape(B, S, -1)
483
+ modality_value = modality_value.mean(dim=1)
484
+
485
+ outputs[modality_key] = modality_value
486
+
487
+ return outputs
488
+
489
+
490
+ def imagebind_huge(pretrained=False):
491
+ model = ImageBindModel(
492
+ vision_embed_dim=1280,
493
+ vision_num_blocks=32,
494
+ vision_num_heads=16,
495
+ text_embed_dim=1024,
496
+ text_num_blocks=24,
497
+ text_num_heads=16,
498
+ out_embed_dim=1024,
499
+ audio_drop_path=0.1,
500
+ imu_drop_path=0.7,
501
+ )
502
+
503
+ if pretrained:
504
+ if not os.path.exists(".checkpoints/imagebind_huge.pth"):
505
+ print(
506
+ "Downloading imagebind weights to .checkpoints/imagebind_huge.pth ..."
507
+ )
508
+ os.makedirs(".checkpoints", exist_ok=True)
509
+ torch.hub.download_url_to_file(
510
+ "https://dl.fbaipublicfiles.com/imagebind/imagebind_huge.pth",
511
+ ".checkpoints/imagebind_huge.pth",
512
+ progress=True,
513
+ )
514
+
515
+ model.load_state_dict(torch.load(".checkpoints/imagebind_huge.pth"))
516
+
517
+ return model
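
To make the model above concrete, here is a minimal usage sketch (not part of the commit). It assumes this file is importable as models.imagebind_model; the tiny dimensions and the random token ids are hypothetical stand-ins — real text should go through the SimpleTokenizer defined in models/multimodal_preprocessors.py, and the pretrained checkpoint only matches the imagebind_huge() configuration.

import torch

from models.imagebind_model import ImageBindModel, ModalityType  # assumed import path

# Tiny hypothetical configuration so the sketch runs quickly on CPU.
model = ImageBindModel(
    out_embed_dim=32,
    vision_embed_dim=64, vision_num_blocks=1, vision_num_heads=2,
    text_embed_dim=64, text_num_blocks=1, text_num_heads=2,
    audio_embed_dim=64, audio_num_blocks=1, audio_num_heads=2,
    depth_embed_dim=64, depth_num_blocks=1, depth_num_heads=2,
    thermal_embed_dim=64, thermal_num_blocks=1, thermal_num_heads=2,
    imu_embed_dim=64, imu_num_blocks=1, imu_num_heads=2,
).eval()

inputs = {
    # B x C x H x W images; PadIm2Video repeats them to 2 frames internally.
    ModalityType.VISION: torch.randn(2, 3, 224, 224),
    # B x 77 placeholder token ids standing in for SimpleTokenizer output.
    ModalityType.TEXT: torch.randint(1, 49408, (2, 77)),
}
with torch.no_grad():
    embeddings = model(inputs)

# Each output is L2-normalized (text is additionally logit-scaled), so a
# matrix product gives cross-modal similarity logits.
print((embeddings[ModalityType.VISION] @ embeddings[ModalityType.TEXT].T).shape)  # torch.Size([2, 2])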
models/multimodal_preprocessors.py ADDED
@@ -0,0 +1,687 @@
1
+ #!/usr/bin/env python3
2
+ # Portions Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+
5
+ # This source code is licensed under the license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ import gzip
9
+ import html
10
+ import io
11
+ import math
12
+ from functools import lru_cache
13
+ from typing import Callable, List, Optional
14
+
15
+ import ftfy
16
+
17
+ import numpy as np
18
+ import regex as re
19
+ import torch
20
+ import torch.nn as nn
21
+ from iopath.common.file_io import g_pathmgr
22
+ from timm.models.layers import trunc_normal_
23
+
24
+ from models.helpers import cast_if_src_dtype, VerboseNNModule
25
+
26
+
27
+ def get_sinusoid_encoding_table(n_position, d_hid):
28
+ """Sinusoid position encoding table"""
29
+
30
+ # TODO: make it with torch instead of numpy
31
+ def get_position_angle_vec(position):
32
+ return [
33
+ position / np.power(10000, 2 * (hid_j // 2) / d_hid)
34
+ for hid_j in range(d_hid)
35
+ ]
36
+
37
+ sinusoid_table = np.array(
38
+ [get_position_angle_vec(pos_i) for pos_i in range(n_position)]
39
+ )
40
+ sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
41
+ sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
42
+
43
+ return torch.FloatTensor(sinusoid_table).unsqueeze(0)
44
+
45
+
46
+ def interpolate_pos_encoding_2d(target_spatial_size, pos_embed):
47
+ N = pos_embed.shape[1]
48
+ if N == target_spatial_size:
49
+ return pos_embed
50
+ dim = pos_embed.shape[-1]
51
+ # nn.functional.interpolate doesn't work with bfloat16 so we cast to float32
52
+ pos_embed, updated = cast_if_src_dtype(pos_embed, torch.bfloat16, torch.float32)
53
+ pos_embed = nn.functional.interpolate(
54
+ pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(
55
+ 0, 3, 1, 2
56
+ ),
57
+ scale_factor=math.sqrt(target_spatial_size / N),
58
+ mode="bicubic",
59
+ )
60
+ if updated:
61
+ pos_embed, _ = cast_if_src_dtype(pos_embed, torch.float32, torch.bfloat16)
62
+ pos_embed = pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
63
+ return pos_embed
64
+
65
+
66
+ def interpolate_pos_encoding(
67
+ npatch_per_img,
68
+ pos_embed,
69
+ patches_layout,
70
+ input_shape=None,
71
+ first_patch_idx=1,
72
+ ):
73
+ assert first_patch_idx == 0 or first_patch_idx == 1, "there is 1 CLS token or none"
74
+ N = pos_embed.shape[1] - first_patch_idx # since it's 1 if cls_token exists
75
+ if npatch_per_img == N:
76
+ return pos_embed
77
+
78
+ assert (
79
+ patches_layout[-1] == patches_layout[-2]
80
+ ), "Interpolation of pos embed not supported for non-square layouts"
81
+
82
+ class_emb = pos_embed[:, :first_patch_idx]
83
+ pos_embed = pos_embed[:, first_patch_idx:]
84
+
85
+ if input_shape is None or patches_layout[0] == 1:
86
+ # simple 2D pos embedding, no temporal component
87
+ pos_embed = interpolate_pos_encoding_2d(npatch_per_img, pos_embed)
88
+ elif patches_layout[0] > 1:
89
+ # pos embed has a temporal component
90
+ assert len(input_shape) == 4, "temporal interpolation not supported"
91
+ # we only support 2D interpolation in this case
92
+ num_frames = patches_layout[0]
93
+ num_spatial_tokens = patches_layout[1] * patches_layout[2]
94
+ pos_embed = pos_embed.view(1, num_frames, num_spatial_tokens, -1)
95
+ # interpolate embedding for zeroth frame
96
+ pos_embed = interpolate_pos_encoding_2d(
97
+ npatch_per_img, pos_embed[0, 0, ...].unsqueeze(0)
98
+ )
99
+ else:
100
+ raise ValueError("This type of interpolation isn't implemented")
101
+
102
+ return torch.cat((class_emb, pos_embed), dim=1)
103
+
104
+
105
+ def _get_pos_embedding(
106
+ npatch_per_img,
107
+ pos_embed,
108
+ patches_layout,
109
+ input_shape,
110
+ first_patch_idx=1,
111
+ ):
112
+ pos_embed = interpolate_pos_encoding(
113
+ npatch_per_img,
114
+ pos_embed,
115
+ patches_layout,
116
+ input_shape=input_shape,
117
+ first_patch_idx=first_patch_idx,
118
+ )
119
+ return pos_embed
120
+
121
+
122
+ class PatchEmbedGeneric(nn.Module):
123
+ """
124
+ PatchEmbed from Hydra
125
+ """
126
+
127
+ def __init__(self, proj_stem, norm_layer: Optional[nn.Module] = None):
128
+ super().__init__()
129
+
130
+ if len(proj_stem) > 1:
131
+ self.proj = nn.Sequential(*proj_stem)
132
+ else:
133
+ # Special case to be able to load pre-trained models that were
134
+ # trained with a standard stem
135
+ self.proj = proj_stem[0]
136
+ self.norm_layer = norm_layer
137
+
138
+ def get_patch_layout(self, img_size):
139
+ with torch.no_grad():
140
+ dummy_img = torch.zeros(
141
+ [
142
+ 1,
143
+ ]
144
+ + img_size
145
+ )
146
+ dummy_out = self.proj(dummy_img)
147
+ embed_dim = dummy_out.shape[1]
148
+ patches_layout = tuple(dummy_out.shape[2:])
149
+ num_patches = np.prod(patches_layout)
150
+ return patches_layout, num_patches, embed_dim
151
+
152
+ def forward(self, x):
153
+ x = self.proj(x)
154
+ # B C (T) H W -> B (T)HW C
155
+ x = x.flatten(2).transpose(1, 2)
156
+ if self.norm_layer is not None:
157
+ x = self.norm_layer(x)
158
+ return x
159
+
160
+
161
+ class SpatioTemporalPosEmbeddingHelper(VerboseNNModule):
162
+ def __init__(
163
+ self,
164
+ patches_layout: List,
165
+ num_patches: int,
166
+ num_cls_tokens: int,
167
+ embed_dim: int,
168
+ learnable: bool,
169
+ ) -> None:
170
+ super().__init__()
171
+ self.num_cls_tokens = num_cls_tokens
172
+ self.patches_layout = patches_layout
173
+ self.num_patches = num_patches
174
+ self.num_tokens = num_cls_tokens + num_patches
175
+ self.learnable = learnable
176
+ if self.learnable:
177
+ self.pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, embed_dim))
178
+ trunc_normal_(self.pos_embed, std=0.02)
179
+ else:
180
+ self.register_buffer(
181
+ "pos_embed", get_sinusoid_encoding_table(self.num_tokens, embed_dim)
182
+ )
183
+
184
+ def get_pos_embedding(self, vision_input, all_vision_tokens):
185
+ input_shape = vision_input.shape
186
+ pos_embed = _get_pos_embedding(
187
+ all_vision_tokens.size(1) - self.num_cls_tokens,
188
+ pos_embed=self.pos_embed,
189
+ patches_layout=self.patches_layout,
190
+ input_shape=input_shape,
191
+ first_patch_idx=self.num_cls_tokens,
192
+ )
193
+ return pos_embed
194
+
195
+
196
+ class RGBDTPreprocessor(VerboseNNModule):
197
+ def __init__(
198
+ self,
199
+ rgbt_stem: PatchEmbedGeneric,
200
+ depth_stem: PatchEmbedGeneric,
201
+ img_size: List = (3, 224, 224),
202
+ num_cls_tokens: int = 1,
203
+ pos_embed_fn: Callable = None,
204
+ use_type_embed: bool = False,
205
+ init_param_style: str = "openclip",
206
+ ) -> None:
207
+ super().__init__()
208
+ stem = rgbt_stem if rgbt_stem is not None else depth_stem
209
+ (
210
+ self.patches_layout,
211
+ self.num_patches,
212
+ self.embed_dim,
213
+ ) = stem.get_patch_layout(img_size)
214
+ self.rgbt_stem = rgbt_stem
215
+ self.depth_stem = depth_stem
216
+ self.use_pos_embed = pos_embed_fn is not None
217
+ self.use_type_embed = use_type_embed
218
+ self.num_cls_tokens = num_cls_tokens
219
+
220
+ if self.use_pos_embed:
221
+ self.pos_embedding_helper = pos_embed_fn(
222
+ patches_layout=self.patches_layout,
223
+ num_cls_tokens=num_cls_tokens,
224
+ num_patches=self.num_patches,
225
+ embed_dim=self.embed_dim,
226
+ )
227
+ if self.num_cls_tokens > 0:
228
+ self.cls_token = nn.Parameter(
229
+ torch.zeros(1, self.num_cls_tokens, self.embed_dim)
230
+ )
231
+ if self.use_type_embed:
232
+ self.type_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
233
+
234
+ self.init_parameters(init_param_style)
235
+
236
+ @torch.no_grad()
237
+ def init_parameters(self, init_param_style):
238
+ if init_param_style == "openclip":
239
+ # OpenCLIP style initialization
240
+ scale = self.embed_dim**-0.5
241
+ if self.use_pos_embed:
242
+ nn.init.normal_(self.pos_embedding_helper.pos_embed)
243
+ self.pos_embedding_helper.pos_embed *= scale
244
+
245
+ if self.num_cls_tokens > 0:
246
+ nn.init.normal_(self.cls_token)
247
+ self.cls_token *= scale
248
+ elif init_param_style == "vit":
249
+ self.cls_token.data.fill_(0)
250
+ else:
251
+ raise ValueError(f"Unknown init {init_param_style}")
252
+
253
+ if self.use_type_embed:
254
+ nn.init.normal_(self.type_embed)
255
+
256
+ def tokenize_input_and_cls_pos(self, input, stem, mask):
257
+ # tokens is of shape B x L x D
258
+ tokens = stem(input)
259
+ assert tokens.ndim == 3
260
+ assert tokens.shape[2] == self.embed_dim
261
+ B = tokens.shape[0]
262
+ if self.num_cls_tokens > 0:
263
+ class_tokens = self.cls_token.expand(
264
+ B, -1, -1
265
+ ) # stole class_tokens impl from Phil Wang, thanks
266
+ tokens = torch.cat((class_tokens, tokens), dim=1)
267
+ if self.use_pos_embed:
268
+ pos_embed = self.pos_embedding_helper.get_pos_embedding(input, tokens)
269
+ tokens = tokens + pos_embed
270
+ if self.use_type_embed:
271
+ tokens = tokens + self.type_embed.expand(B, -1, -1)
272
+ return tokens
273
+
274
+ def forward(self, vision=None, depth=None, patch_mask=None):
275
+ if patch_mask is not None:
276
+ raise NotImplementedError()
277
+
278
+ if vision is not None:
279
+ vision_tokens = self.tokenize_input_and_cls_pos(
280
+ vision, self.rgbt_stem, patch_mask
281
+ )
282
+
283
+ if depth is not None:
284
+ depth_tokens = self.tokenize_input_and_cls_pos(
285
+ depth, self.depth_stem, patch_mask
286
+ )
287
+
288
+ # aggregate tokens
289
+ if vision is not None and depth is not None:
290
+ final_tokens = vision_tokens + depth_tokens
291
+ else:
292
+ final_tokens = vision_tokens if vision is not None else depth_tokens
293
+ return_dict = {
294
+ "trunk": {
295
+ "tokens": final_tokens,
296
+ },
297
+ "head": {},
298
+ }
299
+ return return_dict
300
+
301
+
302
+ class AudioPreprocessor(RGBDTPreprocessor):
303
+ def __init__(self, audio_stem: PatchEmbedGeneric, **kwargs) -> None:
304
+ super().__init__(rgbt_stem=audio_stem, depth_stem=None, **kwargs)
305
+
306
+ def forward(self, audio=None):
307
+ return super().forward(vision=audio)
308
+
309
+
310
+ class ThermalPreprocessor(RGBDTPreprocessor):
311
+ def __init__(self, thermal_stem: PatchEmbedGeneric, **kwargs) -> None:
312
+ super().__init__(rgbt_stem=thermal_stem, depth_stem=None, **kwargs)
313
+
314
+ def forward(self, thermal=None):
315
+ return super().forward(vision=thermal)
316
+
317
+
318
+ def build_causal_attention_mask(context_length):
319
+ # lazily create a causal attention mask for the text trunk; pytorch uses an
320
+ + # additive attention mask, so positions to be hidden are filled with -inf
321
+ mask = torch.empty(context_length, context_length, requires_grad=False)
322
+ mask.fill_(float("-inf"))
323
+ mask.triu_(1)  # zero the diagonal and below: each token attends only to itself and earlier tokens
324
+ return mask
325
+
326
+
327
+ class TextPreprocessor(VerboseNNModule):
328
+ def __init__(
329
+ self,
330
+ vocab_size: int,
331
+ context_length: int,
332
+ embed_dim: int,
333
+ causal_masking: bool,
334
+ supply_seq_len_to_head: bool = True,
335
+ num_cls_tokens: int = 0,
336
+ init_param_style: str = "openclip",
337
+ ) -> None:
338
+ super().__init__()
339
+ self.vocab_size = vocab_size
340
+ self.context_length = context_length
341
+ self.token_embedding = nn.Embedding(vocab_size, embed_dim)
342
+ self.pos_embed = nn.Parameter(
343
+ torch.empty(1, self.context_length + num_cls_tokens, embed_dim)
344
+ )
345
+ self.causal_masking = causal_masking
346
+ if self.causal_masking:
347
+ mask = build_causal_attention_mask(self.context_length)
348
+ # register the mask as a buffer so it can be moved to the right device
349
+ self.register_buffer("mask", mask)
350
+
351
+ self.supply_seq_len_to_head = supply_seq_len_to_head
352
+ self.num_cls_tokens = num_cls_tokens
353
+ self.embed_dim = embed_dim
354
+ if num_cls_tokens > 0:
355
+ assert self.causal_masking is False, "Masking + CLS token isn't implemented"
356
+ self.cls_token = nn.Parameter(
357
+ torch.zeros(1, self.num_cls_tokens, embed_dim)
358
+ )
359
+
360
+ self.init_parameters(init_param_style)
361
+
362
+ @torch.no_grad()
363
+ def init_parameters(self, init_param_style="openclip"):
364
+ # OpenCLIP style initialization
365
+ nn.init.normal_(self.token_embedding.weight, std=0.02)
366
+ nn.init.normal_(self.pos_embed, std=0.01)
367
+
368
+ if init_param_style == "openclip":
369
+ # OpenCLIP style initialization
370
+ scale = self.embed_dim**-0.5
371
+ if self.num_cls_tokens > 0:
372
+ nn.init.normal_(self.cls_token)
373
+ self.cls_token *= scale
374
+ elif init_param_style == "vit":
375
+ self.cls_token.data.fill_(0)
376
+ else:
377
+ raise ValueError(f"Unknown init {init_param_style}")
378
+
379
+ def forward(self, text):
380
+ # text tokens are of shape B x L x D
381
+ text_tokens = self.token_embedding(text)
382
+ # concat CLS tokens if any
383
+ if self.num_cls_tokens > 0:
384
+ B = text_tokens.shape[0]
385
+ class_tokens = self.cls_token.expand(
386
+ B, -1, -1
387
+ ) # stole class_tokens impl from Phil Wang, thanks
388
+ text_tokens = torch.cat((class_tokens, text_tokens), dim=1)
389
+ text_tokens = text_tokens + self.pos_embed
390
+ return_dict = {
391
+ "trunk": {
392
+ "tokens": text_tokens,
393
+ },
394
+ "head": {},
395
+ }
396
+ # the EOT token has the largest id in the vocab, so argmax over the raw ids gives each sequence's end position for the head
397
+ if self.supply_seq_len_to_head:
398
+ text_lengths = text.argmax(dim=-1)
399
+ return_dict["head"] = {
400
+ "seq_len": text_lengths,
401
+ }
402
+ if self.causal_masking:
403
+ return_dict["trunk"].update({"attn_mask": self.mask})
404
+ return return_dict
405
+
406
+
407
+ class Im2Video(nn.Module):
408
+ """Convert an image into a trivial video."""
409
+
410
+ def __init__(self, time_dim=2):
411
+ super().__init__()
412
+ self.time_dim = time_dim
413
+
414
+ def forward(self, x):
415
+ if x.ndim == 4:
416
+ # B, C, H, W -> B, C, T, H, W
417
+ return x.unsqueeze(self.time_dim)
418
+ elif x.ndim == 5:
419
+ return x
420
+ else:
421
+ raise ValueError(f"Dimension incorrect {x.shape}")
422
+
423
+
424
+ class PadIm2Video(Im2Video):
425
+ def __init__(self, ntimes, pad_type, time_dim=2):
426
+ super().__init__(time_dim=time_dim)
427
+ assert ntimes > 0
428
+ assert pad_type in ["zero", "repeat"]
429
+ self.ntimes = ntimes
430
+ self.pad_type = pad_type
431
+
432
+ def forward(self, x):
433
+ x = super().forward(x)
434
+ if x.shape[self.time_dim] == 1:
435
+ if self.pad_type == "repeat":
436
+ new_shape = [1] * len(x.shape)
437
+ new_shape[self.time_dim] = self.ntimes
438
+ x = x.repeat(new_shape)
439
+ elif self.pad_type == "zero":
440
+ padarg = [0, 0] * len(x.shape)
441
+ padarg[2 * self.time_dim + 1] = self.ntimes - x.shape[self.time_dim]
442
+ x = nn.functional.pad(x, padarg)
443
+ return x
444
+
445
+
446
+ # Modified from github.com/openai/CLIP
447
+ @lru_cache()
448
+ def bytes_to_unicode():
449
+ """
450
+ Returns a mapping between utf-8 bytes and corresponding unicode strings.
451
+ The reversible bpe codes work on unicode strings.
452
+ This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
453
+ When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
454
+ This is a significant percentage of your normal, say, 32K bpe vocab.
455
+ To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
456
+ This also avoids mapping to whitespace/control characters that the bpe code barfs on.
457
+ """
458
+ bs = (
459
+ list(range(ord("!"), ord("~") + 1))
460
+ + list(range(ord("¡"), ord("¬") + 1))
461
+ + list(range(ord("®"), ord("ÿ") + 1))
462
+ )
463
+ cs = bs[:]
464
+ n = 0
465
+ for b in range(2**8):
466
+ if b not in bs:
467
+ bs.append(b)
468
+ cs.append(2**8 + n)
469
+ n += 1
470
+ cs = [chr(n) for n in cs]
471
+ return dict(zip(bs, cs))
472
+
473
+
474
+ def get_pairs(word):
475
+ """Return set of symbol pairs in a word.
476
+ Word is represented as tuple of symbols (symbols being variable-length strings).
477
+ """
478
+ pairs = set()
479
+ prev_char = word[0]
480
+ for char in word[1:]:
481
+ pairs.add((prev_char, char))
482
+ prev_char = char
483
+ return pairs
484
+
485
+
486
+ def basic_clean(text):
487
+ text = ftfy.fix_text(text)
488
+ text = html.unescape(html.unescape(text))
489
+ return text.strip()
490
+
491
+
492
+ def whitespace_clean(text):
493
+ text = re.sub(r"\s+", " ", text)
494
+ text = text.strip()
495
+ return text
496
+
497
+
498
+ class SimpleTokenizer:
499
+ def __init__(self, bpe_path: str, context_length=77):
500
+ self.byte_encoder = bytes_to_unicode()
501
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
502
+
503
+ with g_pathmgr.open(bpe_path, "rb") as fh:
504
+ bpe_bytes = io.BytesIO(fh.read())
505
+ merges = gzip.open(bpe_bytes).read().decode("utf-8").split("\n")
506
+ merges = merges[1 : 49152 - 256 - 2 + 1]
507
+ merges = [tuple(merge.split()) for merge in merges]
508
+ vocab = list(bytes_to_unicode().values())
509
+ vocab = vocab + [v + "</w>" for v in vocab]
510
+ for merge in merges:
511
+ vocab.append("".join(merge))
512
+ vocab.extend(["<|startoftext|>", "<|endoftext|>"])
513
+ self.encoder = dict(zip(vocab, range(len(vocab))))
514
+ self.decoder = {v: k for k, v in self.encoder.items()}
515
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
516
+ self.cache = {
517
+ "<|startoftext|>": "<|startoftext|>",
518
+ "<|endoftext|>": "<|endoftext|>",
519
+ }
520
+ self.pat = re.compile(
521
+ r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
522
+ re.IGNORECASE,
523
+ )
524
+ self.context_length = context_length
525
+
526
+ def bpe(self, token):
527
+ if token in self.cache:
528
+ return self.cache[token]
529
+ word = tuple(token[:-1]) + (token[-1] + "</w>",)
530
+ pairs = get_pairs(word)
531
+
532
+ if not pairs:
533
+ return token + "</w>"
534
+
535
+ while True:
536
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
537
+ if bigram not in self.bpe_ranks:
538
+ break
539
+ first, second = bigram
540
+ new_word = []
541
+ i = 0
542
+ while i < len(word):
543
+ try:
544
+ j = word.index(first, i)
545
+ new_word.extend(word[i:j])
546
+ i = j
547
+ except ValueError:  # "first" does not occur in the remainder of the word
548
+ new_word.extend(word[i:])
549
+ break
550
+
551
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
552
+ new_word.append(first + second)
553
+ i += 2
554
+ else:
555
+ new_word.append(word[i])
556
+ i += 1
557
+ new_word = tuple(new_word)
558
+ word = new_word
559
+ if len(word) == 1:
560
+ break
561
+ else:
562
+ pairs = get_pairs(word)
563
+ word = " ".join(word)
564
+ self.cache[token] = word
565
+ return word
566
+
567
+ def encode(self, text):
568
+ bpe_tokens = []
569
+ text = whitespace_clean(basic_clean(text)).lower()
570
+ for token in re.findall(self.pat, text):
571
+ token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
572
+ bpe_tokens.extend(
573
+ self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")
574
+ )
575
+ return bpe_tokens
576
+
577
+ def decode(self, tokens):
578
+ text = "".join([self.decoder[token] for token in tokens])
579
+ text = (
580
+ bytearray([self.byte_decoder[c] for c in text])
581
+ .decode("utf-8", errors="replace")
582
+ .replace("</w>", " ")
583
+ )
584
+ return text
585
+
586
+ def __call__(self, texts, context_length=None):
587
+ if not context_length:
588
+ context_length = self.context_length
589
+
590
+ if isinstance(texts, str):
591
+ texts = [texts]
592
+
593
+ sot_token = self.encoder["<|startoftext|>"]
594
+ eot_token = self.encoder["<|endoftext|>"]
595
+ all_tokens = [[sot_token] + self.encode(text) + [eot_token] for text in texts]
596
+ result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
597
+
598
+ for i, tokens in enumerate(all_tokens):
599
+ tokens = tokens[:context_length]
600
+ result[i, : len(tokens)] = torch.tensor(tokens)
601
+
602
+ if len(result) == 1:
603
+ return result[0]
604
+ return result
605
+
606
+
607
+ class IMUPreprocessor(VerboseNNModule):
608
+ def __init__(
609
+ self,
610
+ kernel_size: int,
611
+ imu_stem: PatchEmbedGeneric,
612
+ embed_dim: int,
613
+ img_size: List = (6, 2000),
614
+ num_cls_tokens: int = 1,
615
+ pos_embed_fn: Callable = None,
616
+ init_param_style: str = "openclip",
617
+ ) -> None:
618
+ super().__init__()
619
+ stem = imu_stem
620
+ self.imu_stem = imu_stem
621
+ self.embed_dim = embed_dim
622
+ self.use_pos_embed = pos_embed_fn is not None
623
+ self.num_cls_tokens = num_cls_tokens
624
+ self.kernel_size = kernel_size
625
+ self.pos_embed = nn.Parameter(
626
+ torch.empty(1, (img_size[1] // kernel_size) + num_cls_tokens, embed_dim)
627
+ )
628
+
629
+ if self.num_cls_tokens > 0:
630
+ self.cls_token = nn.Parameter(
631
+ torch.zeros(1, self.num_cls_tokens, self.embed_dim)
632
+ )
633
+
634
+ self.init_parameters(init_param_style)
635
+
636
+ @torch.no_grad()
637
+ def init_parameters(self, init_param_style):
638
+ nn.init.normal_(self.pos_embed, std=0.01)
639
+
640
+ if init_param_style == "openclip":
641
+ # OpenCLIP style initialization
642
+ scale = self.embed_dim**-0.5
643
+
644
+ if self.num_cls_tokens > 0:
645
+ nn.init.normal_(self.cls_token)
646
+ self.cls_token *= scale
647
+ elif init_param_style == "vit":
648
+ self.cls_token.data.fill_(0)
649
+ else:
650
+ raise ValueError(f"Unknown init {init_param_style}")
651
+
652
+ def tokenize_input_and_cls_pos(self, input, stem):
653
+ # tokens is of shape B x L x D
654
+ tokens = stem.norm_layer(stem.proj(input))
655
+ assert tokens.ndim == 3
656
+ assert tokens.shape[2] == self.embed_dim
657
+ B = tokens.shape[0]
658
+ if self.num_cls_tokens > 0:
659
+ class_tokens = self.cls_token.expand(
660
+ B, -1, -1
661
+ ) # stole class_tokens impl from Phil Wang, thanks
662
+ tokens = torch.cat((class_tokens, tokens), dim=1)
663
+ if self.use_pos_embed:
664
+ tokens = tokens + self.pos_embed
665
+ return tokens
666
+
667
+ def forward(self, imu):
668
+ # Patchify
669
+ imu = imu.unfold(
670
+ -1,
671
+ self.kernel_size,
672
+ self.kernel_size,
673
+ ).permute(0, 2, 1, 3)
674
+ imu = imu.reshape(imu.size(0), imu.size(1), -1)
675
+
676
+ imu_tokens = self.tokenize_input_and_cls_pos(
677
+ imu,
678
+ self.imu_stem,
679
+ )
680
+
681
+ return_dict = {
682
+ "trunk": {
683
+ "tokens": imu_tokens,
684
+ },
685
+ "head": {},
686
+ }
687
+ return return_dict
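
As a quick, self-contained illustration of the position-embedding utilities above (assuming this file is importable as models.multimodal_preprocessors; the grid sizes are arbitrary):

from models.multimodal_preprocessors import (  # assumed import path
    get_sinusoid_encoding_table,
    interpolate_pos_encoding_2d,
)

# Fixed sinusoidal embeddings for a 4x4 patch grid, 8 dims per position.
pos_embed = get_sinusoid_encoding_table(n_position=16, d_hid=8)
print(pos_embed.shape)  # torch.Size([1, 16, 8])

# Bicubically resize to a 7x7 grid (49 patches), as happens when the input
# resolution, and hence the patch count, differs from the training setup.
resized = interpolate_pos_encoding_2d(49, pos_embed)
print(resized.shape)  # torch.Size([1, 49, 8])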
models/transformer.py ADDED
@@ -0,0 +1,284 @@
1
+ #!/usr/bin/env python3
2
+ # Portions Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+
5
+ # This source code is licensed under the license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ # Code modified from
9
+ # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py ;
10
+ # https://github.com/facebookresearch/deit/blob/main/models.py
11
+ # and https://github.com/facebookresearch/vissl/blob/main/vissl/models/trunks/vision_transformer.py
12
+
13
+
14
+ import copy
15
+ import fnmatch
16
+ import logging
17
+ from functools import partial
18
+ from typing import Callable, List
19
+
20
+ import torch
21
+ import torch.nn as nn
22
+ import torch.utils.checkpoint as checkpoint
23
+
24
+ from timm.models.layers import DropPath, trunc_normal_
25
+
26
+
27
+ class Attention(nn.Module):
28
+ def __init__(
29
+ self,
30
+ dim,
31
+ num_heads=8,
32
+ qkv_bias=False,
33
+ qk_scale=None,
34
+ attn_drop=0.0,
35
+ proj_drop=0.0,
36
+ ):
37
+ super().__init__()
38
+ self.num_heads = num_heads
39
+ head_dim = dim // num_heads
40
+ # NOTE: the scale factor was wrong in the original timm version;
41
+ # qk_scale can be set manually to stay compatible with previous weights
42
+ self.scale = qk_scale or head_dim**-0.5
43
+
44
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
45
+ self.attn_drop = nn.Dropout(attn_drop)
46
+ self.proj = nn.Linear(dim, dim)
47
+ self.proj_drop = nn.Dropout(proj_drop)
48
+
49
+ def forward(self, x):
50
+ B, N, C = x.shape
51
+ qkv = (
52
+ self.qkv(x)
53
+ .reshape(B, N, 3, self.num_heads, C // self.num_heads)
54
+ .permute(2, 0, 3, 1, 4)
55
+ )
56
+ q, k, v = (
57
+ qkv[0],
58
+ qkv[1],
59
+ qkv[2],
60
+ ) # make torchscript happy (cannot use tensor as tuple)
61
+
62
+ attn = (q @ k.transpose(-2, -1)) * self.scale
63
+ attn = attn.softmax(dim=-1)
64
+ attn = self.attn_drop(attn)
65
+
66
+ x = (attn @ v).transpose(1, 2).reshape(B, N, C)
67
+ x = self.proj(x)
68
+ x = self.proj_drop(x)
69
+ return x
70
+
71
+
72
+ class Mlp(nn.Module):
73
+ def __init__(
74
+ self,
75
+ in_features,
76
+ hidden_features=None,
77
+ out_features=None,
78
+ act_layer=nn.GELU,
79
+ drop=0.0,
80
+ ):
81
+ super().__init__()
82
+ out_features = out_features or in_features
83
+ hidden_features = hidden_features or in_features
84
+ self.fc1 = nn.Linear(in_features, hidden_features)
85
+ self.act = act_layer()
86
+ self.fc2 = nn.Linear(hidden_features, out_features)
87
+ self.drop = nn.Dropout(drop)
88
+
89
+ def forward(self, x):
90
+ x = self.fc1(x)
91
+ x = self.act(x)
92
+ x = self.drop(x)
93
+ x = self.fc2(x)
94
+ x = self.drop(x)
95
+ return x
96
+
97
+
98
+ class MultiheadAttention(nn.MultiheadAttention):
99
+ def forward(self, x: torch.Tensor, attn_mask: torch.Tensor):
100
+ return super().forward(x, x, x, need_weights=False, attn_mask=attn_mask)[0]
101
+
102
+
103
+ class ViTAttention(Attention):
104
+ def forward(self, x: torch.Tensor, attn_mask: torch.Tensor):
105
+ assert attn_mask is None
106
+ return super().forward(x)
107
+
108
+
109
+ class BlockWithMasking(nn.Module):
110
+ def __init__(
111
+ self,
112
+ dim: int,
113
+ attn_target: Callable,
114
+ mlp_ratio: int = 4,
115
+ act_layer: Callable = nn.GELU,
116
+ norm_layer: Callable = nn.LayerNorm,
117
+ ffn_dropout_rate: float = 0.0,
118
+ drop_path: float = 0.0,
119
+ layer_scale_type: str = None,
120
+ layer_scale_init_value: float = 1e-4,
121
+ ):
122
+ super().__init__()
123
+
124
+ assert not isinstance(
125
+ attn_target, nn.Module
126
+ ), "attn_target should be a Callable. Otherwise attn_target is shared across blocks!"
127
+ self.attn = attn_target()
128
+ if drop_path > 0.0:
129
+ self.drop_path = DropPath(drop_path)
130
+ else:
131
+ self.drop_path = nn.Identity()
132
+ self.norm_1 = norm_layer(dim)
133
+ mlp_hidden_dim = int(mlp_ratio * dim)
134
+ self.mlp = Mlp(
135
+ in_features=dim,
136
+ hidden_features=mlp_hidden_dim,
137
+ act_layer=act_layer,
138
+ drop=ffn_dropout_rate,
139
+ )
140
+ self.norm_2 = norm_layer(dim)
141
+ self.layer_scale_type = layer_scale_type
142
+ if self.layer_scale_type is not None:
143
+ assert self.layer_scale_type in [
144
+ "per_channel",
145
+ "scalar",
146
+ ], f"Found Layer scale type {self.layer_scale_type}"
147
+ if self.layer_scale_type == "per_channel":
148
+ # one gamma value per channel
149
+ gamma_shape = [1, 1, dim]
150
+ elif self.layer_scale_type == "scalar":
151
+ # single gamma value for all channels
152
+ gamma_shape = [1, 1, 1]
153
+ # two gammas: for each part of the fwd in the encoder
154
+ self.layer_scale_gamma1 = nn.Parameter(
155
+ torch.ones(size=gamma_shape) * layer_scale_init_value,
156
+ requires_grad=True,
157
+ )
158
+ self.layer_scale_gamma2 = nn.Parameter(
159
+ torch.ones(size=gamma_shape) * layer_scale_init_value,
160
+ requires_grad=True,
161
+ )
162
+
163
+ def forward(self, x: torch.Tensor, attn_mask: torch.Tensor):
164
+ if self.layer_scale_type is None:
165
+ x = x + self.drop_path(self.attn(self.norm_1(x), attn_mask))
166
+ x = x + self.drop_path(self.mlp(self.norm_2(x)))
167
+ else:
168
+ x = (
169
+ x
170
+ + self.drop_path(self.attn(self.norm_1(x), attn_mask))
171
+ * self.layer_scale_gamma1
172
+ )
173
+ x = x + self.drop_path(self.mlp(self.norm_2(x))) * self.layer_scale_gamma2
174
+ return x
175
+
176
+
177
+ _LAYER_NORM = partial(nn.LayerNorm, eps=1e-6)
178
+
179
+
180
+ class SimpleTransformer(nn.Module):
181
+ def __init__(
182
+ self,
183
+ attn_target: Callable,
184
+ embed_dim: int,
185
+ num_blocks: int,
186
+ block: Callable = BlockWithMasking,
187
+ pre_transformer_layer: Callable = None,
188
+ post_transformer_layer: Callable = None,
189
+ drop_path_rate: float = 0.0,
190
+ drop_path_type: str = "progressive",
191
+ norm_layer: Callable = _LAYER_NORM,
192
+ mlp_ratio: int = 4,
193
+ ffn_dropout_rate: float = 0.0,
194
+ layer_scale_type: str = None, # from cait; possible values are None, "per_channel", "scalar"
195
+ layer_scale_init_value: float = 1e-4, # from cait; float
196
+ weight_init_style: str = "jax", # possible values jax or pytorch
197
+ ):
198
+ """
199
+ Simple Transformer with the following features
200
+ 1. Supports masked attention
201
+ 2. Supports DropPath
202
+ 3. Supports LayerScale
203
+ 4. Supports Dropout in Attention and FFN
204
+ 5. Makes few assumptions about the input except that it is a Tensor
205
+ """
206
+ super().__init__()
207
+ self.pre_transformer_layer = pre_transformer_layer
208
+ if drop_path_type == "progressive":
209
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, num_blocks)]
210
+ elif drop_path_type == "uniform":
211
+ dpr = [drop_path_rate for i in range(num_blocks)]
212
+ else:
213
+ raise ValueError(f"Unknown drop_path_type: {drop_path_type}")
214
+
215
+ self.blocks = nn.Sequential(
216
+ *[
217
+ block(
218
+ dim=embed_dim,
219
+ attn_target=attn_target,
220
+ mlp_ratio=mlp_ratio,
221
+ ffn_dropout_rate=ffn_dropout_rate,
222
+ drop_path=dpr[i],
223
+ norm_layer=norm_layer,
224
+ layer_scale_type=layer_scale_type,
225
+ layer_scale_init_value=layer_scale_init_value,
226
+ )
227
+ for i in range(num_blocks)
228
+ ]
229
+ )
230
+ self.post_transformer_layer = post_transformer_layer
231
+ self.weight_init_style = weight_init_style
232
+ self.apply(self._init_weights)
233
+
234
+ def _init_weights(self, m):
235
+ if isinstance(m, nn.Linear):
236
+ if self.weight_init_style == "jax":
237
+ # Based on MAE and official Jax ViT implementation
238
+ torch.nn.init.xavier_uniform_(m.weight)
239
+ elif self.weight_init_style == "pytorch":
240
+ # PyTorch ViT uses trunc_normal_
241
+ trunc_normal_(m.weight, std=0.02)
242
+
243
+ if m.bias is not None:
244
+ nn.init.constant_(m.bias, 0)
245
+ elif isinstance(m, (nn.LayerNorm)):
246
+ nn.init.constant_(m.bias, 0)
247
+ nn.init.constant_(m.weight, 1.0)
248
+
249
+ def forward(
250
+ self,
251
+ tokens: torch.Tensor,
252
+ attn_mask: torch.Tensor = None,
253
+ use_checkpoint: bool = False,
254
+ checkpoint_every_n: int = 1,
255
+ checkpoint_blk_ids: List[int] = None,
256
+ ):
257
+ """
258
+ Inputs
259
+ - tokens: data of shape N x L x D (or L x N x D depending on the attention implementation)
260
+ - attn_mask: mask of shape L x L
261
+
262
+ Output
263
+ - x: data of shape N x L x D (or L x N x D depending on the attention implementation)
264
+ """
265
+ if self.pre_transformer_layer:
266
+ tokens = self.pre_transformer_layer(tokens)
267
+ if use_checkpoint and checkpoint_blk_ids is None:
268
+ checkpoint_blk_ids = [
269
+ blk_id
270
+ for blk_id in range(len(self.blocks))
271
+ if blk_id % checkpoint_every_n == 0
272
+ ]
273
+ if checkpoint_blk_ids:
274
+ checkpoint_blk_ids = set(checkpoint_blk_ids)
275
+ for blk_id, blk in enumerate(self.blocks):
276
+ if use_checkpoint and blk_id in checkpoint_blk_ids:
277
+ tokens = checkpoint.checkpoint(
278
+ blk, tokens, attn_mask, use_reentrant=False
279
+ )
280
+ else:
281
+ tokens = blk(tokens, attn_mask=attn_mask)
282
+ if self.post_transformer_layer:
283
+ tokens = self.post_transformer_layer(tokens)
284
+ return tokens
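
And a minimal sketch of driving SimpleTransformer directly, wired the same way ImageBindModel.instantiate_trunk wires its trunks (the import path models.transformer is assumed). nn.MultiheadAttention defaults to sequence-first tensors, which is why the model brackets each trunk with EinOpsRearrange; here the tokens are simply passed sequence-first:

from functools import partial

import torch

from models.transformer import MultiheadAttention, SimpleTransformer  # assumed import path

embed_dim = 64
trunk = SimpleTransformer(
    attn_target=partial(
        MultiheadAttention,
        embed_dim=embed_dim,
        num_heads=4,
        bias=True,
        add_bias_kv=False,
    ),
    embed_dim=embed_dim,
    num_blocks=2,
    drop_path_rate=0.1,  # "progressive": linearly spaced, 0.0 then 0.1
).eval()  # eval() turns DropPath into an identity

tokens = torch.randn(10, 2, embed_dim)  # L x N x D (sequence-first)
out = trunk(tokens)  # no attn_mask -> full bidirectional attention
print(out.shape)  # torch.Size([10, 2, 64])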
requirements.txt ADDED
@@ -0,0 +1,11 @@
1
+ --extra-index-url https://download.pytorch.org/whl/cu113
2
+ torch==1.13
3
+ torchvision==0.14.0
4
+ torchaudio==0.13.0
5
+ pytorchvideo @ git+https://github.com/facebookresearch/pytorchvideo.git@28fe037d212663c6a24f373b94cc5d478c8c1a1d
6
+ timm==0.6.7
7
+ ftfy
8
+ regex
9
+ einops
10
+ fvcore
11
+ decord==0.6.0