linbin committed · Commit 8373c11 · Parent(s): 989983f
Upload 323 files
This view is limited to 50 files because it contains too many changes. See raw diff.
- DATASETS.md +1 -0
- DATASET_LICENSE +400 -0
- LICENSE +21 -0
- README.md +240 -88
- TRAIN_AND_VALIDATE.md +207 -0
- a_cls/__pycache__/precision.cpython-38.pyc +0 -0
- a_cls/__pycache__/stats.cpython-38.pyc +0 -0
- a_cls/__pycache__/zero_shot.cpython-38.pyc +0 -0
- a_cls/__pycache__/zero_shot_classifier.cpython-38.pyc +0 -0
- a_cls/__pycache__/zero_shot_metadata.cpython-38.pyc +0 -0
- a_cls/__pycache__/zeroshot_cls.cpython-38.pyc +0 -0
- app.py +99 -224
- assets/audio/0.wav +0 -0
- assets/audio/1.wav +0 -0
- assets/demo.png +0 -0
- assets/depth/0.png +0 -0
- assets/depth/1.png +0 -0
- assets/iclr_dataset_sample.jpg +0 -0
- assets/image/0.jpg +0 -0
- assets/image/1.jpg +0 -0
- assets/res2.jpg +0 -0
- assets/thermal/0.jpg +0 -0
- assets/thermal/1.jpg +0 -0
- assets/video/0.mp4 +0 -0
- assets/video/1.mp4 +0 -0
- d_cls/__pycache__/precision.cpython-38.pyc +0 -0
- d_cls/__pycache__/zero_shot.cpython-38.pyc +0 -0
- d_cls/__pycache__/zero_shot_classifier.cpython-38.pyc +0 -0
- d_cls/__pycache__/zero_shot_metadata.cpython-38.pyc +0 -0
- d_cls/__pycache__/zeroshot_cls.cpython-38.pyc +0 -0
- data/__pycache__/base_datasets.cpython-38.pyc +0 -0
- data/__pycache__/build_datasets.cpython-38.pyc +0 -0
- data/__pycache__/new_loadvat.cpython-38.pyc +0 -0
- data/__pycache__/process_audio.cpython-38.pyc +0 -0
- data/__pycache__/process_depth.cpython-38.pyc +0 -0
- data/__pycache__/process_image.cpython-38.pyc +0 -0
- data/__pycache__/process_text.cpython-38.pyc +0 -0
- data/__pycache__/process_thermal.cpython-38.pyc +0 -0
- data/__pycache__/process_video.cpython-38.pyc +0 -0
- data/process_audio.py +4 -4
- i_cls/__pycache__/precision.cpython-38.pyc +0 -0
- i_cls/__pycache__/zero_shot.cpython-38.pyc +0 -0
- i_cls/__pycache__/zeroshot_cls.cpython-38.pyc +0 -0
- inference.py +48 -0
- languagebind/__init__.py +89 -0
- languagebind/__pycache__/__init__.cpython-38.pyc +0 -0
- languagebind/audio/__pycache__/configuration_audio.cpython-38.pyc +0 -0
- languagebind/audio/__pycache__/modeling_audio.cpython-38.pyc +0 -0
- languagebind/audio/__pycache__/processing_audio.cpython-38.pyc +0 -0
- languagebind/audio/__pycache__/tokenization_audio.cpython-38.pyc +0 -0
DATASETS.md
ADDED
@@ -0,0 +1 @@
+Release the dataset after publication...
DATASET_LICENSE
ADDED
@@ -0,0 +1,400 @@
+
+Attribution-NonCommercial 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+     Considerations for licensors: Our public licenses are
+     intended for use by those authorized to give the public
+     permission to use material in ways otherwise restricted by
+     copyright and certain other rights. Our licenses are
+     irrevocable. Licensors should read and understand the terms
+     and conditions of the license they choose before applying it.
+     Licensors should also secure all rights necessary before
+     applying our licenses so that the public can reuse the
+     material as expected. Licensors should clearly mark any
+     material not subject to the license. This includes other CC-
+     licensed material, or material used under an exception or
+     limitation to copyright. More considerations for licensors:
+     wiki.creativecommons.org/Considerations_for_licensors
+
+     Considerations for the public: By using one of our public
+     licenses, a licensor grants the public permission to use the
+     licensed material under specified terms and conditions. If
+     the licensor's permission is not necessary for any reason--for
+     example, because of any applicable exception or limitation to
+     copyright--then that use is not regulated by the license. Our
+     licenses grant only permissions under copyright and certain
+     other rights that a licensor has authority to grant. Use of
+     the licensed material may still be restricted for other
+     reasons, including because others have copyright or other
+     rights in the material. A licensor may make special requests,
+     such as asking that all changes be marked or described.
+     Although not required by our licenses, you are encouraged to
+     respect those requests where reasonable. More_considerations
+     for the public:
+     wiki.creativecommons.org/Considerations_for_licensees
+
+=======================================================================
+
+Creative Commons Attribution-NonCommercial 4.0 International Public
+License
+
+By exercising the Licensed Rights (defined below), You accept and agree
+to be bound by the terms and conditions of this Creative Commons
+Attribution-NonCommercial 4.0 International Public License ("Public
+License"). To the extent this Public License may be interpreted as a
+contract, You are granted the Licensed Rights in consideration of Your
+acceptance of these terms and conditions, and the Licensor grants You
+such rights in consideration of benefits the Licensor receives from
+making the Licensed Material available under these terms and
+conditions.
+
+Section 1 -- Definitions.
+
+  a. Adapted Material means material subject to Copyright and Similar
+     Rights that is derived from or based upon the Licensed Material
+     and in which the Licensed Material is translated, altered,
+     arranged, transformed, or otherwise modified in a manner requiring
+     permission under the Copyright and Similar Rights held by the
+     Licensor. For purposes of this Public License, where the Licensed
+     Material is a musical work, performance, or sound recording,
+     Adapted Material is always produced where the Licensed Material is
+     synched in timed relation with a moving image.
+
+  b. Adapter's License means the license You apply to Your Copyright
+     and Similar Rights in Your contributions to Adapted Material in
+     accordance with the terms and conditions of this Public License.
+
+  c. Copyright and Similar Rights means copyright and/or similar rights
+     closely related to copyright including, without limitation,
+     performance, broadcast, sound recording, and Sui Generis Database
+     Rights, without regard to how the rights are labeled or
+     categorized. For purposes of this Public License, the rights
+     specified in Section 2(b)(1)-(2) are not Copyright and Similar
+     Rights.
+  d. Effective Technological Measures means those measures that, in the
+     absence of proper authority, may not be circumvented under laws
+     fulfilling obligations under Article 11 of the WIPO Copyright
+     Treaty adopted on December 20, 1996, and/or similar international
+     agreements.
+
+  e. Exceptions and Limitations means fair use, fair dealing, and/or
+     any other exception or limitation to Copyright and Similar Rights
+     that applies to Your use of the Licensed Material.
+
+  f. Licensed Material means the artistic or literary work, database,
+     or other material to which the Licensor applied this Public
+     License.
+
+  g. Licensed Rights means the rights granted to You subject to the
+     terms and conditions of this Public License, which are limited to
+     all Copyright and Similar Rights that apply to Your use of the
+     Licensed Material and that the Licensor has authority to license.
+
+  h. Licensor means the individual(s) or entity(ies) granting rights
+     under this Public License.
+
+  i. NonCommercial means not primarily intended for or directed towards
+     commercial advantage or monetary compensation. For purposes of
+     this Public License, the exchange of the Licensed Material for
+     other material subject to Copyright and Similar Rights by digital
+     file-sharing or similar means is NonCommercial provided there is
+     no payment of monetary compensation in connection with the
+     exchange.
+
+  j. Share means to provide material to the public by any means or
+     process that requires permission under the Licensed Rights, such
+     as reproduction, public display, public performance, distribution,
+     dissemination, communication, or importation, and to make material
+     available to the public including in ways that members of the
+     public may access the material from a place and at a time
+     individually chosen by them.
+
+  k. Sui Generis Database Rights means rights other than copyright
+     resulting from Directive 96/9/EC of the European Parliament and of
+     the Council of 11 March 1996 on the legal protection of databases,
+     as amended and/or succeeded, as well as other essentially
+     equivalent rights anywhere in the world.
+
+  l. You means the individual or entity exercising the Licensed Rights
+     under this Public License. Your has a corresponding meaning.
+
+Section 2 -- Scope.
+
+  a. License grant.
+
+       1. Subject to the terms and conditions of this Public License,
+          the Licensor hereby grants You a worldwide, royalty-free,
+          non-sublicensable, non-exclusive, irrevocable license to
+          exercise the Licensed Rights in the Licensed Material to:
+
+            a. reproduce and Share the Licensed Material, in whole or
+               in part, for NonCommercial purposes only; and
+
+            b. produce, reproduce, and Share Adapted Material for
+               NonCommercial purposes only.
+
+       2. Exceptions and Limitations. For the avoidance of doubt, where
+          Exceptions and Limitations apply to Your use, this Public
+          License does not apply, and You do not need to comply with
+          its terms and conditions.
+
+       3. Term. The term of this Public License is specified in Section
+          6(a).
+
+       4. Media and formats; technical modifications allowed. The
+          Licensor authorizes You to exercise the Licensed Rights in
+          all media and formats whether now known or hereafter created,
+          and to make technical modifications necessary to do so. The
+          Licensor waives and/or agrees not to assert any right or
+          authority to forbid You from making technical modifications
+          necessary to exercise the Licensed Rights, including
+          technical modifications necessary to circumvent Effective
+          Technological Measures. For purposes of this Public License,
+          simply making modifications authorized by this Section 2(a)
+          (4) never produces Adapted Material.
+
+       5. Downstream recipients.
+
+            a. Offer from the Licensor -- Licensed Material. Every
+               recipient of the Licensed Material automatically
+               receives an offer from the Licensor to exercise the
+               Licensed Rights under the terms and conditions of this
+               Public License.
+
+            b. No downstream restrictions. You may not offer or impose
+               any additional or different terms or conditions on, or
+               apply any Effective Technological Measures to, the
+               Licensed Material if doing so restricts exercise of the
+               Licensed Rights by any recipient of the Licensed
+               Material.
+
+       6. No endorsement. Nothing in this Public License constitutes or
+          may be construed as permission to assert or imply that You
+          are, or that Your use of the Licensed Material is, connected
+          with, or sponsored, endorsed, or granted official status by,
+          the Licensor or others designated to receive attribution as
+          provided in Section 3(a)(1)(A)(i).
+
+  b. Other rights.
+
+       1. Moral rights, such as the right of integrity, are not
+          licensed under this Public License, nor are publicity,
+          privacy, and/or other similar personality rights; however, to
+          the extent possible, the Licensor waives and/or agrees not to
+          assert any such rights held by the Licensor to the limited
+          extent necessary to allow You to exercise the Licensed
+          Rights, but not otherwise.
+
+       2. Patent and trademark rights are not licensed under this
+          Public License.
+
+       3. To the extent possible, the Licensor waives any right to
+          collect royalties from You for the exercise of the Licensed
+          Rights, whether directly or through a collecting society
+          under any voluntary or waivable statutory or compulsory
+          licensing scheme. In all other cases the Licensor expressly
+          reserves any right to collect such royalties, including when
+          the Licensed Material is used other than for NonCommercial
+          purposes.
+
+Section 3 -- License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the
+following conditions.
+
+  a. Attribution.
+
+       1. If You Share the Licensed Material (including in modified
+          form), You must:
+
+            a. retain the following if it is supplied by the Licensor
+               with the Licensed Material:
+
+                 i. identification of the creator(s) of the Licensed
+                    Material and any others designated to receive
+                    attribution, in any reasonable manner requested by
+                    the Licensor (including by pseudonym if
+                    designated);
+
+                ii. a copyright notice;
+
+               iii. a notice that refers to this Public License;
+
+                iv. a notice that refers to the disclaimer of
+                    warranties;
+
+                 v. a URI or hyperlink to the Licensed Material to the
+                    extent reasonably practicable;
+
+            b. indicate if You modified the Licensed Material and
+               retain an indication of any previous modifications; and
+
+            c. indicate the Licensed Material is licensed under this
+               Public License, and include the text of, or the URI or
+               hyperlink to, this Public License.
+
+       2. You may satisfy the conditions in Section 3(a)(1) in any
+          reasonable manner based on the medium, means, and context in
+          which You Share the Licensed Material. For example, it may be
+          reasonable to satisfy the conditions by providing a URI or
+          hyperlink to a resource that includes the required
+          information.
+
+       3. If requested by the Licensor, You must remove any of the
+          information required by Section 3(a)(1)(A) to the extent
+          reasonably practicable.
+
+       4. If You Share Adapted Material You produce, the Adapter's
+          License You apply must not prevent recipients of the Adapted
+          Material from complying with this Public License.
+
+Section 4 -- Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that
+apply to Your use of the Licensed Material:
+
+  a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+     to extract, reuse, reproduce, and Share all or a substantial
+     portion of the contents of the database for NonCommercial purposes
+     only;
+
+  b. if You include all or a substantial portion of the database
+     contents in a database in which You have Sui Generis Database
+     Rights, then the database in which You have Sui Generis Database
+     Rights (but not its individual contents) is Adapted Material; and
+
+  c. You must comply with the conditions in Section 3(a) if You Share
+     all or a substantial portion of the contents of the database.
+
+For the avoidance of doubt, this Section 4 supplements and does not
+replace Your obligations under this Public License where the Licensed
+Rights include other Copyright and Similar Rights.
+
+Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+  a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+     EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+     AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+     ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+     IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+     WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+     PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+     ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+     KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+     ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+  b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+     TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+     NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+     INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+     COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+     USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+     ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+     DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+     IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+  c. The disclaimer of warranties and limitation of liability provided
+     above shall be interpreted in a manner that, to the extent
+     possible, most closely approximates an absolute disclaimer and
+     waiver of all liability.
+
+Section 6 -- Term and Termination.
+
+  a. This Public License applies for the term of the Copyright and
+     Similar Rights licensed here. However, if You fail to comply with
+     this Public License, then Your rights under this Public License
+     terminate automatically.
+
+  b. Where Your right to use the Licensed Material has terminated under
+     Section 6(a), it reinstates:
+
+       1. automatically as of the date the violation is cured, provided
+          it is cured within 30 days of Your discovery of the
+          violation; or
+
+       2. upon express reinstatement by the Licensor.
+
+     For the avoidance of doubt, this Section 6(b) does not affect any
+     right the Licensor may have to seek remedies for Your violations
+     of this Public License.
+
+  c. For the avoidance of doubt, the Licensor may also offer the
+     Licensed Material under separate terms or conditions or stop
+     distributing the Licensed Material at any time; however, doing so
+     will not terminate this Public License.
+
+  d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+     License.
+
+Section 7 -- Other Terms and Conditions.
+
+  a. The Licensor shall not be bound by any additional or different
+     terms or conditions communicated by You unless expressly agreed.
+
+  b. Any arrangements, understandings, or agreements regarding the
+     Licensed Material not stated herein are separate from and
+     independent of the terms and conditions of this Public License.
+
+Section 8 -- Interpretation.
+
+  a. For the avoidance of doubt, this Public License does not, and
+     shall not be interpreted to, reduce, limit, restrict, or impose
+     conditions on any use of the Licensed Material that could lawfully
+     be made without permission under this Public License.
+
+  b. To the extent possible, if any provision of this Public License is
+     deemed unenforceable, it shall be automatically reformed to the
+     minimum extent necessary to make it enforceable. If the provision
+     cannot be reformed, it shall be severed from this Public License
+     without affecting the enforceability of the remaining terms and
+     conditions.
+
+  c. No term or condition of this Public License will be waived and no
+     failure to comply consented to unless expressly agreed to by the
+     Licensor.
+
+  d. Nothing in this Public License constitutes or may be interpreted
+     as a limitation upon, or waiver of, any privileges and immunities
+     that apply to the Licensor or You, including from the legal
+     processes of any jurisdiction or authority.
+
+=======================================================================
+
+Creative Commons is not a party to its public
+licenses. Notwithstanding, Creative Commons may elect to apply one of
+its public licenses to material it publishes and in those instances
+will be considered the “Licensor.” The text of the Creative Commons
+public licenses is dedicated to the public domain under the CC0 Public
+Domain Dedication. Except for the limited purpose of indicating that
+material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the
+public licenses.
+
+Creative Commons may be contacted at creativecommons.org.
LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 PKU-YUAN's Group (袁粒课题组-北大信工)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
README.md
CHANGED
@@ -1,95 +1,79 @@
----
-title: LanguageBind
-emoji: ⚡
-colorFrom: pink
-colorTo: blue
-sdk: gradio
-pinned: false
----
 
 
 <p align="center">
 <img src="assets/logo.png" width="250" />
 <p>
-<h2 align="center"> LanguageBind: Extending Video-Language Pretraining to N-modality by Language-based Semantic Alignment
-
-<h5 align="center"> If you like our project, please give us a star ✨ on Github for latest update. </h2>
 
 <p align="center">
 <img src="assets/languagebind.jpg" width=100%>
 </p>
-
-<br>
-
-
-# News
-* **2023.10.02:** Released the code. Training & validating scripts and checkpoints.
-<br></br>
-# Online Demo
-Coming soon...
-
-# Models and Results
-## Model Zoo
-We list the parameters and pretrained checkpoints of LanguageBind below. Note that LanguageBind can be disassembled into different branches to handle different tasks.
-The cache comes from OpenCLIP, which we downloaded from HuggingFace. Note that the original cache for pretrained weights is the Image-Language weights, just a few more HF profiles.
-We additionally trained Video-Language with the LanguageBind method, which is stronger than on CLIP4Clip framework.
-<table border="1" width="100%">
-<tr align="center">
-<th>Model</th><th>Ckpt</th><th>Params</th><th>Modality Hidden size</th><th>Modality Layers</th><th>Language Hidden size</th><th>Language Layers</th>
-</tr>
-<tr align="center">
-<td>Video-Language</td><td>TODO</td><td>330M</td><td>1024</td><td>24</td><td>768</td><td>12</td>
-</tr>
-</tr>
-<tr align="center">
-<td>Audio-Language</td><td><a href="https://pan.baidu.com/s/1PFN8aGlnzsOkGjVk6Mzlfg?pwd=sisz">BaiDu</a></td><td>330M</td><td>1024</td><td>24</td><td>768</td><td>12</td>
-</tr>
-</tr>
-<tr align="center">
-<td>Depth-Language</td><td><a href="https://pan.baidu.com/s/1YWlaxqTRhpGvXqCyBbmhyg?pwd=olom">BaiDu</a></td><td>330M</td><td>1024</td><td>24</td><td>768</td><td>12</td>
-</tr>
-</tr>
-<tr align="center">
-<td>Thermal(Infrared)-Language</td><td><a href="https://pan.baidu.com/s/1luUyyKxhadKKc1nk1wizWg?pwd=raf5">BaiDu</a></td><td>330M</td><td>1024</td><td>24</td><td>768</td><td>12</td>
-</tr>
-</tr>
-<tr align="center">
-<td>Image-Language</td><td><a href="https://pan.baidu.com/s/1VBE4OjecMTeIzU08axfFHA?pwd=7j0m">BaiDu</a></td><td>330M</td><td>1024</td><td>24</td><td>768</td><td>12</td>
-</tr>
-</tr>
-<tr align="center">
-<td>Cache for pretrained weight</td><td><a href="https://pan.baidu.com/s/1Tytx5MDSo96rwUmQZVY1Ww?pwd=c7r0">BaiDu</a></td><td>330M</td><td>1024</td><td>24</td><td>768</td><td>12</td>
-</tr>
-
-</table>
-<br>
-
-## Results
-Zero-shot Video-Text Retrieval Performance on MSR-VTT and MSVD datasets. We focus on reporting the parameters of the vision
-encoder. Our experiments are based on 3 million video-text pairs of VIDAL-10M, and we train on the CLIP4Clip framework..
 <p align="center">
-<img src="assets/
 </p>
 <p align="center">
-<img src="assets/
 </p>
 
 
-<br></br>
 
 * Python >= 3.8
 * Pytorch >= 1.13.0
 * CUDA Version >= 10.2 (recommend 11.6)
@@ -100,35 +84,203 @@ cd LanguageBind
 pip install -r requirements.txt
 ```
 
 
 <p align="center">
 <img src="assets/logo.png" width="250" />
 <p>
+<h2 align="center"> <a href="https://arxiv.org/pdf/2310.01852.pdf">LanguageBind: Extending Video-Language Pretraining to N-modality by Language-based Semantic Alignment</a></h2>
+<h5 align="center"> If you like our project, please give us a star ✨ on GitHub for the latest updates. </h5>
 
+<p align="center">
+📖 <a href="https://arxiv.org/pdf/2310.01852.pdf">Paper</a>
+ | 
+🤗 <a href="https://huggingface.co/spaces/lb203/LanguageBind">Demo</a>
+ | 
+🤖 <a href="https://github.com/PKU-YuanGroup/LanguageBind#usage">API</a>
+ | 
+📄 <a href="TRAIN_AND_VALIDATE.md">Instruction</a>
+ | 
+💥 <a href="DATASETS.md">Datasets</a>
+</p>
 
+## 😮 Highlights
 
+### 💡 High performance, but NO intermediate modality required
+LanguageBind is a **language-centric** multimodal pretraining approach, **taking language as the bind across different modalities**, because the language modality is well explored and contains rich semantics.
+* The first figure below shows the architecture of LanguageBind. LanguageBind can be easily extended to segmentation and detection tasks, and potentially to unlimited modalities.
 
+### ⚡️ A multimodal, fully aligned and voluminous dataset
+We propose **VIDAL-10M**, **10 million** pairs of **V**ideo, **I**nfrared, **D**epth, **A**udio and their corresponding **L**anguage, which greatly expands the data beyond visual modalities.
+* The second figure below shows our proposed VIDAL-10M dataset, which includes five modalities: video, infrared, depth, audio, and language.
 
+### 🔥 Multi-view enhanced description for training
+We make multi-view enhancements to language. We produce a multi-view description that combines **meta-data**, **spatial**, and **temporal** information to greatly enhance the semantic content of the language. In addition, we further **enhance the language with ChatGPT** to create a good semantic space for the language aligned with each modality (a minimal sketch follows the two figures below).
 
 <p align="center">
 <img src="assets/languagebind.jpg" width=100%>
 </p>
 <p align="center">
+<img src="assets/iclr_dataset_sample.jpg" width=99%>
 </p>
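Purely as an illustration of the multi-view description idea above — the helper below is hypothetical and not part of the released code — combining the meta-data, spatial, and temporal views into one training caption might look like:

```python
# Hypothetical sketch of multi-view description assembly (not the released pipeline).
# Each "view" of the text comes from a different source: raw meta-data, a spatial
# captioner, and a temporal captioner over keyframes.
def build_multiview_description(title, hashtags, spatial_caption, temporal_caption):
    meta_view = f"{title} {' '.join(hashtags)}"            # meta-data view
    visual_view = f"{spatial_caption} {temporal_caption}"  # spatial + temporal views
    return f"{meta_view}. {visual_view}"                   # concatenated training text

caption = build_multiview_description(
    title="Training a parakeet to climb up a ladder",
    hashtags=["#parakeet", "#training"],
    spatial_caption="A green bird stands on a wooden ladder.",
    temporal_caption="The bird hops up the rungs one by one.",
)
# A ChatGPT polishing pass (cf. the `--text-type "polish_mplug"` training flag in
# TRAIN_AND_VALIDATE.md) would then rewrite `caption` into a fluent sentence.
```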
+
+
+## 📰 News
+**[2023.10.10]** 🎉 We updated the audio weights, exceeding ImageBind by 16.2% on the ESC-50 dataset. Sample data can be found in [assets](assets), and [emergency zero-shot usage](#emergency-zero-shot) is described below.<br>
+**[2023.10.07]** The checkpoints are available on the 🤗 [Huggingface Model hub](https://huggingface.co/lb203). <br>
+**[2023.10.04]** Code and demo are available now! Welcome to **watch** 👀 this repository for the latest updates.
+
+## 🤗 Demo
+
+* **Local demo.** We highly recommend trying out our web demo, which incorporates all features currently supported by LanguageBind.
+```bash
+python gradio_app.py
+```
+
+* **Online demo.** We provide the [online demo](https://huggingface.co/spaces/lb203/LanguageBind) in Huggingface Spaces. In this demo, you can calculate the similarity of modalities to language, such as audio-to-language, video-to-language, and depth-to-image.
 <p align="center">
+<img src="assets/demo.png" width=100%>
 </p>
 
 
 
+## 🚀 Main Results
+
+### ✨ Video-Language
+We focus on reporting the parameters of the vision encoder. Our experiments are based on 3 million video-text pairs from VIDAL-10M, and we train on the CLIP4Clip framework.
+<p align="center">
+<img src="assets/res1.jpg" width=80%>
+</p>
+
+### ✨ Multiple Modalities
+Infrared-Language, Depth-Language, and Audio-Language zero-shot classification. We report text-to-audio R@1 for the Clotho dataset and top-1 accuracy for the rest of the datasets.
+<p align="center">
+<img src="assets/res2.jpg" width=70%>
+</p>
+
+## 🛠️ Requirements and Installation
 * Python >= 3.8
 * Pytorch >= 1.13.0
 * CUDA Version >= 10.2 (recommend 11.6)
 pip install -r requirements.txt
 ```
 
+## 🤖 API
+**We open-source all modality preprocessing code.** If you want to load the model (e.g. ```lb203/LanguageBind_Thermal```) from the model hub on Huggingface or from a local path, you can use the following code snippets.
+
+### Inference for Multi-modal Binding
+We have provided some sample data in [assets](assets) so you can quickly see how LanguageBind works.
+```python
+import torch
+from languagebind import LanguageBind, to_device, transform_dict, LanguageBindImageTokenizer
+
+if __name__ == '__main__':
+    device = 'cuda:0'
+    device = torch.device(device)
+    clip_type = ('thermal', 'image', 'video', 'depth', 'audio')
+    model = LanguageBind(clip_type=clip_type, cache_dir='./cache_dir')
+    model = model.to(device)
+    model.eval()
+    pretrained_ckpt = f'lb203/LanguageBind_Image'
+    tokenizer = LanguageBindImageTokenizer.from_pretrained(pretrained_ckpt, cache_dir='./cache_dir/tokenizer_cache_dir')
+    modality_transform = {c: transform_dict[c](model.modality_config[c]) for c in clip_type}
+
+    image = ['assets/image/0.jpg', 'assets/image/1.jpg']
+    audio = ['assets/audio/0.wav', 'assets/audio/1.wav']
+    video = ['assets/video/0.mp4', 'assets/video/1.mp4']
+    depth = ['assets/depth/0.png', 'assets/depth/1.png']
+    thermal = ['assets/thermal/0.jpg', 'assets/thermal/1.jpg']
+    language = ["Training a parakeet to climb up a ladder.", 'A lion climbing a tree to catch a monkey.']
+
+    inputs = {
+        'image': to_device(modality_transform['image'](image), device),
+        'video': to_device(modality_transform['video'](video), device),
+        'audio': to_device(modality_transform['audio'](audio), device),
+        'depth': to_device(modality_transform['depth'](depth), device),
+        'thermal': to_device(modality_transform['thermal'](thermal), device),
+    }
+    inputs['language'] = to_device(tokenizer(language, max_length=77, padding='max_length',
+                                             truncation=True, return_tensors='pt'), device)
+    with torch.no_grad():
+        embeddings = model(inputs)
+    print("Video x Text: \n",
+          torch.softmax(embeddings['video'] @ embeddings['language'].T, dim=-1).detach().cpu().numpy())
+    print("Image x Text: \n",
+          torch.softmax(embeddings['image'] @ embeddings['language'].T, dim=-1).detach().cpu().numpy())
+    print("Depth x Text: \n",
+          torch.softmax(embeddings['depth'] @ embeddings['language'].T, dim=-1).detach().cpu().numpy())
+    print("Audio x Text: \n",
+          torch.softmax(embeddings['audio'] @ embeddings['language'].T, dim=-1).detach().cpu().numpy())
+    print("Thermal x Text: \n",
+          torch.softmax(embeddings['thermal'] @ embeddings['language'].T, dim=-1).detach().cpu().numpy())
+```
+This returns the following result.
+```bash
+Video x Text:
+ [[9.9999845e-01 1.5308899e-06]
+ [3.6420031e-06 9.9999630e-01]]
+Image x Text:
+ [[1.0000000e+00 4.0599781e-09]
+ [1.2165208e-08 1.0000000e+00]]
+Depth x Text:
+ [[9.9952829e-01 4.7178473e-04]
+ [1.6411507e-01 8.3588487e-01]]
+Audio x Text:
+ [[0.61346906 0.38653097]
+ [0.00996918 0.99003077]]
+Thermal x Text:
+ [[0.9744922 0.02550781]
+ [0.3656127 0.6343873 ]]
+```
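Each row above is softmax-normalized over the two candidate texts, so it can be read as retrieval probabilities. As a small follow-on sketch (reusing `embeddings` and `language` from the snippet above), the best-matching text per video is:

```python
# Follow-on sketch (assumes `embeddings` and `language` from the snippet above):
# pick the text with the highest similarity for each video.
probs = torch.softmax(embeddings['video'] @ embeddings['language'].T, dim=-1)
best = probs.argmax(dim=-1)
for i, j in enumerate(best.tolist()):
    print(f"video {i} -> {language[j]!r} (p={probs[i, j].item():.4f})")
```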
+### Emergency zero-shot
+Since LanguageBind binds each modality together through language, we also observe **emergency zero-shot** transfer between non-language modalities. It is very simple to use.
+```python
+print("Video x Audio: \n", torch.softmax(embeddings['video'] @ embeddings['audio'].T, dim=-1).detach().cpu().numpy())
+```
+Then, you will get:
+```
+Video x Audio:
+ [[1.0000000e+00 0.0000000e+00]
+ [7.2774713e-22 1.0000000e+00]]
+```
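The same comparison works for any pair of the returned embeddings. An illustrative sketch (reusing the `embeddings` dict from the inference snippet above; as in the repository's own snippets, similarities are plain dot products between the returned embeddings):

```python
# Illustrative only: score every modality pair using the `embeddings` dict
# returned above. softmax normalizes each row over the candidate samples.
import itertools

modalities = ['video', 'image', 'audio', 'depth', 'thermal', 'language']
for m1, m2 in itertools.combinations(modalities, 2):
    sim = torch.softmax(embeddings[m1] @ embeddings[m2].T, dim=-1)
    print(f"{m1} x {m2}:\n", sim.detach().cpu().numpy())
```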
+
+### Different branches for X-Language task
+**Additionally, LanguageBind can be disassembled into different branches to handle different tasks.**
+#### Thermal
+```python
+import torch
+from languagebind import LanguageBindThermal, LanguageBindThermalTokenizer, LanguageBindThermalProcessor
+
+pretrained_ckpt = 'lb203/LanguageBind_Thermal'
+model = LanguageBindThermal.from_pretrained(pretrained_ckpt, cache_dir='./languagebind/cache_dir')
+tokenizer = LanguageBindThermalTokenizer.from_pretrained(pretrained_ckpt, cache_dir='./languagebind/cache_dir')
+thermal_process = LanguageBindThermalProcessor(model.config, tokenizer)
+
+model.eval()
+data = thermal_process([r"your/thermal.jpg"], ['your text'], return_tensors='pt')
+with torch.no_grad():
+    out = model(**data)
+
+print(out.text_embeds @ out.image_embeds.T)
+```
+
+#### Depth
+```python
+import torch
+from languagebind import LanguageBindDepth, LanguageBindDepthTokenizer, LanguageBindDepthProcessor
+
+pretrained_ckpt = 'lb203/LanguageBind_Depth'
+model = LanguageBindDepth.from_pretrained(pretrained_ckpt, cache_dir='./languagebind/cache_dir')
+tokenizer = LanguageBindDepthTokenizer.from_pretrained(pretrained_ckpt, cache_dir='./languagebind/cache_dir')
+depth_process = LanguageBindDepthProcessor(model.config, tokenizer)
+
+model.eval()
+data = depth_process([r"your/depth.png"], ['your text.'], return_tensors='pt')
+with torch.no_grad():
+    out = model(**data)
+
+print(out.text_embeds @ out.image_embeds.T)
+```
+
+#### Video
+```python
+import torch
+from languagebind import LanguageBindVideo, LanguageBindVideoTokenizer, LanguageBindVideoProcessor
+
+pretrained_ckpt = 'lb203/LanguageBind_Video'
+model = LanguageBindVideo.from_pretrained(pretrained_ckpt, cache_dir='./languagebind/cache_dir')
+tokenizer = LanguageBindVideoTokenizer.from_pretrained(pretrained_ckpt, cache_dir='./languagebind/cache_dir')
+video_process = LanguageBindVideoProcessor(model.config, tokenizer)
+
+model.eval()
+data = video_process(["your/video.mp4"], ['your text.'], return_tensors='pt')
+with torch.no_grad():
+    out = model(**data)
+
+print(out.text_embeds @ out.image_embeds.T)
+```
+
+#### Audio
+```python
+import torch
+from languagebind import LanguageBindAudio, LanguageBindAudioTokenizer, LanguageBindAudioProcessor
+
+pretrained_ckpt = 'lb203/LanguageBind_Audio'
+model = LanguageBindAudio.from_pretrained(pretrained_ckpt, cache_dir='./languagebind/cache_dir')
+tokenizer = LanguageBindAudioTokenizer.from_pretrained(pretrained_ckpt, cache_dir='./languagebind/cache_dir')
+audio_process = LanguageBindAudioProcessor(model.config, tokenizer)
+
+model.eval()
+data = audio_process([r"your/audio.wav"], ['your audio.'], return_tensors='pt')
+with torch.no_grad():
+    out = model(**data)
+
+print(out.text_embeds @ out.image_embeds.T)
+```
+
+#### Image
+```python
+import torch
+from languagebind import LanguageBindImage, LanguageBindImageTokenizer, LanguageBindImageProcessor
+
+pretrained_ckpt = 'lb203/LanguageBind_Image'
+model = LanguageBindImage.from_pretrained(pretrained_ckpt, cache_dir='./languagebind/cache_dir')
+tokenizer = LanguageBindImageTokenizer.from_pretrained(pretrained_ckpt, cache_dir='./languagebind/cache_dir')
+image_process = LanguageBindImageProcessor(model.config, tokenizer)
+
+model.eval()
+data = image_process([r"your/image.jpg"], ['your text.'], return_tensors='pt')
+with torch.no_grad():
+    out = model(**data)
+
+print(out.text_embeds @ out.image_embeds.T)
+```
+
+## 💥 VIDAL-10M
+The dataset is described in [DATASETS.md](DATASETS.md).
+
+## 🗝️ Training & Validating
+The training & validating instructions are in [TRAIN_AND_VALIDATE.md](TRAIN_AND_VALIDATE.md).
+
+## 👍 Acknowledgement
+* [OpenCLIP](https://github.com/mlfoundations/open_clip): an open-source pretraining framework.
+* [CLIP4Clip](https://github.com/ArrowLuo/CLIP4Clip): an open-source video-text retrieval framework.
+* [sRGB-TIR](https://github.com/rpmsnu/sRGB-TIR): an open-source framework to generate infrared (thermal) images.
+* [GLPN](https://github.com/vinvino02/GLPDepth): an open-source framework to generate depth images.
+
+## 🔒 License
+* The majority of this project is released under the MIT license as found in the [LICENSE](https://github.com/PKU-YuanGroup/LanguageBind/blob/main/LICENSE) file.
+* The dataset of this project is released under the CC-BY-NC 4.0 license as found in the [DATASET_LICENSE](https://github.com/PKU-YuanGroup/LanguageBind/blob/main/DATASET_LICENSE) file.
+
+## ✏️ Citation
+If you find our paper and code useful in your research, please consider giving a star :star: and citation :pencil:.
+
+```BibTeX
+@misc{zhu2023languagebind,
+      title={LanguageBind: Extending Video-Language Pretraining to N-modality by Language-based Semantic Alignment},
+      author={Bin Zhu and Bin Lin and Munan Ning and Yang Yan and Jiaxi Cui and Wang HongFa and Yatian Pang and Wenhao Jiang and Junwu Zhang and Zongwei Li and Cai Wan Zhang and Zhifeng Li and Wei Liu and Li Yuan},
+      year={2023},
+      eprint={2310.01852},
+      archivePrefix={arXiv},
+      primaryClass={cs.CV}
+}
 ```
TRAIN_AND_VALIDATE.md
ADDED
@@ -0,0 +1,207 @@
+We provide the **off-the-shelf** scripts in the [scripts folder](scripts).
+
+## Training LanguageBind
+
+For example, to **train** LanguageBind on **Depth-Language** with 16 GPUs (2 nodes x 8 GPUs):
+* First, download the [cache of pretrained weights](https://github.com/PKU-YuanGroup/LanguageBind#-model-zoo) and specify ```CACHE_DIR```.
+* Second, set the path to ```TRAIN_DATA``` according to the [dataset preparation](https://github.com/PKU-YuanGroup/LanguageBind#-vidal-10m).
+* Then you can run
+
+```bash
+CACHE_DIR="path/to/pretrained/weight"
+TRAIN_DATA="path/to/data"
+cd /path/to/LanguageBind
+TORCH_DISTRIBUTED_DEBUG=DETAIL HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 torchrun --nnodes=1 --nproc_per_node 8 \
+    -m main \
+    --train-data ${TRAIN_DATA} \
+    --train-num-samples 3020000 \
+    --clip-type "dl" --max-depth 10 \
+    --do_train \
+    --lock-text --lock-image --text-type "polish_mplug" \
+    --init-temp 0.07 --learn-temp \
+    --model "ViT-L-14" --cache-dir ${CACHE_DIR} \
+    --convert_to_lora --lora_r 2 \
+    --lr 5e-4 --coef-lr 1e-3 \
+    --beta1 0.9 --beta2 0.98 --wd 0.2 --eps 1e-6 \
+    --num-frames 1 --force-patch-dropout 0.5 \
+    --epochs 1 --batch-size 128 --accum-freq 1 --warmup 200 \
+    --precision "amp" --workers 10 --video-decode-backend "imgs" \
+    --save-frequency 1 --log-every-n-steps 20 --report-to "tensorboard" --resume "latest" \
+    --do_eval \
+    --val_d_cls_data "NYUV2"
+```
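Note that the command above launches a single node (`--nnodes=1 --nproc_per_node 8`). For the 2-node x 8-GPU setting mentioned above, `torchrun` additionally needs rendezvous arguments on each node; the following is generic `torchrun` usage rather than a script shipped with this repo:

```bash
# Generic torchrun multi-node launch (standard PyTorch usage, not repo-specific).
# Run the same command on each node; NODE_RANK is 0 on the master node, 1 on the other.
MASTER_ADDR="ip.of.node0"   # placeholder: address of the rank-0 node
torchrun --nnodes=2 --nproc_per_node 8 \
    --node_rank ${NODE_RANK} --master_addr ${MASTER_ADDR} --master_port 29500 \
    -m main \
    --train-data ${TRAIN_DATA} --clip-type "dl"  # ...remaining arguments as in the block above
```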
+
+
+## Validating LanguageBind
+
+For example, to **validate** LanguageBind on **Depth-Language** with 1 GPU:
+* First, specify ```RESUME```.
+* Second, prepare the [downstream dataset](https://github.com/PKU-YuanGroup/LanguageBind/blob/main/TRAIN_AND_VALIDATE.md#downstream-datasets).
+* Then you can run
+
+```bash
+CACHE_DIR="path/to/pretrained/weight"
+RESUME="thermal_language.pt"
+TRAIN_DATA="path/to/data"
+cd /path/to/LanguageBind
+TORCH_DISTRIBUTED_DEBUG=DETAIL HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 torchrun --nproc_per_node 1 \
+    -m main \
+    --train-data ${TRAIN_DATA} \
+    --train-num-samples 3020000 \
+    --clip-type "dl" --max-depth 10 \
+    --lock-text --lock-image --text-type "polish_mplug" \
+    --init-temp 0.07 --learn-temp \
+    --model "ViT-L-14" --cache-dir ${CACHE_DIR} \
+    --convert_to_lora --lora_r 2 \
+    --lr 5e-4 --coef-lr 1e-3 \
+    --beta1 0.9 --beta2 0.98 --wd 0.2 --eps 1e-6 \
+    --num-frames 1 --force-patch-dropout 0.5 \
+    --epochs 1 --batch-size 128 --accum-freq 1 --warmup 200 \
+    --precision "amp" --workers 10 --video-decode-backend "imgs" \
+    --save-frequency 1 --log-every-n-steps 20 --report-to "tensorboard" --resume ${RESUME} \
+    --do_eval \
+    --val_d_cls_data "NYUV2"
+```
+
+## Downstream datasets
+
+### Depth
+The NYU V2 dataset is downloaded from [this repo](https://github.com/TUI-NICR/nicr-scene-analysis-datasets/tree/main/nicr_scene_analysis_datasets/datasets/nyuv2), and we reformat it to conform to the standard ImageNet format. Change the ```data_root``` [here](https://github.com/PKU-YuanGroup/LanguageBind/blob/main/data/build_datasets.py#L148).
+
+### Video
+Video datasets are downloaded from [this repo](https://github.com/jpthu17/HBI), and we show the folder structure below. Change the ```data_root``` [here](https://github.com/PKU-YuanGroup/LanguageBind/blob/main/data/build_datasets.py#L74).
+
+### Audio
+Audio datasets are downloaded from [this repo](https://github.com/OFA-Sys/ONE-PEACE/blob/main/datasets.md#audio), and we reformat them to conform to the standard ImageNet format. Change the ```data_root``` [here](https://github.com/PKU-YuanGroup/LanguageBind/blob/main/data/build_datasets.py#L127).
+
+### Infrared (Thermal)
+We download LLVIP from the [official website](https://bupt-ai-cz.github.io/LLVIP/), and FLIR from [here](https://www.flir.com/oem/adas/adas-dataset-form/). We reformat them to conform to the standard ImageNet format. Change the ```data_root``` [here](https://github.com/PKU-YuanGroup/LanguageBind/blob/main/data/build_datasets.py#L160). We also provide the processed data as follows.
+
+<div align="center">
+<table border="1" width="100%">
+<tr align="center">
+<th>Datasets</th><th>Baidu Yun</th><th>Google Cloud</th><th>Peking University Yun</th>
+</tr>
+<tr align="center">
+<td>LLVIP</td><td><a href="https://pan.baidu.com/s/15HPVr016F7eO9005NDRJTg?pwd=46fh">Link</a></td><td><a href="https://drive.google.com/file/d/1RfKNR8q6dHiAHB4OlYecnkUSx-ghLuEO/view?usp=drive_link">Link</a></td><td><a href="https://disk.pku.edu.cn:443/link/30D592EA37AC7C411264801A74994376">Link</a></td>
+</tr>
+<tr align="center">
+<td>FLIR V1</td><td><a href="https://pan.baidu.com/s/1ZDSo5VPxJ4SA7wS_rNk0uQ?pwd=l491">Link</a></td><td><a href="https://drive.google.com/file/d/1CezCLJ4GUfPMFimitPfK40OV2j2Kr8t8/view?usp=drive_link">Link</a></td><td><a href="https://disk.pku.edu.cn:443/link/AD89D6ADE2CAC2407B00650870CBBDEC">Link</a></td>
+</tr>
+<tr align="center">
+<td>FLIR V2</td><td><a href="https://pan.baidu.com/s/16xdr2aQkHo3zJ4KbaTmO3Q?pwd=tj9f">Link</a></td><td><a href="https://drive.google.com/file/d/1Z2ThG5QH-9biFI2-Z8k2fBKSA6Nrees6/view?usp=drive_link">Link</a></td><td><a href="https://disk.pku.edu.cn:443/link/E06C010970B0ED51926700D2F7A21EA8">Link</a></td>
+</tr>
+</table>
+</div>
+
+### Folder structure
+```bash
+downstream_datasets
+├── Audio
+│   ├── esc50
+│   │   └── test
+│   │       ├── airplane
+│   │       ├── breathing
+│   │       ├── brushing_teeth
+│   │       ├── can_opening
+│   │       ├── car_horn
+│   │       ├── cat
+│   │       ├── chainsaw
+│   │       ├── chirping_birds
+│   │       ├── church_bells
+│   │       ├── clapping
+│   │       ├── clock_alarm
+│   │       ├── clock_tick
+│   │       ├── coughing
+│   │       ├── cow
+│   │       ├── crackling_fire
+│   │       ├── crickets
+│   │       ├── crow
+│   │       ├── crying_baby
+│   │       ├── dog
+│   │       ├── door_wood_creaks
+│   │       ├── door_wood_knock
+│   │       ├── drinking_sipping
+│   │       ├── engine
+│   │       ├── fireworks
+│   │       ├── footsteps
+│   │       ├── frog
+│   │       ├── glass_breaking
+│   │       ├── hand_saw
+│   │       ├── helicopter
+│   │       ├── hen
+│   │       ├── insects
+│   │       ├── keyboard_typing
+│   │       ├── laughing
+│   │       ├── mouse_click
+│   │       ├── pig
+│   │       ├── pouring_water
+│   │       ├── rain
+│   │       ├── rooster
+│   │       ├── sea_waves
+│   │       ├── sheep
+│   │       ├── siren
+│   │       ├── sneezing
+│   │       ├── snoring
+│   │       ├── thunderstorm
+│   │       ├── toilet_flush
+│   │       ├── train
+│   │       ├── vacuum_cleaner
+│   │       ├── washing_machine
+│   │       ├── water_drops
+│   │       └── wind
+├── Depth
+│   ├── nyuv2
+│   │   ├── data
+│   │   │   └── val
+│   │   │       ├── bathroom
+│   │   │       ├── bedroom
+│   │   │       ├── bookstore
+│   │   │       ├── classroom
+│   │   │       ├── dining_room
+│   │   │       ├── home_office
+│   │   │       ├── kitchen
+│   │   │       ├── living_room
+│   │   │       ├── office
+│   │   │       └── others
+├── Thermal
+│   ├── flirv1
+│   │   └── val
+│   │       ├── bicycle
+│   │       ├── car
+│   │       ├── dog
+│   │       └── person
+│   ├── flirv2
+│   │   └── val
+│   │       ├── bike
+│   │       ├── bus
+│   │       ├── car
+│   │       ├── hydrant
+│   │       ├── light
+│   │       ├── motor
+│   │       ├── other\ vehicle
+│   │       ├── person
+│   │       ├── sign
+│   │       ├── skateboard
+│   │       ├── stroller
+│   │       └── truck
+│   ├── llvip
+│   │   ├── train
+│   │   │   ├── background
+│   │   │   └── person
+│   │   └── val
+│   │       ├── background
+│   │       └── person
+└── VideoTextRetrieval
+    ├── vtRetdata
+    │   ├── ActivityNet
+    │   │   └── Videos
+    │   │       └── Activity_Videos
+    │   ├── Didemo
+    │   │   └── videos
+    │   ├── MSRVTT
+    │   │   └── MSRVTT_Videos
+    │   └── MSVD
+    │       └── MSVD_Videos
+```
+
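Since every classification dataset above is reformatted to the standard ImageNet layout (one sub-folder per class), samples and labels can be enumerated generically. This is an illustrative sketch, not code from the repo; the path matches the folder structure shown above:

```python
# Illustrative sketch: enumerate (file, class) pairs from an ImageNet-style
# layout such as downstream_datasets/Audio/esc50/test shown above.
from pathlib import Path

def list_imagenet_style(root):
    root = Path(root)
    classes = sorted(p.name for p in root.iterdir() if p.is_dir())
    samples = [(str(f), cls) for cls in classes
               for f in sorted((root / cls).iterdir()) if f.is_file()]
    return classes, samples

classes, samples = list_imagenet_style("downstream_datasets/Audio/esc50/test")
print(len(classes), "classes,", len(samples), "clips")
# Zero-shot evaluation would encode a prompt such as f"a sound of {cls}." with
# the language tower and compare it to each clip's audio embedding.
```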
a_cls/__pycache__/precision.cpython-38.pyc
CHANGED
Binary files a/a_cls/__pycache__/precision.cpython-38.pyc and b/a_cls/__pycache__/precision.cpython-38.pyc differ
a_cls/__pycache__/stats.cpython-38.pyc
CHANGED
Binary files a/a_cls/__pycache__/stats.cpython-38.pyc and b/a_cls/__pycache__/stats.cpython-38.pyc differ
a_cls/__pycache__/zero_shot.cpython-38.pyc
CHANGED
Binary files a/a_cls/__pycache__/zero_shot.cpython-38.pyc and b/a_cls/__pycache__/zero_shot.cpython-38.pyc differ
a_cls/__pycache__/zero_shot_classifier.cpython-38.pyc
CHANGED
Binary files a/a_cls/__pycache__/zero_shot_classifier.cpython-38.pyc and b/a_cls/__pycache__/zero_shot_classifier.cpython-38.pyc differ
|
|
a_cls/__pycache__/zero_shot_metadata.cpython-38.pyc
CHANGED
Binary files a/a_cls/__pycache__/zero_shot_metadata.cpython-38.pyc and b/a_cls/__pycache__/zero_shot_metadata.cpython-38.pyc differ
|
|
a_cls/__pycache__/zeroshot_cls.cpython-38.pyc
CHANGED
Binary files a/a_cls/__pycache__/zeroshot_cls.cpython-38.pyc and b/a_cls/__pycache__/zeroshot_cls.cpython-38.pyc differ
|
|
app.py
CHANGED
@@ -1,3 +1,4 @@
+import sys

 import gradio as gr
 import argparse
@@ -5,195 +6,7 @@ import numpy as np
 import torch
 from torch import nn

-from data.process_image import load_and_transform_image, get_image_transform
-from main import SET_GLOBAL_VALUE
-from model.build_model import create_vat_model
-from data.process_audio import load_and_transform_audio, get_audio_transform
-from data.process_video import load_and_transform_video, get_video_transform
-from data.process_depth import load_and_transform_depth, get_depth_transform
-from data.process_thermal import load_and_transform_thermal, get_thermal_transform
-from data.process_text import load_and_transform_text
-from open_clip import get_tokenizer
-from open_clip.factory import HF_HUB_PREFIX
-
-import os
-
-os.system("wget https://huggingface.co/lb203/LanguageBind/resolve/main/vl.pt")
-os.system("wget https://huggingface.co/lb203/LanguageBind/resolve/main/al.pt")
-os.system("wget https://huggingface.co/lb203/LanguageBind/resolve/main/il.pt")
-os.system("wget https://huggingface.co/lb203/LanguageBind/resolve/main/dl.pt")
-os.system("wget https://huggingface.co/lb203/LanguageBind/resolve/main/tl.pt")
-
-
-class LanguageBind(nn.Module):
-    def __init__(self, args):
-        super(LanguageBind, self).__init__()
-        temp_clip_type = args.clip_type
-        self.modality_encoder = {}
-        self.modality_proj = {}
-        self.modality_scale = {}
-        for c in temp_clip_type:
-            args.clip_type = c
-            if c == 'il':
-                args.convert_to_lora = False
-                model = create_vat_model(args)
-                args.convert_to_lora = True
-            elif c == 'vl':
-                args.lora_r = 64
-                args.add_time_attn = True
-                model = create_vat_model(args)
-                args.add_time_attn = False
-                args.lora_r = 2
-            elif c == 'al':
-                args.lora_r = 8
-                model = create_vat_model(args)
-                args.lora_r = 2
-            else:
-                model = create_vat_model(args)
-
-            state_dict = torch.load(f'{c}.pt', map_location='cpu')
-            if state_dict.get('state_dict', None) is not None:
-                state_dict = state_dict['state_dict']
-            if next(iter(state_dict.items()))[0].startswith('module'):
-                state_dict = {k[7:]: v for k, v in state_dict.items()}
-            msg = model.load_state_dict(state_dict, strict=False)
-            print(f'load {c}, {msg}')
-
-            if c == 'vl':
-                self.modality_encoder['video'] = model.vision_model
-                self.modality_proj['video'] = model.visual_projection
-                self.modality_scale['video'] = model.logit_scale
-            elif c == 'al':
-                self.modality_encoder['audio'] = model.vision_model
-                self.modality_proj['audio'] = model.visual_projection
-                self.modality_scale['audio'] = model.logit_scale
-            elif c == 'dl':
-                self.modality_encoder['depth'] = model.vision_model
-                self.modality_proj['depth'] = model.visual_projection
-                self.modality_scale['depth'] = model.logit_scale
-            elif c == 'tl':
-                self.modality_encoder['thermal'] = model.vision_model
-                self.modality_proj['thermal'] = model.visual_projection
-                self.modality_scale['thermal'] = model.logit_scale
-            elif c == 'il':
-                self.modality_encoder['image'] = model.vision_model
-                self.modality_proj['image'] = model.visual_projection
-                self.modality_scale['image'] = model.logit_scale
-            else:
-                raise NameError(f'No clip_type of {c}')
-        self.modality_encoder['language'] = model.text_model
-        self.modality_proj['language'] = model.text_projection
-
-        self.modality_encoder = nn.ModuleDict(self.modality_encoder)
-        self.modality_proj = nn.ModuleDict(self.modality_proj)
-
-    def forward(self, inputs):
-        outputs = {}
-        for key, value in inputs.items():
-            value = self.modality_encoder[key](**value)[1]
-            value = self.modality_proj[key](value)
-            value = value / value.norm(p=2, dim=-1, keepdim=True)
-            # if key != 'language':
-            #     value = value * self.modality_scale[key].exp()
-            outputs[key] = value
-        return outputs
-
-
-
-
-MODEL_DICT = {"ViT-L-14": "laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K",
-              "ViT-H-14": "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"}
-CHECKPOINT_DICT = {"ViT-L-14": "models--laion--CLIP-ViT-L-14-DataComp.XL-s13B-b90K/snapshots/84c9828e63dc9a9351d1fe637c346d4c1c4db341/pytorch_model.bin",
-                   "ViT-H-14": "models--laion--CLIP-ViT-H-14-laion2B-s32B-b79K/snapshots/94a64189c3535c1cb44acfcccd7b0908c1c8eb23/pytorch_model.bin"}
-parser = argparse.ArgumentParser()
-args = parser.parse_args()
-args.pretrained = False
-args.model = MODEL_DICT["ViT-L-14"]
-args.cache_dir = ''
-args.video_decode_backend = 'decord'
-args.device = 'cpu'
-# args.device = 'cuda:0'
-device = torch.device(args.device)
-args.precision = None
-args.init_temp = 0
-args.force_patch_dropout = 0.0
-args.add_time_attn = False
-args.convert_to_lora = True
-args.lora_r = 2
-args.lora_alpha = 16
-args.lora_dropout = 0.0  # 0.1?
-args.num_frames = 8
-args.clip_type = 'vl'
-args.num_mel_bins = 1008
-args.target_length = 112
-args.audio_sample_rate = 16000
-args.audio_mean = 4.5689974
-args.audio_std = -4.2677393
-args.max_depth = 10
-args.image_size = 224
-args.rank = 0
-SET_GLOBAL_VALUE('PATCH_DROPOUT', args.force_patch_dropout)
-SET_GLOBAL_VALUE('NUM_FRAMES', args.num_frames)
-args.clip_type = ['il', 'vl', 'al', 'dl', 'tl']
-model = LanguageBind(args).to(device)
-model.eval()
-
-modality_transform = {
-    'language': get_tokenizer(HF_HUB_PREFIX + args.model, cache_dir=args.cache_dir),
-    'video': get_video_transform(args),
-    'audio': get_audio_transform(args),
-    'depth': get_depth_transform(args),
-    'thermal': get_thermal_transform(args),
-    'image': get_image_transform(args),
-}
-
-
-def stack_dict(x, device):
-    out_dict = {}
-    keys = list(x[0].keys())
-    for key in keys:
-        out_dict[key] = torch.stack([i[key] for i in x]).to(device)
-    return out_dict
-
-def image_to_language(image, language):
-    inputs = {}
-    inputs['image'] = stack_dict([load_and_transform_image(image, modality_transform['image'])], device)
-    inputs['language'] = stack_dict([load_and_transform_text(language, modality_transform['language'])], device)
-    with torch.no_grad():
-        embeddings = model(inputs)
-    return (embeddings['image'] @ embeddings['language'].T).item()
-
-def video_to_language(video, language):
-    inputs = {}
-    inputs['video'] = stack_dict([load_and_transform_video(video, modality_transform['video'])], device)
-    inputs['language'] = stack_dict([load_and_transform_text(language, modality_transform['language'])], device)
-    with torch.no_grad():
-        embeddings = model(inputs)
-    return (embeddings['video'] @ embeddings['language'].T).item()
-
-def audio_to_language(audio, language):
-    inputs = {}
-    inputs['audio'] = stack_dict([load_and_transform_audio(audio, modality_transform['audio'])], device)
-    inputs['language'] = stack_dict([load_and_transform_text(language, modality_transform['language'])], device)
-    with torch.no_grad():
-        embeddings = model(inputs)
-    return (embeddings['audio'] @ embeddings['language'].T).item()
-
-def depth_to_language(depth, language):
-    inputs = {}
-    inputs['depth'] = stack_dict([load_and_transform_depth(depth.name, modality_transform['depth'])], device)
-    inputs['language'] = stack_dict([load_and_transform_text(language, modality_transform['language'])], device)
-    with torch.no_grad():
-        embeddings = model(inputs)
-    return (embeddings['depth'] @ embeddings['language'].T).item()
-
-def thermal_to_language(thermal, language):
-    inputs = {}
-    inputs['thermal'] = stack_dict([load_and_transform_thermal(thermal, modality_transform['thermal'])], device)
-    inputs['language'] = stack_dict([load_and_transform_text(language, modality_transform['language'])], device)
-    with torch.no_grad():
-        embeddings = model(inputs)
-    return (embeddings['thermal'] @ embeddings['language'].T).item()
+from languagebind import LanguageBind, transform_dict, LanguageBindImageTokenizer, to_device

 code_highlight_css = (
 """
@@ -293,40 +106,102 @@ pre {
 }
 """

-with gr.Blocks(title="LanguageBind🚀", css=css) as demo:
-    gr.Markdown(title_markdown)
-    with gr.Row():
-        with gr.Column():
-            image = gr.Image(type="filepath", height=224, width=224, label='Image Input')
-            language_i = gr.Textbox(lines=2, label='Text Input')
-            out_i = gr.Textbox(label='Similarity of Image to Text')
-            b_i = gr.Button("Calculate similarity of Image to Text")
-        with gr.Column():
-            video = gr.Video(type="filepath", height=224, width=224, label='Video Input')
-            language_v = gr.Textbox(lines=2, label='Text Input')
-            out_v = gr.Textbox(label='Similarity of Video to Text')
-            b_v = gr.Button("Calculate similarity of Video to Text")
-        with gr.Column():
-            audio = gr.Audio(type="filepath", label='Audio Input')
-            language_a = gr.Textbox(lines=2, label='Text Input')
-            out_a = gr.Textbox(label='Similarity of Audio to Text')
-            b_a = gr.Button("Calculate similarity of Audio to Text")
-    with gr.Row():
-        with gr.Column():
-            depth = gr.File(height=224, width=224, label='Depth Input, need a .png file, 16 bit, with values ranging from 0-10000 (representing 0-10 metres, but 1000 times)')
-            language_d = gr.Textbox(lines=2, label='Text Input')
-            out_d = gr.Textbox(label='Similarity of Depth to Text')
-            b_d = gr.Button("Calculate similarity of Depth to Text")
-        with gr.Column():
-            thermal = gr.Image(type="filepath", height=224, width=224, label='Thermal Input, you should first convert to RGB')
-            language_t = gr.Textbox(lines=2, label='Text Input')
-            out_t = gr.Textbox(label='Similarity of Thermal to Text')
-            b_t = gr.Button("Calculate similarity of Thermal to Text")

-
-
-
-
-
+def image_to_language(image, language):
+    inputs = {}
+    inputs['image'] = to_device(modality_transform['image'](image), device)
+    inputs['language'] = to_device(modality_transform['language'](language, max_length=77, padding='max_length',
+                                                                  truncation=True, return_tensors='pt'), device)
+    with torch.no_grad():
+        embeddings = model(inputs)
+    return (embeddings['image'] @ embeddings['language'].T).item()
+
+
+def video_to_language(video, language):
+    inputs = {}
+    inputs['video'] = to_device(modality_transform['video'](video), device)
+    inputs['language'] = to_device(modality_transform['language'](language, max_length=77, padding='max_length',
+                                                                  truncation=True, return_tensors='pt'), device)
+    with torch.no_grad():
+        embeddings = model(inputs)
+    return (embeddings['video'] @ embeddings['language'].T).item()
+
+
+def audio_to_language(audio, language):
+    inputs = {}
+    inputs['audio'] = to_device(modality_transform['audio'](audio), device)
+    inputs['language'] = to_device(modality_transform['language'](language, max_length=77, padding='max_length',
+                                                                  truncation=True, return_tensors='pt'), device)
+    with torch.no_grad():
+        embeddings = model(inputs)
+    return (embeddings['audio'] @ embeddings['language'].T).item()
+
+
+def depth_to_language(depth, language):
+    inputs = {}
+    inputs['depth'] = to_device(modality_transform['depth'](depth.name), device)
+    inputs['language'] = to_device(modality_transform['language'](language, max_length=77, padding='max_length',
+                                                                  truncation=True, return_tensors='pt'), device)
+    with torch.no_grad():
+        embeddings = model(inputs)
+    return (embeddings['depth'] @ embeddings['language'].T).item()
+
+
+def thermal_to_language(thermal, language):
+    inputs = {}
+    inputs['thermal'] = to_device(modality_transform['thermal'](thermal), device)
+    inputs['language'] = to_device(modality_transform['language'](language, max_length=77, padding='max_length',
+                                                                  truncation=True, return_tensors='pt'), device)
+    with torch.no_grad():
+        embeddings = model(inputs)
+    return (embeddings['thermal'] @ embeddings['language'].T).item()

-
+if __name__ == '__main__':
+    device = 'cpu'
+    device = torch.device(device)
+    clip_type = ('thermal', 'image', 'video', 'depth', 'audio')
+    model = LanguageBind(clip_type=clip_type, cache_dir='./cache_dir', use_temp=False)
+    model = model.to(device)
+    model.eval()
+    pretrained_ckpt = f'lb203/LanguageBind_Image'
+    tokenizer = LanguageBindImageTokenizer.from_pretrained(pretrained_ckpt, cache_dir='./cache_dir/tokenizer_cache_dir')
+    modality_transform = {c: transform_dict[c](model.modality_config[c]) for c in clip_type}
+    modality_transform['language'] = tokenizer
+
+    with gr.Blocks(title="LanguageBind🚀", css=css) as demo:
+        gr.Markdown(title_markdown)
+        with gr.Row():
+            with gr.Column():
+                image = gr.Image(type="filepath", height=224, width=224, label='Image Input')
+                language_i = gr.Textbox(lines=2, label='Text Input')
+                out_i = gr.Textbox(label='Similarity of Image to Text')
+                b_i = gr.Button("Calculate similarity of Image to Text")
+            with gr.Column():
+                video = gr.Video(type="filepath", height=224, width=224, label='Video Input')
+                language_v = gr.Textbox(lines=2, label='Text Input')
+                out_v = gr.Textbox(label='Similarity of Video to Text')
+                b_v = gr.Button("Calculate similarity of Video to Text")
+            with gr.Column():
+                audio = gr.Audio(type="filepath", label='Audio Input')
+                language_a = gr.Textbox(lines=2, label='Text Input')
+                out_a = gr.Textbox(label='Similarity of Audio to Text')
+                b_a = gr.Button("Calculate similarity of Audio to Text")
+        with gr.Row():
+            with gr.Column():
+                depth = gr.File(height=224, width=224, label='Depth Input, need a .png file, 16 bit, with values ranging from 0-10000 (representing 0-10 metres, but 1000 times)')
+                language_d = gr.Textbox(lines=2, label='Text Input')
+                out_d = gr.Textbox(label='Similarity of Depth to Text')
+                b_d = gr.Button("Calculate similarity of Depth to Text")
+            with gr.Column():
+                thermal = gr.Image(type="filepath", height=224, width=224, label='Thermal Input, you should first convert to RGB')
+                language_t = gr.Textbox(lines=2, label='Text Input')
+                out_t = gr.Textbox(label='Similarity of Thermal to Text')
+                b_t = gr.Button("Calculate similarity of Thermal to Text")
+
+        b_i.click(image_to_language, inputs=[image, language_i], outputs=out_i)
+        b_a.click(audio_to_language, inputs=[audio, language_a], outputs=out_a)
+        b_v.click(video_to_language, inputs=[video, language_v], outputs=out_v)
+        b_d.click(depth_to_language, inputs=[depth, language_d], outputs=out_d)
+        b_t.click(thermal_to_language, inputs=[thermal, language_t], outputs=out_t)
+
+    demo.launch()
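In the updated demo, each `*_to_language` helper reduces to three steps: run the modality's processor, move the tensors with `to_device`, and take the dot product of the normalised embeddings. A minimal sketch of the same path outside Gradio, assuming the `languagebind` package from this commit is importable and the `lb203/LanguageBind_*` checkpoints are downloadable (the image path and caption are placeholders):

```python
# Sketch of the demo's similarity computation without the Gradio UI.
import torch
from languagebind import LanguageBind, LanguageBindImageTokenizer, transform_dict, to_device

device = torch.device('cpu')
model = LanguageBind(clip_type=('image',), cache_dir='./cache_dir', use_temp=False).to(device)
model.eval()
tokenizer = LanguageBindImageTokenizer.from_pretrained(
    'lb203/LanguageBind_Image', cache_dir='./cache_dir/tokenizer_cache_dir')
image_transform = transform_dict['image'](model.modality_config['image'])

inputs = {
    'image': to_device(image_transform(['assets/image/0.jpg']), device),
    'language': to_device(tokenizer(['a photo of a dog'], max_length=77, padding='max_length',
                                    truncation=True, return_tensors='pt'), device),
}
with torch.no_grad():
    embeddings = model(inputs)
# Plain cosine similarity: with use_temp=False both embeddings stay L2-normalised.
print((embeddings['image'] @ embeddings['language'].T).item())
```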
assets/audio/0.wav
ADDED
Binary file (328 kB).

assets/audio/1.wav
ADDED
Binary file (328 kB).

assets/demo.png
ADDED

assets/depth/0.png
ADDED

assets/depth/1.png
ADDED

assets/iclr_dataset_sample.jpg
ADDED

assets/image/0.jpg
ADDED

assets/image/1.jpg
ADDED

assets/res2.jpg
CHANGED

assets/thermal/0.jpg
ADDED

assets/thermal/1.jpg
ADDED

assets/video/0.mp4
ADDED
Binary file (661 kB).

assets/video/1.mp4
ADDED
Binary file (591 kB).
d_cls/__pycache__/precision.cpython-38.pyc
CHANGED
Binary files a/d_cls/__pycache__/precision.cpython-38.pyc and b/d_cls/__pycache__/precision.cpython-38.pyc differ

d_cls/__pycache__/zero_shot.cpython-38.pyc
CHANGED
Binary files a/d_cls/__pycache__/zero_shot.cpython-38.pyc and b/d_cls/__pycache__/zero_shot.cpython-38.pyc differ

d_cls/__pycache__/zero_shot_classifier.cpython-38.pyc
CHANGED
Binary files a/d_cls/__pycache__/zero_shot_classifier.cpython-38.pyc and b/d_cls/__pycache__/zero_shot_classifier.cpython-38.pyc differ

d_cls/__pycache__/zero_shot_metadata.cpython-38.pyc
CHANGED
Binary files a/d_cls/__pycache__/zero_shot_metadata.cpython-38.pyc and b/d_cls/__pycache__/zero_shot_metadata.cpython-38.pyc differ

d_cls/__pycache__/zeroshot_cls.cpython-38.pyc
CHANGED
Binary files a/d_cls/__pycache__/zeroshot_cls.cpython-38.pyc and b/d_cls/__pycache__/zeroshot_cls.cpython-38.pyc differ

data/__pycache__/base_datasets.cpython-38.pyc
CHANGED
Binary files a/data/__pycache__/base_datasets.cpython-38.pyc and b/data/__pycache__/base_datasets.cpython-38.pyc differ

data/__pycache__/build_datasets.cpython-38.pyc
CHANGED
Binary files a/data/__pycache__/build_datasets.cpython-38.pyc and b/data/__pycache__/build_datasets.cpython-38.pyc differ

data/__pycache__/new_loadvat.cpython-38.pyc
CHANGED
Binary files a/data/__pycache__/new_loadvat.cpython-38.pyc and b/data/__pycache__/new_loadvat.cpython-38.pyc differ

data/__pycache__/process_audio.cpython-38.pyc
CHANGED
Binary files a/data/__pycache__/process_audio.cpython-38.pyc and b/data/__pycache__/process_audio.cpython-38.pyc differ

data/__pycache__/process_depth.cpython-38.pyc
CHANGED
Binary files a/data/__pycache__/process_depth.cpython-38.pyc and b/data/__pycache__/process_depth.cpython-38.pyc differ

data/__pycache__/process_image.cpython-38.pyc
CHANGED
Binary files a/data/__pycache__/process_image.cpython-38.pyc and b/data/__pycache__/process_image.cpython-38.pyc differ

data/__pycache__/process_text.cpython-38.pyc
CHANGED
Binary files a/data/__pycache__/process_text.cpython-38.pyc and b/data/__pycache__/process_text.cpython-38.pyc differ

data/__pycache__/process_thermal.cpython-38.pyc
CHANGED
Binary files a/data/__pycache__/process_thermal.cpython-38.pyc and b/data/__pycache__/process_thermal.cpython-38.pyc differ

data/__pycache__/process_video.cpython-38.pyc
CHANGED
Binary files a/data/__pycache__/process_video.cpython-38.pyc and b/data/__pycache__/process_video.cpython-38.pyc differ
data/process_audio.py
CHANGED
@@ -56,10 +56,10 @@ class AudioTransform:
         if len(ranges[2]) == 0:  # if the audio is too short, we just use the first chunk
             ranges[2] = [0]
         # randomly choose index for each part
-        idx_front = np.random.choice(ranges[0])
-        idx_middle = np.random.choice(ranges[1])
-        idx_back = np.random.choice(ranges[2])
-        idx_front = ranges[0][0]
+        # idx_front = np.random.choice(ranges[0])
+        # idx_middle = np.random.choice(ranges[1])
+        # idx_back = np.random.choice(ranges[2])
+        idx_front = ranges[0][0]  # fixed
         idx_middle = ranges[1][0]
         idx_back = ranges[2][0]
         # select mel
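The hunk above swaps `np.random.choice` for the first index of each range, so the three mel-spectrogram chunks are cropped at fixed positions and repeated runs over the same clip yield identical features. A toy illustration of the difference (the `ranges` values below are made up for demonstration):

```python
# Illustration of the chunk-selection change above; `ranges` stands in for the
# index ranges computed by AudioTransform.
import numpy as np

ranges = [np.arange(0, 5), np.arange(5, 10), np.arange(10, 15)]

# Old behaviour: a different crop on every call.
idx_random = [np.random.choice(r) for r in ranges]

# New behaviour: deterministic, always the first index of each range.
idx_fixed = [r[0] for r in ranges]

print(idx_random, idx_fixed)  # idx_fixed is always [0, 5, 10]
```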
i_cls/__pycache__/precision.cpython-38.pyc
CHANGED
Binary files a/i_cls/__pycache__/precision.cpython-38.pyc and b/i_cls/__pycache__/precision.cpython-38.pyc differ

i_cls/__pycache__/zero_shot.cpython-38.pyc
CHANGED
Binary files a/i_cls/__pycache__/zero_shot.cpython-38.pyc and b/i_cls/__pycache__/zero_shot.cpython-38.pyc differ

i_cls/__pycache__/zeroshot_cls.cpython-38.pyc
CHANGED
Binary files a/i_cls/__pycache__/zeroshot_cls.cpython-38.pyc and b/i_cls/__pycache__/zeroshot_cls.cpython-38.pyc differ
inference.py
ADDED
@@ -0,0 +1,48 @@
+import torch
+from languagebind import LanguageBind, to_device, transform_dict, LanguageBindImageTokenizer
+
+if __name__ == '__main__':
+    device = 'cuda:0'
+    device = torch.device(device)
+    clip_type = ('thermal', 'image', 'video', 'depth', 'audio')
+    model = LanguageBind(clip_type=clip_type, cache_dir='./cache_dir')
+    model = model.to(device)
+    model.eval()
+    pretrained_ckpt = f'lb203/LanguageBind_Image'
+    tokenizer = LanguageBindImageTokenizer.from_pretrained(pretrained_ckpt, cache_dir='./cache_dir/tokenizer_cache_dir')
+    modality_transform = {c: transform_dict[c](model.modality_config[c]) for c in clip_type}
+
+    image = ['assets/image/0.jpg', 'assets/image/1.jpg']
+    audio = ['assets/audio/0.wav', 'assets/audio/1.wav']
+    video = ['assets/video/0.mp4', 'assets/video/1.mp4']
+    depth = ['assets/depth/0.png', 'assets/depth/1.png']
+    thermal = ['assets/thermal/0.jpg', 'assets/thermal/1.jpg']
+    language = ["Training a parakeet to climb up a ladder.", 'A lion climbing a tree to catch a monkey.']
+
+    inputs = {
+        'image': to_device(modality_transform['image'](image), device),
+        'video': to_device(modality_transform['video'](video), device),
+        'audio': to_device(modality_transform['audio'](audio), device),
+        'depth': to_device(modality_transform['depth'](depth), device),
+        'thermal': to_device(modality_transform['thermal'](thermal), device),
+    }
+    inputs['language'] = to_device(tokenizer(language, max_length=77, padding='max_length',
+                                             truncation=True, return_tensors='pt'), device)
+
+    with torch.no_grad():
+        embeddings = model(inputs)
+
+    print("Video x Text: \n",
+          torch.softmax(embeddings['video'] @ embeddings['language'].T, dim=-1).detach().cpu().numpy())
+    print("Image x Text: \n",
+          torch.softmax(embeddings['image'] @ embeddings['language'].T, dim=-1).detach().cpu().numpy())
+    print("Depth x Text: \n",
+          torch.softmax(embeddings['depth'] @ embeddings['language'].T, dim=-1).detach().cpu().numpy())
+    print("Audio x Text: \n",
+          torch.softmax(embeddings['audio'] @ embeddings['language'].T, dim=-1).detach().cpu().numpy())
+    print("Thermal x Text: \n",
+          torch.softmax(embeddings['thermal'] @ embeddings['language'].T, dim=-1).detach().cpu().numpy())
+
+    print("Video x Audio: \n",
+          torch.softmax(embeddings['video'] @ embeddings['audio'].T, dim=-1).detach().cpu().numpy())
+
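Each entry of `embeddings` is a `(batch, dim)` matrix of L2-normalised features, so `embeddings['video'] @ embeddings['language'].T` is a video-by-text similarity matrix, and the softmax over `dim=-1` turns each row into a distribution over the candidate captions. A small sketch of how those printed matrices map to retrieval (the scores below are made up):

```python
# `sim` stands in for any of the embeddings[x] @ embeddings['language'].T
# products printed by inference.py.
import torch

sim = torch.tensor([[0.9, 0.1],
                    [0.2, 0.8]])       # 2 samples x 2 candidate texts (made-up scores)
probs = torch.softmax(sim, dim=-1)     # per-sample distribution over texts
best = probs.argmax(dim=-1)            # index of the retrieved text for each sample
print(probs, best)                     # best == tensor([0, 1])
```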
languagebind/__init__.py
ADDED
@@ -0,0 +1,89 @@
+import torch
+from torch import nn
+from transformers import AutoConfig
+
+from .image.configuration_image import LanguageBindImageConfig
+from .image.modeling_image import LanguageBindImage
+from .image.tokenization_image import LanguageBindImageTokenizer
+from .image.processing_image import LanguageBindImageProcessor
+
+from .video.configuration_video import LanguageBindVideoConfig
+from .video.modeling_video import LanguageBindVideo
+from .video.tokenization_video import LanguageBindVideoTokenizer
+from .video.processing_video import LanguageBindVideoProcessor
+
+from .depth.configuration_depth import LanguageBindDepthConfig
+from .depth.modeling_depth import LanguageBindDepth
+from .depth.tokenization_depth import LanguageBindDepthTokenizer
+from .depth.processing_depth import LanguageBindDepthProcessor
+
+from .audio.configuration_audio import LanguageBindAudioConfig
+from .audio.modeling_audio import LanguageBindAudio
+from .audio.tokenization_audio import LanguageBindAudioTokenizer
+from .audio.processing_audio import LanguageBindAudioProcessor
+
+from .thermal.configuration_thermal import LanguageBindThermalConfig
+from .thermal.modeling_thermal import LanguageBindThermal
+from .thermal.tokenization_thermal import LanguageBindThermalTokenizer
+from .thermal.processing_thermal import LanguageBindThermalProcessor
+
+
+
+config_dict = {
+    'thermal': LanguageBindThermalConfig,
+    'image': LanguageBindImageConfig,
+    'video': LanguageBindVideoConfig,
+    'depth': LanguageBindDepthConfig,
+    'audio': LanguageBindAudioConfig
+}
+model_dict = {
+    'thermal': LanguageBindThermal,
+    'image': LanguageBindImage,
+    'video': LanguageBindVideo,
+    'depth': LanguageBindDepth,
+    'audio': LanguageBindAudio
+}
+transform_dict = {
+    'video': LanguageBindVideoProcessor,
+    'audio': LanguageBindAudioProcessor,
+    'depth': LanguageBindDepthProcessor,
+    'thermal': LanguageBindThermalProcessor,
+    'image': LanguageBindImageProcessor,
+}
+
+class LanguageBind(nn.Module):
+    def __init__(self, clip_type=('thermal', 'image', 'video', 'depth', 'audio'), cache_dir='cache_dir', use_temp=True):
+        super(LanguageBind, self).__init__()
+        self.use_temp = use_temp
+        self.modality_encoder = {}
+        self.modality_proj = {}
+        self.modality_scale = {}
+        self.modality_config = {}
+        for c in clip_type:
+            pretrained_ckpt = f'lb203/LanguageBind_{c}'
+            model = model_dict[c].from_pretrained(pretrained_ckpt, cache_dir=cache_dir)
+            self.modality_encoder[c] = model.vision_model
+            self.modality_proj[c] = model.visual_projection
+            self.modality_scale[c] = model.logit_scale
+            self.modality_config[c] = model.config
+        self.modality_encoder['language'] = model.text_model
+        self.modality_proj['language'] = model.text_projection
+
+        self.modality_encoder = nn.ModuleDict(self.modality_encoder)
+        self.modality_proj = nn.ModuleDict(self.modality_proj)
+
+    def forward(self, inputs):
+        outputs = {}
+        for key, value in inputs.items():
+            value = self.modality_encoder[key](**value)[1]
+            value = self.modality_proj[key](value)
+            value = value / value.norm(p=2, dim=-1, keepdim=True)
+            if self.use_temp:
+                if key != 'language':
+                    value = value * self.modality_scale[key].exp()
+            outputs[key] = value
+        return outputs
+
+def to_device(x, device):
+    out_dict = {k: v.to(device) for k, v in x.items()}
+    return out_dict
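Note that the wrapper shares one text tower: `self.modality_encoder['language']` is taken from whichever modality model the loop happens to load last, which relies on every `LanguageBind_*` checkpoint shipping the same language branch. A single modality can also be used directly through its exported classes; a sketch, assuming the same `lb203/LanguageBind_{c}` naming the loop uses and that the checkpoint also carries tokenizer files:

```python
# Sketch: load just the audio tower directly, without the LanguageBind wrapper.
# The checkpoint name mirrors the f-string in LanguageBind.__init__ above.
from languagebind import LanguageBindAudio, LanguageBindAudioProcessor, LanguageBindAudioTokenizer

ckpt = 'lb203/LanguageBind_audio'
model = LanguageBindAudio.from_pretrained(ckpt, cache_dir='./cache_dir')
processor = LanguageBindAudioProcessor(model.config)   # same call as transform_dict['audio']
tokenizer = LanguageBindAudioTokenizer.from_pretrained(ckpt, cache_dir='./cache_dir')
model.eval()
```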
languagebind/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (3.38 kB).

languagebind/audio/__pycache__/configuration_audio.cpython-38.pyc
ADDED
Binary file (14.7 kB).

languagebind/audio/__pycache__/modeling_audio.cpython-38.pyc
ADDED
Binary file (31 kB).

languagebind/audio/__pycache__/processing_audio.cpython-38.pyc
ADDED
Binary file (5.82 kB).

languagebind/audio/__pycache__/tokenization_audio.cpython-38.pyc
ADDED
Binary file (2.53 kB).