BasicNp commited on
Commit
5acb56c
1 Parent(s): a906978

Upload 71 files

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -50
  2. LICENSE +470 -0
  3. README.md +6 -6
  4. app.py +216 -4
  5. app_256.py +155 -0
  6. app_512.py +156 -0
  7. configs/inference_1024_v1.0.yaml +103 -0
  8. configs/inference_256_v1.0.yaml +98 -0
  9. configs/inference_512_v1.0.yaml +102 -0
  10. gradio_cached_examples/26/Generated Video/202047a2f8d639bc6ddd/rotating_view.mp4 +3 -0
  11. gradio_cached_examples/26/Generated Video/442dbae67917d74c14e0/man_walking.mp4 +0 -0
  12. gradio_cached_examples/26/Generated Video/652335bd60f11038e8c6/a_smiling_girl.mp4 +0 -0
  13. gradio_cached_examples/26/log.csv +4 -0
  14. gradio_cached_examples/48/Generated Video/1a3cd783a760d32f38fc/a_beach_with_waves_and_clouds_at_sunset.mp4 +0 -0
  15. gradio_cached_examples/48/Generated Video/27cf1c87f5065d38aa73/flowers_swaying_in_the_wind.mp4 +0 -0
  16. gradio_cached_examples/48/Generated Video/fd8df1a765d061e9a7c8/clothes_swaying_in_the_wind.mp4 +0 -0
  17. gradio_cached_examples/48/log.csv +4 -0
  18. lvdm/__pycache__/basics.cpython-39.pyc +0 -0
  19. lvdm/__pycache__/common.cpython-39.pyc +0 -0
  20. lvdm/__pycache__/distributions.cpython-39.pyc +0 -0
  21. lvdm/__pycache__/ema.cpython-39.pyc +0 -0
  22. lvdm/basics.py +100 -0
  23. lvdm/common.py +94 -0
  24. lvdm/distributions.py +95 -0
  25. lvdm/ema.py +76 -0
  26. lvdm/models/.DS_Store +0 -0
  27. lvdm/models/__pycache__/autoencoder.cpython-39.pyc +0 -0
  28. lvdm/models/__pycache__/ddpm3d.cpython-39.pyc +0 -0
  29. lvdm/models/__pycache__/utils_diffusion.cpython-39.pyc +0 -0
  30. lvdm/models/autoencoder.py +219 -0
  31. lvdm/models/ddpm3d.py +762 -0
  32. lvdm/models/samplers/__pycache__/ddim.cpython-39.pyc +0 -0
  33. lvdm/models/samplers/__pycache__/ddim_multiplecond.cpython-39.pyc +0 -0
  34. lvdm/models/samplers/ddim.py +317 -0
  35. lvdm/models/samplers/ddim_multiplecond.py +323 -0
  36. lvdm/models/utils_diffusion.py +158 -0
  37. lvdm/modules/.DS_Store +0 -0
  38. lvdm/modules/__pycache__/attention.cpython-39.pyc +0 -0
  39. lvdm/modules/attention.py +514 -0
  40. lvdm/modules/encoders/__pycache__/condition.cpython-39.pyc +0 -0
  41. lvdm/modules/encoders/__pycache__/resampler.cpython-39.pyc +0 -0
  42. lvdm/modules/encoders/condition.py +389 -0
  43. lvdm/modules/encoders/resampler.py +145 -0
  44. lvdm/modules/networks/__pycache__/ae_modules.cpython-39.pyc +0 -0
  45. lvdm/modules/networks/__pycache__/openaimodel3d.cpython-39.pyc +0 -0
  46. lvdm/modules/networks/ae_modules.py +844 -0
  47. lvdm/modules/networks/openaimodel3d.py +603 -0
  48. lvdm/modules/x_transformer.py +639 -0
  49. prompts/512_interp/smile_01.png +0 -0
  50. prompts/512_interp/smile_02.png +0 -0
.gitattributes CHANGED
@@ -33,53 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
- assets/image159.gif filter=lfs diff=lfs merge=lfs -text
37
- assets/image160.gif filter=lfs diff=lfs merge=lfs -text
38
- assets/image161.gif filter=lfs diff=lfs merge=lfs -text
39
- assets/image162.gif filter=lfs diff=lfs merge=lfs -text
40
- assets/image166.gif filter=lfs diff=lfs merge=lfs -text
41
- assets/image167.gif filter=lfs diff=lfs merge=lfs -text
42
- assets/image171.gif filter=lfs diff=lfs merge=lfs -text
43
- assets/image185.gif filter=lfs diff=lfs merge=lfs -text
44
- assets/image186.gif filter=lfs diff=lfs merge=lfs -text
45
- assets/image189.gif filter=lfs diff=lfs merge=lfs -text
46
- assets/image190.gif filter=lfs diff=lfs merge=lfs -text
47
- assets/image265[[:space:]](1).gif filter=lfs diff=lfs merge=lfs -text
48
- assets/image268.gif filter=lfs diff=lfs merge=lfs -text
49
- assets/image269.gif filter=lfs diff=lfs merge=lfs -text
50
- assets/image27[[:space:]](1)1.gif filter=lfs diff=lfs merge=lfs -text
51
- assets/image270[[:space:]](1).gif filter=lfs diff=lfs merge=lfs -text
52
- assets/image271[[:space:]](1).gif filter=lfs diff=lfs merge=lfs -text
53
- assets/image272.gif filter=lfs diff=lfs merge=lfs -text
54
- assets/image274[[:space:]](1).gif filter=lfs diff=lfs merge=lfs -text
55
- assets/image276.gif filter=lfs diff=lfs merge=lfs -text
56
- assets/image277.gif filter=lfs diff=lfs merge=lfs -text
57
- assets/image278.gif filter=lfs diff=lfs merge=lfs -text
58
- assets/image279[[:space:]](1).gif filter=lfs diff=lfs merge=lfs -text
59
- assets/image28[[:space:]](2).gif filter=lfs diff=lfs merge=lfs -text
60
- assets/image280.gif filter=lfs diff=lfs merge=lfs -text
61
- assets/image281.gif filter=lfs diff=lfs merge=lfs -text
62
- assets/image282[[:space:]](1).gif filter=lfs diff=lfs merge=lfs -text
63
- assets/image283[[:space:]](1).gif filter=lfs diff=lfs merge=lfs -text
64
- assets/image285.gif filter=lfs diff=lfs merge=lfs -text
65
- assets/image287.gif filter=lfs diff=lfs merge=lfs -text
66
- assets/image288.gif filter=lfs diff=lfs merge=lfs -text
67
- assets/image29[[:space:]](1)1.gif filter=lfs diff=lfs merge=lfs -text
68
- assets/image290.gif filter=lfs diff=lfs merge=lfs -text
69
- assets/image291.gif filter=lfs diff=lfs merge=lfs -text
70
- assets/image292.gif filter=lfs diff=lfs merge=lfs -text
71
- assets/image293.gif filter=lfs diff=lfs merge=lfs -text
72
- assets/image3011.gif filter=lfs diff=lfs merge=lfs -text
73
- assets/image63111.gif filter=lfs diff=lfs merge=lfs -text
74
- assets/image6411.gif filter=lfs diff=lfs merge=lfs -text
75
- assets/image65111.gif filter=lfs diff=lfs merge=lfs -text
76
- assets/image6611.gif filter=lfs diff=lfs merge=lfs -text
77
- assets/image74.gif filter=lfs diff=lfs merge=lfs -text
78
- assets/image75.gif filter=lfs diff=lfs merge=lfs -text
79
- assets/image76.gif filter=lfs diff=lfs merge=lfs -text
80
- assets/image77.gif filter=lfs diff=lfs merge=lfs -text
81
- assets/image84.gif filter=lfs diff=lfs merge=lfs -text
82
- assets/image85.gif filter=lfs diff=lfs merge=lfs -text
83
- assets/image87.gif filter=lfs diff=lfs merge=lfs -text
84
- assets/image88.gif filter=lfs diff=lfs merge=lfs -text
85
- assets/Our_Motorbike_cloud_floor.gif filter=lfs diff=lfs merge=lfs -text
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ gradio_cached_examples/26/Generated[[:space:]]Video/202047a2f8d639bc6ddd/rotating_view.mp4 filter=lfs diff=lfs merge=lfs -text
37
+ prompts/512_loop/24.png filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
LICENSE ADDED
@@ -0,0 +1,470 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ This license applies to the source codes that are open sourced in connection with the DynamiCrafter.
2
+
3
+ Copyright (C) 2023 THL A29 Limited, a Tencent company.
4
+
5
+ Apache License
6
+ Version 2.0, January 2004
7
+ http://www.apache.org/licenses/
8
+
9
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
10
+
11
+ 1. Definitions.
12
+
13
+ "License" shall mean the terms and conditions for use, reproduction,
14
+ and distribution as defined by Sections 1 through 9 of this document.
15
+
16
+ "Licensor" shall mean the copyright owner or entity authorized by
17
+ the copyright owner that is granting the License.
18
+
19
+ "Legal Entity" shall mean the union of the acting entity and all
20
+ other entities that control, are controlled by, or are under common
21
+ control with that entity. For the purposes of this definition,
22
+ "control" means (i) the power, direct or indirect, to cause the
23
+ direction or management of such entity, whether by contract or
24
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
25
+ outstanding shares, or (iii) beneficial ownership of such entity.
26
+
27
+ "You" (or "Your") shall mean an individual or Legal Entity
28
+ exercising permissions granted by this License.
29
+
30
+ "Source" form shall mean the preferred form for making modifications,
31
+ including but not limited to software source code, documentation
32
+ source, and configuration files.
33
+
34
+ "Object" form shall mean any form resulting from mechanical
35
+ transformation or translation of a Source form, including but
36
+ not limited to compiled object code, generated documentation,
37
+ and conversions to other media types.
38
+
39
+ "Work" shall mean the work of authorship, whether in Source or
40
+ Object form, made available under the License, as indicated by a
41
+ copyright notice that is included in or attached to the work
42
+ (an example is provided in the Appendix below).
43
+
44
+ "Derivative Works" shall mean any work, whether in Source or Object
45
+ form, that is based on (or derived from) the Work and for which the
46
+ editorial revisions, annotations, elaborations, or other modifications
47
+ represent, as a whole, an original work of authorship. For the purposes
48
+ of this License, Derivative Works shall not include works that remain
49
+ separable from, or merely link (or bind by name) to the interfaces of,
50
+ the Work and Derivative Works thereof.
51
+
52
+ "Contribution" shall mean any work of authorship, including
53
+ the original version of the Work and any modifications or additions
54
+ to that Work or Derivative Works thereof, that is intentionally
55
+ submitted to Licensor for inclusion in the Work by the copyright owner
56
+ or by an individual or Legal Entity authorized to submit on behalf of
57
+ the copyright owner. For the purposes of this definition, "submitted"
58
+ means any form of electronic, verbal, or written communication sent
59
+ to the Licensor or its representatives, including but not limited to
60
+ communication on electronic mailing lists, source code control systems,
61
+ and issue tracking systems that are managed by, or on behalf of, the
62
+ Licensor for the purpose of discussing and improving the Work, but
63
+ excluding communication that is conspicuously marked or otherwise
64
+ designated in writing by the copyright owner as "Not a Contribution."
65
+
66
+ "Contributor" shall mean Licensor and any individual or Legal Entity
67
+ on behalf of whom a Contribution has been received by Licensor and
68
+ subsequently incorporated within the Work.
69
+
70
+ 2. Grant of Copyright License. Subject to the terms and conditions of
71
+ this License, each Contributor hereby grants to You a perpetual,
72
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
73
+ copyright license to reproduce, prepare Derivative Works of,
74
+ publicly display, publicly perform, sublicense, and distribute the
75
+ Work and such Derivative Works in Source or Object form.
76
+
77
+ 3. Grant of Patent License. Subject to the terms and conditions of
78
+ this License, each Contributor hereby grants to You a perpetual,
79
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
80
+ (except as stated in this section) patent license to make, have made,
81
+ use, offer to sell, sell, import, and otherwise transfer the Work,
82
+ where such license applies only to those patent claims licensable
83
+ by such Contributor that are necessarily infringed by their
84
+ Contribution(s) alone or by combination of their Contribution(s)
85
+ with the Work to which such Contribution(s) was submitted. If You
86
+ institute patent litigation against any entity (including a
87
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
88
+ or a Contribution incorporated within the Work constitutes direct
89
+ or contributory patent infringement, then any patent licenses
90
+ granted to You under this License for that Work shall terminate
91
+ as of the date such litigation is filed.
92
+
93
+ 4. Redistribution. You may reproduce and distribute copies of the
94
+ Work or Derivative Works thereof in any medium, with or without
95
+ modifications, and in Source or Object form, provided that You
96
+ meet the following conditions:
97
+
98
+ (a) You must give any other recipients of the Work or
99
+ Derivative Works a copy of this License; and
100
+
101
+ (b) You must cause any modified files to carry prominent notices
102
+ stating that You changed the files; and
103
+
104
+ (c) You must retain, in the Source form of any Derivative Works
105
+ that You distribute, all copyright, patent, trademark, and
106
+ attribution notices from the Source form of the Work,
107
+ excluding those notices that do not pertain to any part of
108
+ the Derivative Works; and
109
+
110
+ (d) If the Work includes a "NOTICE" text file as part of its
111
+ distribution, then any Derivative Works that You distribute must
112
+ include a readable copy of the attribution notices contained
113
+ within such NOTICE file, excluding those notices that do not
114
+ pertain to any part of the Derivative Works, in at least one
115
+ of the following places: within a NOTICE text file distributed
116
+ as part of the Derivative Works; within the Source form or
117
+ documentation, if provided along with the Derivative Works; or,
118
+ within a display generated by the Derivative Works, if and
119
+ wherever such third-party notices normally appear. The contents
120
+ of the NOTICE file are for informational purposes only and
121
+ do not modify the License. You may add Your own attribution
122
+ notices within Derivative Works that You distribute, alongside
123
+ or as an addendum to the NOTICE text from the Work, provided
124
+ that such additional attribution notices cannot be construed
125
+ as modifying the License.
126
+
127
+ You may add Your own copyright statement to Your modifications and
128
+ may provide additional or different license terms and conditions
129
+ for use, reproduction, or distribution of Your modifications, or
130
+ for any such Derivative Works as a whole, provided Your use,
131
+ reproduction, and distribution of the Work otherwise complies with
132
+ the conditions stated in this License.
133
+
134
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
135
+ any Contribution intentionally submitted for inclusion in the Work
136
+ by You to the Licensor shall be under the terms and conditions of
137
+ this License, without any additional terms or conditions.
138
+ Notwithstanding the above, nothing herein shall supersede or modify
139
+ the terms of any separate license agreement you may have executed
140
+ with Licensor regarding such Contributions.
141
+
142
+ 6. Trademarks. This License does not grant permission to use the trade
143
+ names, trademarks, service marks, or product names of the Licensor,
144
+ except as required for reasonable and customary use in describing the
145
+ origin of the Work and reproducing the content of the NOTICE file.
146
+
147
+ 7. Disclaimer of Warranty. Unless required by applicable law or
148
+ agreed to in writing, Licensor provides the Work (and each
149
+ Contributor provides its Contributions) on an "AS IS" BASIS,
150
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
151
+ implied, including, without limitation, any warranties or conditions
152
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
153
+ PARTICULAR PURPOSE. You are solely responsible for determining the
154
+ appropriateness of using or redistributing the Work and assume any
155
+ risks associated with Your exercise of permissions under this License.
156
+
157
+ 8. Limitation of Liability. In no event and under no legal theory,
158
+ whether in tort (including negligence), contract, or otherwise,
159
+ unless required by applicable law (such as deliberate and grossly
160
+ negligent acts) or agreed to in writing, shall any Contributor be
161
+ liable to You for damages, including any direct, indirect, special,
162
+ incidental, or consequential damages of any character arising as a
163
+ result of this License or out of the use or inability to use the
164
+ Work (including but not limited to damages for loss of goodwill,
165
+ work stoppage, computer failure or malfunction, or any and all
166
+ other commercial damages or losses), even if such Contributor
167
+ has been advised of the possibility of such damages.
168
+
169
+ 9. Accepting Warranty or Additional Liability. While redistributing
170
+ the Work or Derivative Works thereof, You may choose to offer,
171
+ and charge a fee for, acceptance of support, warranty, indemnity,
172
+ or other liability obligations and/or rights consistent with this
173
+ License. However, in accepting such obligations, You may act only
174
+ on Your own behalf and on Your sole responsibility, not on behalf
175
+ of any other Contributor, and only if You agree to indemnify,
176
+ defend, and hold each Contributor harmless for any liability
177
+ incurred by, or claims asserted against, such Contributor by reason
178
+ of your accepting any such warranty or additional liability.
179
+
180
+ 10. This code is provided for research purposes only and is
181
+ not to be used for any commercial purposes. By using this code,
182
+ you agree that it will be used solely for academic research, scholarly work,
183
+ and non-commercial activities. Any use of this code for commercial purposes,
184
+ including but not limited to, selling, distributing, or incorporating it into
185
+ commercial products or services, is strictly prohibited. Violation of this
186
+ clause may result in legal actions and penalties.
187
+
188
+ END OF TERMS AND CONDITIONS
189
+
190
+ APPENDIX: How to apply the Apache License to your work.
191
+
192
+ To apply the Apache License to your work, attach the following
193
+ boilerplate notice, with the fields enclosed by brackets "[]"
194
+ replaced with your own identifying information. (Don't include
195
+ the brackets!) The text should be enclosed in the appropriate
196
+ comment syntax for the file format. We also recommend that a
197
+ file or class name and description of purpose be included on the
198
+ same "printed page" as the copyright notice for easier
199
+ identification within third-party archives.
200
+
201
+ Copyright [yyyy] [name of copyright owner]
202
+
203
+ Licensed under the Apache License, Version 2.0 (the "License");
204
+ you may not use this file except in compliance with the License.
205
+ You may obtain a copy of the License at
206
+
207
+ http://www.apache.org/licenses/LICENSE-2.0
208
+
209
+ Unless required by applicable law or agreed to in writing, software
210
+ distributed under the License is distributed on an "AS IS" BASIS,
211
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
212
+ See the License for the specific language governing permissions and
213
+ limitations under the License.
214
+
215
+
216
+ Other dependencies and licenses (if such optional components are used):
217
+
218
+
219
+ Components under BSD 3-Clause License:
220
+ ------------------------------------------------
221
+ 1. numpy
222
+ Copyright (c) 2005-2022, NumPy Developers.
223
+ All rights reserved.
224
+
225
+ 2. pytorch
226
+ Copyright (c) 2016- Facebook, Inc (Adam Paszke)
227
+ Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
228
+ Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
229
+ Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
230
+ Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
231
+ Copyright (c) 2011-2013 NYU (Clement Farabet)
232
+ Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
233
+ Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
234
+ Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
235
+
236
+ 3. torchvision
237
+ Copyright (c) Soumith Chintala 2016,
238
+ All rights reserved.
239
+
240
+ Redistribution and use in source and binary forms, with or without
241
+ modification, are permitted provided that the following conditions are met:
242
+
243
+ * Redistributions of source code must retain the above copyright notice, this
244
+ list of conditions and the following disclaimer.
245
+
246
+ * Redistributions in binary form must reproduce the above copyright notice,
247
+ this list of conditions and the following disclaimer in the documentation
248
+ and/or other materials provided with the distribution.
249
+
250
+ * Neither the name of the copyright holder nor the names of its
251
+ contributors may be used to endorse or promote products derived from
252
+ this software without specific prior written permission.
253
+
254
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
255
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
256
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
257
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
258
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
259
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
260
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
261
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
262
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
263
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
264
+
265
+ Component under Apache v2 License:
266
+ -----------------------------------------------------
267
+ 1. timm
268
+ Copyright 2019 Ross Wightman
269
+
270
+ Apache License
271
+ Version 2.0, January 2004
272
+ http://www.apache.org/licenses/
273
+
274
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
275
+
276
+ 1. Definitions.
277
+
278
+ "License" shall mean the terms and conditions for use, reproduction,
279
+ and distribution as defined by Sections 1 through 9 of this document.
280
+
281
+ "Licensor" shall mean the copyright owner or entity authorized by
282
+ the copyright owner that is granting the License.
283
+
284
+ "Legal Entity" shall mean the union of the acting entity and all
285
+ other entities that control, are controlled by, or are under common
286
+ control with that entity. For the purposes of this definition,
287
+ "control" means (i) the power, direct or indirect, to cause the
288
+ direction or management of such entity, whether by contract or
289
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
290
+ outstanding shares, or (iii) beneficial ownership of such entity.
291
+
292
+ "You" (or "Your") shall mean an individual or Legal Entity
293
+ exercising permissions granted by this License.
294
+
295
+ "Source" form shall mean the preferred form for making modifications,
296
+ including but not limited to software source code, documentation
297
+ source, and configuration files.
298
+
299
+ "Object" form shall mean any form resulting from mechanical
300
+ transformation or translation of a Source form, including but
301
+ not limited to compiled object code, generated documentation,
302
+ and conversions to other media types.
303
+
304
+ "Work" shall mean the work of authorship, whether in Source or
305
+ Object form, made available under the License, as indicated by a
306
+ copyright notice that is included in or attached to the work
307
+ (an example is provided in the Appendix below).
308
+
309
+ "Derivative Works" shall mean any work, whether in Source or Object
310
+ form, that is based on (or derived from) the Work and for which the
311
+ editorial revisions, annotations, elaborations, or other modifications
312
+ represent, as a whole, an original work of authorship. For the purposes
313
+ of this License, Derivative Works shall not include works that remain
314
+ separable from, or merely link (or bind by name) to the interfaces of,
315
+ the Work and Derivative Works thereof.
316
+
317
+ "Contribution" shall mean any work of authorship, including
318
+ the original version of the Work and any modifications or additions
319
+ to that Work or Derivative Works thereof, that is intentionally
320
+ submitted to Licensor for inclusion in the Work by the copyright owner
321
+ or by an individual or Legal Entity authorized to submit on behalf of
322
+ the copyright owner. For the purposes of this definition, "submitted"
323
+ means any form of electronic, verbal, or written communication sent
324
+ to the Licensor or its representatives, including but not limited to
325
+ communication on electronic mailing lists, source code control systems,
326
+ and issue tracking systems that are managed by, or on behalf of, the
327
+ Licensor for the purpose of discussing and improving the Work, but
328
+ excluding communication that is conspicuously marked or otherwise
329
+ designated in writing by the copyright owner as "Not a Contribution."
330
+
331
+ "Contributor" shall mean Licensor and any individual or Legal Entity
332
+ on behalf of whom a Contribution has been received by Licensor and
333
+ subsequently incorporated within the Work.
334
+
335
+ 2. Grant of Copyright License. Subject to the terms and conditions of
336
+ this License, each Contributor hereby grants to You a perpetual,
337
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
338
+ copyright license to reproduce, prepare Derivative Works of,
339
+ publicly display, publicly perform, sublicense, and distribute the
340
+ Work and such Derivative Works in Source or Object form.
341
+
342
+ 3. Grant of Patent License. Subject to the terms and conditions of
343
+ this License, each Contributor hereby grants to You a perpetual,
344
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
345
+ (except as stated in this section) patent license to make, have made,
346
+ use, offer to sell, sell, import, and otherwise transfer the Work,
347
+ where such license applies only to those patent claims licensable
348
+ by such Contributor that are necessarily infringed by their
349
+ Contribution(s) alone or by combination of their Contribution(s)
350
+ with the Work to which such Contribution(s) was submitted. If You
351
+ institute patent litigation against any entity (including a
352
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
353
+ or a Contribution incorporated within the Work constitutes direct
354
+ or contributory patent infringement, then any patent licenses
355
+ granted to You under this License for that Work shall terminate
356
+ as of the date such litigation is filed.
357
+
358
+ 4. Redistribution. You may reproduce and distribute copies of the
359
+ Work or Derivative Works thereof in any medium, with or without
360
+ modifications, and in Source or Object form, provided that You
361
+ meet the following conditions:
362
+
363
+ (a) You must give any other recipients of the Work or
364
+ Derivative Works a copy of this License; and
365
+
366
+ (b) You must cause any modified files to carry prominent notices
367
+ stating that You changed the files; and
368
+
369
+ (c) You must retain, in the Source form of any Derivative Works
370
+ that You distribute, all copyright, patent, trademark, and
371
+ attribution notices from the Source form of the Work,
372
+ excluding those notices that do not pertain to any part of
373
+ the Derivative Works; and
374
+
375
+ (d) If the Work includes a "NOTICE" text file as part of its
376
+ distribution, then any Derivative Works that You distribute must
377
+ include a readable copy of the attribution notices contained
378
+ within such NOTICE file, excluding those notices that do not
379
+ pertain to any part of the Derivative Works, in at least one
380
+ of the following places: within a NOTICE text file distributed
381
+ as part of the Derivative Works; within the Source form or
382
+ documentation, if provided along with the Derivative Works; or,
383
+ within a display generated by the Derivative Works, if and
384
+ wherever such third-party notices normally appear. The contents
385
+ of the NOTICE file are for informational purposes only and
386
+ do not modify the License. You may add Your own attribution
387
+ notices within Derivative Works that You distribute, alongside
388
+ or as an addendum to the NOTICE text from the Work, provided
389
+ that such additional attribution notices cannot be construed
390
+ as modifying the License.
391
+
392
+ You may add Your own copyright statement to Your modifications and
393
+ may provide additional or different license terms and conditions
394
+ for use, reproduction, or distribution of Your modifications, or
395
+ for any such Derivative Works as a whole, provided Your use,
396
+ reproduction, and distribution of the Work otherwise complies with
397
+ the conditions stated in this License.
398
+
399
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
400
+ any Contribution intentionally submitted for inclusion in the Work
401
+ by You to the Licensor shall be under the terms and conditions of
402
+ this License, without any additional terms or conditions.
403
+ Notwithstanding the above, nothing herein shall supersede or modify
404
+ the terms of any separate license agreement you may have executed
405
+ with Licensor regarding such Contributions.
406
+
407
+ 6. Trademarks. This License does not grant permission to use the trade
408
+ names, trademarks, service marks, or product names of the Licensor,
409
+ except as required for reasonable and customary use in describing the
410
+ origin of the Work and reproducing the content of the NOTICE file.
411
+
412
+ 7. Disclaimer of Warranty. Unless required by applicable law or
413
+ agreed to in writing, Licensor provides the Work (and each
414
+ Contributor provides its Contributions) on an "AS IS" BASIS,
415
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
416
+ implied, including, without limitation, any warranties or conditions
417
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
418
+ PARTICULAR PURPOSE. You are solely responsible for determining the
419
+ appropriateness of using or redistributing the Work and assume any
420
+ risks associated with Your exercise of permissions under this License.
421
+
422
+ 8. Limitation of Liability. In no event and under no legal theory,
423
+ whether in tort (including negligence), contract, or otherwise,
424
+ unless required by applicable law (such as deliberate and grossly
425
+ negligent acts) or agreed to in writing, shall any Contributor be
426
+ liable to You for damages, including any direct, indirect, special,
427
+ incidental, or consequential damages of any character arising as a
428
+ result of this License or out of the use or inability to use the
429
+ Work (including but not limited to damages for loss of goodwill,
430
+ work stoppage, computer failure or malfunction, or any and all
431
+ other commercial damages or losses), even if such Contributor
432
+ has been advised of the possibility of such damages.
433
+
434
+ 9. Accepting Warranty or Additional Liability. While redistributing
435
+ the Work or Derivative Works thereof, You may choose to offer,
436
+ and charge a fee for, acceptance of support, warranty, indemnity,
437
+ or other liability obligations and/or rights consistent with this
438
+ License. However, in accepting such obligations, You may act only
439
+ on Your own behalf and on Your sole responsibility, not on behalf
440
+ of any other Contributor, and only if You agree to indemnify,
441
+ defend, and hold each Contributor harmless for any liability
442
+ incurred by, or claims asserted against, such Contributor by reason
443
+ of your accepting any such warranty or additional liability.
444
+
445
+ END OF TERMS AND CONDITIONS
446
+
447
+ APPENDIX: How to apply the Apache License to your work.
448
+
449
+ To apply the Apache License to your work, attach the following
450
+ boilerplate notice, with the fields enclosed by brackets "[]"
451
+ replaced with your own identifying information. (Don't include
452
+ the brackets!) The text should be enclosed in the appropriate
453
+ comment syntax for the file format. We also recommend that a
454
+ file or class name and description of purpose be included on the
455
+ same "printed page" as the copyright notice for easier
456
+ identification within third-party archives.
457
+
458
+ Copyright [yyyy] [name of copyright owner]
459
+
460
+ Licensed under the Apache License, Version 2.0 (the "License");
461
+ you may not use this file except in compliance with the License.
462
+ You may obtain a copy of the License at
463
+
464
+ http://www.apache.org/licenses/LICENSE-2.0
465
+
466
+ Unless required by applicable law or agreed to in writing, software
467
+ distributed under the License is distributed on an "AS IS" BASIS,
468
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
469
+ See the License for the specific language governing permissions and
470
+ limitations under the License.
README.md CHANGED
@@ -1,13 +1,13 @@
1
  ---
2
- title: Dragreal
3
- emoji: 🐠
4
- colorFrom: indigo
5
- colorTo: blue
6
  sdk: gradio
7
- sdk_version: 4.23.0
8
  app_file: app.py
9
  pinned: false
10
- license: mit
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: DynamiCrafter
3
+ emoji: 🐨
4
+ colorFrom: yellow
5
+ colorTo: purple
6
  sdk: gradio
7
+ sdk_version: 4.7.1
8
  app_file: app.py
9
  pinned: false
10
+ license: other
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,7 +1,219 @@
 
1
  import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
- def greet(name):
4
- return "Hello " + name + "!!"
 
 
 
 
 
 
 
5
 
6
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
7
- iface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import spaces
2
  import gradio as gr
3
+ import os
4
+ import sys
5
+ import time
6
+ from omegaconf import OmegaConf
7
+ import torch
8
+ from pytorch_lightning import seed_everything
9
+ from huggingface_hub import hf_hub_download
10
+ from einops import repeat
11
+ import torchvision.transforms as transforms
12
+ from utils.utils import instantiate_from_config
13
+ sys.path.insert(0, "scripts/evaluation")
14
+ from funcs import (
15
+ batch_ddim_sampling,
16
+ load_model_checkpoint,
17
+ get_latent_z,
18
+ save_videos
19
+ )
20
 
21
+ def download_model():
22
+ REPO_ID = 'Doubiiu/DynamiCrafter_512_Interp'
23
+ filename_list = ['model.ckpt']
24
+ if not os.path.exists('./checkpoints/dynamicrafter_512_interp_v1/'):
25
+ os.makedirs('./checkpoints/dynamicrafter_512_interp_v1/')
26
+ for filename in filename_list:
27
+ local_file = os.path.join('./checkpoints/dynamicrafter_512_interp_v1/', filename)
28
+ if not os.path.exists(local_file):
29
+ hf_hub_download(repo_id=REPO_ID, filename=filename, local_dir='./checkpoints/dynamicrafter_512_interp_v1/', force_download=True)
30
 
31
+
32
+
33
+ download_model()
34
+ ckpt_path='checkpoints/dynamicrafter_512_interp_v1/model.ckpt'
35
+ config_file='configs/inference_512_v1.0.yaml'
36
+ config = OmegaConf.load(config_file)
37
+ model_config = config.pop("model", OmegaConf.create())
38
+ model_config['params']['unet_config']['params']['use_checkpoint']=False
39
+ model = instantiate_from_config(model_config)
40
+ assert os.path.exists(ckpt_path), "Error: checkpoint Not Found!"
41
+ model = load_model_checkpoint(model, ckpt_path)
42
+ model.eval()
43
+ model = model.cuda()
44
+
45
+
46
+
47
+ @spaces.GPU(duration=300)
48
+ def infer(image, prompt, steps=50, cfg_scale=7.5, eta=1.0, fs=3, seed=123, image2=None):
49
+ resolution = (320, 512)
50
+ save_fps = 8
51
+ seed_everything(seed)
52
+ transform = transforms.Compose([
53
+ transforms.Resize(min(resolution)),
54
+ transforms.CenterCrop(resolution),
55
+ ])
56
+ torch.cuda.empty_cache()
57
+ print('start:', prompt, time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))
58
+ start = time.time()
59
+ if steps > 60:
60
+ steps = 60
61
+
62
+ batch_size=1
63
+ channels = model.model.diffusion_model.out_channels
64
+ frames = model.temporal_length
65
+ h, w = resolution[0] // 8, resolution[1] // 8
66
+ noise_shape = [batch_size, channels, frames, h, w]
67
+
68
+ # text cond
69
+ with torch.no_grad(), torch.cuda.amp.autocast():
70
+ text_emb = model.get_learned_conditioning([prompt])
71
+
72
+ # img cond
73
+ img_tensor = torch.from_numpy(image).permute(2, 0, 1).float().to(model.device)
74
+ img_tensor = (img_tensor / 255. - 0.5) * 2
75
+
76
+ image_tensor_resized = transform(img_tensor) #3,256,256
77
+ videos = image_tensor_resized.unsqueeze(0) # bchw
78
+
79
+ z = get_latent_z(model, videos.unsqueeze(2)) #bc,1,hw
80
+
81
+ if image2 is not None:
82
+ img_tensor2 = torch.from_numpy(image2).permute(2, 0, 1).float().to(model.device)
83
+ img_tensor2 = (img_tensor2 / 255. - 0.5) * 2
84
+
85
+ image_tensor_resized2 = transform(img_tensor2) #3,h,w
86
+ videos2 = image_tensor_resized2.unsqueeze(0) # bchw
87
+
88
+ z2 = get_latent_z(model, videos2.unsqueeze(2)) #bc,1,hw
89
+
90
+
91
+
92
+ img_tensor_repeat = repeat(z, 'b c t h w -> b c (repeat t) h w', repeat=frames)
93
+
94
+ img_tensor_repeat = torch.zeros_like(img_tensor_repeat)
95
+
96
+ ## old
97
+ img_tensor_repeat[:,:,:1,:,:] = z
98
+ if image2 is not None:
99
+ img_tensor_repeat[:,:,-1:,:,:] = z2
100
+ else:
101
+ img_tensor_repeat[:,:,-1:,:,:] = z
102
+
103
+ cond_images = model.embedder(img_tensor.unsqueeze(0)) ## blc
104
+ img_emb = model.image_proj_model(cond_images)
105
+
106
+ imtext_cond = torch.cat([text_emb, img_emb], dim=1)
107
+
108
+ fs = torch.tensor([fs], dtype=torch.long, device=model.device)
109
+ cond = {"c_crossattn": [imtext_cond], "fs": fs, "c_concat": [img_tensor_repeat]}
110
+
111
+ ## inference
112
+ batch_samples = batch_ddim_sampling(model, cond, noise_shape, n_samples=1, ddim_steps=steps, ddim_eta=eta, cfg_scale=cfg_scale)
113
+ ## b,samples,c,t,h,w
114
+ ## remove the last frame for looping video
115
+ if image2 is None:
116
+ batch_samples = batch_samples[:,:,:,:-1,...]
117
+ video_path = './output.mp4'
118
+ save_videos(batch_samples, './', filenames=['output'], fps=save_fps)
119
+ return video_path
120
+
121
+
122
+ i2v_examples_interp_512 = [
123
+ ['prompts/512_interp/smile_01.png', 'a smiling girl', 50, 7.5, 1.0, 5, 12306, 'prompts/512_interp/smile_02.png'],
124
+ ['prompts/512_interp/stone01_01.png', 'rotating view', 50, 7.5, 1.0, 5, 123, 'prompts/512_interp/stone01_02.png'],
125
+ ['prompts/512_interp/walk_01.png', 'man walking', 50, 7.5, 1.0, 5, 345, 'prompts/512_interp/walk_02.png'],
126
+ ]
127
+ i2v_examples_loop_512 = [
128
+ ['prompts/512_loop/24.png', 'a beach with waves and clouds at sunset', 50, 7.5, 1.0, 5, 234],
129
+ ['prompts/512_loop/36.png', 'clothes swaying in the wind', 50, 7.5, 1.0, 5, 123],
130
+ ['prompts/512_loop/40.png', 'flowers swaying in the wind', 50, 7.5, 1.0, 5, 234],
131
+ ]
132
+
133
+
134
+
135
+
136
+ css = """#input_img {max-width: 512px !important} #input_img2 {max-width: 512px !important} #output_vid {max-width: 512px; max-height: 320px} """
137
+
138
+ with gr.Blocks(analytics_enabled=False, css=css) as dynamicrafter_iface:
139
+ gr.Markdown("<div align='center'> <h1> DynamiCrafter: Animating Open-domain Images with Video Diffusion Priors </span> </h1> \
140
+ <h2 style='font-weight: 450; font-size: 1rem; margin: 0rem'>\
141
+ <a href='https://doubiiu.github.io/'>Jinbo Xing</a>, \
142
+ <a href='https://menghanxia.github.io/'>Menghan Xia</a>, <a href='https://yzhang2016.github.io/'>Yong Zhang</a>, \
143
+ <a href=''>Haoxin Chen</a>, <a href=''> Wangbo Yu</a>,\
144
+ <a href='https://github.com/hyliu'>Hanyuan Liu</a>, <a href='https://xinntao.github.io/'>Xintao Wang</a>,\
145
+ <a href='https://www.cse.cuhk.edu.hk/~ttwong/myself.html'>Tien-Tsin Wong</a>,\
146
+ <a href='https://scholar.google.com/citations?user=4oXBp9UAAAAJ&hl=zh-CN'>Ying Shan</a>\
147
+ </h2> \
148
+ <a style='font-size:18px;color: #000000'>If DynamiCrafter is useful, please help star the </a>\
149
+ <a style='font-size:18px;color: #000000' href='https://github.com/Doubiiu/DynamiCrafter'>[Github Repo]</a>\
150
+ <a style='font-size:18px;color: #000000'>, which is important to Open-Source projects. Thanks!</a>\
151
+ <a style='font-size:18px;color: #000000' href='https://arxiv.org/abs/2310.12190'> [ArXiv] </a>\
152
+ <a style='font-size:18px;color: #000000' href='https://doubiiu.github.io/projects/DynamiCrafter/'> [Project Page] </a> </div>")
153
+
154
+ #######generative frame interpolation and looping video generation######
155
+ with gr.Tab(label='Generative Frame Interpolation_320x512'):
156
+ with gr.Column():
157
+ with gr.Row():
158
+ with gr.Column():
159
+ with gr.Row():
160
+ i2v_input_image = gr.Image(label="Input Image1",elem_id="input_img")
161
+ with gr.Row():
162
+ i2v_input_text = gr.Text(label='Prompts')
163
+ with gr.Row():
164
+ i2v_seed = gr.Slider(label='Random Seed', minimum=0, maximum=50000, step=1, value=123)
165
+ i2v_eta = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, label='ETA', value=1.0, elem_id="i2v_eta")
166
+ i2v_cfg_scale = gr.Slider(minimum=1.0, maximum=15.0, step=0.5, label='CFG Scale', value=7.5, elem_id="i2v_cfg_scale")
167
+ with gr.Row():
168
+ i2v_steps = gr.Slider(minimum=1, maximum=50, step=1, elem_id="i2v_steps", label="Sampling steps", value=50)
169
+ i2v_motion = gr.Slider(minimum=5, maximum=30, step=1, elem_id="i2v_motion", label="FPS", value=10)
170
+ i2v_end_btn = gr.Button("Generate")
171
+ with gr.Column():
172
+ with gr.Row():
173
+ i2v_input_image2 = gr.Image(label="Input Image2",elem_id="input_img2")
174
+ with gr.Row():
175
+ i2v_output_video = gr.Video(label="Generated Video",elem_id="output_vid",autoplay=True,show_share_button=True)
176
+
177
+ gr.Examples(examples=i2v_examples_interp_512,
178
+ inputs=[i2v_input_image, i2v_input_text, i2v_steps, i2v_cfg_scale, i2v_eta, i2v_motion, i2v_seed, i2v_input_image2],
179
+ outputs=[i2v_output_video],
180
+ fn = infer,
181
+ cache_examples=True,
182
+ )
183
+ i2v_end_btn.click(inputs=[i2v_input_image, i2v_input_text, i2v_steps, i2v_cfg_scale, i2v_eta, i2v_motion, i2v_seed, i2v_input_image2],
184
+ outputs=[i2v_output_video],
185
+ fn = infer
186
+ )
187
+ #######generative frame interpolation and looping video generation######
188
+ with gr.Tab(label='Looping Video Generation_320x512'):
189
+ with gr.Column():
190
+ with gr.Row():
191
+ with gr.Column():
192
+ with gr.Row():
193
+ i2v_input_image = gr.Image(label="Input Image",elem_id="input_img")
194
+ with gr.Row():
195
+ i2v_input_text = gr.Text(label='Prompts')
196
+ with gr.Row():
197
+ i2v_seed = gr.Slider(label='Random Seed', minimum=0, maximum=50000, step=1, value=123)
198
+ i2v_eta = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, label='ETA', value=1.0, elem_id="i2v_eta")
199
+ i2v_cfg_scale = gr.Slider(minimum=1.0, maximum=15.0, step=0.5, label='CFG Scale', value=7.5, elem_id="i2v_cfg_scale")
200
+ with gr.Row():
201
+ i2v_steps = gr.Slider(minimum=1, maximum=50, step=1, elem_id="i2v_steps", label="Sampling steps", value=50)
202
+ i2v_motion = gr.Slider(minimum=5, maximum=30, step=1, elem_id="i2v_motion", label="FPS", value=5)
203
+ i2v_end_btn = gr.Button("Generate")
204
+ # with gr.Tab(label='Result'):
205
+ with gr.Row():
206
+ i2v_output_video = gr.Video(label="Generated Video",elem_id="output_vid",autoplay=True,show_share_button=True)
207
+
208
+ gr.Examples(examples=i2v_examples_loop_512,
209
+ inputs=[i2v_input_image, i2v_input_text, i2v_steps, i2v_cfg_scale, i2v_eta, i2v_motion, i2v_seed],
210
+ outputs=[i2v_output_video],
211
+ fn = infer,
212
+ cache_examples=True,
213
+ )
214
+ i2v_end_btn.click(inputs=[i2v_input_image, i2v_input_text, i2v_steps, i2v_cfg_scale, i2v_eta, i2v_motion, i2v_seed],
215
+ outputs=[i2v_output_video],
216
+ fn = infer
217
+ )
218
+
219
+ dynamicrafter_iface.queue(max_size=12).launch(show_api=True)
app_256.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ import sys
4
+ import argparse
5
+ import random
6
+ import time
7
+ from omegaconf import OmegaConf
8
+ import torch
9
+ import torchvision
10
+ from pytorch_lightning import seed_everything
11
+ from huggingface_hub import hf_hub_download
12
+ from einops import repeat
13
+ import torchvision.transforms as transforms
14
+ from utils.utils import instantiate_from_config
15
+ sys.path.insert(0, "scripts/evaluation")
16
+ from funcs import (
17
+ batch_ddim_sampling,
18
+ load_model_checkpoint,
19
+ get_latent_z,
20
+ save_videos
21
+ )
22
+
23
+ def download_model():
24
+ REPO_ID = 'Doubiiu/DynamiCrafter'
25
+ filename_list = ['model.ckpt']
26
+ if not os.path.exists('./checkpoints/dynamicrafter_256_v1/'):
27
+ os.makedirs('./checkpoints/dynamicrafter_256_v1/')
28
+ for filename in filename_list:
29
+ local_file = os.path.join('./checkpoints/dynamicrafter_256_v1/', filename)
30
+ if not os.path.exists(local_file):
31
+ hf_hub_download(repo_id=REPO_ID, filename=filename, local_dir='./checkpoints/dynamicrafter_256_v1/', force_download=True)
32
+
33
+
34
+ def infer(image, prompt, steps=50, cfg_scale=7.5, eta=1.0, fs=3, seed=123):
35
+ download_model()
36
+ ckpt_path='checkpoints/dynamicrafter_256_v1/model.ckpt'
37
+ config_file='configs/inference_256_v1.0.yaml'
38
+ config = OmegaConf.load(config_file)
39
+ model_config = config.pop("model", OmegaConf.create())
40
+ model_config['params']['unet_config']['params']['use_checkpoint']=False
41
+ model = instantiate_from_config(model_config)
42
+ assert os.path.exists(ckpt_path), "Error: checkpoint Not Found!"
43
+ model = load_model_checkpoint(model, ckpt_path)
44
+ model.eval()
45
+ model = model.cuda()
46
+ save_fps = 8
47
+
48
+ seed_everything(seed)
49
+ transform = transforms.Compose([
50
+ transforms.Resize(256),
51
+ transforms.CenterCrop(256),
52
+ ])
53
+ torch.cuda.empty_cache()
54
+ print('start:', prompt, time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))
55
+ start = time.time()
56
+ if steps > 60:
57
+ steps = 60
58
+
59
+ batch_size=1
60
+ channels = model.model.diffusion_model.out_channels
61
+ frames = model.temporal_length
62
+ h, w = 256 // 8, 256 // 8
63
+ noise_shape = [batch_size, channels, frames, h, w]
64
+
65
+ # text cond
66
+ text_emb = model.get_learned_conditioning([prompt])
67
+
68
+ # img cond
69
+ img_tensor = torch.from_numpy(image).permute(2, 0, 1).float().to(model.device)
70
+ img_tensor = (img_tensor / 255. - 0.5) * 2
71
+
72
+ image_tensor_resized = transform(img_tensor) #3,256,256
73
+ videos = image_tensor_resized.unsqueeze(0) # bchw
74
+
75
+ z = get_latent_z(model, videos.unsqueeze(2)) #bc,1,hw
76
+
77
+ img_tensor_repeat = repeat(z, 'b c t h w -> b c (repeat t) h w', repeat=frames)
78
+
79
+ cond_images = model.embedder(img_tensor.unsqueeze(0)) ## blc
80
+ img_emb = model.image_proj_model(cond_images)
81
+
82
+ imtext_cond = torch.cat([text_emb, img_emb], dim=1)
83
+
84
+ fs = torch.tensor([fs], dtype=torch.long, device=model.device)
85
+ cond = {"c_crossattn": [imtext_cond], "fs": fs, "c_concat": [img_tensor_repeat]}
86
+
87
+ ## inference
88
+ batch_samples = batch_ddim_sampling(model, cond, noise_shape, n_samples=1, ddim_steps=steps, ddim_eta=eta, cfg_scale=cfg_scale)
89
+ ## b,samples,c,t,h,w
90
+
91
+ video_path = './output.mp4'
92
+ save_videos(batch_samples, './', filenames=['output'], fps=save_fps)
93
+ model = model.cpu()
94
+ return video_path
95
+
96
+
97
+
98
+
99
+
100
+
101
+ i2v_examples = [
102
+ ['prompts/256/art.png', 'man fishing in a boat at sunset', 50, 7.5, 1.0, 3, 234],
103
+ ['prompts/256/boy.png', 'boy walking on the street', 50, 7.5, 1.0, 3, 125],
104
+ ['prompts/256/dance1.jpeg', 'two people dancing', 50, 7.5, 1.0, 3, 116],
105
+ ['prompts/256/fire_and_beach.jpg', 'a campfire on the beach and the ocean waves in the background', 50, 7.5, 1.0, 3, 111],
106
+ ['prompts/256/girl3.jpeg', 'girl talking and blinking', 50, 7.5, 1.0, 3, 111],
107
+ ['prompts/256/guitar0.jpeg', 'bear playing guitar happily, snowing', 50, 7.5, 1.0, 3, 122],
108
+ ]
109
+ css = """#input_img {max-width: 256px !important} #output_vid {max-width: 256px; max-height: 256px}"""
110
+
111
+ with gr.Blocks(analytics_enabled=False, css=css) as dynamicrafter_iface:
112
+ gr.Markdown("<div align='center'> <h1> DynamiCrafter: Animating Open-domain Images with Video Diffusion Priors </span> </h1> \
113
+ <h2 style='font-weight: 450; font-size: 1rem; margin: 0rem'>\
114
+ <a href='https://doubiiu.github.io/'>Jinbo Xing</a>, \
115
+ <a href='https://menghanxia.github.io/'>Menghan Xia</a>, <a href='https://yzhang2016.github.io/'>Yong Zhang</a>, \
116
+ <a href=''>Haoxin Chen</a>, <a href=''> Wangbo Yu</a>,\
117
+ <a href='https://github.com/hyliu'>Hanyuan Liu</a>, <a href='https://xinntao.github.io/'>Xintao Wang</a>,\
118
+ <a href='https://www.cse.cuhk.edu.hk/~ttwong/myself.html'>Tien-Tsin Wong</a>,\
119
+ <a href='https://scholar.google.com/citations?user=4oXBp9UAAAAJ&hl=zh-CN'>Ying Shan</a>\
120
+ </h2> \
121
+ <a style='font-size:18px;color: #000000' href='https://arxiv.org/abs/2310.12190'> [ArXiv] </a>\
122
+ <a style='font-size:18px;color: #000000' href='https://doubiiu.github.io/projects/DynamiCrafter/'> [Project Page] </a> \
123
+ <a style='font-size:18px;color: #000000' href='https://github.com/Doubiiu/DynamiCrafter'> [Github] </a> </div>")
124
+
125
+ with gr.Tab(label='ImageAnimation_256x256'):
126
+ with gr.Column():
127
+ with gr.Row():
128
+ with gr.Column():
129
+ with gr.Row():
130
+ i2v_input_image = gr.Image(label="Input Image",elem_id="input_img")
131
+ with gr.Row():
132
+ i2v_input_text = gr.Text(label='Prompts')
133
+ with gr.Row():
134
+ i2v_seed = gr.Slider(label='Random Seed', minimum=0, maximum=10000, step=1, value=123)
135
+ i2v_eta = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, label='ETA', value=1.0, elem_id="i2v_eta")
136
+ i2v_cfg_scale = gr.Slider(minimum=1.0, maximum=15.0, step=0.5, label='CFG Scale', value=7.5, elem_id="i2v_cfg_scale")
137
+ with gr.Row():
138
+ i2v_steps = gr.Slider(minimum=1, maximum=60, step=1, elem_id="i2v_steps", label="Sampling steps", value=50)
139
+ i2v_motion = gr.Slider(minimum=1, maximum=4, step=1, elem_id="i2v_motion", label="Motion magnitude", value=3)
140
+ i2v_end_btn = gr.Button("Generate")
141
+ # with gr.Tab(label='Result'):
142
+ with gr.Row():
143
+ i2v_output_video = gr.Video(label="Generated Video",elem_id="output_vid",autoplay=True,show_share_button=True)
144
+
145
+ gr.Examples(examples=i2v_examples,
146
+ inputs=[i2v_input_image, i2v_input_text, i2v_steps, i2v_cfg_scale, i2v_eta, i2v_motion, i2v_seed],
147
+ outputs=[i2v_output_video],
148
+ fn = infer,
149
+ )
150
+ i2v_end_btn.click(inputs=[i2v_input_image, i2v_input_text, i2v_steps, i2v_cfg_scale, i2v_eta, i2v_motion, i2v_seed],
151
+ outputs=[i2v_output_video],
152
+ fn = infer
153
+ )
154
+
155
+ dynamicrafter_iface.queue(max_size=12).launch(show_api=True)
app_512.py ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ import sys
4
+ import argparse
5
+ import random
6
+ import time
7
+ from omegaconf import OmegaConf
8
+ import torch
9
+ import torchvision
10
+ from pytorch_lightning import seed_everything
11
+ from huggingface_hub import hf_hub_download
12
+ from einops import repeat
13
+ import torchvision.transforms as transforms
14
+ from utils.utils import instantiate_from_config
15
+ sys.path.insert(0, "scripts/evaluation")
16
+ from funcs import (
17
+ batch_ddim_sampling,
18
+ load_model_checkpoint,
19
+ get_latent_z,
20
+ save_videos
21
+ )
22
+
23
+ def download_model():
24
+ REPO_ID = 'Doubiiu/DynamiCrafter_512'
25
+ filename_list = ['model.ckpt']
26
+ if not os.path.exists('./checkpoints/dynamicrafter_512_v1/'):
27
+ os.makedirs('./checkpoints/dynamicrafter_512_v1/')
28
+ for filename in filename_list:
29
+ local_file = os.path.join('./checkpoints/dynamicrafter_512_v1/', filename)
30
+ if not os.path.exists(local_file):
31
+ hf_hub_download(repo_id=REPO_ID, filename=filename, local_dir='./checkpoints/dynamicrafter_512_v1/', force_download=True)
32
+
33
+
34
+ def infer(image, prompt, steps=50, cfg_scale=7.5, eta=1.0, fs=3, seed=123):
35
+ resolution = (320, 512)
36
+ download_model()
37
+ ckpt_path='checkpoints/dynamicrafter_512_v1/model.ckpt'
38
+ config_file='configs/inference_512_v1.0.yaml'
39
+ config = OmegaConf.load(config_file)
40
+ model_config = config.pop("model", OmegaConf.create())
41
+ model_config['params']['unet_config']['params']['use_checkpoint']=False
42
+ model = instantiate_from_config(model_config)
43
+ assert os.path.exists(ckpt_path), "Error: checkpoint Not Found!"
44
+ model = load_model_checkpoint(model, ckpt_path)
45
+ model.eval()
46
+ model = model.cuda()
47
+ save_fps = 8
48
+
49
+ seed_everything(seed)
50
+ transform = transforms.Compose([
51
+ transforms.Resize(min(resolution)),
52
+ transforms.CenterCrop(resolution),
53
+ ])
54
+ torch.cuda.empty_cache()
55
+ print('start:', prompt, time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))
56
+ start = time.time()
57
+ if steps > 60:
58
+ steps = 60
59
+
60
+ batch_size=1
61
+ channels = model.model.diffusion_model.out_channels
62
+ frames = model.temporal_length
63
+ h, w = resolution[0] // 8, resolution[1] // 8
64
+ noise_shape = [batch_size, channels, frames, h, w]
65
+
66
+ # text cond
67
+ text_emb = model.get_learned_conditioning([prompt])
68
+
69
+ # img cond
70
+ img_tensor = torch.from_numpy(image).permute(2, 0, 1).float().to(model.device)
71
+ img_tensor = (img_tensor / 255. - 0.5) * 2
72
+
73
+ image_tensor_resized = transform(img_tensor) #3,256,256
74
+ videos = image_tensor_resized.unsqueeze(0) # bchw
75
+
76
+ z = get_latent_z(model, videos.unsqueeze(2)) #bc,1,hw
77
+
78
+ img_tensor_repeat = repeat(z, 'b c t h w -> b c (repeat t) h w', repeat=frames)
79
+
80
+ cond_images = model.embedder(img_tensor.unsqueeze(0)) ## blc
81
+ img_emb = model.image_proj_model(cond_images)
82
+
83
+ imtext_cond = torch.cat([text_emb, img_emb], dim=1)
84
+
85
+ fs = torch.tensor([fs], dtype=torch.long, device=model.device)
86
+ cond = {"c_crossattn": [imtext_cond], "fs": fs, "c_concat": [img_tensor_repeat]}
87
+
88
+ ## inference
89
+ batch_samples = batch_ddim_sampling(model, cond, noise_shape, n_samples=1, ddim_steps=steps, ddim_eta=eta, cfg_scale=cfg_scale)
90
+ ## b,samples,c,t,h,w
91
+
92
+ video_path = './output.mp4'
93
+ save_videos(batch_samples, './', filenames=['output'], fps=save_fps)
94
+ model = model.cpu()
95
+ return video_path
96
+
97
+
98
+ i2v_examples = [
99
+ ['prompts/512/bloom01.png', 'time-lapse of a blooming flower with leaves and a stem', 50, 7.5, 1.0, 24, 123],
100
+ ['prompts/512/campfire.png', 'a bonfire is lit in the middle of a field', 50, 7.5, 1.0, 24, 123],
101
+ ['prompts/512/isometric.png', 'rotating view, small house', 50, 7.5, 1.0, 24, 123],
102
+ ['prompts/512/girl08.png', 'a woman looking out in the rain', 50, 7.5, 1.0, 24, 1234],
103
+ ['prompts/512/ship02.png', 'a sailboat sailing in rough seas with a dramatic sunset', 50, 7.5, 1.0, 24, 123],
104
+ ['prompts/512/zreal_penguin.png', 'a group of penguins walking on a beach', 50, 7.5, 1.0, 20, 123],
105
+ ]
106
+
107
+
108
+
109
+
110
+ css = """#input_img {max-width: 512px !important} #output_vid {max-width: 512px; max-height: 320px}"""
111
+
112
+ with gr.Blocks(analytics_enabled=False, css=css) as dynamicrafter_iface:
113
+ gr.Markdown("<div align='center'> <h1> DynamiCrafter: Animating Open-domain Images with Video Diffusion Priors </span> </h1> \
114
+ <h2 style='font-weight: 450; font-size: 1rem; margin: 0rem'>\
115
+ <a href='https://doubiiu.github.io/'>Jinbo Xing</a>, \
116
+ <a href='https://menghanxia.github.io/'>Menghan Xia</a>, <a href='https://yzhang2016.github.io/'>Yong Zhang</a>, \
117
+ <a href=''>Haoxin Chen</a>, <a href=''> Wangbo Yu</a>,\
118
+ <a href='https://github.com/hyliu'>Hanyuan Liu</a>, <a href='https://xinntao.github.io/'>Xintao Wang</a>,\
119
+ <a href='https://www.cse.cuhk.edu.hk/~ttwong/myself.html'>Tien-Tsin Wong</a>,\
120
+ <a href='https://scholar.google.com/citations?user=4oXBp9UAAAAJ&hl=zh-CN'>Ying Shan</a>\
121
+ </h2> \
122
+ <a style='font-size:18px;color: #000000' href='https://arxiv.org/abs/2310.12190'> [ArXiv] </a>\
123
+ <a style='font-size:18px;color: #000000' href='https://doubiiu.github.io/projects/DynamiCrafter/'> [Project Page] </a> \
124
+ <a style='font-size:18px;color: #000000' href='https://github.com/Doubiiu/DynamiCrafter'> [Github] </a> </div>")
125
+
126
+ with gr.Tab(label='ImageAnimation_320x512'):
127
+ with gr.Column():
128
+ with gr.Row():
129
+ with gr.Column():
130
+ with gr.Row():
131
+ i2v_input_image = gr.Image(label="Input Image",elem_id="input_img")
132
+ with gr.Row():
133
+ i2v_input_text = gr.Text(label='Prompts')
134
+ with gr.Row():
135
+ i2v_seed = gr.Slider(label='Random Seed', minimum=0, maximum=10000, step=1, value=123)
136
+ i2v_eta = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, label='ETA', value=1.0, elem_id="i2v_eta")
137
+ i2v_cfg_scale = gr.Slider(minimum=1.0, maximum=15.0, step=0.5, label='CFG Scale', value=7.5, elem_id="i2v_cfg_scale")
138
+ with gr.Row():
139
+ i2v_steps = gr.Slider(minimum=1, maximum=60, step=1, elem_id="i2v_steps", label="Sampling steps", value=50)
140
+ i2v_motion = gr.Slider(minimum=15, maximum=30, step=1, elem_id="i2v_motion", label="FPS", value=24)
141
+ i2v_end_btn = gr.Button("Generate")
142
+ # with gr.Tab(label='Result'):
143
+ with gr.Row():
144
+ i2v_output_video = gr.Video(label="Generated Video",elem_id="output_vid",autoplay=True,show_share_button=True)
145
+
146
+ gr.Examples(examples=i2v_examples,
147
+ inputs=[i2v_input_image, i2v_input_text, i2v_steps, i2v_cfg_scale, i2v_eta, i2v_motion, i2v_seed],
148
+ outputs=[i2v_output_video],
149
+ fn = infer,
150
+ )
151
+ i2v_end_btn.click(inputs=[i2v_input_image, i2v_input_text, i2v_steps, i2v_cfg_scale, i2v_eta, i2v_motion, i2v_seed],
152
+ outputs=[i2v_output_video],
153
+ fn = infer
154
+ )
155
+
156
+ dynamicrafter_iface.queue(max_size=12).launch(show_api=True)
configs/inference_1024_v1.0.yaml ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ target: lvdm.models.ddpm3d.LatentVisualDiffusion
3
+ params:
4
+ rescale_betas_zero_snr: True
5
+ parameterization: "v"
6
+ linear_start: 0.00085
7
+ linear_end: 0.012
8
+ num_timesteps_cond: 1
9
+ timesteps: 1000
10
+ first_stage_key: video
11
+ cond_stage_key: caption
12
+ cond_stage_trainable: False
13
+ conditioning_key: hybrid
14
+ image_size: [72, 128]
15
+ channels: 4
16
+ scale_by_std: False
17
+ scale_factor: 0.18215
18
+ use_ema: False
19
+ uncond_type: 'empty_seq'
20
+ use_dynamic_rescale: true
21
+ base_scale: 0.3
22
+ fps_condition_type: 'fps'
23
+ perframe_ae: True
24
+ unet_config:
25
+ target: lvdm.modules.networks.openaimodel3d.UNetModel
26
+ params:
27
+ in_channels: 8
28
+ out_channels: 4
29
+ model_channels: 320
30
+ attention_resolutions:
31
+ - 4
32
+ - 2
33
+ - 1
34
+ num_res_blocks: 2
35
+ channel_mult:
36
+ - 1
37
+ - 2
38
+ - 4
39
+ - 4
40
+ dropout: 0.1
41
+ num_head_channels: 64
42
+ transformer_depth: 1
43
+ context_dim: 1024
44
+ use_linear: true
45
+ use_checkpoint: True
46
+ temporal_conv: True
47
+ temporal_attention: True
48
+ temporal_selfatt_only: true
49
+ use_relative_position: false
50
+ use_causal_attention: False
51
+ temporal_length: 16
52
+ addition_attention: true
53
+ image_cross_attention: true
54
+ default_fs: 10
55
+ fs_condition: true
56
+
57
+ first_stage_config:
58
+ target: lvdm.models.autoencoder.AutoencoderKL
59
+ params:
60
+ embed_dim: 4
61
+ monitor: val/rec_loss
62
+ ddconfig:
63
+ double_z: True
64
+ z_channels: 4
65
+ resolution: 256
66
+ in_channels: 3
67
+ out_ch: 3
68
+ ch: 128
69
+ ch_mult:
70
+ - 1
71
+ - 2
72
+ - 4
73
+ - 4
74
+ num_res_blocks: 2
75
+ attn_resolutions: []
76
+ dropout: 0.0
77
+ lossconfig:
78
+ target: torch.nn.Identity
79
+
80
+ cond_stage_config:
81
+ target: lvdm.modules.encoders.condition.FrozenOpenCLIPEmbedder
82
+ params:
83
+ freeze: true
84
+ layer: "penultimate"
85
+
86
+ img_cond_stage_config:
87
+ target: lvdm.modules.encoders.condition.FrozenOpenCLIPImageEmbedderV2
88
+ params:
89
+ freeze: true
90
+
91
+ image_proj_stage_config:
92
+ target: lvdm.modules.encoders.resampler.Resampler
93
+ params:
94
+ dim: 1024
95
+ depth: 4
96
+ dim_head: 64
97
+ heads: 12
98
+ num_queries: 16
99
+ embedding_dim: 1280
100
+ output_dim: 1024
101
+ ff_mult: 4
102
+ video_length: 16
103
+
configs/inference_256_v1.0.yaml ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ target: lvdm.models.ddpm3d.LatentVisualDiffusion
3
+ params:
4
+ linear_start: 0.00085
5
+ linear_end: 0.012
6
+ num_timesteps_cond: 1
7
+ timesteps: 1000
8
+ first_stage_key: video
9
+ cond_stage_key: caption
10
+ cond_stage_trainable: False
11
+ conditioning_key: hybrid
12
+ image_size: [32, 32]
13
+ channels: 4
14
+ scale_by_std: False
15
+ scale_factor: 0.18215
16
+ use_ema: False
17
+ uncond_type: 'empty_seq'
18
+ unet_config:
19
+ target: lvdm.modules.networks.openaimodel3d.UNetModel
20
+ params:
21
+ in_channels: 8
22
+ out_channels: 4
23
+ model_channels: 320
24
+ attention_resolutions:
25
+ - 4
26
+ - 2
27
+ - 1
28
+ num_res_blocks: 2
29
+ channel_mult:
30
+ - 1
31
+ - 2
32
+ - 4
33
+ - 4
34
+ dropout: 0.1
35
+ num_head_channels: 64
36
+ transformer_depth: 1
37
+ context_dim: 1024
38
+ use_linear: true
39
+ use_checkpoint: True
40
+ temporal_conv: True
41
+ temporal_attention: True
42
+ temporal_selfatt_only: true
43
+ use_relative_position: false
44
+ use_causal_attention: False
45
+ temporal_length: 16
46
+ addition_attention: true
47
+ image_cross_attention: true
48
+ image_cross_attention_scale_learnable: true
49
+ default_fs: 3
50
+ fs_condition: true
51
+
52
+ first_stage_config:
53
+ target: lvdm.models.autoencoder.AutoencoderKL
54
+ params:
55
+ embed_dim: 4
56
+ monitor: val/rec_loss
57
+ ddconfig:
58
+ double_z: True
59
+ z_channels: 4
60
+ resolution: 256
61
+ in_channels: 3
62
+ out_ch: 3
63
+ ch: 128
64
+ ch_mult:
65
+ - 1
66
+ - 2
67
+ - 4
68
+ - 4
69
+ num_res_blocks: 2
70
+ attn_resolutions: []
71
+ dropout: 0.0
72
+ lossconfig:
73
+ target: torch.nn.Identity
74
+
75
+ cond_stage_config:
76
+ target: lvdm.modules.encoders.condition.FrozenOpenCLIPEmbedder
77
+ params:
78
+ freeze: true
79
+ layer: "penultimate"
80
+
81
+ img_cond_stage_config:
82
+ target: lvdm.modules.encoders.condition.FrozenOpenCLIPImageEmbedderV2
83
+ params:
84
+ freeze: true
85
+
86
+ image_proj_stage_config:
87
+ target: lvdm.modules.encoders.resampler.Resampler
88
+ params:
89
+ dim: 1024
90
+ depth: 4
91
+ dim_head: 64
92
+ heads: 12
93
+ num_queries: 16
94
+ embedding_dim: 1280
95
+ output_dim: 1024
96
+ ff_mult: 4
97
+ video_length: 16
98
+
configs/inference_512_v1.0.yaml ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ target: lvdm.models.ddpm3d.LatentVisualDiffusion
3
+ params:
4
+ rescale_betas_zero_snr: True
5
+ parameterization: "v"
6
+ linear_start: 0.00085
7
+ linear_end: 0.012
8
+ num_timesteps_cond: 1
9
+ timesteps: 1000
10
+ first_stage_key: video
11
+ cond_stage_key: caption
12
+ cond_stage_trainable: False
13
+ conditioning_key: hybrid
14
+ image_size: [40, 64]
15
+ channels: 4
16
+ scale_by_std: False
17
+ scale_factor: 0.18215
18
+ use_ema: False
19
+ uncond_type: 'empty_seq'
20
+ use_dynamic_rescale: true
21
+ base_scale: 0.7
22
+ fps_condition_type: 'fps'
23
+ unet_config:
24
+ target: lvdm.modules.networks.openaimodel3d.UNetModel
25
+ params:
26
+ in_channels: 8
27
+ out_channels: 4
28
+ model_channels: 320
29
+ attention_resolutions:
30
+ - 4
31
+ - 2
32
+ - 1
33
+ num_res_blocks: 2
34
+ channel_mult:
35
+ - 1
36
+ - 2
37
+ - 4
38
+ - 4
39
+ dropout: 0.1
40
+ num_head_channels: 64
41
+ transformer_depth: 1
42
+ context_dim: 1024
43
+ use_linear: true
44
+ use_checkpoint: True
45
+ temporal_conv: True
46
+ temporal_attention: True
47
+ temporal_selfatt_only: true
48
+ use_relative_position: false
49
+ use_causal_attention: False
50
+ temporal_length: 16
51
+ addition_attention: true
52
+ image_cross_attention: true
53
+ default_fs: 24
54
+ fs_condition: true
55
+
56
+ first_stage_config:
57
+ target: lvdm.models.autoencoder.AutoencoderKL
58
+ params:
59
+ embed_dim: 4
60
+ monitor: val/rec_loss
61
+ ddconfig:
62
+ double_z: True
63
+ z_channels: 4
64
+ resolution: 256
65
+ in_channels: 3
66
+ out_ch: 3
67
+ ch: 128
68
+ ch_mult:
69
+ - 1
70
+ - 2
71
+ - 4
72
+ - 4
73
+ num_res_blocks: 2
74
+ attn_resolutions: []
75
+ dropout: 0.0
76
+ lossconfig:
77
+ target: torch.nn.Identity
78
+
79
+ cond_stage_config:
80
+ target: lvdm.modules.encoders.condition.FrozenOpenCLIPEmbedder
81
+ params:
82
+ freeze: true
83
+ layer: "penultimate"
84
+
85
+ img_cond_stage_config:
86
+ target: lvdm.modules.encoders.condition.FrozenOpenCLIPImageEmbedderV2
87
+ params:
88
+ freeze: true
89
+
90
+ image_proj_stage_config:
91
+ target: lvdm.modules.encoders.resampler.Resampler
92
+ params:
93
+ dim: 1024
94
+ depth: 4
95
+ dim_head: 64
96
+ heads: 12
97
+ num_queries: 16
98
+ embedding_dim: 1280
99
+ output_dim: 1024
100
+ ff_mult: 4
101
+ video_length: 16
102
+
gradio_cached_examples/26/Generated Video/202047a2f8d639bc6ddd/rotating_view.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9a4b9fce0fa279ebd969d0ad94cc2386c78bf814a2e723845c8e1bb1c08ada09
3
+ size 1414627
gradio_cached_examples/26/Generated Video/442dbae67917d74c14e0/man_walking.mp4 ADDED
Binary file (775 kB). View file
 
gradio_cached_examples/26/Generated Video/652335bd60f11038e8c6/a_smiling_girl.mp4 ADDED
Binary file (542 kB). View file
 
gradio_cached_examples/26/log.csv ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Generated Video,flag,username,timestamp
2
+ "{""video"":{""path"":""gradio_cached_examples/26/Generated Video/652335bd60f11038e8c6/a_smiling_girl.mp4"",""url"":null,""size"":null,""orig_name"":""a_smiling_girl.mp4"",""mime_type"":null},""subtitles"":null}",,,2024-03-16 02:07:52.181227
3
+ "{""video"":{""path"":""gradio_cached_examples/26/Generated Video/202047a2f8d639bc6ddd/rotating_view.mp4"",""url"":null,""size"":null,""orig_name"":""rotating_view.mp4"",""mime_type"":null},""subtitles"":null}",,,2024-03-16 02:08:17.956049
4
+ "{""video"":{""path"":""gradio_cached_examples/26/Generated Video/442dbae67917d74c14e0/man_walking.mp4"",""url"":null,""size"":null,""orig_name"":""man_walking.mp4"",""mime_type"":null},""subtitles"":null}",,,2024-03-16 02:08:43.575699
gradio_cached_examples/48/Generated Video/1a3cd783a760d32f38fc/a_beach_with_waves_and_clouds_at_sunset.mp4 ADDED
Binary file (710 kB). View file
 
gradio_cached_examples/48/Generated Video/27cf1c87f5065d38aa73/flowers_swaying_in_the_wind.mp4 ADDED
Binary file (637 kB). View file
 
gradio_cached_examples/48/Generated Video/fd8df1a765d061e9a7c8/clothes_swaying_in_the_wind.mp4 ADDED
Binary file (622 kB). View file
 
gradio_cached_examples/48/log.csv ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Generated Video,flag,username,timestamp
2
+ "{""video"":{""path"":""gradio_cached_examples/48/Generated Video/1a3cd783a760d32f38fc/a_beach_with_waves_and_clouds_at_sunset.mp4"",""url"":null,""size"":null,""orig_name"":""a_beach_with_waves_and_clouds_at_sunset.mp4"",""mime_type"":null},""subtitles"":null}",,,2024-03-16 02:09:09.688874
3
+ "{""video"":{""path"":""gradio_cached_examples/48/Generated Video/fd8df1a765d061e9a7c8/clothes_swaying_in_the_wind.mp4"",""url"":null,""size"":null,""orig_name"":""clothes_swaying_in_the_wind.mp4"",""mime_type"":null},""subtitles"":null}",,,2024-03-16 02:09:35.985358
4
+ "{""video"":{""path"":""gradio_cached_examples/48/Generated Video/27cf1c87f5065d38aa73/flowers_swaying_in_the_wind.mp4"",""url"":null,""size"":null,""orig_name"":""flowers_swaying_in_the_wind.mp4"",""mime_type"":null},""subtitles"":null}",,,2024-03-16 02:10:02.367606
lvdm/__pycache__/basics.cpython-39.pyc ADDED
Binary file (3.28 kB). View file
 
lvdm/__pycache__/common.cpython-39.pyc ADDED
Binary file (4.57 kB). View file
 
lvdm/__pycache__/distributions.cpython-39.pyc ADDED
Binary file (3.84 kB). View file
 
lvdm/__pycache__/ema.cpython-39.pyc ADDED
Binary file (3.03 kB). View file
 
lvdm/basics.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # adopted from
2
+ # https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
3
+ # and
4
+ # https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
5
+ # and
6
+ # https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
7
+ #
8
+ # thanks!
9
+
10
+ import torch.nn as nn
11
+ from utils.utils import instantiate_from_config
12
+
13
+
14
+ def disabled_train(self, mode=True):
15
+ """Overwrite model.train with this function to make sure train/eval mode
16
+ does not change anymore."""
17
+ return self
18
+
19
+ def zero_module(module):
20
+ """
21
+ Zero out the parameters of a module and return it.
22
+ """
23
+ for p in module.parameters():
24
+ p.detach().zero_()
25
+ return module
26
+
27
+ def scale_module(module, scale):
28
+ """
29
+ Scale the parameters of a module and return it.
30
+ """
31
+ for p in module.parameters():
32
+ p.detach().mul_(scale)
33
+ return module
34
+
35
+
36
+ def conv_nd(dims, *args, **kwargs):
37
+ """
38
+ Create a 1D, 2D, or 3D convolution module.
39
+ """
40
+ if dims == 1:
41
+ return nn.Conv1d(*args, **kwargs)
42
+ elif dims == 2:
43
+ return nn.Conv2d(*args, **kwargs)
44
+ elif dims == 3:
45
+ return nn.Conv3d(*args, **kwargs)
46
+ raise ValueError(f"unsupported dimensions: {dims}")
47
+
48
+
49
+ def linear(*args, **kwargs):
50
+ """
51
+ Create a linear module.
52
+ """
53
+ return nn.Linear(*args, **kwargs)
54
+
55
+
56
+ def avg_pool_nd(dims, *args, **kwargs):
57
+ """
58
+ Create a 1D, 2D, or 3D average pooling module.
59
+ """
60
+ if dims == 1:
61
+ return nn.AvgPool1d(*args, **kwargs)
62
+ elif dims == 2:
63
+ return nn.AvgPool2d(*args, **kwargs)
64
+ elif dims == 3:
65
+ return nn.AvgPool3d(*args, **kwargs)
66
+ raise ValueError(f"unsupported dimensions: {dims}")
67
+
68
+
69
+ def nonlinearity(type='silu'):
70
+ if type == 'silu':
71
+ return nn.SiLU()
72
+ elif type == 'leaky_relu':
73
+ return nn.LeakyReLU()
74
+
75
+
76
+ class GroupNormSpecific(nn.GroupNorm):
77
+ def forward(self, x):
78
+ return super().forward(x.float()).type(x.dtype)
79
+
80
+
81
+ def normalization(channels, num_groups=32):
82
+ """
83
+ Make a standard normalization layer.
84
+ :param channels: number of input channels.
85
+ :return: an nn.Module for normalization.
86
+ """
87
+ return GroupNormSpecific(num_groups, channels)
88
+
89
+
90
+ class HybridConditioner(nn.Module):
91
+
92
+ def __init__(self, c_concat_config, c_crossattn_config):
93
+ super().__init__()
94
+ self.concat_conditioner = instantiate_from_config(c_concat_config)
95
+ self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)
96
+
97
+ def forward(self, c_concat, c_crossattn):
98
+ c_concat = self.concat_conditioner(c_concat)
99
+ c_crossattn = self.crossattn_conditioner(c_crossattn)
100
+ return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]}
lvdm/common.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from inspect import isfunction
3
+ import torch
4
+ from torch import nn
5
+ import torch.distributed as dist
6
+
7
+
8
+ def gather_data(data, return_np=True):
9
+ ''' gather data from multiple processes to one list '''
10
+ data_list = [torch.zeros_like(data) for _ in range(dist.get_world_size())]
11
+ dist.all_gather(data_list, data) # gather not supported with NCCL
12
+ if return_np:
13
+ data_list = [data.cpu().numpy() for data in data_list]
14
+ return data_list
15
+
16
+ def autocast(f):
17
+ def do_autocast(*args, **kwargs):
18
+ with torch.cuda.amp.autocast(enabled=True,
19
+ dtype=torch.get_autocast_gpu_dtype(),
20
+ cache_enabled=torch.is_autocast_cache_enabled()):
21
+ return f(*args, **kwargs)
22
+ return do_autocast
23
+
24
+
25
+ def extract_into_tensor(a, t, x_shape):
26
+ b, *_ = t.shape
27
+ out = a.gather(-1, t)
28
+ return out.reshape(b, *((1,) * (len(x_shape) - 1)))
29
+
30
+
31
+ def noise_like(shape, device, repeat=False):
32
+ repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
33
+ noise = lambda: torch.randn(shape, device=device)
34
+ return repeat_noise() if repeat else noise()
35
+
36
+
37
+ def default(val, d):
38
+ if exists(val):
39
+ return val
40
+ return d() if isfunction(d) else d
41
+
42
+ def exists(val):
43
+ return val is not None
44
+
45
+ def identity(*args, **kwargs):
46
+ return nn.Identity()
47
+
48
+ def uniq(arr):
49
+ return{el: True for el in arr}.keys()
50
+
51
+ def mean_flat(tensor):
52
+ """
53
+ Take the mean over all non-batch dimensions.
54
+ """
55
+ return tensor.mean(dim=list(range(1, len(tensor.shape))))
56
+
57
+ def ismap(x):
58
+ if not isinstance(x, torch.Tensor):
59
+ return False
60
+ return (len(x.shape) == 4) and (x.shape[1] > 3)
61
+
62
+ def isimage(x):
63
+ if not isinstance(x,torch.Tensor):
64
+ return False
65
+ return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)
66
+
67
+ def max_neg_value(t):
68
+ return -torch.finfo(t.dtype).max
69
+
70
+ def shape_to_str(x):
71
+ shape_str = "x".join([str(x) for x in x.shape])
72
+ return shape_str
73
+
74
+ def init_(tensor):
75
+ dim = tensor.shape[-1]
76
+ std = 1 / math.sqrt(dim)
77
+ tensor.uniform_(-std, std)
78
+ return tensor
79
+
80
+ ckpt = torch.utils.checkpoint.checkpoint
81
+ def checkpoint(func, inputs, params, flag):
82
+ """
83
+ Evaluate a function without caching intermediate activations, allowing for
84
+ reduced memory at the expense of extra compute in the backward pass.
85
+ :param func: the function to evaluate.
86
+ :param inputs: the argument sequence to pass to `func`.
87
+ :param params: a sequence of parameters `func` depends on but does not
88
+ explicitly take as arguments.
89
+ :param flag: if False, disable gradient checkpointing.
90
+ """
91
+ if flag:
92
+ return ckpt(func, *inputs, use_reentrant=False)
93
+ else:
94
+ return func(*inputs)
lvdm/distributions.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import numpy as np
3
+
4
+
5
+ class AbstractDistribution:
6
+ def sample(self):
7
+ raise NotImplementedError()
8
+
9
+ def mode(self):
10
+ raise NotImplementedError()
11
+
12
+
13
+ class DiracDistribution(AbstractDistribution):
14
+ def __init__(self, value):
15
+ self.value = value
16
+
17
+ def sample(self):
18
+ return self.value
19
+
20
+ def mode(self):
21
+ return self.value
22
+
23
+
24
+ class DiagonalGaussianDistribution(object):
25
+ def __init__(self, parameters, deterministic=False):
26
+ self.parameters = parameters
27
+ self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
28
+ self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
29
+ self.deterministic = deterministic
30
+ self.std = torch.exp(0.5 * self.logvar)
31
+ self.var = torch.exp(self.logvar)
32
+ if self.deterministic:
33
+ self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)
34
+
35
+ def sample(self, noise=None):
36
+ if noise is None:
37
+ noise = torch.randn(self.mean.shape)
38
+
39
+ x = self.mean + self.std * noise.to(device=self.parameters.device)
40
+ return x
41
+
42
+ def kl(self, other=None):
43
+ if self.deterministic:
44
+ return torch.Tensor([0.])
45
+ else:
46
+ if other is None:
47
+ return 0.5 * torch.sum(torch.pow(self.mean, 2)
48
+ + self.var - 1.0 - self.logvar,
49
+ dim=[1, 2, 3])
50
+ else:
51
+ return 0.5 * torch.sum(
52
+ torch.pow(self.mean - other.mean, 2) / other.var
53
+ + self.var / other.var - 1.0 - self.logvar + other.logvar,
54
+ dim=[1, 2, 3])
55
+
56
+ def nll(self, sample, dims=[1,2,3]):
57
+ if self.deterministic:
58
+ return torch.Tensor([0.])
59
+ logtwopi = np.log(2.0 * np.pi)
60
+ return 0.5 * torch.sum(
61
+ logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
62
+ dim=dims)
63
+
64
+ def mode(self):
65
+ return self.mean
66
+
67
+
68
+ def normal_kl(mean1, logvar1, mean2, logvar2):
69
+ """
70
+ source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
71
+ Compute the KL divergence between two gaussians.
72
+ Shapes are automatically broadcasted, so batches can be compared to
73
+ scalars, among other use cases.
74
+ """
75
+ tensor = None
76
+ for obj in (mean1, logvar1, mean2, logvar2):
77
+ if isinstance(obj, torch.Tensor):
78
+ tensor = obj
79
+ break
80
+ assert tensor is not None, "at least one argument must be a Tensor"
81
+
82
+ # Force variances to be Tensors. Broadcasting helps convert scalars to
83
+ # Tensors, but it does not work for torch.exp().
84
+ logvar1, logvar2 = [
85
+ x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)
86
+ for x in (logvar1, logvar2)
87
+ ]
88
+
89
+ return 0.5 * (
90
+ -1.0
91
+ + logvar2
92
+ - logvar1
93
+ + torch.exp(logvar1 - logvar2)
94
+ + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
95
+ )
lvdm/ema.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch import nn
3
+
4
+
5
+ class LitEma(nn.Module):
6
+ def __init__(self, model, decay=0.9999, use_num_upates=True):
7
+ super().__init__()
8
+ if decay < 0.0 or decay > 1.0:
9
+ raise ValueError('Decay must be between 0 and 1')
10
+
11
+ self.m_name2s_name = {}
12
+ self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
13
+ self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates
14
+ else torch.tensor(-1,dtype=torch.int))
15
+
16
+ for name, p in model.named_parameters():
17
+ if p.requires_grad:
18
+ #remove as '.'-character is not allowed in buffers
19
+ s_name = name.replace('.','')
20
+ self.m_name2s_name.update({name:s_name})
21
+ self.register_buffer(s_name,p.clone().detach().data)
22
+
23
+ self.collected_params = []
24
+
25
+ def forward(self,model):
26
+ decay = self.decay
27
+
28
+ if self.num_updates >= 0:
29
+ self.num_updates += 1
30
+ decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))
31
+
32
+ one_minus_decay = 1.0 - decay
33
+
34
+ with torch.no_grad():
35
+ m_param = dict(model.named_parameters())
36
+ shadow_params = dict(self.named_buffers())
37
+
38
+ for key in m_param:
39
+ if m_param[key].requires_grad:
40
+ sname = self.m_name2s_name[key]
41
+ shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
42
+ shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
43
+ else:
44
+ assert not key in self.m_name2s_name
45
+
46
+ def copy_to(self, model):
47
+ m_param = dict(model.named_parameters())
48
+ shadow_params = dict(self.named_buffers())
49
+ for key in m_param:
50
+ if m_param[key].requires_grad:
51
+ m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
52
+ else:
53
+ assert not key in self.m_name2s_name
54
+
55
+ def store(self, parameters):
56
+ """
57
+ Save the current parameters for restoring later.
58
+ Args:
59
+ parameters: Iterable of `torch.nn.Parameter`; the parameters to be
60
+ temporarily stored.
61
+ """
62
+ self.collected_params = [param.clone() for param in parameters]
63
+
64
+ def restore(self, parameters):
65
+ """
66
+ Restore the parameters stored with the `store` method.
67
+ Useful to validate the model with EMA parameters without affecting the
68
+ original optimization process. Store the parameters before the
69
+ `copy_to` method. After validation (or model saving), use this to
70
+ restore the former parameters.
71
+ Args:
72
+ parameters: Iterable of `torch.nn.Parameter`; the parameters to be
73
+ updated with the stored parameters.
74
+ """
75
+ for c_param, param in zip(self.collected_params, parameters):
76
+ param.data.copy_(c_param.data)
lvdm/models/.DS_Store ADDED
Binary file (6.15 kB). View file
 
lvdm/models/__pycache__/autoencoder.cpython-39.pyc ADDED
Binary file (7.25 kB). View file
 
lvdm/models/__pycache__/ddpm3d.cpython-39.pyc ADDED
Binary file (22.3 kB). View file
 
lvdm/models/__pycache__/utils_diffusion.cpython-39.pyc ADDED
Binary file (5.36 kB). View file
 
lvdm/models/autoencoder.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from contextlib import contextmanager
3
+ import torch
4
+ import numpy as np
5
+ from einops import rearrange
6
+ import torch.nn.functional as F
7
+ import pytorch_lightning as pl
8
+ from lvdm.modules.networks.ae_modules import Encoder, Decoder
9
+ from lvdm.distributions import DiagonalGaussianDistribution
10
+ from utils.utils import instantiate_from_config
11
+
12
+
13
+ class AutoencoderKL(pl.LightningModule):
14
+ def __init__(self,
15
+ ddconfig,
16
+ lossconfig,
17
+ embed_dim,
18
+ ckpt_path=None,
19
+ ignore_keys=[],
20
+ image_key="image",
21
+ colorize_nlabels=None,
22
+ monitor=None,
23
+ test=False,
24
+ logdir=None,
25
+ input_dim=4,
26
+ test_args=None,
27
+ ):
28
+ super().__init__()
29
+ self.image_key = image_key
30
+ self.encoder = Encoder(**ddconfig)
31
+ self.decoder = Decoder(**ddconfig)
32
+ self.loss = instantiate_from_config(lossconfig)
33
+ assert ddconfig["double_z"]
34
+ self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
35
+ self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
36
+ self.embed_dim = embed_dim
37
+ self.input_dim = input_dim
38
+ self.test = test
39
+ self.test_args = test_args
40
+ self.logdir = logdir
41
+ if colorize_nlabels is not None:
42
+ assert type(colorize_nlabels)==int
43
+ self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
44
+ if monitor is not None:
45
+ self.monitor = monitor
46
+ if ckpt_path is not None:
47
+ self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
48
+ if self.test:
49
+ self.init_test()
50
+
51
+ def init_test(self,):
52
+ self.test = True
53
+ save_dir = os.path.join(self.logdir, "test")
54
+ if 'ckpt' in self.test_args:
55
+ ckpt_name = os.path.basename(self.test_args.ckpt).split('.ckpt')[0] + f'_epoch{self._cur_epoch}'
56
+ self.root = os.path.join(save_dir, ckpt_name)
57
+ else:
58
+ self.root = save_dir
59
+ if 'test_subdir' in self.test_args:
60
+ self.root = os.path.join(save_dir, self.test_args.test_subdir)
61
+
62
+ self.root_zs = os.path.join(self.root, "zs")
63
+ self.root_dec = os.path.join(self.root, "reconstructions")
64
+ self.root_inputs = os.path.join(self.root, "inputs")
65
+ os.makedirs(self.root, exist_ok=True)
66
+
67
+ if self.test_args.save_z:
68
+ os.makedirs(self.root_zs, exist_ok=True)
69
+ if self.test_args.save_reconstruction:
70
+ os.makedirs(self.root_dec, exist_ok=True)
71
+ if self.test_args.save_input:
72
+ os.makedirs(self.root_inputs, exist_ok=True)
73
+ assert(self.test_args is not None)
74
+ self.test_maximum = getattr(self.test_args, 'test_maximum', None)
75
+ self.count = 0
76
+ self.eval_metrics = {}
77
+ self.decodes = []
78
+ self.save_decode_samples = 2048
79
+
80
+ def init_from_ckpt(self, path, ignore_keys=list()):
81
+ sd = torch.load(path, map_location="cpu")
82
+ try:
83
+ self._cur_epoch = sd['epoch']
84
+ sd = sd["state_dict"]
85
+ except:
86
+ self._cur_epoch = 'null'
87
+ keys = list(sd.keys())
88
+ for k in keys:
89
+ for ik in ignore_keys:
90
+ if k.startswith(ik):
91
+ print("Deleting key {} from state_dict.".format(k))
92
+ del sd[k]
93
+ self.load_state_dict(sd, strict=False)
94
+ # self.load_state_dict(sd, strict=True)
95
+ print(f"Restored from {path}")
96
+
97
+ def encode(self, x, **kwargs):
98
+
99
+ h = self.encoder(x)
100
+ moments = self.quant_conv(h)
101
+ posterior = DiagonalGaussianDistribution(moments)
102
+ return posterior
103
+
104
+ def decode(self, z, **kwargs):
105
+ z = self.post_quant_conv(z)
106
+ dec = self.decoder(z)
107
+ return dec
108
+
109
+ def forward(self, input, sample_posterior=True):
110
+ posterior = self.encode(input)
111
+ if sample_posterior:
112
+ z = posterior.sample()
113
+ else:
114
+ z = posterior.mode()
115
+ dec = self.decode(z)
116
+ return dec, posterior
117
+
118
+ def get_input(self, batch, k):
119
+ x = batch[k]
120
+ if x.dim() == 5 and self.input_dim == 4:
121
+ b,c,t,h,w = x.shape
122
+ self.b = b
123
+ self.t = t
124
+ x = rearrange(x, 'b c t h w -> (b t) c h w')
125
+
126
+ return x
127
+
128
+ def training_step(self, batch, batch_idx, optimizer_idx):
129
+ inputs = self.get_input(batch, self.image_key)
130
+ reconstructions, posterior = self(inputs)
131
+
132
+ if optimizer_idx == 0:
133
+ # train encoder+decoder+logvar
134
+ aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
135
+ last_layer=self.get_last_layer(), split="train")
136
+ self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
137
+ self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
138
+ return aeloss
139
+
140
+ if optimizer_idx == 1:
141
+ # train the discriminator
142
+ discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
143
+ last_layer=self.get_last_layer(), split="train")
144
+
145
+ self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
146
+ self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
147
+ return discloss
148
+
149
+ def validation_step(self, batch, batch_idx):
150
+ inputs = self.get_input(batch, self.image_key)
151
+ reconstructions, posterior = self(inputs)
152
+ aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
153
+ last_layer=self.get_last_layer(), split="val")
154
+
155
+ discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
156
+ last_layer=self.get_last_layer(), split="val")
157
+
158
+ self.log("val/rec_loss", log_dict_ae["val/rec_loss"])
159
+ self.log_dict(log_dict_ae)
160
+ self.log_dict(log_dict_disc)
161
+ return self.log_dict
162
+
163
+ def configure_optimizers(self):
164
+ lr = self.learning_rate
165
+ opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
166
+ list(self.decoder.parameters())+
167
+ list(self.quant_conv.parameters())+
168
+ list(self.post_quant_conv.parameters()),
169
+ lr=lr, betas=(0.5, 0.9))
170
+ opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
171
+ lr=lr, betas=(0.5, 0.9))
172
+ return [opt_ae, opt_disc], []
173
+
174
+ def get_last_layer(self):
175
+ return self.decoder.conv_out.weight
176
+
177
+ @torch.no_grad()
178
+ def log_images(self, batch, only_inputs=False, **kwargs):
179
+ log = dict()
180
+ x = self.get_input(batch, self.image_key)
181
+ x = x.to(self.device)
182
+ if not only_inputs:
183
+ xrec, posterior = self(x)
184
+ if x.shape[1] > 3:
185
+ # colorize with random projection
186
+ assert xrec.shape[1] > 3
187
+ x = self.to_rgb(x)
188
+ xrec = self.to_rgb(xrec)
189
+ log["samples"] = self.decode(torch.randn_like(posterior.sample()))
190
+ log["reconstructions"] = xrec
191
+ log["inputs"] = x
192
+ return log
193
+
194
+ def to_rgb(self, x):
195
+ assert self.image_key == "segmentation"
196
+ if not hasattr(self, "colorize"):
197
+ self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
198
+ x = F.conv2d(x, weight=self.colorize)
199
+ x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
200
+ return x
201
+
202
+ class IdentityFirstStage(torch.nn.Module):
203
+ def __init__(self, *args, vq_interface=False, **kwargs):
204
+ self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff
205
+ super().__init__()
206
+
207
+ def encode(self, x, *args, **kwargs):
208
+ return x
209
+
210
+ def decode(self, x, *args, **kwargs):
211
+ return x
212
+
213
+ def quantize(self, x, *args, **kwargs):
214
+ if self.vq_interface:
215
+ return x, None, [None, None, None]
216
+ return x
217
+
218
+ def forward(self, x, *args, **kwargs):
219
+ return x
lvdm/models/ddpm3d.py ADDED
@@ -0,0 +1,762 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ wild mixture of
3
+ https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
4
+ https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
5
+ https://github.com/CompVis/taming-transformers
6
+ -- merci
7
+ """
8
+
9
+ from functools import partial
10
+ from contextlib import contextmanager
11
+ import numpy as np
12
+ from tqdm import tqdm
13
+ from einops import rearrange, repeat
14
+ import logging
15
+ mainlogger = logging.getLogger('mainlogger')
16
+ import torch
17
+ import torch.nn as nn
18
+ from torchvision.utils import make_grid
19
+ import pytorch_lightning as pl
20
+ from utils.utils import instantiate_from_config
21
+ from lvdm.ema import LitEma
22
+ from lvdm.distributions import DiagonalGaussianDistribution
23
+ from lvdm.models.utils_diffusion import make_beta_schedule, rescale_zero_terminal_snr
24
+ from lvdm.basics import disabled_train
25
+ from lvdm.common import (
26
+ extract_into_tensor,
27
+ noise_like,
28
+ exists,
29
+ default
30
+ )
31
+
32
+ __conditioning_keys__ = {'concat': 'c_concat',
33
+ 'crossattn': 'c_crossattn',
34
+ 'adm': 'y'}
35
+
36
+ class DDPM(pl.LightningModule):
37
+ # classic DDPM with Gaussian diffusion, in image space
38
+ def __init__(self,
39
+ unet_config,
40
+ timesteps=1000,
41
+ beta_schedule="linear",
42
+ loss_type="l2",
43
+ ckpt_path=None,
44
+ ignore_keys=[],
45
+ load_only_unet=False,
46
+ monitor=None,
47
+ use_ema=True,
48
+ first_stage_key="image",
49
+ image_size=256,
50
+ channels=3,
51
+ log_every_t=100,
52
+ clip_denoised=True,
53
+ linear_start=1e-4,
54
+ linear_end=2e-2,
55
+ cosine_s=8e-3,
56
+ given_betas=None,
57
+ original_elbo_weight=0.,
58
+ v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
59
+ l_simple_weight=1.,
60
+ conditioning_key=None,
61
+ parameterization="eps", # all assuming fixed variance schedules
62
+ scheduler_config=None,
63
+ use_positional_encodings=False,
64
+ learn_logvar=False,
65
+ logvar_init=0.,
66
+ rescale_betas_zero_snr=False,
67
+ ):
68
+ super().__init__()
69
+ assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
70
+ self.parameterization = parameterization
71
+ mainlogger.info(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
72
+ self.cond_stage_model = None
73
+ self.clip_denoised = clip_denoised
74
+ self.log_every_t = log_every_t
75
+ self.first_stage_key = first_stage_key
76
+ self.channels = channels
77
+ self.temporal_length = unet_config.params.temporal_length
78
+ self.image_size = image_size # try conv?
79
+ if isinstance(self.image_size, int):
80
+ self.image_size = [self.image_size, self.image_size]
81
+ self.use_positional_encodings = use_positional_encodings
82
+ self.model = DiffusionWrapper(unet_config, conditioning_key)
83
+ #count_params(self.model, verbose=True)
84
+ self.use_ema = use_ema
85
+ self.rescale_betas_zero_snr = rescale_betas_zero_snr
86
+ if self.use_ema:
87
+ self.model_ema = LitEma(self.model)
88
+ mainlogger.info(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
89
+
90
+ self.use_scheduler = scheduler_config is not None
91
+ if self.use_scheduler:
92
+ self.scheduler_config = scheduler_config
93
+
94
+ self.v_posterior = v_posterior
95
+ self.original_elbo_weight = original_elbo_weight
96
+ self.l_simple_weight = l_simple_weight
97
+
98
+ if monitor is not None:
99
+ self.monitor = monitor
100
+ if ckpt_path is not None:
101
+ self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
102
+
103
+ self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
104
+ linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
105
+
106
+ self.loss_type = loss_type
107
+
108
+ self.learn_logvar = learn_logvar
109
+ self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
110
+ if self.learn_logvar:
111
+ self.logvar = nn.Parameter(self.logvar, requires_grad=True)
112
+
113
+ def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
114
+ linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
115
+ if exists(given_betas):
116
+ betas = given_betas
117
+ else:
118
+ betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
119
+ cosine_s=cosine_s)
120
+ if self.rescale_betas_zero_snr:
121
+ betas = rescale_zero_terminal_snr(betas)
122
+
123
+ alphas = 1. - betas
124
+ alphas_cumprod = np.cumprod(alphas, axis=0)
125
+ alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
126
+
127
+ timesteps, = betas.shape
128
+ self.num_timesteps = int(timesteps)
129
+ self.linear_start = linear_start
130
+ self.linear_end = linear_end
131
+ assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
132
+
133
+ to_torch = partial(torch.tensor, dtype=torch.float32)
134
+
135
+ self.register_buffer('betas', to_torch(betas))
136
+ self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
137
+ self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
138
+
139
+ # calculations for diffusion q(x_t | x_{t-1}) and others
140
+ self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
141
+ self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
142
+ self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
143
+
144
+ if self.parameterization != 'v':
145
+ self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
146
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
147
+ else:
148
+ self.register_buffer('sqrt_recip_alphas_cumprod', torch.zeros_like(to_torch(alphas_cumprod)))
149
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', torch.zeros_like(to_torch(alphas_cumprod)))
150
+
151
+ # calculations for posterior q(x_{t-1} | x_t, x_0)
152
+ posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
153
+ 1. - alphas_cumprod) + self.v_posterior * betas
154
+ # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
155
+ self.register_buffer('posterior_variance', to_torch(posterior_variance))
156
+ # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
157
+ self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
158
+ self.register_buffer('posterior_mean_coef1', to_torch(
159
+ betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
160
+ self.register_buffer('posterior_mean_coef2', to_torch(
161
+ (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
162
+
163
+ if self.parameterization == "eps":
164
+ lvlb_weights = self.betas ** 2 / (
165
+ 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
166
+ elif self.parameterization == "x0":
167
+ lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
168
+ elif self.parameterization == "v":
169
+ lvlb_weights = torch.ones_like(self.betas ** 2 / (
170
+ 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)))
171
+ else:
172
+ raise NotImplementedError("mu not supported")
173
+ # TODO how to choose this term
174
+ lvlb_weights[0] = lvlb_weights[1]
175
+ self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
176
+ assert not torch.isnan(self.lvlb_weights).all()
177
+
178
+ @contextmanager
179
+ def ema_scope(self, context=None):
180
+ if self.use_ema:
181
+ self.model_ema.store(self.model.parameters())
182
+ self.model_ema.copy_to(self.model)
183
+ if context is not None:
184
+ mainlogger.info(f"{context}: Switched to EMA weights")
185
+ try:
186
+ yield None
187
+ finally:
188
+ if self.use_ema:
189
+ self.model_ema.restore(self.model.parameters())
190
+ if context is not None:
191
+ mainlogger.info(f"{context}: Restored training weights")
192
+
193
+ def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
194
+ sd = torch.load(path, map_location="cpu")
195
+ if "state_dict" in list(sd.keys()):
196
+ sd = sd["state_dict"]
197
+ keys = list(sd.keys())
198
+ for k in keys:
199
+ for ik in ignore_keys:
200
+ if k.startswith(ik):
201
+ mainlogger.info("Deleting key {} from state_dict.".format(k))
202
+ del sd[k]
203
+ missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
204
+ sd, strict=False)
205
+ mainlogger.info(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
206
+ if len(missing) > 0:
207
+ mainlogger.info(f"Missing Keys: {missing}")
208
+ if len(unexpected) > 0:
209
+ mainlogger.info(f"Unexpected Keys: {unexpected}")
210
+
211
+ def q_mean_variance(self, x_start, t):
212
+ """
213
+ Get the distribution q(x_t | x_0).
214
+ :param x_start: the [N x C x ...] tensor of noiseless inputs.
215
+ :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
216
+ :return: A tuple (mean, variance, log_variance), all of x_start's shape.
217
+ """
218
+ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
219
+ variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
220
+ log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
221
+ return mean, variance, log_variance
222
+
223
+ def predict_start_from_noise(self, x_t, t, noise):
224
+ return (
225
+ extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
226
+ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
227
+ )
228
+
229
+ def predict_start_from_z_and_v(self, x_t, t, v):
230
+ # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
231
+ # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
232
+ return (
233
+ extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
234
+ extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
235
+ )
236
+
237
+ def predict_eps_from_z_and_v(self, x_t, t, v):
238
+ return (
239
+ extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v +
240
+ extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t
241
+ )
242
+
243
+ def q_posterior(self, x_start, x_t, t):
244
+ posterior_mean = (
245
+ extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
246
+ extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
247
+ )
248
+ posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
249
+ posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
250
+ return posterior_mean, posterior_variance, posterior_log_variance_clipped
251
+
252
+ def p_mean_variance(self, x, t, clip_denoised: bool):
253
+ model_out = self.model(x, t)
254
+ if self.parameterization == "eps":
255
+ x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
256
+ elif self.parameterization == "x0":
257
+ x_recon = model_out
258
+ if clip_denoised:
259
+ x_recon.clamp_(-1., 1.)
260
+
261
+ model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
262
+ return model_mean, posterior_variance, posterior_log_variance
263
+
264
+ @torch.no_grad()
265
+ def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
266
+ b, *_, device = *x.shape, x.device
267
+ model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
268
+ noise = noise_like(x.shape, device, repeat_noise)
269
+ # no noise when t == 0
270
+ nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
271
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
272
+
273
+ @torch.no_grad()
274
+ def p_sample_loop(self, shape, return_intermediates=False):
275
+ device = self.betas.device
276
+ b = shape[0]
277
+ img = torch.randn(shape, device=device)
278
+ intermediates = [img]
279
+ for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
280
+ img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
281
+ clip_denoised=self.clip_denoised)
282
+ if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
283
+ intermediates.append(img)
284
+ if return_intermediates:
285
+ return img, intermediates
286
+ return img
287
+
288
+ @torch.no_grad()
289
+ def sample(self, batch_size=16, return_intermediates=False):
290
+ image_size = self.image_size
291
+ channels = self.channels
292
+ return self.p_sample_loop((batch_size, channels, image_size, image_size),
293
+ return_intermediates=return_intermediates)
294
+
295
+ def q_sample(self, x_start, t, noise=None):
296
+ noise = default(noise, lambda: torch.randn_like(x_start))
297
+ return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
298
+ extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
299
+
300
+ def get_v(self, x, noise, t):
301
+ return (
302
+ extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise -
303
+ extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x
304
+ )
305
+
306
+ def get_input(self, batch, k):
307
+ x = batch[k]
308
+ x = x.to(memory_format=torch.contiguous_format).float()
309
+ return x
310
+
311
+ def _get_rows_from_list(self, samples):
312
+ n_imgs_per_row = len(samples)
313
+ denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
314
+ denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
315
+ denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
316
+ return denoise_grid
317
+
318
+ @torch.no_grad()
319
+ def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
320
+ log = dict()
321
+ x = self.get_input(batch, self.first_stage_key)
322
+ N = min(x.shape[0], N)
323
+ n_row = min(x.shape[0], n_row)
324
+ x = x.to(self.device)[:N]
325
+ log["inputs"] = x
326
+
327
+ # get diffusion row
328
+ diffusion_row = list()
329
+ x_start = x[:n_row]
330
+
331
+ for t in range(self.num_timesteps):
332
+ if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
333
+ t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
334
+ t = t.to(self.device).long()
335
+ noise = torch.randn_like(x_start)
336
+ x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
337
+ diffusion_row.append(x_noisy)
338
+
339
+ log["diffusion_row"] = self._get_rows_from_list(diffusion_row)
340
+
341
+ if sample:
342
+ # get denoise row
343
+ with self.ema_scope("Plotting"):
344
+ samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)
345
+
346
+ log["samples"] = samples
347
+ log["denoise_row"] = self._get_rows_from_list(denoise_row)
348
+
349
+ if return_keys:
350
+ if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
351
+ return log
352
+ else:
353
+ return {key: log[key] for key in return_keys}
354
+ return log
355
+
356
+
357
+ class LatentDiffusion(DDPM):
358
+ """main class"""
359
+ def __init__(self,
360
+ first_stage_config,
361
+ cond_stage_config,
362
+ num_timesteps_cond=None,
363
+ cond_stage_key="caption",
364
+ cond_stage_trainable=False,
365
+ cond_stage_forward=None,
366
+ conditioning_key=None,
367
+ uncond_prob=0.2,
368
+ uncond_type="empty_seq",
369
+ scale_factor=1.0,
370
+ scale_by_std=False,
371
+ encoder_type="2d",
372
+ only_model=False,
373
+ noise_strength=0,
374
+ use_dynamic_rescale=False,
375
+ base_scale=0.7,
376
+ turning_step=400,
377
+ loop_video=False,
378
+ fps_condition_type='fs',
379
+ perframe_ae=False,
380
+ *args, **kwargs):
381
+ self.num_timesteps_cond = default(num_timesteps_cond, 1)
382
+ self.scale_by_std = scale_by_std
383
+ assert self.num_timesteps_cond <= kwargs['timesteps']
384
+ # for backwards compatibility after implementation of DiffusionWrapper
385
+ ckpt_path = kwargs.pop("ckpt_path", None)
386
+ ignore_keys = kwargs.pop("ignore_keys", [])
387
+ conditioning_key = default(conditioning_key, 'crossattn')
388
+ super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
389
+
390
+ self.cond_stage_trainable = cond_stage_trainable
391
+ self.cond_stage_key = cond_stage_key
392
+ self.noise_strength = noise_strength
393
+ self.use_dynamic_rescale = use_dynamic_rescale
394
+ self.loop_video = loop_video
395
+ self.fps_condition_type = fps_condition_type
396
+ self.perframe_ae = perframe_ae
397
+ try:
398
+ self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
399
+ except:
400
+ self.num_downs = 0
401
+ if not scale_by_std:
402
+ self.scale_factor = scale_factor
403
+ else:
404
+ self.register_buffer('scale_factor', torch.tensor(scale_factor))
405
+
406
+ if use_dynamic_rescale:
407
+ scale_arr1 = np.linspace(1.0, base_scale, turning_step)
408
+ scale_arr2 = np.full(self.num_timesteps, base_scale)
409
+ scale_arr = np.concatenate((scale_arr1, scale_arr2))
410
+ to_torch = partial(torch.tensor, dtype=torch.float32)
411
+ self.register_buffer('scale_arr', to_torch(scale_arr))
412
+
413
+ self.instantiate_first_stage(first_stage_config)
414
+ self.instantiate_cond_stage(cond_stage_config)
415
+ self.first_stage_config = first_stage_config
416
+ self.cond_stage_config = cond_stage_config
417
+ self.clip_denoised = False
418
+
419
+ self.cond_stage_forward = cond_stage_forward
420
+ self.encoder_type = encoder_type
421
+ assert(encoder_type in ["2d", "3d"])
422
+ self.uncond_prob = uncond_prob
423
+ self.classifier_free_guidance = True if uncond_prob > 0 else False
424
+ assert(uncond_type in ["zero_embed", "empty_seq"])
425
+ self.uncond_type = uncond_type
426
+
427
+ self.restarted_from_ckpt = False
428
+ if ckpt_path is not None:
429
+ self.init_from_ckpt(ckpt_path, ignore_keys, only_model=only_model)
430
+ self.restarted_from_ckpt = True
431
+
432
+
433
+ def make_cond_schedule(self, ):
434
+ self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
435
+ ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
436
+ self.cond_ids[:self.num_timesteps_cond] = ids
437
+
438
+ def instantiate_first_stage(self, config):
439
+ model = instantiate_from_config(config)
440
+ self.first_stage_model = model.eval()
441
+ self.first_stage_model.train = disabled_train
442
+ for param in self.first_stage_model.parameters():
443
+ param.requires_grad = False
444
+
445
+ def instantiate_cond_stage(self, config):
446
+ if not self.cond_stage_trainable:
447
+ model = instantiate_from_config(config)
448
+ self.cond_stage_model = model.eval()
449
+ self.cond_stage_model.train = disabled_train
450
+ for param in self.cond_stage_model.parameters():
451
+ param.requires_grad = False
452
+ else:
453
+ model = instantiate_from_config(config)
454
+ self.cond_stage_model = model
455
+
456
+ def get_learned_conditioning(self, c):
457
+ if self.cond_stage_forward is None:
458
+ if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
459
+ c = self.cond_stage_model.encode(c)
460
+ if isinstance(c, DiagonalGaussianDistribution):
461
+ c = c.mode()
462
+ else:
463
+ c = self.cond_stage_model(c)
464
+ else:
465
+ assert hasattr(self.cond_stage_model, self.cond_stage_forward)
466
+ c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
467
+ return c
468
+
469
+ def get_first_stage_encoding(self, encoder_posterior, noise=None):
470
+ if isinstance(encoder_posterior, DiagonalGaussianDistribution):
471
+ z = encoder_posterior.sample(noise=noise)
472
+ elif isinstance(encoder_posterior, torch.Tensor):
473
+ z = encoder_posterior
474
+ else:
475
+ raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
476
+ return self.scale_factor * z
477
+
478
+ @torch.no_grad()
479
+ def encode_first_stage(self, x):
480
+ if self.encoder_type == "2d" and x.dim() == 5:
481
+ b, _, t, _, _ = x.shape
482
+ x = rearrange(x, 'b c t h w -> (b t) c h w')
483
+ reshape_back = True
484
+ else:
485
+ reshape_back = False
486
+
487
+ ## consume more GPU memory but faster
488
+ if not self.perframe_ae:
489
+ encoder_posterior = self.first_stage_model.encode(x)
490
+ results = self.get_first_stage_encoding(encoder_posterior).detach()
491
+ else: ## consume less GPU memory but slower
492
+ results = []
493
+ for index in range(x.shape[0]):
494
+ frame_batch = self.first_stage_model.encode(x[index:index+1,:,:,:])
495
+ frame_result = self.get_first_stage_encoding(frame_batch).detach()
496
+ results.append(frame_result)
497
+ results = torch.cat(results, dim=0)
498
+
499
+ if reshape_back:
500
+ results = rearrange(results, '(b t) c h w -> b c t h w', b=b,t=t)
501
+
502
+ return results
503
+
504
+ def decode_core(self, z, **kwargs):
505
+ if self.encoder_type == "2d" and z.dim() == 5:
506
+ b, _, t, _, _ = z.shape
507
+ z = rearrange(z, 'b c t h w -> (b t) c h w')
508
+ reshape_back = True
509
+ else:
510
+ reshape_back = False
511
+
512
+ if not self.perframe_ae:
513
+ z = 1. / self.scale_factor * z
514
+ results = self.first_stage_model.decode(z, **kwargs)
515
+ else:
516
+ results = []
517
+ for index in range(z.shape[0]):
518
+ frame_z = 1. / self.scale_factor * z[index:index+1,:,:,:]
519
+ frame_result = self.first_stage_model.decode(frame_z, **kwargs)
520
+ results.append(frame_result)
521
+ results = torch.cat(results, dim=0)
522
+
523
+ if reshape_back:
524
+ results = rearrange(results, '(b t) c h w -> b c t h w', b=b,t=t)
525
+ return results
526
+
527
+ @torch.no_grad()
528
+ def decode_first_stage(self, z, **kwargs):
529
+ return self.decode_core(z, **kwargs)
530
+
531
+ # same as above but without decorator
532
+ def differentiable_decode_first_stage(self, z, **kwargs):
533
+ return self.decode_core(z, **kwargs)
534
+
535
+ def forward(self, x, c, **kwargs):
536
+ t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
537
+ if self.use_dynamic_rescale:
538
+ x = x * extract_into_tensor(self.scale_arr, t, x.shape)
539
+ return self.p_losses(x, c, t, **kwargs)
540
+
541
+ def apply_model(self, x_noisy, t, cond, **kwargs):
542
+ if isinstance(cond, dict):
543
+ # hybrid case: cond is expected to be a dict
544
+ pass
545
+ else:
546
+ if not isinstance(cond, list):
547
+ cond = [cond]
548
+ key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
549
+ cond = {key: cond}
550
+
551
+ x_recon = self.model(x_noisy, t, **cond, **kwargs)
552
+
553
+ if isinstance(x_recon, tuple):
554
+ return x_recon[0]
555
+ else:
556
+ return x_recon
557
+
558
+ def _get_denoise_row_from_list(self, samples, desc=''):
559
+ denoise_row = []
560
+ for zd in tqdm(samples, desc=desc):
561
+ denoise_row.append(self.decode_first_stage(zd.to(self.device)))
562
+ n_log_timesteps = len(denoise_row)
563
+
564
+ denoise_row = torch.stack(denoise_row) # n_log_timesteps, b, C, H, W
565
+
566
+ if denoise_row.dim() == 5:
567
+ denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
568
+ denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
569
+ denoise_grid = make_grid(denoise_grid, nrow=n_log_timesteps)
570
+ elif denoise_row.dim() == 6:
571
+ # video, grid_size=[n_log_timesteps*bs, t]
572
+ video_length = denoise_row.shape[3]
573
+ denoise_grid = rearrange(denoise_row, 'n b c t h w -> b n c t h w')
574
+ denoise_grid = rearrange(denoise_grid, 'b n c t h w -> (b n) c t h w')
575
+ denoise_grid = rearrange(denoise_grid, 'n c t h w -> (n t) c h w')
576
+ denoise_grid = make_grid(denoise_grid, nrow=video_length)
577
+ else:
578
+ raise ValueError
579
+
580
+ return denoise_grid
581
+
582
+
583
+ def p_mean_variance(self, x, c, t, clip_denoised: bool, return_x0=False, score_corrector=None, corrector_kwargs=None, **kwargs):
584
+ t_in = t
585
+ model_out = self.apply_model(x, t_in, c, **kwargs)
586
+
587
+ if score_corrector is not None:
588
+ assert self.parameterization == "eps"
589
+ model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
590
+
591
+ if self.parameterization == "eps":
592
+ x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
593
+ elif self.parameterization == "x0":
594
+ x_recon = model_out
595
+ else:
596
+ raise NotImplementedError()
597
+
598
+ if clip_denoised:
599
+ x_recon.clamp_(-1., 1.)
600
+
601
+ model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
602
+
603
+ if return_x0:
604
+ return model_mean, posterior_variance, posterior_log_variance, x_recon
605
+ else:
606
+ return model_mean, posterior_variance, posterior_log_variance
607
+
608
+ @torch.no_grad()
609
+ def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_x0=False, \
610
+ temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, **kwargs):
611
+ b, *_, device = *x.shape, x.device
612
+ outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_x0=return_x0, \
613
+ score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, **kwargs)
614
+ if return_x0:
615
+ model_mean, _, model_log_variance, x0 = outputs
616
+ else:
617
+ model_mean, _, model_log_variance = outputs
618
+
619
+ noise = noise_like(x.shape, device, repeat_noise) * temperature
620
+ if noise_dropout > 0.:
621
+ noise = torch.nn.functional.dropout(noise, p=noise_dropout)
622
+ # no noise when t == 0
623
+ nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
624
+
625
+ if return_x0:
626
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
627
+ else:
628
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
629
+
630
+ @torch.no_grad()
631
+ def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, \
632
+ timesteps=None, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None, **kwargs):
633
+
634
+ if not log_every_t:
635
+ log_every_t = self.log_every_t
636
+ device = self.betas.device
637
+ b = shape[0]
638
+ # sample an initial noise
639
+ if x_T is None:
640
+ img = torch.randn(shape, device=device)
641
+ else:
642
+ img = x_T
643
+
644
+ intermediates = [img]
645
+ if timesteps is None:
646
+ timesteps = self.num_timesteps
647
+ if start_T is not None:
648
+ timesteps = min(timesteps, start_T)
649
+
650
+ iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(range(0, timesteps))
651
+
652
+ if mask is not None:
653
+ assert x0 is not None
654
+ assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
655
+
656
+ for i in iterator:
657
+ ts = torch.full((b,), i, device=device, dtype=torch.long)
658
+ if self.shorten_cond_schedule:
659
+ assert self.model.conditioning_key != 'hybrid'
660
+ tc = self.cond_ids[ts].to(cond.device)
661
+ cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
662
+
663
+ img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, **kwargs)
664
+ if mask is not None:
665
+ img_orig = self.q_sample(x0, ts)
666
+ img = img_orig * mask + (1. - mask) * img
667
+
668
+ if i % log_every_t == 0 or i == timesteps - 1:
669
+ intermediates.append(img)
670
+ if callback: callback(i)
671
+ if img_callback: img_callback(img, i)
672
+
673
+ if return_intermediates:
674
+ return img, intermediates
675
+ return img
676
+
677
+
678
+ class LatentVisualDiffusion(LatentDiffusion):
679
+ def __init__(self, img_cond_stage_config, image_proj_stage_config, freeze_embedder=True, *args, **kwargs):
680
+ super().__init__(*args, **kwargs)
681
+ self._init_embedder(img_cond_stage_config, freeze_embedder)
682
+ self.image_proj_model = instantiate_from_config(image_proj_stage_config)
683
+
684
+ def _init_embedder(self, config, freeze=True):
685
+ embedder = instantiate_from_config(config)
686
+ if freeze:
687
+ self.embedder = embedder.eval()
688
+ self.embedder.train = disabled_train
689
+ for param in self.embedder.parameters():
690
+ param.requires_grad = False
691
+
692
+
693
+ class DiffusionWrapper(pl.LightningModule):
694
+ def __init__(self, diff_model_config, conditioning_key):
695
+ super().__init__()
696
+ self.diffusion_model = instantiate_from_config(diff_model_config)
697
+ self.conditioning_key = conditioning_key
698
+
699
+ def forward(self, x, t, c_concat: list = None, c_crossattn: list = None,
700
+ c_adm=None, s=None, mask=None, **kwargs):
701
+ ## dispatch on self.conditioning_key to assemble the denoiser inputs
702
+ if self.conditioning_key is None:
703
+ out = self.diffusion_model(x, t)
704
+ elif self.conditioning_key == 'concat':
705
+ xc = torch.cat([x] + c_concat, dim=1)
706
+ out = self.diffusion_model(xc, t, **kwargs)
707
+ elif self.conditioning_key == 'crossattn':
708
+ cc = torch.cat(c_crossattn, 1)
709
+ out = self.diffusion_model(x, t, context=cc, **kwargs)
710
+ elif self.conditioning_key == 'hybrid':
711
+ ## input is [b,c,t,h,w]: concatenate the conditioning along the channel dim
712
+ xc = torch.cat([x] + c_concat, dim=1)
713
+ cc = torch.cat(c_crossattn, 1)
714
+ out = self.diffusion_model(xc, t, context=cc, **kwargs)
715
+ elif self.conditioning_key == 'resblockcond':
716
+ cc = c_crossattn[0]
717
+ out = self.diffusion_model(x, t, context=cc)
718
+ elif self.conditioning_key == 'adm':
719
+ cc = c_crossattn[0]
720
+ out = self.diffusion_model(x, t, y=cc)
721
+ elif self.conditioning_key == 'hybrid-adm':
722
+ assert c_adm is not None
723
+ xc = torch.cat([x] + c_concat, dim=1)
724
+ cc = torch.cat(c_crossattn, 1)
725
+ out = self.diffusion_model(xc, t, context=cc, y=c_adm, **kwargs)
726
+ elif self.conditioning_key == 'hybrid-time':
727
+ assert s is not None
728
+ xc = torch.cat([x] + c_concat, dim=1)
729
+ cc = torch.cat(c_crossattn, 1)
730
+ out = self.diffusion_model(xc, t, context=cc, s=s)
731
+ elif self.conditioning_key == 'concat-time-mask':
732
+ # assert s is not None
733
+ xc = torch.cat([x] + c_concat, dim=1)
734
+ out = self.diffusion_model(xc, t, context=None, s=s, mask=mask)
735
+ elif self.conditioning_key == 'concat-adm-mask':
736
+ # assert s is not None
737
+ if c_concat is not None:
738
+ xc = torch.cat([x] + c_concat, dim=1)
739
+ else:
740
+ xc = x
741
+ out = self.diffusion_model(xc, t, context=None, y=s, mask=mask)
742
+ elif self.conditioning_key == 'hybrid-adm-mask':
743
+ cc = torch.cat(c_crossattn, 1)
744
+ if c_concat is not None:
745
+ xc = torch.cat([x] + c_concat, dim=1)
746
+ else:
747
+ xc = x
748
+ out = self.diffusion_model(xc, t, context=cc, y=s, mask=mask)
749
+ elif self.conditioning_key == 'hybrid-time-adm': # adm means y, e.g., class index
750
+ # assert s is not None
751
+ assert c_adm is not None
752
+ xc = torch.cat([x] + c_concat, dim=1)
753
+ cc = torch.cat(c_crossattn, 1)
754
+ out = self.diffusion_model(xc, t, context=cc, s=s, y=c_adm)
755
+ elif self.conditioning_key == 'crossattn-adm':
756
+ assert c_adm is not None
757
+ cc = torch.cat(c_crossattn, 1)
758
+ out = self.diffusion_model(x, t, context=cc, y=c_adm)
759
+ else:
760
+ raise NotImplementedError()
761
+
762
+ return out
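Note: DiffusionWrapper.forward above routes its inputs according to `conditioning_key`. For the image-to-video 'hybrid' case, the concat conditioning is stacked along the channel dimension of the video latent while the cross-attention conditioning is passed as `context`. The following is a minimal sketch of those two tensor operations with made-up shapes (all dimensions and names here are illustrative, not the model's actual sizes):

    import torch

    # Toy shapes for the 'hybrid' branch: x is a video latent [b, c, t, h, w],
    # c_concat holds an image latent broadcast over time, c_crossattn holds token embeddings.
    b, c, t, h, w = 2, 4, 16, 40, 64
    x = torch.randn(b, c, t, h, w)
    c_concat = [torch.randn(b, c, t, h, w)]       # concatenated along the channel dim
    c_crossattn = [torch.randn(b, 77, 1024)]      # cross-attention context (e.g. text tokens)

    xc = torch.cat([x] + c_concat, dim=1)         # -> [b, 2*c, t, h, w], the UNet input
    cc = torch.cat(c_crossattn, dim=1)            # -> [b, 77, 1024], passed as `context`
    print(xc.shape, cc.shape)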
lvdm/models/samplers/__pycache__/ddim.cpython-39.pyc ADDED
Binary file (8.22 kB).
 
lvdm/models/samplers/__pycache__/ddim_multiplecond.cpython-39.pyc ADDED
Binary file (8.38 kB).
 
lvdm/models/samplers/ddim.py ADDED
@@ -0,0 +1,317 @@
1
+ import numpy as np
2
+ from tqdm import tqdm
3
+ import torch
4
+ from lvdm.models.utils_diffusion import make_ddim_sampling_parameters, make_ddim_timesteps, rescale_noise_cfg
5
+ from lvdm.common import noise_like
6
+ from lvdm.common import extract_into_tensor
7
+ import copy
8
+
9
+
10
+ class DDIMSampler(object):
11
+ def __init__(self, model, schedule="linear", **kwargs):
12
+ super().__init__()
13
+ self.model = model
14
+ self.ddpm_num_timesteps = model.num_timesteps
15
+ self.schedule = schedule
16
+ self.counter = 0
17
+
18
+ def register_buffer(self, name, attr):
19
+ if type(attr) == torch.Tensor:
20
+ if attr.device != torch.device("cuda"):
21
+ attr = attr.to(torch.device("cuda"))
22
+ setattr(self, name, attr)
23
+
24
+ def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
25
+ self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
26
+ num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
27
+ alphas_cumprod = self.model.alphas_cumprod
28
+ assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
29
+ to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
30
+
31
+ if self.model.use_dynamic_rescale:
32
+ self.ddim_scale_arr = self.model.scale_arr[self.ddim_timesteps]
33
+ self.ddim_scale_arr_prev = torch.cat([self.ddim_scale_arr[0:1], self.ddim_scale_arr[:-1]])
34
+
35
+ self.register_buffer('betas', to_torch(self.model.betas))
36
+ self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
37
+ self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
38
+
39
+ # calculations for diffusion q(x_t | x_{t-1}) and others
40
+ self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
41
+ self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
42
+ self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
43
+ self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
44
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
45
+
46
+ # ddim sampling parameters
47
+ ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
48
+ ddim_timesteps=self.ddim_timesteps,
49
+ eta=ddim_eta,verbose=verbose)
50
+ self.register_buffer('ddim_sigmas', ddim_sigmas)
51
+ self.register_buffer('ddim_alphas', ddim_alphas)
52
+ self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
53
+ self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
54
+ sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
55
+ (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
56
+ 1 - self.alphas_cumprod / self.alphas_cumprod_prev))
57
+ self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
58
+
59
+ @torch.no_grad()
60
+ def sample(self,
61
+ S,
62
+ batch_size,
63
+ shape,
64
+ conditioning=None,
65
+ callback=None,
66
+ normals_sequence=None,
67
+ img_callback=None,
68
+ quantize_x0=False,
69
+ eta=0.,
70
+ mask=None,
71
+ x0=None,
72
+ temperature=1.,
73
+ noise_dropout=0.,
74
+ score_corrector=None,
75
+ corrector_kwargs=None,
76
+ verbose=True,
77
+ schedule_verbose=False,
78
+ x_T=None,
79
+ log_every_t=100,
80
+ unconditional_guidance_scale=1.,
81
+ unconditional_conditioning=None,
82
+ precision=None,
83
+ fs=None,
84
+ timestep_spacing='uniform', # use 'uniform_trailing' to start from the last timestep
85
+ guidance_rescale=0.0,
86
+ **kwargs
87
+ ):
88
+
89
+ # check condition bs
90
+ if conditioning is not None:
91
+ if isinstance(conditioning, dict):
92
+ try:
93
+ cbs = conditioning[list(conditioning.keys())[0]].shape[0]
94
+ except:
95
+ cbs = conditioning[list(conditioning.keys())[0]][0].shape[0]
96
+
97
+ if cbs != batch_size:
98
+ print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
99
+ else:
100
+ if conditioning.shape[0] != batch_size:
101
+ print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
102
+
103
+ self.make_schedule(ddim_num_steps=S, ddim_discretize=timestep_spacing, ddim_eta=eta, verbose=schedule_verbose)
104
+
105
+ # make shape
106
+ if len(shape) == 3:
107
+ C, H, W = shape
108
+ size = (batch_size, C, H, W)
109
+ elif len(shape) == 4:
110
+ C, T, H, W = shape
111
+ size = (batch_size, C, T, H, W)
112
+
113
+ samples, intermediates = self.ddim_sampling(conditioning, size,
114
+ callback=callback,
115
+ img_callback=img_callback,
116
+ quantize_denoised=quantize_x0,
117
+ mask=mask, x0=x0,
118
+ ddim_use_original_steps=False,
119
+ noise_dropout=noise_dropout,
120
+ temperature=temperature,
121
+ score_corrector=score_corrector,
122
+ corrector_kwargs=corrector_kwargs,
123
+ x_T=x_T,
124
+ log_every_t=log_every_t,
125
+ unconditional_guidance_scale=unconditional_guidance_scale,
126
+ unconditional_conditioning=unconditional_conditioning,
127
+ verbose=verbose,
128
+ precision=precision,
129
+ fs=fs,
130
+ guidance_rescale=guidance_rescale,
131
+ **kwargs)
132
+ return samples, intermediates
133
+
134
+ @torch.no_grad()
135
+ def ddim_sampling(self, cond, shape,
136
+ x_T=None, ddim_use_original_steps=False,
137
+ callback=None, timesteps=None, quantize_denoised=False,
138
+ mask=None, x0=None, img_callback=None, log_every_t=100,
139
+ temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
140
+ unconditional_guidance_scale=1., unconditional_conditioning=None, verbose=True,precision=None,fs=None,guidance_rescale=0.0,
141
+ **kwargs):
142
+ device = self.model.betas.device
143
+ b = shape[0]
144
+ if x_T is None:
145
+ img = torch.randn(shape, device=device)
146
+ else:
147
+ img = x_T
148
+ if precision is not None:
149
+ if precision == 16:
150
+ img = img.to(dtype=torch.float16)
151
+
152
+ if timesteps is None:
153
+ timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
154
+ elif timesteps is not None and not ddim_use_original_steps:
155
+ subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
156
+ timesteps = self.ddim_timesteps[:subset_end]
157
+
158
+ intermediates = {'x_inter': [img], 'pred_x0': [img]}
159
+ time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)
160
+ total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
161
+ if verbose:
162
+ iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
163
+ else:
164
+ iterator = time_range
165
+
166
+ clean_cond = kwargs.pop("clean_cond", False)
167
+
168
+ # cond_copy, unconditional_conditioning_copy = copy.deepcopy(cond), copy.deepcopy(unconditional_conditioning)
169
+ for i, step in enumerate(iterator):
170
+ index = total_steps - i - 1
171
+ ts = torch.full((b,), step, device=device, dtype=torch.long)
172
+
173
+ ## use mask to blend noised original latent (img_orig) & new sampled latent (img)
174
+ if mask is not None:
175
+ assert x0 is not None
176
+ if clean_cond:
177
+ img_orig = x0
178
+ else:
179
+ img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? <ddim inversion>
180
+ img = img_orig * mask + (1. - mask) * img # keep the original latent where mask=1, use the sampled latent elsewhere
181
+
182
+
183
+
184
+
185
+ outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
186
+ quantize_denoised=quantize_denoised, temperature=temperature,
187
+ noise_dropout=noise_dropout, score_corrector=score_corrector,
188
+ corrector_kwargs=corrector_kwargs,
189
+ unconditional_guidance_scale=unconditional_guidance_scale,
190
+ unconditional_conditioning=unconditional_conditioning,
191
+ mask=mask,x0=x0,fs=fs,guidance_rescale=guidance_rescale,
192
+ **kwargs)
193
+
194
+
195
+ img, pred_x0 = outs
196
+ if callback: callback(i)
197
+ if img_callback: img_callback(pred_x0, i)
198
+
199
+ if index % log_every_t == 0 or index == total_steps - 1:
200
+ intermediates['x_inter'].append(img)
201
+ intermediates['pred_x0'].append(pred_x0)
202
+
203
+ return img, intermediates
204
+
205
+ @torch.no_grad()
206
+ def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
207
+ temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
208
+ unconditional_guidance_scale=1., unconditional_conditioning=None,
209
+ uc_type=None, conditional_guidance_scale_temporal=None,mask=None,x0=None,guidance_rescale=0.0,**kwargs):
210
+ b, *_, device = *x.shape, x.device
211
+ if x.dim() == 5:
212
+ is_video = True
213
+ else:
214
+ is_video = False
215
+
216
+ if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
217
+ model_output = self.model.apply_model(x, t, c, **kwargs) # unet denoiser
218
+ else:
219
+ ### do_classifier_free_guidance
220
+ if isinstance(c, torch.Tensor) or isinstance(c, dict):
221
+ e_t_cond = self.model.apply_model(x, t, c, **kwargs)
222
+ e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)
223
+ else:
224
+ raise NotImplementedError
225
+
226
+ model_output = e_t_uncond + unconditional_guidance_scale * (e_t_cond - e_t_uncond)
227
+
228
+ if guidance_rescale > 0.0:
229
+ model_output = rescale_noise_cfg(model_output, e_t_cond, guidance_rescale=guidance_rescale)
230
+
231
+ if self.model.parameterization == "v":
232
+ e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)
233
+ else:
234
+ e_t = model_output
235
+
236
+ if score_corrector is not None:
237
+ assert self.model.parameterization == "eps", 'not implemented'
238
+ e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
239
+
240
+ alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
241
+ alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
242
+ sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
243
+ # sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
244
+ sigmas = self.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
245
+ # select parameters corresponding to the currently considered timestep
246
+
247
+ if is_video:
248
+ size = (b, 1, 1, 1, 1)
249
+ else:
250
+ size = (b, 1, 1, 1)
251
+ a_t = torch.full(size, alphas[index], device=device)
252
+ a_prev = torch.full(size, alphas_prev[index], device=device)
253
+ sigma_t = torch.full(size, sigmas[index], device=device)
254
+ sqrt_one_minus_at = torch.full(size, sqrt_one_minus_alphas[index],device=device)
255
+
256
+ # current prediction for x_0
257
+ if self.model.parameterization != "v":
258
+ pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
259
+ else:
260
+ pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)
261
+
262
+ if self.model.use_dynamic_rescale:
263
+ scale_t = torch.full(size, self.ddim_scale_arr[index], device=device)
264
+ prev_scale_t = torch.full(size, self.ddim_scale_arr_prev[index], device=device)
265
+ rescale = (prev_scale_t / scale_t)
266
+ pred_x0 *= rescale
267
+
268
+ if quantize_denoised:
269
+ pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
270
+ # direction pointing to x_t
271
+ dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
272
+
273
+ noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
274
+ if noise_dropout > 0.:
275
+ noise = torch.nn.functional.dropout(noise, p=noise_dropout)
276
+
277
+ x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
278
+
279
+ return x_prev, pred_x0
280
+
281
+ @torch.no_grad()
282
+ def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
283
+ use_original_steps=False, callback=None):
284
+
285
+ timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps
286
+ timesteps = timesteps[:t_start]
287
+
288
+ time_range = np.flip(timesteps)
289
+ total_steps = timesteps.shape[0]
290
+ print(f"Running DDIM Sampling with {total_steps} timesteps")
291
+
292
+ iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
293
+ x_dec = x_latent
294
+ for i, step in enumerate(iterator):
295
+ index = total_steps - i - 1
296
+ ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)
297
+ x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,
298
+ unconditional_guidance_scale=unconditional_guidance_scale,
299
+ unconditional_conditioning=unconditional_conditioning)
300
+ if callback: callback(i)
301
+ return x_dec
302
+
303
+ @torch.no_grad()
304
+ def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
305
+ # fast, but does not allow for exact reconstruction
306
+ # t serves as an index to gather the correct alphas
307
+ if use_original_steps:
308
+ sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
309
+ sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
310
+ else:
311
+ sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
312
+ sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
313
+
314
+ if noise is None:
315
+ noise = torch.randn_like(x0)
316
+ return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +
317
+ extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)
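Note: p_sample_ddim above implements the standard DDIM update: estimate x_0 from the noise prediction, form the direction pointing to x_t, and add sigma-scaled noise (zero when eta = 0). A minimal standalone sketch of that update, assuming eps-parameterization and made-up schedule values:

    import torch

    def ddim_step(x_t, e_t, a_t, a_prev, sigma_t):
        """One DDIM update (eps-parameterization), mirroring p_sample_ddim."""
        pred_x0 = (x_t - (1. - a_t).sqrt() * e_t) / a_t.sqrt()   # current estimate of x_0
        dir_xt = (1. - a_prev - sigma_t ** 2).sqrt() * e_t       # direction pointing to x_t
        noise = sigma_t * torch.randn_like(x_t)                  # zero when eta == 0
        return a_prev.sqrt() * pred_x0 + dir_xt + noise

    # toy single step with illustrative alpha values
    x_t = torch.randn(1, 4, 16, 40, 64)
    e_t = torch.randn_like(x_t)
    x_prev = ddim_step(x_t, e_t, torch.tensor(0.7), torch.tensor(0.8), torch.tensor(0.0))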
lvdm/models/samplers/ddim_multiplecond.py ADDED
@@ -0,0 +1,323 @@
1
+ import numpy as np
2
+ from tqdm import tqdm
3
+ import torch
4
+ from lvdm.models.utils_diffusion import make_ddim_sampling_parameters, make_ddim_timesteps, rescale_noise_cfg
5
+ from lvdm.common import noise_like
6
+ from lvdm.common import extract_into_tensor
7
+ import copy
8
+
9
+
10
+ class DDIMSampler(object):
11
+ def __init__(self, model, schedule="linear", **kwargs):
12
+ super().__init__()
13
+ self.model = model
14
+ self.ddpm_num_timesteps = model.num_timesteps
15
+ self.schedule = schedule
16
+ self.counter = 0
17
+
18
+ def register_buffer(self, name, attr):
19
+ if type(attr) == torch.Tensor:
20
+ if attr.device != torch.device("cuda"):
21
+ attr = attr.to(torch.device("cuda"))
22
+ setattr(self, name, attr)
23
+
24
+ def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
25
+ self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
26
+ num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
27
+ alphas_cumprod = self.model.alphas_cumprod
28
+ assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
29
+ to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
30
+
31
+ if self.model.use_dynamic_rescale:
32
+ self.ddim_scale_arr = self.model.scale_arr[self.ddim_timesteps]
33
+ self.ddim_scale_arr_prev = torch.cat([self.ddim_scale_arr[0:1], self.ddim_scale_arr[:-1]])
34
+
35
+ self.register_buffer('betas', to_torch(self.model.betas))
36
+ self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
37
+ self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
38
+
39
+ # calculations for diffusion q(x_t | x_{t-1}) and others
40
+ self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
41
+ self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
42
+ self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
43
+ self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
44
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
45
+
46
+ # ddim sampling parameters
47
+ ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
48
+ ddim_timesteps=self.ddim_timesteps,
49
+ eta=ddim_eta,verbose=verbose)
50
+ self.register_buffer('ddim_sigmas', ddim_sigmas)
51
+ self.register_buffer('ddim_alphas', ddim_alphas)
52
+ self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
53
+ self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
54
+ sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
55
+ (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
56
+ 1 - self.alphas_cumprod / self.alphas_cumprod_prev))
57
+ self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
58
+
59
+ @torch.no_grad()
60
+ def sample(self,
61
+ S,
62
+ batch_size,
63
+ shape,
64
+ conditioning=None,
65
+ callback=None,
66
+ normals_sequence=None,
67
+ img_callback=None,
68
+ quantize_x0=False,
69
+ eta=0.,
70
+ mask=None,
71
+ x0=None,
72
+ temperature=1.,
73
+ noise_dropout=0.,
74
+ score_corrector=None,
75
+ corrector_kwargs=None,
76
+ verbose=True,
77
+ schedule_verbose=False,
78
+ x_T=None,
79
+ log_every_t=100,
80
+ unconditional_guidance_scale=1.,
81
+ unconditional_conditioning=None,
82
+ precision=None,
83
+ fs=None,
84
+ timestep_spacing='uniform', # use 'uniform_trailing' to start from the last timestep
85
+ guidance_rescale=0.0,
86
+ # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
87
+ **kwargs
88
+ ):
89
+
90
+ # check condition bs
91
+ if conditioning is not None:
92
+ if isinstance(conditioning, dict):
93
+ try:
94
+ cbs = conditioning[list(conditioning.keys())[0]].shape[0]
95
+ except:
96
+ cbs = conditioning[list(conditioning.keys())[0]][0].shape[0]
97
+
98
+ if cbs != batch_size:
99
+ print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
100
+ else:
101
+ if conditioning.shape[0] != batch_size:
102
+ print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
103
+
104
+ # print('==> timestep_spacing: ', timestep_spacing, guidance_rescale)
105
+ self.make_schedule(ddim_num_steps=S, ddim_discretize=timestep_spacing, ddim_eta=eta, verbose=schedule_verbose)
106
+
107
+ # make shape
108
+ if len(shape) == 3:
109
+ C, H, W = shape
110
+ size = (batch_size, C, H, W)
111
+ elif len(shape) == 4:
112
+ C, T, H, W = shape
113
+ size = (batch_size, C, T, H, W)
114
+ # print(f'Data shape for DDIM sampling is {size}, eta {eta}')
115
+
116
+ samples, intermediates = self.ddim_sampling(conditioning, size,
117
+ callback=callback,
118
+ img_callback=img_callback,
119
+ quantize_denoised=quantize_x0,
120
+ mask=mask, x0=x0,
121
+ ddim_use_original_steps=False,
122
+ noise_dropout=noise_dropout,
123
+ temperature=temperature,
124
+ score_corrector=score_corrector,
125
+ corrector_kwargs=corrector_kwargs,
126
+ x_T=x_T,
127
+ log_every_t=log_every_t,
128
+ unconditional_guidance_scale=unconditional_guidance_scale,
129
+ unconditional_conditioning=unconditional_conditioning,
130
+ verbose=verbose,
131
+ precision=precision,
132
+ fs=fs,
133
+ guidance_rescale=guidance_rescale,
134
+ **kwargs)
135
+ return samples, intermediates
136
+
137
+ @torch.no_grad()
138
+ def ddim_sampling(self, cond, shape,
139
+ x_T=None, ddim_use_original_steps=False,
140
+ callback=None, timesteps=None, quantize_denoised=False,
141
+ mask=None, x0=None, img_callback=None, log_every_t=100,
142
+ temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
143
+ unconditional_guidance_scale=1., unconditional_conditioning=None, verbose=True,precision=None,fs=None,guidance_rescale=0.0,
144
+ **kwargs):
145
+ device = self.model.betas.device
146
+ b = shape[0]
147
+ if x_T is None:
148
+ img = torch.randn(shape, device=device)
149
+ else:
150
+ img = x_T
151
+ if precision is not None:
152
+ if precision == 16:
153
+ img = img.to(dtype=torch.float16)
154
+
155
+
156
+ if timesteps is None:
157
+ timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
158
+ elif timesteps is not None and not ddim_use_original_steps:
159
+ subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
160
+ timesteps = self.ddim_timesteps[:subset_end]
161
+
162
+ intermediates = {'x_inter': [img], 'pred_x0': [img]}
163
+ time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)
164
+ total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
165
+ if verbose:
166
+ iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
167
+ else:
168
+ iterator = time_range
169
+
170
+ clean_cond = kwargs.pop("clean_cond", False)
171
+
172
+ # cond_copy, unconditional_conditioning_copy = copy.deepcopy(cond), copy.deepcopy(unconditional_conditioning)
173
+ for i, step in enumerate(iterator):
174
+ index = total_steps - i - 1
175
+ ts = torch.full((b,), step, device=device, dtype=torch.long)
176
+
177
+ ## use mask to blend noised original latent (img_orig) & new sampled latent (img)
178
+ if mask is not None:
179
+ assert x0 is not None
180
+ if clean_cond:
181
+ img_orig = x0
182
+ else:
183
+ img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? <ddim inversion>
184
+ img = img_orig * mask + (1. - mask) * img # keep the original latent where mask=1, use the sampled latent elsewhere
185
+
186
+
187
+
188
+
189
+ outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
190
+ quantize_denoised=quantize_denoised, temperature=temperature,
191
+ noise_dropout=noise_dropout, score_corrector=score_corrector,
192
+ corrector_kwargs=corrector_kwargs,
193
+ unconditional_guidance_scale=unconditional_guidance_scale,
194
+ unconditional_conditioning=unconditional_conditioning,
195
+ mask=mask,x0=x0,fs=fs,guidance_rescale=guidance_rescale,
196
+ **kwargs)
197
+
198
+
199
+
200
+ img, pred_x0 = outs
201
+ if callback: callback(i)
202
+ if img_callback: img_callback(pred_x0, i)
203
+
204
+ if index % log_every_t == 0 or index == total_steps - 1:
205
+ intermediates['x_inter'].append(img)
206
+ intermediates['pred_x0'].append(pred_x0)
207
+
208
+ return img, intermediates
209
+
210
+ @torch.no_grad()
211
+ def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
212
+ temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
213
+ unconditional_guidance_scale=1., unconditional_conditioning=None,
214
+ uc_type=None, cfg_img=None,mask=None,x0=None,guidance_rescale=0.0, **kwargs):
215
+ b, *_, device = *x.shape, x.device
216
+ if x.dim() == 5:
217
+ is_video = True
218
+ else:
219
+ is_video = False
220
+ if cfg_img is None:
221
+ cfg_img = unconditional_guidance_scale
222
+
223
+ unconditional_conditioning_img_nonetext = kwargs['unconditional_conditioning_img_nonetext']
224
+
225
+
226
+ if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
227
+ model_output = self.model.apply_model(x, t, c, **kwargs) # unet denoiser
228
+ else:
229
+ ### with unconditional condition
230
+ e_t_cond = self.model.apply_model(x, t, c, **kwargs)
231
+ e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)
232
+ e_t_uncond_img = self.model.apply_model(x, t, unconditional_conditioning_img_nonetext, **kwargs)
233
+ # text cfg
234
+ model_output = e_t_uncond + cfg_img * (e_t_uncond_img - e_t_uncond) + unconditional_guidance_scale * (e_t_cond - e_t_uncond_img)
235
+ if guidance_rescale > 0.0:
236
+ model_output = rescale_noise_cfg(model_output, e_t_cond, guidance_rescale=guidance_rescale)
237
+
238
+ if self.model.parameterization == "v":
239
+ e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)
240
+ else:
241
+ e_t = model_output
242
+
243
+ if score_corrector is not None:
244
+ assert self.model.parameterization == "eps", 'not implemented'
245
+ e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
246
+
247
+ alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
248
+ alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
249
+ sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
250
+ sigmas = self.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
251
+ # select parameters corresponding to the currently considered timestep
252
+
253
+ if is_video:
254
+ size = (b, 1, 1, 1, 1)
255
+ else:
256
+ size = (b, 1, 1, 1)
257
+ a_t = torch.full(size, alphas[index], device=device)
258
+ a_prev = torch.full(size, alphas_prev[index], device=device)
259
+ sigma_t = torch.full(size, sigmas[index], device=device)
260
+ sqrt_one_minus_at = torch.full(size, sqrt_one_minus_alphas[index],device=device)
261
+
262
+ # current prediction for x_0
263
+ if self.model.parameterization != "v":
264
+ pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
265
+ else:
266
+ pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)
267
+
268
+ if self.model.use_dynamic_rescale:
269
+ scale_t = torch.full(size, self.ddim_scale_arr[index], device=device)
270
+ prev_scale_t = torch.full(size, self.ddim_scale_arr_prev[index], device=device)
271
+ rescale = (prev_scale_t / scale_t)
272
+ pred_x0 *= rescale
273
+
274
+ if quantize_denoised:
275
+ pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
276
+ # direction pointing to x_t
277
+ dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
278
+
279
+ noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
280
+ if noise_dropout > 0.:
281
+ noise = torch.nn.functional.dropout(noise, p=noise_dropout)
282
+
283
+ x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
284
+
285
+ return x_prev, pred_x0
286
+
287
+ @torch.no_grad()
288
+ def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
289
+ use_original_steps=False, callback=None):
290
+
291
+ timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps
292
+ timesteps = timesteps[:t_start]
293
+
294
+ time_range = np.flip(timesteps)
295
+ total_steps = timesteps.shape[0]
296
+ print(f"Running DDIM Sampling with {total_steps} timesteps")
297
+
298
+ iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
299
+ x_dec = x_latent
300
+ for i, step in enumerate(iterator):
301
+ index = total_steps - i - 1
302
+ ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)
303
+ x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,
304
+ unconditional_guidance_scale=unconditional_guidance_scale,
305
+ unconditional_conditioning=unconditional_conditioning)
306
+ if callback: callback(i)
307
+ return x_dec
308
+
309
+ @torch.no_grad()
310
+ def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
311
+ # fast, but does not allow for exact reconstruction
312
+ # t serves as an index to gather the correct alphas
313
+ if use_original_steps:
314
+ sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
315
+ sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
316
+ else:
317
+ sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
318
+ sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
319
+
320
+ if noise is None:
321
+ noise = torch.randn_like(x0)
322
+ return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +
323
+ extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)
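Note: this sampler differs from ddim.py mainly in its guidance combination. It evaluates three noise predictions: a fully unconditional one, an intermediate one from the `unconditional_conditioning_img_nonetext` kwarg (per its name, image-related conditioning with no text), and the fully conditioned one. Two scales are applied: `cfg_img` between the first pair and the text guidance scale between the second pair. A minimal sketch of that combination (shapes and scale values are illustrative):

    import torch

    def dual_cfg(e_uncond, e_img_nonetext, e_full, cfg_img, cfg_text):
        # first guide towards the image-side condition, then towards the text condition
        return e_uncond + cfg_img * (e_img_nonetext - e_uncond) + cfg_text * (e_full - e_img_nonetext)

    e_uncond, e_img_nonetext, e_full = (torch.randn(1, 4, 16, 40, 64) for _ in range(3))
    out = dual_cfg(e_uncond, e_img_nonetext, e_full, cfg_img=7.5, cfg_text=7.5)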
lvdm/models/utils_diffusion.py ADDED
@@ -0,0 +1,158 @@
1
+ import math
2
+ import numpy as np
3
+ import torch
4
+ import torch.nn.functional as F
5
+ from einops import repeat
6
+
7
+
8
+ def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
9
+ """
10
+ Create sinusoidal timestep embeddings.
11
+ :param timesteps: a 1-D Tensor of N indices, one per batch element.
12
+ These may be fractional.
13
+ :param dim: the dimension of the output.
14
+ :param max_period: controls the minimum frequency of the embeddings.
15
+ :return: an [N x dim] Tensor of positional embeddings.
16
+ """
17
+ if not repeat_only:
18
+ half = dim // 2
19
+ freqs = torch.exp(
20
+ -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
21
+ ).to(device=timesteps.device)
22
+ args = timesteps[:, None].float() * freqs[None]
23
+ embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
24
+ if dim % 2:
25
+ embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
26
+ else:
27
+ embedding = repeat(timesteps, 'b -> b d', d=dim)
28
+ return embedding
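Note: timestep_embedding above is the standard sinusoidal embedding. A quick self-contained check of the same computation for a toy case (dim, max_period and the timestep values below are made up):

    import math
    import torch

    timesteps = torch.tensor([0., 10., 500.])
    dim, max_period = 8, 10000
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) * torch.arange(half, dtype=torch.float32) / half)
    args = timesteps[:, None] * freqs[None]
    emb = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)   # -> [3, 8]
    print(emb.shape)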
29
+
30
+
31
+ def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
32
+ if schedule == "linear":
33
+ betas = (
34
+ torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
35
+ )
36
+
37
+ elif schedule == "cosine":
38
+ timesteps = (
39
+ torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
40
+ )
41
+ alphas = timesteps / (1 + cosine_s) * np.pi / 2
42
+ alphas = torch.cos(alphas).pow(2)
43
+ alphas = alphas / alphas[0]
44
+ betas = 1 - alphas[1:] / alphas[:-1]
45
+ betas = np.clip(betas, a_min=0, a_max=0.999)
46
+
47
+ elif schedule == "sqrt_linear":
48
+ betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
49
+ elif schedule == "sqrt":
50
+ betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
51
+ else:
52
+ raise ValueError(f"schedule '{schedule}' unknown.")
53
+ return betas.numpy()
54
+
55
+
56
+ def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
57
+ if ddim_discr_method == 'uniform':
58
+ c = num_ddpm_timesteps // num_ddim_timesteps
59
+ ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
60
+ steps_out = ddim_timesteps + 1
61
+ elif ddim_discr_method == 'uniform_trailing':
62
+ c = num_ddpm_timesteps / num_ddim_timesteps
63
+ ddim_timesteps = np.flip(np.round(np.arange(num_ddpm_timesteps, 0, -c))).astype(np.int64)
64
+ steps_out = ddim_timesteps - 1
65
+ elif ddim_discr_method == 'quad':
66
+ ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
67
+ steps_out = ddim_timesteps + 1
68
+ else:
69
+ raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
70
+
71
+ # assert ddim_timesteps.shape[0] == num_ddim_timesteps
72
+ # add one to get the final alpha values right (the ones from first scale to data during sampling)
73
+ # steps_out = ddim_timesteps + 1
74
+ if verbose:
75
+ print(f'Selected timesteps for ddim sampler: {steps_out}')
76
+ return steps_out
77
+
78
+
79
+ def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
80
+ # select alphas for computing the variance schedule
81
+ # print(f'ddim_timesteps={ddim_timesteps}, len_alphacums={len(alphacums)}')
82
+ alphas = alphacums[ddim_timesteps]
83
+ alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
84
+
85
+ # according to the formula provided in https://arxiv.org/abs/2010.02502
86
+ sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
87
+ if verbose:
88
+ print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
89
+ print(f'For the chosen value of eta, which is {eta}, '
90
+ f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
91
+ return sigmas, alphas, alphas_prev
92
+
93
+
94
+ def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
95
+ """
96
+ Create a beta schedule that discretizes the given alpha_t_bar function,
97
+ which defines the cumulative product of (1-beta) over time from t = [0,1].
98
+ :param num_diffusion_timesteps: the number of betas to produce.
99
+ :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
100
+ produces the cumulative product of (1-beta) up to that
101
+ part of the diffusion process.
102
+ :param max_beta: the maximum beta to use; use values lower than 1 to
103
+ prevent singularities.
104
+ """
105
+ betas = []
106
+ for i in range(num_diffusion_timesteps):
107
+ t1 = i / num_diffusion_timesteps
108
+ t2 = (i + 1) / num_diffusion_timesteps
109
+ betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
110
+ return np.array(betas)
111
+
112
+ def rescale_zero_terminal_snr(betas):
113
+ """
114
+ Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
115
+
116
+ Args:
117
+ betas (`numpy.ndarray`):
118
+ the betas that the scheduler is being initialized with.
119
+
120
+ Returns:
121
+ `numpy.ndarray`: rescaled betas with zero terminal SNR
122
+ """
123
+ # Convert betas to alphas_bar_sqrt
124
+ alphas = 1.0 - betas
125
+ alphas_cumprod = np.cumprod(alphas, axis=0)
126
+ alphas_bar_sqrt = np.sqrt(alphas_cumprod)
127
+
128
+ # Store old values.
129
+ alphas_bar_sqrt_0 = alphas_bar_sqrt[0].copy()
130
+ alphas_bar_sqrt_T = alphas_bar_sqrt[-1].copy()
131
+
132
+ # Shift so the last timestep is zero.
133
+ alphas_bar_sqrt -= alphas_bar_sqrt_T
134
+
135
+ # Scale so the first timestep is back to the old value.
136
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
137
+
138
+ # Convert alphas_bar_sqrt to betas
139
+ alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
140
+ alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
141
+ alphas = np.concatenate([alphas_bar[0:1], alphas])
142
+ betas = 1 - alphas
143
+
144
+ return betas
145
+
146
+
147
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
148
+ """
149
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
150
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
151
+ """
152
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
153
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
154
+ # rescale the results from guidance (fixes overexposure)
155
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
156
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
157
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
158
+ return noise_cfg
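Note: make_ddim_timesteps above supports 'uniform' and 'uniform_trailing' discretizations; the trailing variant counts back from the final DDPM step so sampling starts exactly at the last timestep. A quick numpy sketch of the two spacings (the 1000/50 step counts are example values, not defaults):

    import numpy as np

    num_ddpm, num_ddim = 1000, 50

    # 'uniform': evenly spaced from step 0, then shifted by +1
    c = num_ddpm // num_ddim
    uniform = np.arange(0, num_ddpm, c) + 1                                        # 1, 21, ..., 981

    # 'uniform_trailing': counted back from the final step, then shifted by -1
    c = num_ddpm / num_ddim
    trailing = np.flip(np.round(np.arange(num_ddpm, 0, -c))).astype(np.int64) - 1  # 19, 39, ..., 999

    print(uniform[:3], uniform[-1], trailing[:3], trailing[-1])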
lvdm/modules/.DS_Store ADDED
Binary file (6.15 kB).
 
lvdm/modules/__pycache__/attention.cpython-39.pyc ADDED
Binary file (15 kB).
 
lvdm/modules/attention.py ADDED
@@ -0,0 +1,514 @@
1
+ import torch
2
+ from torch import nn, einsum
3
+ import torch.nn.functional as F
4
+ from einops import rearrange, repeat
5
+ from functools import partial
6
+ try:
7
+ import xformers
8
+ import xformers.ops
9
+ XFORMERS_IS_AVAILBLE = True
10
+ except:
11
+ XFORMERS_IS_AVAILBLE = False
12
+ from lvdm.common import (
13
+ checkpoint,
14
+ exists,
15
+ default,
16
+ )
17
+ from lvdm.basics import zero_module
18
+
19
+
20
+ class RelativePosition(nn.Module):
21
+ """ https://github.com/evelinehong/Transformer_Relative_Position_PyTorch/blob/master/relative_position.py """
22
+
23
+ def __init__(self, num_units, max_relative_position):
24
+ super().__init__()
25
+ self.num_units = num_units
26
+ self.max_relative_position = max_relative_position
27
+ self.embeddings_table = nn.Parameter(torch.Tensor(max_relative_position * 2 + 1, num_units))
28
+ nn.init.xavier_uniform_(self.embeddings_table)
29
+
30
+ def forward(self, length_q, length_k):
31
+ device = self.embeddings_table.device
32
+ range_vec_q = torch.arange(length_q, device=device)
33
+ range_vec_k = torch.arange(length_k, device=device)
34
+ distance_mat = range_vec_k[None, :] - range_vec_q[:, None]
35
+ distance_mat_clipped = torch.clamp(distance_mat, -self.max_relative_position, self.max_relative_position)
36
+ final_mat = distance_mat_clipped + self.max_relative_position
37
+ final_mat = final_mat.long()
38
+ embeddings = self.embeddings_table[final_mat]
39
+ return embeddings
40
+
41
+
42
+ class CrossAttention(nn.Module):
43
+
44
+ def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.,
45
+ relative_position=False, temporal_length=None, video_length=None, image_cross_attention=False, image_cross_attention_scale=1.0, image_cross_attention_scale_learnable=False, text_context_len=77):
46
+ super().__init__()
47
+ inner_dim = dim_head * heads
48
+ context_dim = default(context_dim, query_dim)
49
+
50
+ self.scale = dim_head**-0.5
51
+ self.heads = heads
52
+ self.dim_head = dim_head
53
+ self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
54
+ self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
55
+ self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
56
+
57
+ self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout))
58
+
59
+ self.relative_position = relative_position
60
+ if self.relative_position:
61
+ assert(temporal_length is not None)
62
+ self.relative_position_k = RelativePosition(num_units=dim_head, max_relative_position=temporal_length)
63
+ self.relative_position_v = RelativePosition(num_units=dim_head, max_relative_position=temporal_length)
64
+ else:
65
+ ## only used for spatial attention, while NOT for temporal attention
66
+ if XFORMERS_IS_AVAILBLE and temporal_length is None:
67
+ self.forward = self.efficient_forward
68
+
69
+ self.video_length = video_length
70
+ self.image_cross_attention = image_cross_attention
71
+ self.image_cross_attention_scale = image_cross_attention_scale
72
+ self.text_context_len = text_context_len
73
+ self.image_cross_attention_scale_learnable = image_cross_attention_scale_learnable
74
+ if self.image_cross_attention:
75
+ self.to_k_ip = nn.Linear(context_dim, inner_dim, bias=False)
76
+ self.to_v_ip = nn.Linear(context_dim, inner_dim, bias=False)
77
+ if image_cross_attention_scale_learnable:
78
+ self.register_parameter('alpha', nn.Parameter(torch.tensor(0.)) )
79
+
80
+
81
+ def forward(self, x, context=None, mask=None):
82
+ spatial_self_attn = (context is None)
83
+ k_ip, v_ip, out_ip = None, None, None
84
+
85
+ h = self.heads
86
+ q = self.to_q(x)
87
+ context = default(context, x)
88
+
89
+ if self.image_cross_attention and not spatial_self_attn:
90
+ context, context_image = context[:,:self.text_context_len,:], context[:,self.text_context_len:,:]
91
+ k = self.to_k(context)
92
+ v = self.to_v(context)
93
+ k_ip = self.to_k_ip(context_image)
94
+ v_ip = self.to_v_ip(context_image)
95
+ else:
96
+ if not spatial_self_attn:
97
+ context = context[:,:self.text_context_len,:]
98
+ k = self.to_k(context)
99
+ v = self.to_v(context)
100
+
101
+ q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
102
+
103
+ sim = torch.einsum('b i d, b j d -> b i j', q, k) * self.scale
104
+ if self.relative_position:
105
+ len_q, len_k, len_v = q.shape[1], k.shape[1], v.shape[1]
106
+ k2 = self.relative_position_k(len_q, len_k)
107
+ sim2 = einsum('b t d, t s d -> b t s', q, k2) * self.scale # TODO check
108
+ sim += sim2
109
+ del k
110
+
111
+ if exists(mask):
112
+ ## feasible for causal attention mask only
113
+ max_neg_value = -torch.finfo(sim.dtype).max
114
+ mask = repeat(mask, 'b i j -> (b h) i j', h=h)
115
+ sim.masked_fill_(~(mask>0.5), max_neg_value)
116
+
117
+ # attention, what we cannot get enough of
118
+ sim = sim.softmax(dim=-1)
119
+
120
+ out = torch.einsum('b i j, b j d -> b i d', sim, v)
121
+ if self.relative_position:
122
+ v2 = self.relative_position_v(len_q, len_v)
123
+ out2 = einsum('b t s, t s d -> b t d', sim, v2) # TODO check
124
+ out += out2
125
+ out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
126
+
127
+
128
+ ## for image cross-attention
129
+ if k_ip is not None:
130
+ k_ip, v_ip = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (k_ip, v_ip))
131
+ sim_ip = torch.einsum('b i d, b j d -> b i j', q, k_ip) * self.scale
132
+ del k_ip
133
+ sim_ip = sim_ip.softmax(dim=-1)
134
+ out_ip = torch.einsum('b i j, b j d -> b i d', sim_ip, v_ip)
135
+ out_ip = rearrange(out_ip, '(b h) n d -> b n (h d)', h=h)
136
+
137
+
138
+ if out_ip is not None:
139
+ if self.image_cross_attention_scale_learnable:
140
+ out = out + self.image_cross_attention_scale * out_ip * (torch.tanh(self.alpha)+1)
141
+ else:
142
+ out = out + self.image_cross_attention_scale * out_ip
143
+
144
+ return self.to_out(out)
145
+
146
+ def efficient_forward(self, x, context=None, mask=None):
147
+ spatial_self_attn = (context is None)
148
+ k_ip, v_ip, out_ip = None, None, None
149
+
150
+ q = self.to_q(x)
151
+ context = default(context, x)
152
+
153
+ if self.image_cross_attention and not spatial_self_attn:
154
+ context, context_image = context[:,:self.text_context_len,:], context[:,self.text_context_len:,:]
155
+ k = self.to_k(context)
156
+ v = self.to_v(context)
157
+ k_ip = self.to_k_ip(context_image)
158
+ v_ip = self.to_v_ip(context_image)
159
+ else:
160
+ if not spatial_self_attn:
161
+ context = context[:,:self.text_context_len,:]
162
+ k = self.to_k(context)
163
+ v = self.to_v(context)
164
+
165
+ b, _, _ = q.shape
166
+ q, k, v = map(
167
+ lambda t: t.unsqueeze(3)
168
+ .reshape(b, t.shape[1], self.heads, self.dim_head)
169
+ .permute(0, 2, 1, 3)
170
+ .reshape(b * self.heads, t.shape[1], self.dim_head)
171
+ .contiguous(),
172
+ (q, k, v),
173
+ )
174
+ # actually compute the attention, what we cannot get enough of
175
+ out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=None)
176
+
177
+ ## for image cross-attention
178
+ if k_ip is not None:
179
+ k_ip, v_ip = map(
180
+ lambda t: t.unsqueeze(3)
181
+ .reshape(b, t.shape[1], self.heads, self.dim_head)
182
+ .permute(0, 2, 1, 3)
183
+ .reshape(b * self.heads, t.shape[1], self.dim_head)
184
+ .contiguous(),
185
+ (k_ip, v_ip),
186
+ )
187
+ out_ip = xformers.ops.memory_efficient_attention(q, k_ip, v_ip, attn_bias=None, op=None)
188
+ out_ip = (
189
+ out_ip.unsqueeze(0)
190
+ .reshape(b, self.heads, out.shape[1], self.dim_head)
191
+ .permute(0, 2, 1, 3)
192
+ .reshape(b, out.shape[1], self.heads * self.dim_head)
193
+ )
194
+
195
+ if exists(mask):
196
+ raise NotImplementedError
197
+ out = (
198
+ out.unsqueeze(0)
199
+ .reshape(b, self.heads, out.shape[1], self.dim_head)
200
+ .permute(0, 2, 1, 3)
201
+ .reshape(b, out.shape[1], self.heads * self.dim_head)
202
+ )
203
+ if out_ip is not None:
204
+ if self.image_cross_attention_scale_learnable:
205
+ out = out + self.image_cross_attention_scale * out_ip * (torch.tanh(self.alpha)+1)
206
+ else:
207
+ out = out + self.image_cross_attention_scale * out_ip
208
+
209
+ return self.to_out(out)
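Note: both forward paths of CrossAttention end by blending the image cross-attention output into the text cross-attention output, with either a fixed scale or a learnable gate. A minimal sketch of that blend (shapes and values are illustrative):

    import torch

    out_text = torch.randn(2, 1024, 512)   # attention over text tokens
    out_img = torch.randn(2, 1024, 512)    # attention over image tokens
    scale = 1.0                            # image_cross_attention_scale
    alpha = torch.tensor(0.)               # learnable gate; tanh(0) + 1 == 1 at init

    out_fixed = out_text + scale * out_img
    out_gated = out_text + scale * out_img * (torch.tanh(alpha) + 1)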
210
+
211
+
212
+ class BasicTransformerBlock(nn.Module):
213
+
214
+ def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True,
215
+ disable_self_attn=False, attention_cls=None, video_length=None, image_cross_attention=False, image_cross_attention_scale=1.0, image_cross_attention_scale_learnable=False, text_context_len=77):
216
+ super().__init__()
217
+ attn_cls = CrossAttention if attention_cls is None else attention_cls
218
+ self.disable_self_attn = disable_self_attn
219
+ self.attn1 = attn_cls(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout,
220
+ context_dim=context_dim if self.disable_self_attn else None)
221
+ self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
222
+ self.attn2 = attn_cls(query_dim=dim, context_dim=context_dim, heads=n_heads, dim_head=d_head, dropout=dropout, video_length=video_length, image_cross_attention=image_cross_attention, image_cross_attention_scale=image_cross_attention_scale, image_cross_attention_scale_learnable=image_cross_attention_scale_learnable,text_context_len=text_context_len)
223
+ self.image_cross_attention = image_cross_attention
224
+
225
+ self.norm1 = nn.LayerNorm(dim)
226
+ self.norm2 = nn.LayerNorm(dim)
227
+ self.norm3 = nn.LayerNorm(dim)
228
+ self.checkpoint = checkpoint
229
+
230
+
231
+ def forward(self, x, context=None, mask=None, **kwargs):
232
+ ## implementation trick: checkpointing does not support non-tensor (e.g. None or scalar) arguments
233
+ input_tuple = (x,) ## must be (x,) rather than (x); otherwise *input_tuple would unpack x into multiple arguments
234
+ if context is not None:
235
+ input_tuple = (x, context)
236
+ if mask is not None:
237
+ forward_mask = partial(self._forward, mask=mask)
238
+ return checkpoint(forward_mask, (x,), self.parameters(), self.checkpoint)
239
+ return checkpoint(self._forward, input_tuple, self.parameters(), self.checkpoint)
240
+
241
+
242
+ def _forward(self, x, context=None, mask=None):
243
+ x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None, mask=mask) + x
244
+ x = self.attn2(self.norm2(x), context=context, mask=mask) + x
245
+ x = self.ff(self.norm3(x)) + x
246
+ return x
247
+
248
+
249
+ class SpatialTransformer(nn.Module):
250
+ """
251
+ Transformer block for image-like data along the spatial axes.
+ First, project the input (i.e. the embedding) and reshape it to (b, h*w, d).
+ Then apply standard transformer layers.
+ Finally, reshape back to an image feature map.
+ NEW: use_linear replaces the 1x1 convs for better efficiency.
257
+ """
258
+
259
+ def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0., context_dim=None,
260
+ use_checkpoint=True, disable_self_attn=False, use_linear=False, video_length=None,
261
+ image_cross_attention=False, image_cross_attention_scale_learnable=False):
262
+ super().__init__()
263
+ self.in_channels = in_channels
264
+ inner_dim = n_heads * d_head
265
+ self.norm = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
266
+ if not use_linear:
267
+ self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
268
+ else:
269
+ self.proj_in = nn.Linear(in_channels, inner_dim)
270
+
271
+ attention_cls = None
272
+ self.transformer_blocks = nn.ModuleList([
273
+ BasicTransformerBlock(
274
+ inner_dim,
275
+ n_heads,
276
+ d_head,
277
+ dropout=dropout,
278
+ context_dim=context_dim,
279
+ disable_self_attn=disable_self_attn,
280
+ checkpoint=use_checkpoint,
281
+ attention_cls=attention_cls,
282
+ video_length=video_length,
283
+ image_cross_attention=image_cross_attention,
284
+ image_cross_attention_scale_learnable=image_cross_attention_scale_learnable,
285
+ ) for d in range(depth)
286
+ ])
287
+ if not use_linear:
288
+ self.proj_out = zero_module(nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0))
289
+ else:
290
+ self.proj_out = zero_module(nn.Linear(inner_dim, in_channels))
291
+ self.use_linear = use_linear
292
+
293
+
294
+ def forward(self, x, context=None, **kwargs):
295
+ b, c, h, w = x.shape
296
+ x_in = x
297
+ x = self.norm(x)
298
+ if not self.use_linear:
299
+ x = self.proj_in(x)
300
+ x = rearrange(x, 'b c h w -> b (h w) c').contiguous()
301
+ if self.use_linear:
302
+ x = self.proj_in(x)
303
+ for i, block in enumerate(self.transformer_blocks):
304
+ x = block(x, context=context, **kwargs)
305
+ if self.use_linear:
306
+ x = self.proj_out(x)
307
+ x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()
308
+ if not self.use_linear:
309
+ x = self.proj_out(x)
310
+ return x + x_in
311
+
312
+
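Editor's note: a small sketch of how SpatialTransformer is driven (illustration only; the channel counts and context width are assumptions). It consumes a (b, c, h, w) feature map plus an optional conditioning sequence and returns a tensor with the same spatial shape.

import torch
# illustrative only: one spatial transformer block over a 32x32 feature map
st = SpatialTransformer(in_channels=320, n_heads=8, d_head=40, depth=1,
                        context_dim=1024, use_linear=True, use_checkpoint=False)
feat = torch.randn(2, 320, 32, 32)      # (b, c, h, w)
ctx = torch.randn(2, 77, 1024)          # e.g. a text embedding
out = st(feat, context=ctx)             # same shape as feat: (2, 320, 32, 32)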
313
+ class TemporalTransformer(nn.Module):
314
+ """
315
+ Transformer block for image-like data along the temporal axis.
+ First, reshape the input to (b*h*w, t, d).
+ Then apply standard transformer layers.
+ Finally, reshape back to a video feature map.
319
+ """
320
+ def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0., context_dim=None,
321
+ use_checkpoint=True, use_linear=False, only_self_att=True, causal_attention=False, causal_block_size=1,
322
+ relative_position=False, temporal_length=None):
323
+ super().__init__()
324
+ self.only_self_att = only_self_att
325
+ self.relative_position = relative_position
326
+ self.causal_attention = causal_attention
327
+ self.causal_block_size = causal_block_size
328
+
329
+ self.in_channels = in_channels
330
+ inner_dim = n_heads * d_head
331
+ self.norm = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
332
+ if not use_linear:
334
+ self.proj_in = nn.Conv1d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
335
+ else:
336
+ self.proj_in = nn.Linear(in_channels, inner_dim)
337
+
338
+ if relative_position:
339
+ assert(temporal_length is not None)
340
+ attention_cls = partial(CrossAttention, relative_position=True, temporal_length=temporal_length)
341
+ else:
342
+ attention_cls = partial(CrossAttention, temporal_length=temporal_length)
343
+ if self.causal_attention:
344
+ assert(temporal_length is not None)
345
+ self.mask = torch.tril(torch.ones([1, temporal_length, temporal_length]))
346
+
347
+ if self.only_self_att:
348
+ context_dim = None
349
+ self.transformer_blocks = nn.ModuleList([
350
+ BasicTransformerBlock(
351
+ inner_dim,
352
+ n_heads,
353
+ d_head,
354
+ dropout=dropout,
355
+ context_dim=context_dim,
356
+ attention_cls=attention_cls,
357
+ checkpoint=use_checkpoint) for d in range(depth)
358
+ ])
359
+ if not use_linear:
360
+ self.proj_out = zero_module(nn.Conv1d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0))
361
+ else:
362
+ self.proj_out = zero_module(nn.Linear(inner_dim, in_channels))
363
+ self.use_linear = use_linear
364
+
365
+ def forward(self, x, context=None):
366
+ b, c, t, h, w = x.shape
367
+ x_in = x
368
+ x = self.norm(x)
369
+ x = rearrange(x, 'b c t h w -> (b h w) c t').contiguous()
370
+ if not self.use_linear:
371
+ x = self.proj_in(x)
372
+ x = rearrange(x, 'bhw c t -> bhw t c').contiguous()
373
+ if self.use_linear:
374
+ x = self.proj_in(x)
375
+
376
+ temp_mask = None
377
+ if self.causal_attention:
378
+ # slice the causal mask to the current temporal length
379
+ temp_mask = self.mask[:,:t,:t].to(x.device)
380
+
381
+ if temp_mask is not None:
382
+ mask = temp_mask.to(x.device)
383
+ mask = repeat(mask, 'l i j -> (l bhw) i j', bhw=b*h*w)
384
+ else:
385
+ mask = None
386
+
387
+ if self.only_self_att:
388
+ ## note: if no context is given, cross-attention defaults to self-attention
389
+ for i, block in enumerate(self.transformer_blocks):
390
+ x = block(x, mask=mask)
391
+ x = rearrange(x, '(b hw) t c -> b hw t c', b=b).contiguous()
392
+ else:
393
+ x = rearrange(x, '(b hw) t c -> b hw t c', b=b).contiguous()
394
+ context = rearrange(context, '(b t) l con -> b t l con', t=t).contiguous()
395
+ for i, block in enumerate(self.transformer_blocks):
396
+ # process each batch element separately (some backends cannot handle a batch dimension larger than 65,535)
397
+ for j in range(b):
398
+ context_j = repeat(
399
+ context[j],
400
+ 't l con -> (t r) l con', r=(h * w) // t, t=t).contiguous()
401
+ ## note: the causal mask is not applied in the cross-attention case
402
+ x[j] = block(x[j], context=context_j)
403
+
404
+ if self.use_linear:
405
+ x = self.proj_out(x)
406
+ x = rearrange(x, 'b (h w) t c -> b c t h w', h=h, w=w).contiguous()
407
+ if not self.use_linear:
408
+ x = rearrange(x, 'b hw t c -> (b hw) c t').contiguous()
409
+ x = self.proj_out(x)
410
+ x = rearrange(x, '(b h w) c t -> b c t h w', b=b, h=h, w=w).contiguous()
411
+
412
+ return x + x_in
413
+
414
+
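Editor's note: a corresponding sketch for TemporalTransformer (illustration only; all sizes are assumptions). It attends across the t dimension of a (b, c, t, h, w) feature volume, one spatial location at a time.

import torch
# illustrative only: temporal self-attention over 16 frames at every spatial position
tt = TemporalTransformer(in_channels=320, n_heads=8, d_head=40, depth=1,
                         temporal_length=16, use_checkpoint=False)
feat = torch.randn(2, 320, 16, 8, 8)    # (b, c, t, h, w)
out = tt(feat)                          # -> (2, 320, 16, 8, 8)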
415
+ class GEGLU(nn.Module):
416
+ def __init__(self, dim_in, dim_out):
417
+ super().__init__()
418
+ self.proj = nn.Linear(dim_in, dim_out * 2)
419
+
420
+ def forward(self, x):
421
+ x, gate = self.proj(x).chunk(2, dim=-1)
422
+ return x * F.gelu(gate)
423
+
424
+
425
+ class FeedForward(nn.Module):
426
+ def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
427
+ super().__init__()
428
+ inner_dim = int(dim * mult)
429
+ dim_out = default(dim_out, dim)
430
+ project_in = nn.Sequential(
431
+ nn.Linear(dim, inner_dim),
432
+ nn.GELU()
433
+ ) if not glu else GEGLU(dim, inner_dim)
434
+
435
+ self.net = nn.Sequential(
436
+ project_in,
437
+ nn.Dropout(dropout),
438
+ nn.Linear(inner_dim, dim_out)
439
+ )
440
+
441
+ def forward(self, x):
442
+ return self.net(x)
443
+
444
+
445
+ class LinearAttention(nn.Module):
446
+ def __init__(self, dim, heads=4, dim_head=32):
447
+ super().__init__()
448
+ self.heads = heads
449
+ hidden_dim = dim_head * heads
450
+ self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
451
+ self.to_out = nn.Conv2d(hidden_dim, dim, 1)
452
+
453
+ def forward(self, x):
454
+ b, c, h, w = x.shape
455
+ qkv = self.to_qkv(x)
456
+ q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3)
457
+ k = k.softmax(dim=-1)
458
+ context = torch.einsum('bhdn,bhen->bhde', k, v)
459
+ out = torch.einsum('bhde,bhdn->bhen', context, q)
460
+ out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w)
461
+ return self.to_out(out)
462
+
463
+
464
+ class SpatialSelfAttention(nn.Module):
465
+ def __init__(self, in_channels):
466
+ super().__init__()
467
+ self.in_channels = in_channels
468
+
469
+ self.norm = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
470
+ self.q = torch.nn.Conv2d(in_channels,
471
+ in_channels,
472
+ kernel_size=1,
473
+ stride=1,
474
+ padding=0)
475
+ self.k = torch.nn.Conv2d(in_channels,
476
+ in_channels,
477
+ kernel_size=1,
478
+ stride=1,
479
+ padding=0)
480
+ self.v = torch.nn.Conv2d(in_channels,
481
+ in_channels,
482
+ kernel_size=1,
483
+ stride=1,
484
+ padding=0)
485
+ self.proj_out = torch.nn.Conv2d(in_channels,
486
+ in_channels,
487
+ kernel_size=1,
488
+ stride=1,
489
+ padding=0)
490
+
491
+ def forward(self, x):
492
+ h_ = x
493
+ h_ = self.norm(h_)
494
+ q = self.q(h_)
495
+ k = self.k(h_)
496
+ v = self.v(h_)
497
+
498
+ # compute attention
499
+ b,c,h,w = q.shape
500
+ q = rearrange(q, 'b c h w -> b (h w) c')
501
+ k = rearrange(k, 'b c h w -> b c (h w)')
502
+ w_ = torch.einsum('bij,bjk->bik', q, k)
503
+
504
+ w_ = w_ * (int(c)**(-0.5))
505
+ w_ = torch.nn.functional.softmax(w_, dim=2)
506
+
507
+ # attend to values
508
+ v = rearrange(v, 'b c h w -> b c (h w)')
509
+ w_ = rearrange(w_, 'b i j -> b j i')
510
+ h_ = torch.einsum('bij,bjk->bik', v, w_)
511
+ h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
512
+ h_ = self.proj_out(h_)
513
+
514
+ return x+h_
lvdm/modules/encoders/__pycache__/condition.cpython-39.pyc ADDED
Binary file (13.7 kB).
 
lvdm/modules/encoders/__pycache__/resampler.cpython-39.pyc ADDED
Binary file (4.07 kB).
 
lvdm/modules/encoders/condition.py ADDED
@@ -0,0 +1,389 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import kornia
4
+ import open_clip
5
+ from torch.utils.checkpoint import checkpoint
6
+ from transformers import T5Tokenizer, T5EncoderModel, CLIPTokenizer, CLIPTextModel
7
+ from lvdm.common import autocast
8
+ from utils.utils import count_params
9
+
10
+
11
+ class AbstractEncoder(nn.Module):
12
+ def __init__(self):
13
+ super().__init__()
14
+
15
+ def encode(self, *args, **kwargs):
16
+ raise NotImplementedError
17
+
18
+
19
+ class IdentityEncoder(AbstractEncoder):
20
+ def encode(self, x):
21
+ return x
22
+
23
+
24
+ class ClassEmbedder(nn.Module):
25
+ def __init__(self, embed_dim, n_classes=1000, key='class', ucg_rate=0.1):
26
+ super().__init__()
27
+ self.key = key
28
+ self.embedding = nn.Embedding(n_classes, embed_dim)
29
+ self.n_classes = n_classes
30
+ self.ucg_rate = ucg_rate
31
+
32
+ def forward(self, batch, key=None, disable_dropout=False):
33
+ if key is None:
34
+ key = self.key
35
+ # this is for use in crossattn
36
+ c = batch[key][:, None]
37
+ if self.ucg_rate > 0. and not disable_dropout:
38
+ mask = 1. - torch.bernoulli(torch.ones_like(c) * self.ucg_rate)
39
+ c = mask * c + (1 - mask) * torch.ones_like(c) * (self.n_classes - 1)
40
+ c = c.long()
41
+ c = self.embedding(c)
42
+ return c
43
+
44
+ def get_unconditional_conditioning(self, bs, device="cuda"):
45
+ uc_class = self.n_classes - 1 # 1000 classes --> 0 ... 999, one extra class for ucg (class 1000)
46
+ uc = torch.ones((bs,), device=device) * uc_class
47
+ uc = {self.key: uc}
48
+ return uc
49
+
50
+
51
+ def disabled_train(self, mode=True):
52
+ """Overwrite model.train with this function to make sure train/eval mode
53
+ does not change anymore."""
54
+ return self
55
+
56
+
57
+ class FrozenT5Embedder(AbstractEncoder):
58
+ """Uses the T5 transformer encoder for text"""
59
+
60
+ def __init__(self, version="google/t5-v1_1-large", device="cuda", max_length=77,
61
+ freeze=True): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl
62
+ super().__init__()
63
+ self.tokenizer = T5Tokenizer.from_pretrained(version)
64
+ self.transformer = T5EncoderModel.from_pretrained(version)
65
+ self.device = device
66
+ self.max_length = max_length # TODO: typical value?
67
+ if freeze:
68
+ self.freeze()
69
+
70
+ def freeze(self):
71
+ self.transformer = self.transformer.eval()
72
+ # self.train = disabled_train
73
+ for param in self.parameters():
74
+ param.requires_grad = False
75
+
76
+ def forward(self, text):
77
+ batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
78
+ return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
79
+ tokens = batch_encoding["input_ids"].to(self.device)
80
+ outputs = self.transformer(input_ids=tokens)
81
+
82
+ z = outputs.last_hidden_state
83
+ return z
84
+
85
+ def encode(self, text):
86
+ return self(text)
87
+
88
+
89
+ class FrozenCLIPEmbedder(AbstractEncoder):
90
+ """Uses the CLIP transformer encoder for text (from huggingface)"""
91
+ LAYERS = [
92
+ "last",
93
+ "pooled",
94
+ "hidden"
95
+ ]
96
+
97
+ def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77,
98
+ freeze=True, layer="last", layer_idx=None): # clip-vit-base-patch32
99
+ super().__init__()
100
+ assert layer in self.LAYERS
101
+ self.tokenizer = CLIPTokenizer.from_pretrained(version)
102
+ self.transformer = CLIPTextModel.from_pretrained(version)
103
+ self.device = device
104
+ self.max_length = max_length
105
+ if freeze:
106
+ self.freeze()
107
+ self.layer = layer
108
+ self.layer_idx = layer_idx
109
+ if layer == "hidden":
110
+ assert layer_idx is not None
111
+ assert 0 <= abs(layer_idx) <= 12
112
+
113
+ def freeze(self):
114
+ self.transformer = self.transformer.eval()
115
+ # self.train = disabled_train
116
+ for param in self.parameters():
117
+ param.requires_grad = False
118
+
119
+ def forward(self, text):
120
+ batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
121
+ return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
122
+ tokens = batch_encoding["input_ids"].to(self.device)
123
+ outputs = self.transformer(input_ids=tokens, output_hidden_states=self.layer == "hidden")
124
+ if self.layer == "last":
125
+ z = outputs.last_hidden_state
126
+ elif self.layer == "pooled":
127
+ z = outputs.pooler_output[:, None, :]
128
+ else:
129
+ z = outputs.hidden_states[self.layer_idx]
130
+ return z
131
+
132
+ def encode(self, text):
133
+ return self(text)
134
+
135
+
136
+ class ClipImageEmbedder(nn.Module):
137
+ def __init__(
138
+ self,
139
+ model,
140
+ jit=False,
141
+ device='cuda' if torch.cuda.is_available() else 'cpu',
142
+ antialias=True,
143
+ ucg_rate=0.
144
+ ):
145
+ super().__init__()
146
+ from clip import load as load_clip
147
+ self.model, _ = load_clip(name=model, device=device, jit=jit)
148
+
149
+ self.antialias = antialias
150
+
151
+ self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False)
152
+ self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False)
153
+ self.ucg_rate = ucg_rate
154
+
155
+ def preprocess(self, x):
156
+ # normalize to [0,1]
157
+ x = kornia.geometry.resize(x, (224, 224),
158
+ interpolation='bicubic', align_corners=True,
159
+ antialias=self.antialias)
160
+ x = (x + 1.) / 2.
161
+ # re-normalize according to clip
162
+ x = kornia.enhance.normalize(x, self.mean, self.std)
163
+ return x
164
+
165
+ def forward(self, x, no_dropout=False):
166
+ # x is assumed to be in range [-1,1]
167
+ out = self.model.encode_image(self.preprocess(x))
168
+ out = out.to(x.dtype)
169
+ if self.ucg_rate > 0. and not no_dropout:
170
+ out = torch.bernoulli((1. - self.ucg_rate) * torch.ones(out.shape[0], device=out.device))[:, None] * out
171
+ return out
172
+
173
+
174
+ class FrozenOpenCLIPEmbedder(AbstractEncoder):
175
+ """
176
+ Uses the OpenCLIP transformer encoder for text
177
+ """
178
+ LAYERS = [
179
+ # "pooled",
180
+ "last",
181
+ "penultimate"
182
+ ]
183
+
184
+ def __init__(self, arch="ViT-H-14", version="laion2b_s32b_b79k", device="cuda", max_length=77,
185
+ freeze=True, layer="last"):
186
+ super().__init__()
187
+ assert layer in self.LAYERS
188
+ model, _, _ = open_clip.create_model_and_transforms(arch, device=torch.device('cpu'), pretrained=version)
189
+ del model.visual
190
+ self.model = model
191
+
192
+ self.device = device
193
+ self.max_length = max_length
194
+ if freeze:
195
+ self.freeze()
196
+ self.layer = layer
197
+ if self.layer == "last":
198
+ self.layer_idx = 0
199
+ elif self.layer == "penultimate":
200
+ self.layer_idx = 1
201
+ else:
202
+ raise NotImplementedError()
203
+
204
+ def freeze(self):
205
+ self.model = self.model.eval()
206
+ for param in self.parameters():
207
+ param.requires_grad = False
208
+
209
+ def forward(self, text):
210
+ tokens = open_clip.tokenize(text) ## all clip models use 77 as context length
211
+ z = self.encode_with_transformer(tokens.to(self.device))
212
+ return z
213
+
214
+ def encode_with_transformer(self, text):
215
+ x = self.model.token_embedding(text) # [batch_size, n_ctx, d_model]
216
+ x = x + self.model.positional_embedding
217
+ x = x.permute(1, 0, 2) # NLD -> LND
218
+ x = self.text_transformer_forward(x, attn_mask=self.model.attn_mask)
219
+ x = x.permute(1, 0, 2) # LND -> NLD
220
+ x = self.model.ln_final(x)
221
+ return x
222
+
223
+ def text_transformer_forward(self, x: torch.Tensor, attn_mask=None):
224
+ for i, r in enumerate(self.model.transformer.resblocks):
225
+ if i == len(self.model.transformer.resblocks) - self.layer_idx:
226
+ break
227
+ if self.model.transformer.grad_checkpointing and not torch.jit.is_scripting():
228
+ x = checkpoint(r, x, attn_mask)
229
+ else:
230
+ x = r(x, attn_mask=attn_mask)
231
+ return x
232
+
233
+ def encode(self, text):
234
+ return self(text)
235
+
236
+
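Editor's note: a brief sketch of the text path (illustration only; the prompt and device are placeholders, and instantiation downloads the ViT-H-14 weights). The embedder returns one 1024-dim token per position of the 77-token context.

import torch
# illustrative only: encode a prompt with the frozen OpenCLIP text tower
text_enc = FrozenOpenCLIPEmbedder(device='cpu', freeze=True, layer='penultimate')
with torch.no_grad():
    z = text_enc.encode(["a corgi running on the beach"])
print(z.shape)   # torch.Size([1, 77, 1024]) for ViT-H-14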
237
+ class FrozenOpenCLIPImageEmbedder(AbstractEncoder):
238
+ """
239
+ Uses the OpenCLIP vision transformer encoder for images
240
+ """
241
+
242
+ def __init__(self, arch="ViT-H-14", version="laion2b_s32b_b79k", device="cuda", max_length=77,
243
+ freeze=True, layer="pooled", antialias=True, ucg_rate=0.):
244
+ super().__init__()
245
+ model, _, _ = open_clip.create_model_and_transforms(arch, device=torch.device('cpu'),
246
+ pretrained=version, )
247
+ del model.transformer
248
+ self.model = model
249
+ # self.mapper = torch.nn.Linear(1280, 1024)
250
+ self.device = device
251
+ self.max_length = max_length
252
+ if freeze:
253
+ self.freeze()
254
+ self.layer = layer
255
+ if self.layer == "penultimate":
256
+ raise NotImplementedError()
257
+ self.layer_idx = 1
258
+
259
+ self.antialias = antialias
260
+
261
+ self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False)
262
+ self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False)
263
+ self.ucg_rate = ucg_rate
264
+
265
+ def preprocess(self, x):
266
+ # normalize to [0,1]
267
+ x = kornia.geometry.resize(x, (224, 224),
268
+ interpolation='bicubic', align_corners=True,
269
+ antialias=self.antialias)
270
+ x = (x + 1.) / 2.
271
+ # renormalize according to clip
272
+ x = kornia.enhance.normalize(x, self.mean, self.std)
273
+ return x
274
+
275
+ def freeze(self):
276
+ self.model = self.model.eval()
277
+ for param in self.model.parameters():
278
+ param.requires_grad = False
279
+
280
+ @autocast
281
+ def forward(self, image, no_dropout=False):
282
+ z = self.encode_with_vision_transformer(image)
283
+ if self.ucg_rate > 0. and not no_dropout:
284
+ z = torch.bernoulli((1. - self.ucg_rate) * torch.ones(z.shape[0], device=z.device))[:, None] * z
285
+ return z
286
+
287
+ def encode_with_vision_transformer(self, img):
288
+ img = self.preprocess(img)
289
+ x = self.model.visual(img)
290
+ return x
291
+
292
+ def encode(self, text):
293
+ return self(text)
294
+
295
+ class FrozenOpenCLIPImageEmbedderV2(AbstractEncoder):
296
+ """
297
+ Uses the OpenCLIP vision transformer encoder for images
298
+ """
299
+
300
+ def __init__(self, arch="ViT-H-14", version="laion2b_s32b_b79k", device="cuda",
301
+ freeze=True, layer="pooled", antialias=True):
302
+ super().__init__()
303
+ model, _, _ = open_clip.create_model_and_transforms(arch, device=torch.device('cpu'),
304
+ pretrained=version, )
305
+ del model.transformer
306
+ self.model = model
307
+ self.device = device
308
+
309
+ if freeze:
310
+ self.freeze()
311
+ self.layer = layer
312
+ if self.layer == "penultimate":
313
+ raise NotImplementedError()
314
+ self.layer_idx = 1
315
+
316
+ self.antialias = antialias
317
+
318
+ self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False)
319
+ self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False)
320
+
321
+
322
+ def preprocess(self, x):
323
+ # normalize to [0,1]
324
+ x = kornia.geometry.resize(x, (224, 224),
325
+ interpolation='bicubic', align_corners=True,
326
+ antialias=self.antialias)
327
+ x = (x + 1.) / 2.
328
+ # renormalize according to clip
329
+ x = kornia.enhance.normalize(x, self.mean, self.std)
330
+ return x
331
+
332
+ def freeze(self):
333
+ self.model = self.model.eval()
334
+ for param in self.model.parameters():
335
+ param.requires_grad = False
336
+
337
+ def forward(self, image, no_dropout=False):
338
+ ## image: b c h w
339
+ z = self.encode_with_vision_transformer(image)
340
+ return z
341
+
342
+ def encode_with_vision_transformer(self, x):
343
+ x = self.preprocess(x)
344
+
345
+ # to patches - whether to use dual patchnorm - https://arxiv.org/abs/2302.01327v1
346
+ if self.model.visual.input_patchnorm:
347
+ # einops - rearrange(x, 'b c (h p1) (w p2) -> b (h w) (c p1 p2)')
348
+ x = x.reshape(x.shape[0], x.shape[1], self.model.visual.grid_size[0], self.model.visual.patch_size[0], self.model.visual.grid_size[1], self.model.visual.patch_size[1])
349
+ x = x.permute(0, 2, 4, 1, 3, 5)
350
+ x = x.reshape(x.shape[0], self.model.visual.grid_size[0] * self.model.visual.grid_size[1], -1)
351
+ x = self.model.visual.patchnorm_pre_ln(x)
352
+ x = self.model.visual.conv1(x)
353
+ else:
354
+ x = self.model.visual.conv1(x) # shape = [*, width, grid, grid]
355
+ x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
356
+ x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
357
+
358
+ # class embeddings and positional embeddings
359
+ x = torch.cat(
360
+ [self.model.visual.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),
361
+ x], dim=1) # shape = [*, grid ** 2 + 1, width]
362
+ x = x + self.model.visual.positional_embedding.to(x.dtype)
363
+
364
+ # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in
365
+ x = self.model.visual.patch_dropout(x)
366
+ x = self.model.visual.ln_pre(x)
367
+
368
+ x = x.permute(1, 0, 2) # NLD -> LND
369
+ x = self.model.visual.transformer(x)
370
+ x = x.permute(1, 0, 2) # LND -> NLD
371
+
372
+ return x
373
+
374
+ class FrozenCLIPT5Encoder(AbstractEncoder):
375
+ def __init__(self, clip_version="openai/clip-vit-large-patch14", t5_version="google/t5-v1_1-xl", device="cuda",
376
+ clip_max_length=77, t5_max_length=77):
377
+ super().__init__()
378
+ self.clip_encoder = FrozenCLIPEmbedder(clip_version, device, max_length=clip_max_length)
379
+ self.t5_encoder = FrozenT5Embedder(t5_version, device, max_length=t5_max_length)
380
+ print(f"{self.clip_encoder.__class__.__name__} has {count_params(self.clip_encoder) * 1.e-6:.2f} M parameters, "
381
+ f"{self.t5_encoder.__class__.__name__} comes with {count_params(self.t5_encoder) * 1.e-6:.2f} M params.")
382
+
383
+ def encode(self, text):
384
+ return self(text)
385
+
386
+ def forward(self, text):
387
+ clip_z = self.clip_encoder.encode(text)
388
+ t5_z = self.t5_encoder.encode(text)
389
+ return [clip_z, t5_z]
lvdm/modules/encoders/resampler.py ADDED
@@ -0,0 +1,145 @@
1
+ # modified from https://github.com/mlfoundations/open_flamingo/blob/main/open_flamingo/src/helpers.py
2
+ # and https://github.com/lucidrains/imagen-pytorch/blob/main/imagen_pytorch/imagen_pytorch.py
3
+ # and https://github.com/tencent-ailab/IP-Adapter/blob/main/ip_adapter/resampler.py
4
+ import math
5
+ import torch
6
+ import torch.nn as nn
7
+
8
+
9
+ class ImageProjModel(nn.Module):
10
+ """Projection Model"""
11
+ def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024, clip_extra_context_tokens=4):
12
+ super().__init__()
13
+ self.cross_attention_dim = cross_attention_dim
14
+ self.clip_extra_context_tokens = clip_extra_context_tokens
15
+ self.proj = nn.Linear(clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim)
16
+ self.norm = nn.LayerNorm(cross_attention_dim)
17
+
18
+ def forward(self, image_embeds):
19
+ #embeds = image_embeds
20
+ embeds = image_embeds.type(list(self.proj.parameters())[0].dtype)
21
+ clip_extra_context_tokens = self.proj(embeds).reshape(-1, self.clip_extra_context_tokens, self.cross_attention_dim)
22
+ clip_extra_context_tokens = self.norm(clip_extra_context_tokens)
23
+ return clip_extra_context_tokens
24
+
25
+
26
+ # FFN
27
+ def FeedForward(dim, mult=4):
28
+ inner_dim = int(dim * mult)
29
+ return nn.Sequential(
30
+ nn.LayerNorm(dim),
31
+ nn.Linear(dim, inner_dim, bias=False),
32
+ nn.GELU(),
33
+ nn.Linear(inner_dim, dim, bias=False),
34
+ )
35
+
36
+
37
+ def reshape_tensor(x, heads):
38
+ bs, length, width = x.shape
39
+ #(bs, length, width) --> (bs, length, n_heads, dim_per_head)
40
+ x = x.view(bs, length, heads, -1)
41
+ # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
42
+ x = x.transpose(1, 2)
43
+ # keep (bs, n_heads, length, dim_per_head); the attention below uses batched 4-D matmuls
44
+ x = x.reshape(bs, heads, length, -1)
45
+ return x
46
+
47
+
48
+ class PerceiverAttention(nn.Module):
49
+ def __init__(self, *, dim, dim_head=64, heads=8):
50
+ super().__init__()
51
+ self.scale = dim_head**-0.5
52
+ self.dim_head = dim_head
53
+ self.heads = heads
54
+ inner_dim = dim_head * heads
55
+
56
+ self.norm1 = nn.LayerNorm(dim)
57
+ self.norm2 = nn.LayerNorm(dim)
58
+
59
+ self.to_q = nn.Linear(dim, inner_dim, bias=False)
60
+ self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
61
+ self.to_out = nn.Linear(inner_dim, dim, bias=False)
62
+
63
+
64
+ def forward(self, x, latents):
65
+ """
66
+ Args:
67
+ x (torch.Tensor): image features
68
+ shape (b, n1, D)
69
+ latents (torch.Tensor): latent features
70
+ shape (b, n2, D)
71
+ """
72
+ x = self.norm1(x)
73
+ latents = self.norm2(latents)
74
+
75
+ b, l, _ = latents.shape
76
+
77
+ q = self.to_q(latents)
78
+ kv_input = torch.cat((x, latents), dim=-2)
79
+ k, v = self.to_kv(kv_input).chunk(2, dim=-1)
80
+
81
+ q = reshape_tensor(q, self.heads)
82
+ k = reshape_tensor(k, self.heads)
83
+ v = reshape_tensor(v, self.heads)
84
+
85
+ # attention
86
+ scale = 1 / math.sqrt(math.sqrt(self.dim_head))
87
+ weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards
88
+ weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
89
+ out = weight @ v
90
+
91
+ out = out.permute(0, 2, 1, 3).reshape(b, l, -1)
92
+
93
+ return self.to_out(out)
94
+
95
+
96
+ class Resampler(nn.Module):
97
+ def __init__(
98
+ self,
99
+ dim=1024,
100
+ depth=8,
101
+ dim_head=64,
102
+ heads=16,
103
+ num_queries=8,
104
+ embedding_dim=768,
105
+ output_dim=1024,
106
+ ff_mult=4,
107
+ video_length=None, # using frame-wise version or not
108
+ ):
109
+ super().__init__()
110
+ ## queries for a single frame / image
111
+ self.num_queries = num_queries
112
+ self.video_length = video_length
113
+
114
+ ## <num_queries> queries for each frame
115
+ if video_length is not None:
116
+ num_queries = num_queries * video_length
117
+
118
+ self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)
119
+ self.proj_in = nn.Linear(embedding_dim, dim)
120
+ self.proj_out = nn.Linear(dim, output_dim)
121
+ self.norm_out = nn.LayerNorm(output_dim)
122
+
123
+ self.layers = nn.ModuleList([])
124
+ for _ in range(depth):
125
+ self.layers.append(
126
+ nn.ModuleList(
127
+ [
128
+ PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
129
+ FeedForward(dim=dim, mult=ff_mult),
130
+ ]
131
+ )
132
+ )
133
+
134
+ def forward(self, x):
135
+ latents = self.latents.repeat(x.size(0), 1, 1) ## B (T L) C
136
+ x = self.proj_in(x)
137
+
138
+ for attn, ff in self.layers:
139
+ latents = attn(x, latents) + latents
140
+ latents = ff(latents) + latents
141
+
142
+ latents = self.proj_out(latents)
143
+ latents = self.norm_out(latents) # B L C or B (T L) C
144
+
145
+ return latents
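Editor's note: an end-to-end sketch (illustration only) of how the resampler is meant to be fed. The frozen OpenCLIP image encoder defined in condition.py yields full patch-token sequences, and the Resampler compresses them into per-frame context tokens for the image cross-attention. The sizes below mirror a plausible 16-frame setup and are assumptions, not the shipped config.

import torch
# illustrative only: CLIP patch tokens -> frame-wise image context
# from lvdm.modules.encoders.condition import FrozenOpenCLIPImageEmbedderV2
img_enc = FrozenOpenCLIPImageEmbedderV2(device='cpu')        # downloads ViT-H-14 weights
resampler = Resampler(dim=1024, depth=4, dim_head=64, heads=12, num_queries=16,
                      embedding_dim=1280, output_dim=1024, ff_mult=4, video_length=16)
img = torch.randn(1, 3, 256, 256)        # assumed to be in [-1, 1]
with torch.no_grad():
    tokens = img_enc(img)                # (1, 257, 1280): class token + 16x16 patches
    img_ctx = resampler(tokens)          # (1, 16*16, 1024): 16 context tokens per frame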
lvdm/modules/networks/__pycache__/ae_modules.cpython-39.pyc ADDED
Binary file (20.6 kB).
 
lvdm/modules/networks/__pycache__/openaimodel3d.cpython-39.pyc ADDED
Binary file (15.2 kB).
 
lvdm/modules/networks/ae_modules.py ADDED
@@ -0,0 +1,844 @@
1
+ # pytorch_diffusion + derived encoder decoder
2
+ import math
3
+ import torch
4
+ import numpy as np
5
+ import torch.nn as nn
6
+ from einops import rearrange
7
+ from utils.utils import instantiate_from_config
8
+ from lvdm.modules.attention import LinearAttention
9
+
10
+ def nonlinearity(x):
11
+ # swish
12
+ return x*torch.sigmoid(x)
13
+
14
+
15
+ def Normalize(in_channels, num_groups=32):
16
+ return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)
17
+
18
+
19
+
20
+ class LinAttnBlock(LinearAttention):
21
+ """to match AttnBlock usage"""
22
+ def __init__(self, in_channels):
23
+ super().__init__(dim=in_channels, heads=1, dim_head=in_channels)
24
+
25
+
26
+ class AttnBlock(nn.Module):
27
+ def __init__(self, in_channels):
28
+ super().__init__()
29
+ self.in_channels = in_channels
30
+
31
+ self.norm = Normalize(in_channels)
32
+ self.q = torch.nn.Conv2d(in_channels,
33
+ in_channels,
34
+ kernel_size=1,
35
+ stride=1,
36
+ padding=0)
37
+ self.k = torch.nn.Conv2d(in_channels,
38
+ in_channels,
39
+ kernel_size=1,
40
+ stride=1,
41
+ padding=0)
42
+ self.v = torch.nn.Conv2d(in_channels,
43
+ in_channels,
44
+ kernel_size=1,
45
+ stride=1,
46
+ padding=0)
47
+ self.proj_out = torch.nn.Conv2d(in_channels,
48
+ in_channels,
49
+ kernel_size=1,
50
+ stride=1,
51
+ padding=0)
52
+
53
+ def forward(self, x):
54
+ h_ = x
55
+ h_ = self.norm(h_)
56
+ q = self.q(h_)
57
+ k = self.k(h_)
58
+ v = self.v(h_)
59
+
60
+ # compute attention
61
+ b,c,h,w = q.shape
62
+ q = q.reshape(b,c,h*w) # bcl
63
+ q = q.permute(0,2,1) # bcl -> blc l=hw
64
+ k = k.reshape(b,c,h*w) # bcl
65
+
66
+ w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
67
+ w_ = w_ * (int(c)**(-0.5))
68
+ w_ = torch.nn.functional.softmax(w_, dim=2)
69
+
70
+ # attend to values
71
+ v = v.reshape(b,c,h*w)
72
+ w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q)
73
+ h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
74
+ h_ = h_.reshape(b,c,h,w)
75
+
76
+ h_ = self.proj_out(h_)
77
+
78
+ return x+h_
79
+
80
+ def make_attn(in_channels, attn_type="vanilla"):
81
+ assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown'
82
+ #print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
83
+ if attn_type == "vanilla":
84
+ return AttnBlock(in_channels)
85
+ elif attn_type == "none":
86
+ return nn.Identity(in_channels)
87
+ else:
88
+ return LinAttnBlock(in_channels)
89
+
90
+ class Downsample(nn.Module):
91
+ def __init__(self, in_channels, with_conv):
92
+ super().__init__()
93
+ self.with_conv = with_conv
94
+ self.in_channels = in_channels
95
+ if self.with_conv:
96
+ # no asymmetric padding in torch conv, must do it ourselves
97
+ self.conv = torch.nn.Conv2d(in_channels,
98
+ in_channels,
99
+ kernel_size=3,
100
+ stride=2,
101
+ padding=0)
102
+ def forward(self, x):
103
+ if self.with_conv:
104
+ pad = (0,1,0,1)
105
+ x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
106
+ x = self.conv(x)
107
+ else:
108
+ x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
109
+ return x
110
+
111
+ class Upsample(nn.Module):
112
+ def __init__(self, in_channels, with_conv):
113
+ super().__init__()
114
+ self.with_conv = with_conv
115
+ self.in_channels = in_channels
116
+ if self.with_conv:
117
+ self.conv = torch.nn.Conv2d(in_channels,
118
+ in_channels,
119
+ kernel_size=3,
120
+ stride=1,
121
+ padding=1)
122
+
123
+ def forward(self, x):
124
+ x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
125
+ if self.with_conv:
126
+ x = self.conv(x)
127
+ return x
128
+
129
+ def get_timestep_embedding(timesteps, embedding_dim):
130
+ """
131
+ Build sinusoidal timestep embeddings.
+ This matches the implementation in Denoising Diffusion Probabilistic Models
+ (originally from Fairseq / tensor2tensor), but differs slightly from the
+ description in Section 3.5 of "Attention Is All You Need".
136
+ """
137
+ assert len(timesteps.shape) == 1
138
+
139
+ half_dim = embedding_dim // 2
140
+ emb = math.log(10000) / (half_dim - 1)
141
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
142
+ emb = emb.to(device=timesteps.device)
143
+ emb = timesteps.float()[:, None] * emb[None, :]
144
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
145
+ if embedding_dim % 2 == 1: # zero pad
146
+ emb = torch.nn.functional.pad(emb, (0,1,0,0))
147
+ return emb
148
+
149
+
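Editor's note: a tiny illustrative check of the embedding function above (values assumed). The first half of each embedding holds the sine terms and the second half the cosine terms, padded by one zero column when embedding_dim is odd.

import torch
# illustrative only: 4 timesteps -> 128-dim sinusoidal embeddings
emb = get_timestep_embedding(torch.tensor([0, 1, 10, 999]), embedding_dim=128)
print(emb.shape)   # torch.Size([4, 128])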
150
+
151
+ class ResnetBlock(nn.Module):
152
+ def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
153
+ dropout, temb_channels=512):
154
+ super().__init__()
155
+ self.in_channels = in_channels
156
+ out_channels = in_channels if out_channels is None else out_channels
157
+ self.out_channels = out_channels
158
+ self.use_conv_shortcut = conv_shortcut
159
+
160
+ self.norm1 = Normalize(in_channels)
161
+ self.conv1 = torch.nn.Conv2d(in_channels,
162
+ out_channels,
163
+ kernel_size=3,
164
+ stride=1,
165
+ padding=1)
166
+ if temb_channels > 0:
167
+ self.temb_proj = torch.nn.Linear(temb_channels,
168
+ out_channels)
169
+ self.norm2 = Normalize(out_channels)
170
+ self.dropout = torch.nn.Dropout(dropout)
171
+ self.conv2 = torch.nn.Conv2d(out_channels,
172
+ out_channels,
173
+ kernel_size=3,
174
+ stride=1,
175
+ padding=1)
176
+ if self.in_channels != self.out_channels:
177
+ if self.use_conv_shortcut:
178
+ self.conv_shortcut = torch.nn.Conv2d(in_channels,
179
+ out_channels,
180
+ kernel_size=3,
181
+ stride=1,
182
+ padding=1)
183
+ else:
184
+ self.nin_shortcut = torch.nn.Conv2d(in_channels,
185
+ out_channels,
186
+ kernel_size=1,
187
+ stride=1,
188
+ padding=0)
189
+
190
+ def forward(self, x, temb):
191
+ h = x
192
+ h = self.norm1(h)
193
+ h = nonlinearity(h)
194
+ h = self.conv1(h)
195
+
196
+ if temb is not None:
197
+ h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None]
198
+
199
+ h = self.norm2(h)
200
+ h = nonlinearity(h)
201
+ h = self.dropout(h)
202
+ h = self.conv2(h)
203
+
204
+ if self.in_channels != self.out_channels:
205
+ if self.use_conv_shortcut:
206
+ x = self.conv_shortcut(x)
207
+ else:
208
+ x = self.nin_shortcut(x)
209
+
210
+ return x+h
211
+
212
+ class Model(nn.Module):
213
+ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
214
+ attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
215
+ resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"):
216
+ super().__init__()
217
+ if use_linear_attn: attn_type = "linear"
218
+ self.ch = ch
219
+ self.temb_ch = self.ch*4
220
+ self.num_resolutions = len(ch_mult)
221
+ self.num_res_blocks = num_res_blocks
222
+ self.resolution = resolution
223
+ self.in_channels = in_channels
224
+
225
+ self.use_timestep = use_timestep
226
+ if self.use_timestep:
227
+ # timestep embedding
228
+ self.temb = nn.Module()
229
+ self.temb.dense = nn.ModuleList([
230
+ torch.nn.Linear(self.ch,
231
+ self.temb_ch),
232
+ torch.nn.Linear(self.temb_ch,
233
+ self.temb_ch),
234
+ ])
235
+
236
+ # downsampling
237
+ self.conv_in = torch.nn.Conv2d(in_channels,
238
+ self.ch,
239
+ kernel_size=3,
240
+ stride=1,
241
+ padding=1)
242
+
243
+ curr_res = resolution
244
+ in_ch_mult = (1,)+tuple(ch_mult)
245
+ self.down = nn.ModuleList()
246
+ for i_level in range(self.num_resolutions):
247
+ block = nn.ModuleList()
248
+ attn = nn.ModuleList()
249
+ block_in = ch*in_ch_mult[i_level]
250
+ block_out = ch*ch_mult[i_level]
251
+ for i_block in range(self.num_res_blocks):
252
+ block.append(ResnetBlock(in_channels=block_in,
253
+ out_channels=block_out,
254
+ temb_channels=self.temb_ch,
255
+ dropout=dropout))
256
+ block_in = block_out
257
+ if curr_res in attn_resolutions:
258
+ attn.append(make_attn(block_in, attn_type=attn_type))
259
+ down = nn.Module()
260
+ down.block = block
261
+ down.attn = attn
262
+ if i_level != self.num_resolutions-1:
263
+ down.downsample = Downsample(block_in, resamp_with_conv)
264
+ curr_res = curr_res // 2
265
+ self.down.append(down)
266
+
267
+ # middle
268
+ self.mid = nn.Module()
269
+ self.mid.block_1 = ResnetBlock(in_channels=block_in,
270
+ out_channels=block_in,
271
+ temb_channels=self.temb_ch,
272
+ dropout=dropout)
273
+ self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
274
+ self.mid.block_2 = ResnetBlock(in_channels=block_in,
275
+ out_channels=block_in,
276
+ temb_channels=self.temb_ch,
277
+ dropout=dropout)
278
+
279
+ # upsampling
280
+ self.up = nn.ModuleList()
281
+ for i_level in reversed(range(self.num_resolutions)):
282
+ block = nn.ModuleList()
283
+ attn = nn.ModuleList()
284
+ block_out = ch*ch_mult[i_level]
285
+ skip_in = ch*ch_mult[i_level]
286
+ for i_block in range(self.num_res_blocks+1):
287
+ if i_block == self.num_res_blocks:
288
+ skip_in = ch*in_ch_mult[i_level]
289
+ block.append(ResnetBlock(in_channels=block_in+skip_in,
290
+ out_channels=block_out,
291
+ temb_channels=self.temb_ch,
292
+ dropout=dropout))
293
+ block_in = block_out
294
+ if curr_res in attn_resolutions:
295
+ attn.append(make_attn(block_in, attn_type=attn_type))
296
+ up = nn.Module()
297
+ up.block = block
298
+ up.attn = attn
299
+ if i_level != 0:
300
+ up.upsample = Upsample(block_in, resamp_with_conv)
301
+ curr_res = curr_res * 2
302
+ self.up.insert(0, up) # prepend to get consistent order
303
+
304
+ # end
305
+ self.norm_out = Normalize(block_in)
306
+ self.conv_out = torch.nn.Conv2d(block_in,
307
+ out_ch,
308
+ kernel_size=3,
309
+ stride=1,
310
+ padding=1)
311
+
312
+ def forward(self, x, t=None, context=None):
313
+ #assert x.shape[2] == x.shape[3] == self.resolution
314
+ if context is not None:
315
+ # assume aligned context, cat along channel axis
316
+ x = torch.cat((x, context), dim=1)
317
+ if self.use_timestep:
318
+ # timestep embedding
319
+ assert t is not None
320
+ temb = get_timestep_embedding(t, self.ch)
321
+ temb = self.temb.dense[0](temb)
322
+ temb = nonlinearity(temb)
323
+ temb = self.temb.dense[1](temb)
324
+ else:
325
+ temb = None
326
+
327
+ # downsampling
328
+ hs = [self.conv_in(x)]
329
+ for i_level in range(self.num_resolutions):
330
+ for i_block in range(self.num_res_blocks):
331
+ h = self.down[i_level].block[i_block](hs[-1], temb)
332
+ if len(self.down[i_level].attn) > 0:
333
+ h = self.down[i_level].attn[i_block](h)
334
+ hs.append(h)
335
+ if i_level != self.num_resolutions-1:
336
+ hs.append(self.down[i_level].downsample(hs[-1]))
337
+
338
+ # middle
339
+ h = hs[-1]
340
+ h = self.mid.block_1(h, temb)
341
+ h = self.mid.attn_1(h)
342
+ h = self.mid.block_2(h, temb)
343
+
344
+ # upsampling
345
+ for i_level in reversed(range(self.num_resolutions)):
346
+ for i_block in range(self.num_res_blocks+1):
347
+ h = self.up[i_level].block[i_block](
348
+ torch.cat([h, hs.pop()], dim=1), temb)
349
+ if len(self.up[i_level].attn) > 0:
350
+ h = self.up[i_level].attn[i_block](h)
351
+ if i_level != 0:
352
+ h = self.up[i_level].upsample(h)
353
+
354
+ # end
355
+ h = self.norm_out(h)
356
+ h = nonlinearity(h)
357
+ h = self.conv_out(h)
358
+ return h
359
+
360
+ def get_last_layer(self):
361
+ return self.conv_out.weight
362
+
363
+
364
+ class Encoder(nn.Module):
365
+ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
366
+ attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
367
+ resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla",
368
+ **ignore_kwargs):
369
+ super().__init__()
370
+ if use_linear_attn: attn_type = "linear"
371
+ self.ch = ch
372
+ self.temb_ch = 0
373
+ self.num_resolutions = len(ch_mult)
374
+ self.num_res_blocks = num_res_blocks
375
+ self.resolution = resolution
376
+ self.in_channels = in_channels
377
+
378
+ # downsampling
379
+ self.conv_in = torch.nn.Conv2d(in_channels,
380
+ self.ch,
381
+ kernel_size=3,
382
+ stride=1,
383
+ padding=1)
384
+
385
+ curr_res = resolution
386
+ in_ch_mult = (1,)+tuple(ch_mult)
387
+ self.in_ch_mult = in_ch_mult
388
+ self.down = nn.ModuleList()
389
+ for i_level in range(self.num_resolutions):
390
+ block = nn.ModuleList()
391
+ attn = nn.ModuleList()
392
+ block_in = ch*in_ch_mult[i_level]
393
+ block_out = ch*ch_mult[i_level]
394
+ for i_block in range(self.num_res_blocks):
395
+ block.append(ResnetBlock(in_channels=block_in,
396
+ out_channels=block_out,
397
+ temb_channels=self.temb_ch,
398
+ dropout=dropout))
399
+ block_in = block_out
400
+ if curr_res in attn_resolutions:
401
+ attn.append(make_attn(block_in, attn_type=attn_type))
402
+ down = nn.Module()
403
+ down.block = block
404
+ down.attn = attn
405
+ if i_level != self.num_resolutions-1:
406
+ down.downsample = Downsample(block_in, resamp_with_conv)
407
+ curr_res = curr_res // 2
408
+ self.down.append(down)
409
+
410
+ # middle
411
+ self.mid = nn.Module()
412
+ self.mid.block_1 = ResnetBlock(in_channels=block_in,
413
+ out_channels=block_in,
414
+ temb_channels=self.temb_ch,
415
+ dropout=dropout)
416
+ self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
417
+ self.mid.block_2 = ResnetBlock(in_channels=block_in,
418
+ out_channels=block_in,
419
+ temb_channels=self.temb_ch,
420
+ dropout=dropout)
421
+
422
+ # end
423
+ self.norm_out = Normalize(block_in)
424
+ self.conv_out = torch.nn.Conv2d(block_in,
425
+ 2*z_channels if double_z else z_channels,
426
+ kernel_size=3,
427
+ stride=1,
428
+ padding=1)
429
+
430
+ def forward(self, x):
431
+ # timestep embedding
432
+ temb = None
433
+
434
+ # print(f'encoder-input={x.shape}')
435
+ # downsampling
436
+ hs = [self.conv_in(x)]
437
+ # print(f'encoder-conv in feat={hs[0].shape}')
438
+ for i_level in range(self.num_resolutions):
439
+ for i_block in range(self.num_res_blocks):
440
+ h = self.down[i_level].block[i_block](hs[-1], temb)
441
+ # print(f'encoder-down feat={h.shape}')
442
+ if len(self.down[i_level].attn) > 0:
443
+ h = self.down[i_level].attn[i_block](h)
444
+ hs.append(h)
445
+ if i_level != self.num_resolutions-1:
446
+ # print(f'encoder-downsample (input)={hs[-1].shape}')
447
+ hs.append(self.down[i_level].downsample(hs[-1]))
448
+ # print(f'encoder-downsample (output)={hs[-1].shape}')
449
+
450
+ # middle
451
+ h = hs[-1]
452
+ h = self.mid.block_1(h, temb)
453
+ # print(f'encoder-mid1 feat={h.shape}')
454
+ h = self.mid.attn_1(h)
455
+ h = self.mid.block_2(h, temb)
456
+ # print(f'encoder-mid2 feat={h.shape}')
457
+
458
+ # end
459
+ h = self.norm_out(h)
460
+ h = nonlinearity(h)
461
+ h = self.conv_out(h)
462
+ # print(f'end feat={h.shape}')
463
+ return h
464
+
465
+
466
+ class Decoder(nn.Module):
467
+ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
468
+ attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
469
+ resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
470
+ attn_type="vanilla", **ignorekwargs):
471
+ super().__init__()
472
+ if use_linear_attn: attn_type = "linear"
473
+ self.ch = ch
474
+ self.temb_ch = 0
475
+ self.num_resolutions = len(ch_mult)
476
+ self.num_res_blocks = num_res_blocks
477
+ self.resolution = resolution
478
+ self.in_channels = in_channels
479
+ self.give_pre_end = give_pre_end
480
+ self.tanh_out = tanh_out
481
+
482
+ # compute in_ch_mult, block_in and curr_res at lowest res
483
+ in_ch_mult = (1,)+tuple(ch_mult)
484
+ block_in = ch*ch_mult[self.num_resolutions-1]
485
+ curr_res = resolution // 2**(self.num_resolutions-1)
486
+ self.z_shape = (1,z_channels,curr_res,curr_res)
487
+ print("AE working on z of shape {} = {} dimensions.".format(
488
+ self.z_shape, np.prod(self.z_shape)))
489
+
490
+ # z to block_in
491
+ self.conv_in = torch.nn.Conv2d(z_channels,
492
+ block_in,
493
+ kernel_size=3,
494
+ stride=1,
495
+ padding=1)
496
+
497
+ # middle
498
+ self.mid = nn.Module()
499
+ self.mid.block_1 = ResnetBlock(in_channels=block_in,
500
+ out_channels=block_in,
501
+ temb_channels=self.temb_ch,
502
+ dropout=dropout)
503
+ self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
504
+ self.mid.block_2 = ResnetBlock(in_channels=block_in,
505
+ out_channels=block_in,
506
+ temb_channels=self.temb_ch,
507
+ dropout=dropout)
508
+
509
+ # upsampling
510
+ self.up = nn.ModuleList()
511
+ for i_level in reversed(range(self.num_resolutions)):
512
+ block = nn.ModuleList()
513
+ attn = nn.ModuleList()
514
+ block_out = ch*ch_mult[i_level]
515
+ for i_block in range(self.num_res_blocks+1):
516
+ block.append(ResnetBlock(in_channels=block_in,
517
+ out_channels=block_out,
518
+ temb_channels=self.temb_ch,
519
+ dropout=dropout))
520
+ block_in = block_out
521
+ if curr_res in attn_resolutions:
522
+ attn.append(make_attn(block_in, attn_type=attn_type))
523
+ up = nn.Module()
524
+ up.block = block
525
+ up.attn = attn
526
+ if i_level != 0:
527
+ up.upsample = Upsample(block_in, resamp_with_conv)
528
+ curr_res = curr_res * 2
529
+ self.up.insert(0, up) # prepend to get consistent order
530
+
531
+ # end
532
+ self.norm_out = Normalize(block_in)
533
+ self.conv_out = torch.nn.Conv2d(block_in,
534
+ out_ch,
535
+ kernel_size=3,
536
+ stride=1,
537
+ padding=1)
538
+
539
+ def forward(self, z):
540
+ #assert z.shape[1:] == self.z_shape[1:]
541
+ self.last_z_shape = z.shape
542
+
543
+ # print(f'decoder-input={z.shape}')
544
+ # timestep embedding
545
+ temb = None
546
+
547
+ # z to block_in
548
+ h = self.conv_in(z)
549
+ # print(f'decoder-conv in feat={h.shape}')
550
+
551
+ # middle
552
+ h = self.mid.block_1(h, temb)
553
+ h = self.mid.attn_1(h)
554
+ h = self.mid.block_2(h, temb)
555
+ # print(f'decoder-mid feat={h.shape}')
556
+
557
+ # upsampling
558
+ for i_level in reversed(range(self.num_resolutions)):
559
+ for i_block in range(self.num_res_blocks+1):
560
+ h = self.up[i_level].block[i_block](h, temb)
561
+ if len(self.up[i_level].attn) > 0:
562
+ h = self.up[i_level].attn[i_block](h)
563
+ # print(f'decoder-up feat={h.shape}')
564
+ if i_level != 0:
565
+ h = self.up[i_level].upsample(h)
566
+ # print(f'decoder-upsample feat={h.shape}')
567
+
568
+ # end
569
+ if self.give_pre_end:
570
+ return h
571
+
572
+ h = self.norm_out(h)
573
+ h = nonlinearity(h)
574
+ h = self.conv_out(h)
575
+ # print(f'decoder-conv_out feat={h.shape}')
576
+ if self.tanh_out:
577
+ h = torch.tanh(h)
578
+ return h
579
+
580
+
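Editor's note: a compact sketch of the Encoder/Decoder pair above wired as a plain autoencoder (illustration only; the channel layout mimics a common latent-diffusion first stage and is an assumption, not a statement about the shipped checkpoints).

import torch
# illustrative only: 256x256 RGB -> (mean, logvar) latent at 32x32 -> RGB
cfg = dict(ch=128, out_ch=3, ch_mult=(1, 2, 4, 4), num_res_blocks=2,
           attn_resolutions=[], in_channels=3, resolution=256, z_channels=4)
enc, dec = Encoder(**cfg), Decoder(**cfg)
img = torch.randn(1, 3, 256, 256)
with torch.no_grad():
    moments = enc(img)                   # (1, 8, 32, 32): mean and logvar stacked (double_z)
    z = moments[:, :4]                   # take the mean as a deterministic latent
    rec = dec(z)                         # (1, 3, 256, 256)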
581
+ class SimpleDecoder(nn.Module):
582
+ def __init__(self, in_channels, out_channels, *args, **kwargs):
583
+ super().__init__()
584
+ self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1),
585
+ ResnetBlock(in_channels=in_channels,
586
+ out_channels=2 * in_channels,
587
+ temb_channels=0, dropout=0.0),
588
+ ResnetBlock(in_channels=2 * in_channels,
589
+ out_channels=4 * in_channels,
590
+ temb_channels=0, dropout=0.0),
591
+ ResnetBlock(in_channels=4 * in_channels,
592
+ out_channels=2 * in_channels,
593
+ temb_channels=0, dropout=0.0),
594
+ nn.Conv2d(2*in_channels, in_channels, 1),
595
+ Upsample(in_channels, with_conv=True)])
596
+ # end
597
+ self.norm_out = Normalize(in_channels)
598
+ self.conv_out = torch.nn.Conv2d(in_channels,
599
+ out_channels,
600
+ kernel_size=3,
601
+ stride=1,
602
+ padding=1)
603
+
604
+ def forward(self, x):
605
+ for i, layer in enumerate(self.model):
606
+ if i in [1,2,3]:
607
+ x = layer(x, None)
608
+ else:
609
+ x = layer(x)
610
+
611
+ h = self.norm_out(x)
612
+ h = nonlinearity(h)
613
+ x = self.conv_out(h)
614
+ return x
615
+
616
+
617
+ class UpsampleDecoder(nn.Module):
618
+ def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution,
619
+ ch_mult=(2,2), dropout=0.0):
620
+ super().__init__()
621
+ # upsampling
622
+ self.temb_ch = 0
623
+ self.num_resolutions = len(ch_mult)
624
+ self.num_res_blocks = num_res_blocks
625
+ block_in = in_channels
626
+ curr_res = resolution // 2 ** (self.num_resolutions - 1)
627
+ self.res_blocks = nn.ModuleList()
628
+ self.upsample_blocks = nn.ModuleList()
629
+ for i_level in range(self.num_resolutions):
630
+ res_block = []
631
+ block_out = ch * ch_mult[i_level]
632
+ for i_block in range(self.num_res_blocks + 1):
633
+ res_block.append(ResnetBlock(in_channels=block_in,
634
+ out_channels=block_out,
635
+ temb_channels=self.temb_ch,
636
+ dropout=dropout))
637
+ block_in = block_out
638
+ self.res_blocks.append(nn.ModuleList(res_block))
639
+ if i_level != self.num_resolutions - 1:
640
+ self.upsample_blocks.append(Upsample(block_in, True))
641
+ curr_res = curr_res * 2
642
+
643
+ # end
644
+ self.norm_out = Normalize(block_in)
645
+ self.conv_out = torch.nn.Conv2d(block_in,
646
+ out_channels,
647
+ kernel_size=3,
648
+ stride=1,
649
+ padding=1)
650
+
651
+ def forward(self, x):
652
+ # upsampling
653
+ h = x
654
+ for k, i_level in enumerate(range(self.num_resolutions)):
655
+ for i_block in range(self.num_res_blocks + 1):
656
+ h = self.res_blocks[i_level][i_block](h, None)
657
+ if i_level != self.num_resolutions - 1:
658
+ h = self.upsample_blocks[k](h)
659
+ h = self.norm_out(h)
660
+ h = nonlinearity(h)
661
+ h = self.conv_out(h)
662
+ return h
663
+
664
+
665
+ class LatentRescaler(nn.Module):
666
+ def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2):
667
+ super().__init__()
668
+ # residual block, interpolate, residual block
669
+ self.factor = factor
670
+ self.conv_in = nn.Conv2d(in_channels,
671
+ mid_channels,
672
+ kernel_size=3,
673
+ stride=1,
674
+ padding=1)
675
+ self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
676
+ out_channels=mid_channels,
677
+ temb_channels=0,
678
+ dropout=0.0) for _ in range(depth)])
679
+ self.attn = AttnBlock(mid_channels)
680
+ self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
681
+ out_channels=mid_channels,
682
+ temb_channels=0,
683
+ dropout=0.0) for _ in range(depth)])
684
+
685
+ self.conv_out = nn.Conv2d(mid_channels,
686
+ out_channels,
687
+ kernel_size=1,
688
+ )
689
+
690
+ def forward(self, x):
691
+ x = self.conv_in(x)
692
+ for block in self.res_block1:
693
+ x = block(x, None)
694
+ x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor))))
695
+ x = self.attn(x)
696
+ for block in self.res_block2:
697
+ x = block(x, None)
698
+ x = self.conv_out(x)
699
+ return x
700
+
701
+
702
+ class MergedRescaleEncoder(nn.Module):
703
+ def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks,
704
+ attn_resolutions, dropout=0.0, resamp_with_conv=True,
705
+ ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1):
706
+ super().__init__()
707
+ intermediate_chn = ch * ch_mult[-1]
708
+ self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult,
709
+ z_channels=intermediate_chn, double_z=False, resolution=resolution,
710
+ attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv,
711
+ out_ch=None)
712
+ self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn,
713
+ mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth)
714
+
715
+ def forward(self, x):
716
+ x = self.encoder(x)
717
+ x = self.rescaler(x)
718
+ return x
719
+
720
+
721
+ class MergedRescaleDecoder(nn.Module):
722
+ def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8),
723
+ dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1):
724
+ super().__init__()
725
+ tmp_chn = z_channels*ch_mult[-1]
726
+ self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout,
727
+ resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks,
728
+ ch_mult=ch_mult, resolution=resolution, ch=ch)
729
+ self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn,
730
+ out_channels=tmp_chn, depth=rescale_module_depth)
731
+
732
+ def forward(self, x):
733
+ x = self.rescaler(x)
734
+ x = self.decoder(x)
735
+ return x
736
+
737
+
738
+ class Upsampler(nn.Module):
739
+ def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2):
740
+ super().__init__()
741
+ assert out_size >= in_size
742
+ num_blocks = int(np.log2(out_size//in_size))+1
743
+ factor_up = 1.+ (out_size % in_size)
744
+ print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}")
745
+ self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels,
746
+ out_channels=in_channels)
747
+ self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2,
748
+ attn_resolutions=[], in_channels=None, ch=in_channels,
749
+ ch_mult=[ch_mult for _ in range(num_blocks)])
750
+
751
+ def forward(self, x):
752
+ x = self.rescaler(x)
753
+ x = self.decoder(x)
754
+ return x
755
+
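A worked example of the sizing arithmetic in Upsampler.__init__ (illustrative numbers, not taken from the configs):

    import numpy as np

    in_size, out_size = 32, 64                          # hypothetical sizes
    num_blocks = int(np.log2(out_size // in_size)) + 1  # -> 2, so ch_mult gets two entries
    factor_up = 1. + (out_size % in_size)               # -> 1.0, so the rescaler keeps the spatial size
    print(num_blocks, factor_up)                        # 2 1.0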
756
+
757
+ class Resize(nn.Module):
758
+ def __init__(self, in_channels=None, learned=False, mode="bilinear"):
759
+ super().__init__()
760
+ self.with_conv = learned
761
+ self.mode = mode
762
+ if self.with_conv:
763
+ print(f"Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed {mode} mode")
764
+ raise NotImplementedError()
765
+ assert in_channels is not None
766
+ # no asymmetric padding in torch conv, must do it ourselves
767
+ self.conv = torch.nn.Conv2d(in_channels,
768
+ in_channels,
769
+ kernel_size=4,
770
+ stride=2,
771
+ padding=1)
772
+
773
+ def forward(self, x, scale_factor=1.0):
774
+ if scale_factor==1.0:
775
+ return x
776
+ else:
777
+ x = torch.nn.functional.interpolate(x, mode=self.mode, align_corners=False, scale_factor=scale_factor)
778
+ return x
779
+
780
+ class FirstStagePostProcessor(nn.Module):
781
+
782
+ def __init__(self, ch_mult:list, in_channels,
783
+ pretrained_model:nn.Module=None,
784
+ reshape=False,
785
+ n_channels=None,
786
+ dropout=0.,
787
+ pretrained_config=None):
788
+ super().__init__()
789
+ if pretrained_config is None:
790
+ assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None'
791
+ self.pretrained_model = pretrained_model
792
+ else:
793
+ assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None'
794
+ self.instantiate_pretrained(pretrained_config)
795
+
796
+ self.do_reshape = reshape
797
+
798
+ if n_channels is None:
799
+ n_channels = self.pretrained_model.encoder.ch
800
+
801
+ self.proj_norm = Normalize(in_channels,num_groups=in_channels//2)
802
+ self.proj = nn.Conv2d(in_channels,n_channels,kernel_size=3,
803
+ stride=1,padding=1)
804
+
805
+ blocks = []
806
+ downs = []
807
+ ch_in = n_channels
808
+ for m in ch_mult:
809
+ blocks.append(ResnetBlock(in_channels=ch_in,out_channels=m*n_channels,dropout=dropout))
810
+ ch_in = m * n_channels
811
+ downs.append(Downsample(ch_in, with_conv=False))
812
+
813
+ self.model = nn.ModuleList(blocks)
814
+ self.downsampler = nn.ModuleList(downs)
815
+
816
+
817
+ def instantiate_pretrained(self, config):
818
+ model = instantiate_from_config(config)
819
+ self.pretrained_model = model.eval()
820
+ # self.pretrained_model.train = False
821
+ for param in self.pretrained_model.parameters():
822
+ param.requires_grad = False
823
+
824
+
825
+ @torch.no_grad()
826
+ def encode_with_pretrained(self,x):
827
+ c = self.pretrained_model.encode(x)
828
+ if isinstance(c, DiagonalGaussianDistribution):
829
+ c = c.mode()
830
+ return c
831
+
832
+ def forward(self,x):
833
+ z_fs = self.encode_with_pretrained(x)
834
+ z = self.proj_norm(z_fs)
835
+ z = self.proj(z)
836
+ z = nonlinearity(z)
837
+
838
+ for submodel, downmodel in zip(self.model,self.downsampler):
839
+ z = submodel(z,temb=None)
840
+ z = downmodel(z)
841
+
842
+ if self.do_reshape:
843
+ z = rearrange(z,'b c h w -> b (h w) c')
844
+ return z
lvdm/modules/networks/openaimodel3d.py ADDED
@@ -0,0 +1,603 @@
1
+ from functools import partial
2
+ from abc import abstractmethod
3
+ import torch
4
+ import torch.nn as nn
5
+ from einops import rearrange
6
+ import torch.nn.functional as F
7
+ from lvdm.models.utils_diffusion import timestep_embedding
8
+ from lvdm.common import checkpoint
9
+ from lvdm.basics import (
10
+ zero_module,
11
+ conv_nd,
12
+ linear,
13
+ avg_pool_nd,
14
+ normalization
15
+ )
16
+ from lvdm.modules.attention import SpatialTransformer, TemporalTransformer
17
+
18
+
19
+ class TimestepBlock(nn.Module):
20
+ """
21
+ Any module where forward() takes timestep embeddings as a second argument.
22
+ """
23
+ @abstractmethod
24
+ def forward(self, x, emb):
25
+ """
26
+ Apply the module to `x` given `emb` timestep embeddings.
27
+ """
28
+
29
+
30
+ class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
31
+ """
32
+ A sequential module that passes timestep embeddings to the children that
33
+ support it as an extra input.
34
+ """
35
+
36
+ def forward(self, x, emb, context=None, batch_size=None):
37
+ for layer in self:
38
+ if isinstance(layer, TimestepBlock):
39
+ x = layer(x, emb, batch_size=batch_size)
40
+ elif isinstance(layer, SpatialTransformer):
41
+ x = layer(x, context)
42
+ elif isinstance(layer, TemporalTransformer):
43
+ x = rearrange(x, '(b f) c h w -> b c f h w', b=batch_size)
44
+ x = layer(x, context)
45
+ x = rearrange(x, 'b c f h w -> (b f) c h w')
46
+ else:
47
+ x = layer(x)
48
+ return x
49
+
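A small sketch of the dispatch above (hypothetical sizes; ResBlock is defined further down in this file and the import path is assumed): TimestepBlock children receive the embedding and batch_size, SpatialTransformer/TemporalTransformer children receive the context, and everything else only gets x.

    import torch
    from lvdm.modules.networks.openaimodel3d import TimestepEmbedSequential, ResBlock

    block = TimestepEmbedSequential(
        ResBlock(channels=64, emb_channels=256, dropout=0.0, out_channels=64)
    )
    x = torch.randn(2 * 16, 64, 32, 32)   # (batch * frames, C, H, W)
    emb = torch.randn(2 * 16, 256)        # timestep embedding, already repeated per frame
    out = block(x, emb, context=None, batch_size=2)
    print(out.shape)                      # torch.Size([32, 64, 32, 32])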
50
+
51
+ class Downsample(nn.Module):
52
+ """
53
+ A downsampling layer with an optional convolution.
54
+ :param channels: channels in the inputs and outputs.
55
+ :param use_conv: a bool determining if a convolution is applied.
56
+ :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
57
+ downsampling occurs in the inner-two dimensions.
58
+ """
59
+
60
+ def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
61
+ super().__init__()
62
+ self.channels = channels
63
+ self.out_channels = out_channels or channels
64
+ self.use_conv = use_conv
65
+ self.dims = dims
66
+ stride = 2 if dims != 3 else (1, 2, 2)
67
+ if use_conv:
68
+ self.op = conv_nd(
69
+ dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
70
+ )
71
+ else:
72
+ assert self.channels == self.out_channels
73
+ self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
74
+
75
+ def forward(self, x):
76
+ assert x.shape[1] == self.channels
77
+ return self.op(x)
78
+
79
+
80
+ class Upsample(nn.Module):
81
+ """
82
+ An upsampling layer with an optional convolution.
83
+ :param channels: channels in the inputs and outputs.
84
+ :param use_conv: a bool determining if a convolution is applied.
85
+ :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
86
+ upsampling occurs in the inner-two dimensions.
87
+ """
88
+
89
+ def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
90
+ super().__init__()
91
+ self.channels = channels
92
+ self.out_channels = out_channels or channels
93
+ self.use_conv = use_conv
94
+ self.dims = dims
95
+ if use_conv:
96
+ self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
97
+
98
+ def forward(self, x):
99
+ assert x.shape[1] == self.channels
100
+ if self.dims == 3:
101
+ x = F.interpolate(x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode='nearest')
102
+ else:
103
+ x = F.interpolate(x, scale_factor=2, mode='nearest')
104
+ if self.use_conv:
105
+ x = self.conv(x)
106
+ return x
107
+
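A shape sketch for the 3D case (illustrative sizes): as the docstring says, only the inner two (spatial) dimensions are doubled, so the frame axis is untouched.

    import torch
    from lvdm.modules.networks.openaimodel3d import Upsample

    up = Upsample(channels=8, use_conv=False, dims=3)
    x = torch.randn(1, 8, 16, 4, 4)       # (B, C, T, H, W)
    print(up(x).shape)                    # torch.Size([1, 8, 16, 8, 8]); T stays at 16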
108
+
109
+ class ResBlock(TimestepBlock):
110
+ """
111
+ A residual block that can optionally change the number of channels.
112
+ :param channels: the number of input channels.
113
+ :param emb_channels: the number of timestep embedding channels.
114
+ :param dropout: the rate of dropout.
115
+ :param out_channels: if specified, the number of out channels.
116
+ :param use_conv: if True and out_channels is specified, use a spatial
117
+ convolution instead of a smaller 1x1 convolution to change the
118
+ channels in the skip connection.
119
+ :param dims: determines if the signal is 1D, 2D, or 3D.
120
+ :param up: if True, use this block for upsampling.
121
+ :param down: if True, use this block for downsampling.
122
+ :param use_temporal_conv: if True, use the temporal convolution.
123
+ :param use_image_dataset: if True, the temporal parameters will not be optimized.
124
+ """
125
+
126
+ def __init__(
127
+ self,
128
+ channels,
129
+ emb_channels,
130
+ dropout,
131
+ out_channels=None,
132
+ use_scale_shift_norm=False,
133
+ dims=2,
134
+ use_checkpoint=False,
135
+ use_conv=False,
136
+ up=False,
137
+ down=False,
138
+ use_temporal_conv=False,
139
+ tempspatial_aware=False
140
+ ):
141
+ super().__init__()
142
+ self.channels = channels
143
+ self.emb_channels = emb_channels
144
+ self.dropout = dropout
145
+ self.out_channels = out_channels or channels
146
+ self.use_conv = use_conv
147
+ self.use_checkpoint = use_checkpoint
148
+ self.use_scale_shift_norm = use_scale_shift_norm
149
+ self.use_temporal_conv = use_temporal_conv
150
+
151
+ self.in_layers = nn.Sequential(
152
+ normalization(channels),
153
+ nn.SiLU(),
154
+ conv_nd(dims, channels, self.out_channels, 3, padding=1),
155
+ )
156
+
157
+ self.updown = up or down
158
+
159
+ if up:
160
+ self.h_upd = Upsample(channels, False, dims)
161
+ self.x_upd = Upsample(channels, False, dims)
162
+ elif down:
163
+ self.h_upd = Downsample(channels, False, dims)
164
+ self.x_upd = Downsample(channels, False, dims)
165
+ else:
166
+ self.h_upd = self.x_upd = nn.Identity()
167
+
168
+ self.emb_layers = nn.Sequential(
169
+ nn.SiLU(),
170
+ nn.Linear(
171
+ emb_channels,
172
+ 2 * self.out_channels if use_scale_shift_norm else self.out_channels,
173
+ ),
174
+ )
175
+ self.out_layers = nn.Sequential(
176
+ normalization(self.out_channels),
177
+ nn.SiLU(),
178
+ nn.Dropout(p=dropout),
179
+ zero_module(nn.Conv2d(self.out_channels, self.out_channels, 3, padding=1)),
180
+ )
181
+
182
+ if self.out_channels == channels:
183
+ self.skip_connection = nn.Identity()
184
+ elif use_conv:
185
+ self.skip_connection = conv_nd(dims, channels, self.out_channels, 3, padding=1)
186
+ else:
187
+ self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
188
+
189
+ if self.use_temporal_conv:
190
+ self.temopral_conv = TemporalConvBlock(
191
+ self.out_channels,
192
+ self.out_channels,
193
+ dropout=0.1,
194
+ spatial_aware=tempspatial_aware
195
+ )
196
+
197
+ def forward(self, x, emb, batch_size=None):
198
+ """
199
+ Apply the block to a Tensor, conditioned on a timestep embedding.
200
+ :param x: an [N x C x ...] Tensor of features.
201
+ :param emb: an [N x emb_channels] Tensor of timestep embeddings.
202
+ :return: an [N x C x ...] Tensor of outputs.
203
+ """
204
+ input_tuple = (x, emb)
205
+ if batch_size:
206
+ forward_batchsize = partial(self._forward, batch_size=batch_size)
207
+ return checkpoint(forward_batchsize, input_tuple, self.parameters(), self.use_checkpoint)
208
+ return checkpoint(self._forward, input_tuple, self.parameters(), self.use_checkpoint)
209
+
210
+ def _forward(self, x, emb, batch_size=None):
211
+ if self.updown:
212
+ in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
213
+ h = in_rest(x)
214
+ h = self.h_upd(h)
215
+ x = self.x_upd(x)
216
+ h = in_conv(h)
217
+ else:
218
+ h = self.in_layers(x)
219
+ emb_out = self.emb_layers(emb).type(h.dtype)
220
+ while len(emb_out.shape) < len(h.shape):
221
+ emb_out = emb_out[..., None]
222
+ if self.use_scale_shift_norm:
223
+ out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
224
+ scale, shift = torch.chunk(emb_out, 2, dim=1)
225
+ h = out_norm(h) * (1 + scale) + shift
226
+ h = out_rest(h)
227
+ else:
228
+ h = h + emb_out
229
+ h = self.out_layers(h)
230
+ h = self.skip_connection(x) + h
231
+
232
+ if self.use_temporal_conv and batch_size:
233
+ h = rearrange(h, '(b t) c h w -> b c t h w', b=batch_size)
234
+ h = self.temopral_conv(h)
235
+ h = rearrange(h, 'b c t h w -> (b t) c h w')
236
+ return h
237
+
238
+
239
+ class TemporalConvBlock(nn.Module):
240
+ """
241
+ Adapted from modelscope: https://github.com/modelscope/modelscope/blob/master/modelscope/models/multi_modal/video_synthesis/unet_sd.py
242
+ """
243
+ def __init__(self, in_channels, out_channels=None, dropout=0.0, spatial_aware=False):
244
+ super(TemporalConvBlock, self).__init__()
245
+ if out_channels is None:
246
+ out_channels = in_channels
247
+ self.in_channels = in_channels
248
+ self.out_channels = out_channels
249
+ th_kernel_shape = (3, 1, 1) if not spatial_aware else (3, 3, 1)
250
+ th_padding_shape = (1, 0, 0) if not spatial_aware else (1, 1, 0)
251
+ tw_kernel_shape = (3, 1, 1) if not spatial_aware else (3, 1, 3)
252
+ tw_padding_shape = (1, 0, 0) if not spatial_aware else (1, 0, 1)
253
+
254
+ # conv layers
255
+ self.conv1 = nn.Sequential(
256
+ nn.GroupNorm(32, in_channels), nn.SiLU(),
257
+ nn.Conv3d(in_channels, out_channels, th_kernel_shape, padding=th_padding_shape))
258
+ self.conv2 = nn.Sequential(
259
+ nn.GroupNorm(32, out_channels), nn.SiLU(), nn.Dropout(dropout),
260
+ nn.Conv3d(out_channels, in_channels, tw_kernel_shape, padding=tw_padding_shape))
261
+ self.conv3 = nn.Sequential(
262
+ nn.GroupNorm(32, out_channels), nn.SiLU(), nn.Dropout(dropout),
263
+ nn.Conv3d(out_channels, in_channels, th_kernel_shape, padding=th_padding_shape))
264
+ self.conv4 = nn.Sequential(
265
+ nn.GroupNorm(32, out_channels), nn.SiLU(), nn.Dropout(dropout),
266
+ nn.Conv3d(out_channels, in_channels, tw_kernel_shape, padding=tw_padding_shape))
267
+
268
+ # zero out the last layer params, so the conv block is identity
269
+ nn.init.zeros_(self.conv4[-1].weight)
270
+ nn.init.zeros_(self.conv4[-1].bias)
271
+
272
+ def forward(self, x):
273
+ identity = x
274
+ x = self.conv1(x)
275
+ x = self.conv2(x)
276
+ x = self.conv3(x)
277
+ x = self.conv4(x)
278
+
279
+ return identity + x
280
+
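A quick check of the identity initialization above (not part of the diff): because conv4's last layer is zero-initialized, a freshly constructed TemporalConvBlock returns its input unchanged, so adding it to a pretrained image UNet does not perturb the initial behaviour.

    import torch
    from lvdm.modules.networks.openaimodel3d import TemporalConvBlock

    block = TemporalConvBlock(in_channels=64)
    x = torch.randn(1, 64, 16, 8, 8)      # (B, C, T, H, W)
    with torch.no_grad():
        y = block(x)
    print(torch.allclose(x, y))           # True; the residual branch starts at zero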
281
+ class UNetModel(nn.Module):
282
+ """
283
+ The full UNet model with attention and timestep embedding.
284
+ :param in_channels: in_channels in the input Tensor.
285
+ :param model_channels: base channel count for the model.
286
+ :param out_channels: channels in the output Tensor.
287
+ :param num_res_blocks: number of residual blocks per downsample.
288
+ :param attention_resolutions: a collection of downsample rates at which
289
+ attention will take place. May be a set, list, or tuple.
290
+ For example, if this contains 4, then at 4x downsampling, attention
291
+ will be used.
292
+ :param dropout: the dropout probability.
293
+ :param channel_mult: channel multiplier for each level of the UNet.
294
+ :param conv_resample: if True, use learned convolutions for upsampling and
295
+ downsampling.
296
+ :param dims: determines if the signal is 1D, 2D, or 3D.
297
+ :param num_classes: if specified (as an int), then this model will be
298
+ class-conditional with `num_classes` classes.
299
+ :param use_checkpoint: use gradient checkpointing to reduce memory usage.
300
+ :param num_heads: the number of attention heads in each attention layer.
301
+ :param num_heads_channels: if specified, ignore num_heads and instead use
302
+ a fixed channel width per attention head.
303
+ :param num_heads_upsample: works with num_heads to set a different number
304
+ of heads for upsampling. Deprecated.
305
+ :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
306
+ :param resblock_updown: use residual blocks for up/downsampling.
307
+ :param use_new_attention_order: use a different attention pattern for potentially
308
+ increased efficiency.
309
+ """
310
+
311
+ def __init__(self,
312
+ in_channels,
313
+ model_channels,
314
+ out_channels,
315
+ num_res_blocks,
316
+ attention_resolutions,
317
+ dropout=0.0,
318
+ channel_mult=(1, 2, 4, 8),
319
+ conv_resample=True,
320
+ dims=2,
321
+ context_dim=None,
322
+ use_scale_shift_norm=False,
323
+ resblock_updown=False,
324
+ num_heads=-1,
325
+ num_head_channels=-1,
326
+ transformer_depth=1,
327
+ use_linear=False,
328
+ use_checkpoint=False,
329
+ temporal_conv=False,
330
+ tempspatial_aware=False,
331
+ temporal_attention=True,
332
+ use_relative_position=True,
333
+ use_causal_attention=False,
334
+ temporal_length=None,
335
+ use_fp16=False,
336
+ addition_attention=False,
337
+ temporal_selfatt_only=True,
338
+ image_cross_attention=False,
339
+ image_cross_attention_scale_learnable=False,
340
+ default_fs=4,
341
+ fs_condition=False,
342
+ ):
343
+ super(UNetModel, self).__init__()
344
+ if num_heads == -1:
345
+ assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
346
+ if num_head_channels == -1:
347
+ assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
348
+
349
+ self.in_channels = in_channels
350
+ self.model_channels = model_channels
351
+ self.out_channels = out_channels
352
+ self.num_res_blocks = num_res_blocks
353
+ self.attention_resolutions = attention_resolutions
354
+ self.dropout = dropout
355
+ self.channel_mult = channel_mult
356
+ self.conv_resample = conv_resample
357
+ self.temporal_attention = temporal_attention
358
+ time_embed_dim = model_channels * 4
359
+ self.use_checkpoint = use_checkpoint
360
+ self.dtype = torch.float16 if use_fp16 else torch.float32
361
+ temporal_self_att_only = True
362
+ self.addition_attention = addition_attention
363
+ self.temporal_length = temporal_length
364
+ self.image_cross_attention = image_cross_attention
365
+ self.image_cross_attention_scale_learnable = image_cross_attention_scale_learnable
366
+ self.default_fs = default_fs
367
+ self.fs_condition = fs_condition
368
+
369
+ ## Time embedding blocks
370
+ self.time_embed = nn.Sequential(
371
+ linear(model_channels, time_embed_dim),
372
+ nn.SiLU(),
373
+ linear(time_embed_dim, time_embed_dim),
374
+ )
375
+ if fs_condition:
376
+ self.fps_embedding = nn.Sequential(
377
+ linear(model_channels, time_embed_dim),
378
+ nn.SiLU(),
379
+ linear(time_embed_dim, time_embed_dim),
380
+ )
381
+ nn.init.zeros_(self.fps_embedding[-1].weight)
382
+ nn.init.zeros_(self.fps_embedding[-1].bias)
383
+ ## Input Block
384
+ self.input_blocks = nn.ModuleList(
385
+ [
386
+ TimestepEmbedSequential(conv_nd(dims, in_channels, model_channels, 3, padding=1))
387
+ ]
388
+ )
389
+ if self.addition_attention:
390
+ self.init_attn=TimestepEmbedSequential(
391
+ TemporalTransformer(
392
+ model_channels,
393
+ n_heads=8,
394
+ d_head=num_head_channels,
395
+ depth=transformer_depth,
396
+ context_dim=context_dim,
397
+ use_checkpoint=use_checkpoint, only_self_att=temporal_selfatt_only,
398
+ causal_attention=False, relative_position=use_relative_position,
399
+ temporal_length=temporal_length))
400
+
401
+ input_block_chans = [model_channels]
402
+ ch = model_channels
403
+ ds = 1
404
+ for level, mult in enumerate(channel_mult):
405
+ for _ in range(num_res_blocks):
406
+ layers = [
407
+ ResBlock(ch, time_embed_dim, dropout,
408
+ out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint,
409
+ use_scale_shift_norm=use_scale_shift_norm, tempspatial_aware=tempspatial_aware,
410
+ use_temporal_conv=temporal_conv
411
+ )
412
+ ]
413
+ ch = mult * model_channels
414
+ if ds in attention_resolutions:
415
+ if num_head_channels == -1:
416
+ dim_head = ch // num_heads
417
+ else:
418
+ num_heads = ch // num_head_channels
419
+ dim_head = num_head_channels
420
+ layers.append(
421
+ SpatialTransformer(ch, num_heads, dim_head,
422
+ depth=transformer_depth, context_dim=context_dim, use_linear=use_linear,
423
+ use_checkpoint=use_checkpoint, disable_self_attn=False,
424
+ video_length=temporal_length, image_cross_attention=self.image_cross_attention,
425
+ image_cross_attention_scale_learnable=self.image_cross_attention_scale_learnable,
426
+ )
427
+ )
428
+ if self.temporal_attention:
429
+ layers.append(
430
+ TemporalTransformer(ch, num_heads, dim_head,
431
+ depth=transformer_depth, context_dim=context_dim, use_linear=use_linear,
432
+ use_checkpoint=use_checkpoint, only_self_att=temporal_self_att_only,
433
+ causal_attention=use_causal_attention, relative_position=use_relative_position,
434
+ temporal_length=temporal_length
435
+ )
436
+ )
437
+ self.input_blocks.append(TimestepEmbedSequential(*layers))
438
+ input_block_chans.append(ch)
439
+ if level != len(channel_mult) - 1:
440
+ out_ch = ch
441
+ self.input_blocks.append(
442
+ TimestepEmbedSequential(
443
+ ResBlock(ch, time_embed_dim, dropout,
444
+ out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint,
445
+ use_scale_shift_norm=use_scale_shift_norm,
446
+ down=True
447
+ )
448
+ if resblock_updown
449
+ else Downsample(ch, conv_resample, dims=dims, out_channels=out_ch)
450
+ )
451
+ )
452
+ ch = out_ch
453
+ input_block_chans.append(ch)
454
+ ds *= 2
455
+
456
+ if num_head_channels == -1:
457
+ dim_head = ch // num_heads
458
+ else:
459
+ num_heads = ch // num_head_channels
460
+ dim_head = num_head_channels
461
+ layers = [
462
+ ResBlock(ch, time_embed_dim, dropout,
463
+ dims=dims, use_checkpoint=use_checkpoint,
464
+ use_scale_shift_norm=use_scale_shift_norm, tempspatial_aware=tempspatial_aware,
465
+ use_temporal_conv=temporal_conv
466
+ ),
467
+ SpatialTransformer(ch, num_heads, dim_head,
468
+ depth=transformer_depth, context_dim=context_dim, use_linear=use_linear,
469
+ use_checkpoint=use_checkpoint, disable_self_attn=False, video_length=temporal_length,
470
+ image_cross_attention=self.image_cross_attention,image_cross_attention_scale_learnable=self.image_cross_attention_scale_learnable
471
+ )
472
+ ]
473
+ if self.temporal_attention:
474
+ layers.append(
475
+ TemporalTransformer(ch, num_heads, dim_head,
476
+ depth=transformer_depth, context_dim=context_dim, use_linear=use_linear,
477
+ use_checkpoint=use_checkpoint, only_self_att=temporal_self_att_only,
478
+ causal_attention=use_causal_attention, relative_position=use_relative_position,
479
+ temporal_length=temporal_length
480
+ )
481
+ )
482
+ layers.append(
483
+ ResBlock(ch, time_embed_dim, dropout,
484
+ dims=dims, use_checkpoint=use_checkpoint,
485
+ use_scale_shift_norm=use_scale_shift_norm, tempspatial_aware=tempspatial_aware,
486
+ use_temporal_conv=temporal_conv
487
+ )
488
+ )
489
+
490
+ ## Middle Block
491
+ self.middle_block = TimestepEmbedSequential(*layers)
492
+
493
+ ## Output Block
494
+ self.output_blocks = nn.ModuleList([])
495
+ for level, mult in list(enumerate(channel_mult))[::-1]:
496
+ for i in range(num_res_blocks + 1):
497
+ ich = input_block_chans.pop()
498
+ layers = [
499
+ ResBlock(ch + ich, time_embed_dim, dropout,
500
+ out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint,
501
+ use_scale_shift_norm=use_scale_shift_norm, tempspatial_aware=tempspatial_aware,
502
+ use_temporal_conv=temporal_conv
503
+ )
504
+ ]
505
+ ch = model_channels * mult
506
+ if ds in attention_resolutions:
507
+ if num_head_channels == -1:
508
+ dim_head = ch // num_heads
509
+ else:
510
+ num_heads = ch // num_head_channels
511
+ dim_head = num_head_channels
512
+ layers.append(
513
+ SpatialTransformer(ch, num_heads, dim_head,
514
+ depth=transformer_depth, context_dim=context_dim, use_linear=use_linear,
515
+ use_checkpoint=use_checkpoint, disable_self_attn=False, video_length=temporal_length,
516
+ image_cross_attention=self.image_cross_attention,image_cross_attention_scale_learnable=self.image_cross_attention_scale_learnable
517
+ )
518
+ )
519
+ if self.temporal_attention:
520
+ layers.append(
521
+ TemporalTransformer(ch, num_heads, dim_head,
522
+ depth=transformer_depth, context_dim=context_dim, use_linear=use_linear,
523
+ use_checkpoint=use_checkpoint, only_self_att=temporal_self_att_only,
524
+ causal_attention=use_causal_attention, relative_position=use_relative_position,
525
+ temporal_length=temporal_length
526
+ )
527
+ )
528
+ if level and i == num_res_blocks:
529
+ out_ch = ch
530
+ layers.append(
531
+ ResBlock(ch, time_embed_dim, dropout,
532
+ out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint,
533
+ use_scale_shift_norm=use_scale_shift_norm,
534
+ up=True
535
+ )
536
+ if resblock_updown
537
+ else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
538
+ )
539
+ ds //= 2
540
+ self.output_blocks.append(TimestepEmbedSequential(*layers))
541
+
542
+ self.out = nn.Sequential(
543
+ normalization(ch),
544
+ nn.SiLU(),
545
+ zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
546
+ )
547
+
548
+ def forward(self, x, timesteps, context=None, features_adapter=None, fs=None, **kwargs):
549
+ b,_,t,_,_ = x.shape
550
+ t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False).type(x.dtype)
551
+ emb = self.time_embed(t_emb)
552
+
553
+ ## repeat t times for context [(b t) 77 768] & time embedding
554
+ ## check if we use per-frame image conditioning
555
+ _, l_context, _ = context.shape
556
+ if l_context == 77 + t*16: ## !!! HARD CODE here
557
+ context_text, context_img = context[:,:77,:], context[:,77:,:]
558
+ context_text = context_text.repeat_interleave(repeats=t, dim=0)
559
+ context_img = rearrange(context_img, 'b (t l) c -> (b t) l c', t=t)
560
+ context = torch.cat([context_text, context_img], dim=1)
561
+ else:
562
+ context = context.repeat_interleave(repeats=t, dim=0)
563
+ emb = emb.repeat_interleave(repeats=t, dim=0)
564
+
565
+ ## always in shape (b t) c h w, except for temporal layer
566
+ x = rearrange(x, 'b c t h w -> (b t) c h w')
567
+
568
+ ## combine emb
569
+ if self.fs_condition:
570
+ if fs is None:
571
+ fs = torch.tensor(
572
+ [self.default_fs] * b, dtype=torch.long, device=x.device)
573
+ fs_emb = timestep_embedding(fs, self.model_channels, repeat_only=False).type(x.dtype)
574
+
575
+ fs_embed = self.fps_embedding(fs_emb)
576
+ fs_embed = fs_embed.repeat_interleave(repeats=t, dim=0)
577
+ emb = emb + fs_embed
578
+
579
+ h = x.type(self.dtype)
580
+ adapter_idx = 0
581
+ hs = []
582
+ for id, module in enumerate(self.input_blocks):
583
+ h = module(h, emb, context=context, batch_size=b)
584
+ if id ==0 and self.addition_attention:
585
+ h = self.init_attn(h, emb, context=context, batch_size=b)
586
+ ## plug-in adapter features
587
+ if ((id+1)%3 == 0) and features_adapter is not None:
588
+ h = h + features_adapter[adapter_idx]
589
+ adapter_idx += 1
590
+ hs.append(h)
591
+ if features_adapter is not None:
592
+ assert len(features_adapter)==adapter_idx, 'Wrong features_adapter'
593
+
594
+ h = self.middle_block(h, emb, context=context, batch_size=b)
595
+ for module in self.output_blocks:
596
+ h = torch.cat([h, hs.pop()], dim=1)
597
+ h = module(h, emb, context=context, batch_size=b)
598
+ h = h.type(x.dtype)
599
+ y = self.out(h)
600
+
601
+ # reshape back to (b c t h w)
602
+ y = rearrange(y, '(b t) c h w -> b c t h w', b=b)
603
+ return y
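A minimal instantiation sketch for the UNetModel above (the hyperparameters are small illustrative values, not the shipped settings in configs/): the model denoises a (B, C, T, H, W) latent clip, repeating the text context and timestep embedding per frame. With per-frame image conditioning, the context would instead carry 77 text tokens plus 16 image tokens per frame, which is what the hard-coded 77 + t*16 branch in forward detects.

    import torch
    from lvdm.modules.networks.openaimodel3d import UNetModel

    unet = UNetModel(
        in_channels=4, model_channels=64, out_channels=4,
        num_res_blocks=1, attention_resolutions=[2],
        channel_mult=(1, 2), num_head_channels=32,
        context_dim=128, temporal_length=4, temporal_attention=True,
    )
    x = torch.randn(1, 4, 4, 16, 16)      # (B, C, T, H, W) latent video
    t = torch.randint(0, 1000, (1,))      # diffusion timesteps
    context = torch.randn(1, 77, 128)     # text-only conditioning (77 tokens)
    with torch.no_grad():
        eps = unet(x, t, context=context)
    print(eps.shape)                      # torch.Size([1, 4, 4, 16, 16])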
lvdm/modules/x_transformer.py ADDED
@@ -0,0 +1,639 @@
1
+ """shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers"""
2
+ from functools import partial
3
+ from inspect import isfunction
4
+ from collections import namedtuple
5
+ from einops import rearrange, repeat
6
+ import torch
7
+ from torch import nn, einsum
8
+ import torch.nn.functional as F
9
+
10
+ # constants
11
+ DEFAULT_DIM_HEAD = 64
12
+
13
+ Intermediates = namedtuple('Intermediates', [
14
+ 'pre_softmax_attn',
15
+ 'post_softmax_attn'
16
+ ])
17
+
18
+ LayerIntermediates = namedtuple('Intermediates', [
19
+ 'hiddens',
20
+ 'attn_intermediates'
21
+ ])
22
+
23
+
24
+ class AbsolutePositionalEmbedding(nn.Module):
25
+ def __init__(self, dim, max_seq_len):
26
+ super().__init__()
27
+ self.emb = nn.Embedding(max_seq_len, dim)
28
+ self.init_()
29
+
30
+ def init_(self):
31
+ nn.init.normal_(self.emb.weight, std=0.02)
32
+
33
+ def forward(self, x):
34
+ n = torch.arange(x.shape[1], device=x.device)
35
+ return self.emb(n)[None, :, :]
36
+
37
+
38
+ class FixedPositionalEmbedding(nn.Module):
39
+ def __init__(self, dim):
40
+ super().__init__()
41
+ inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
42
+ self.register_buffer('inv_freq', inv_freq)
43
+
44
+ def forward(self, x, seq_dim=1, offset=0):
45
+ t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset
46
+ sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq)
47
+ emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
48
+ return emb[None, :, :]
49
+
50
+
51
+ # helpers
52
+
53
+ def exists(val):
54
+ return val is not None
55
+
56
+
57
+ def default(val, d):
58
+ if exists(val):
59
+ return val
60
+ return d() if isfunction(d) else d
61
+
62
+
63
+ def always(val):
64
+ def inner(*args, **kwargs):
65
+ return val
66
+ return inner
67
+
68
+
69
+ def not_equals(val):
70
+ def inner(x):
71
+ return x != val
72
+ return inner
73
+
74
+
75
+ def equals(val):
76
+ def inner(x):
77
+ return x == val
78
+ return inner
79
+
80
+
81
+ def max_neg_value(tensor):
82
+ return -torch.finfo(tensor.dtype).max
83
+
84
+
85
+ # keyword argument helpers
86
+
87
+ def pick_and_pop(keys, d):
88
+ values = list(map(lambda key: d.pop(key), keys))
89
+ return dict(zip(keys, values))
90
+
91
+
92
+ def group_dict_by_key(cond, d):
93
+ return_val = [dict(), dict()]
94
+ for key in d.keys():
95
+ match = bool(cond(key))
96
+ ind = int(not match)
97
+ return_val[ind][key] = d[key]
98
+ return (*return_val,)
99
+
100
+
101
+ def string_begins_with(prefix, str):
102
+ return str.startswith(prefix)
103
+
104
+
105
+ def group_by_key_prefix(prefix, d):
106
+ return group_dict_by_key(partial(string_begins_with, prefix), d)
107
+
108
+
109
+ def groupby_prefix_and_trim(prefix, d):
110
+ kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
111
+ kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
112
+ return kwargs_without_prefix, kwargs
113
+
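A quick illustration of the keyword-splitting helpers above (hypothetical values; the function is assumed importable from this module): kwargs that carry the prefix are split off and returned with the prefix stripped.

    from lvdm.modules.x_transformer import groupby_prefix_and_trim

    attn_kwargs, rest = groupby_prefix_and_trim('attn_', {'attn_dim_head': 64, 'ff_mult': 4})
    print(attn_kwargs)   # {'dim_head': 64}
    print(rest)          # {'ff_mult': 4}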
114
+
115
+ # classes
116
+ class Scale(nn.Module):
117
+ def __init__(self, value, fn):
118
+ super().__init__()
119
+ self.value = value
120
+ self.fn = fn
121
+
122
+ def forward(self, x, **kwargs):
123
+ x, *rest = self.fn(x, **kwargs)
124
+ return (x * self.value, *rest)
125
+
126
+
127
+ class Rezero(nn.Module):
128
+ def __init__(self, fn):
129
+ super().__init__()
130
+ self.fn = fn
131
+ self.g = nn.Parameter(torch.zeros(1))
132
+
133
+ def forward(self, x, **kwargs):
134
+ x, *rest = self.fn(x, **kwargs)
135
+ return (x * self.g, *rest)
136
+
137
+
138
+ class ScaleNorm(nn.Module):
139
+ def __init__(self, dim, eps=1e-5):
140
+ super().__init__()
141
+ self.scale = dim ** -0.5
142
+ self.eps = eps
143
+ self.g = nn.Parameter(torch.ones(1))
144
+
145
+ def forward(self, x):
146
+ norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
147
+ return x / norm.clamp(min=self.eps) * self.g
148
+
149
+
150
+ class RMSNorm(nn.Module):
151
+ def __init__(self, dim, eps=1e-8):
152
+ super().__init__()
153
+ self.scale = dim ** -0.5
154
+ self.eps = eps
155
+ self.g = nn.Parameter(torch.ones(dim))
156
+
157
+ def forward(self, x):
158
+ norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
159
+ return x / norm.clamp(min=self.eps) * self.g
160
+
161
+
162
+ class Residual(nn.Module):
163
+ def forward(self, x, residual):
164
+ return x + residual
165
+
166
+
167
+ class GRUGating(nn.Module):
168
+ def __init__(self, dim):
169
+ super().__init__()
170
+ self.gru = nn.GRUCell(dim, dim)
171
+
172
+ def forward(self, x, residual):
173
+ gated_output = self.gru(
174
+ rearrange(x, 'b n d -> (b n) d'),
175
+ rearrange(residual, 'b n d -> (b n) d')
176
+ )
177
+
178
+ return gated_output.reshape_as(x)
179
+
180
+
181
+ # feedforward
182
+
183
+ class GEGLU(nn.Module):
184
+ def __init__(self, dim_in, dim_out):
185
+ super().__init__()
186
+ self.proj = nn.Linear(dim_in, dim_out * 2)
187
+
188
+ def forward(self, x):
189
+ x, gate = self.proj(x).chunk(2, dim=-1)
190
+ return x * F.gelu(gate)
191
+
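A shape sketch for GEGLU (illustrative dimensions): the projection doubles the width, then one half gates the other, leaving dim_out channels.

    import torch
    from lvdm.modules.x_transformer import GEGLU

    geglu = GEGLU(dim_in=128, dim_out=256)
    x = torch.randn(2, 10, 128)
    print(geglu(x).shape)   # torch.Size([2, 10, 256])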
192
+
193
+ class FeedForward(nn.Module):
194
+ def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
195
+ super().__init__()
196
+ inner_dim = int(dim * mult)
197
+ dim_out = default(dim_out, dim)
198
+ project_in = nn.Sequential(
199
+ nn.Linear(dim, inner_dim),
200
+ nn.GELU()
201
+ ) if not glu else GEGLU(dim, inner_dim)
202
+
203
+ self.net = nn.Sequential(
204
+ project_in,
205
+ nn.Dropout(dropout),
206
+ nn.Linear(inner_dim, dim_out)
207
+ )
208
+
209
+ def forward(self, x):
210
+ return self.net(x)
211
+
212
+
213
+ # attention.
214
+ class Attention(nn.Module):
215
+ def __init__(
216
+ self,
217
+ dim,
218
+ dim_head=DEFAULT_DIM_HEAD,
219
+ heads=8,
220
+ causal=False,
221
+ mask=None,
222
+ talking_heads=False,
223
+ sparse_topk=None,
224
+ use_entmax15=False,
225
+ num_mem_kv=0,
226
+ dropout=0.,
227
+ on_attn=False
228
+ ):
229
+ super().__init__()
230
+ if use_entmax15:
231
+ raise NotImplementedError("Check out entmax activation instead of softmax activation!")
232
+ self.scale = dim_head ** -0.5
233
+ self.heads = heads
234
+ self.causal = causal
235
+ self.mask = mask
236
+
237
+ inner_dim = dim_head * heads
238
+
239
+ self.to_q = nn.Linear(dim, inner_dim, bias=False)
240
+ self.to_k = nn.Linear(dim, inner_dim, bias=False)
241
+ self.to_v = nn.Linear(dim, inner_dim, bias=False)
242
+ self.dropout = nn.Dropout(dropout)
243
+
244
+ # talking heads
245
+ self.talking_heads = talking_heads
246
+ if talking_heads:
247
+ self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads))
248
+ self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads))
249
+
250
+ # explicit topk sparse attention
251
+ self.sparse_topk = sparse_topk
252
+
253
+ # entmax
254
+ #self.attn_fn = entmax15 if use_entmax15 else F.softmax
255
+ self.attn_fn = F.softmax
256
+
257
+ # add memory key / values
258
+ self.num_mem_kv = num_mem_kv
259
+ if num_mem_kv > 0:
260
+ self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
261
+ self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
262
+
263
+ # attention on attention
264
+ self.attn_on_attn = on_attn
265
+ self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim)
266
+
267
+ def forward(
268
+ self,
269
+ x,
270
+ context=None,
271
+ mask=None,
272
+ context_mask=None,
273
+ rel_pos=None,
274
+ sinusoidal_emb=None,
275
+ prev_attn=None,
276
+ mem=None
277
+ ):
278
+ b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device
279
+ kv_input = default(context, x)
280
+
281
+ q_input = x
282
+ k_input = kv_input
283
+ v_input = kv_input
284
+
285
+ if exists(mem):
286
+ k_input = torch.cat((mem, k_input), dim=-2)
287
+ v_input = torch.cat((mem, v_input), dim=-2)
288
+
289
+ if exists(sinusoidal_emb):
290
+ # in shortformer, the query would start at a position offset depending on the past cached memory
291
+ offset = k_input.shape[-2] - q_input.shape[-2]
292
+ q_input = q_input + sinusoidal_emb(q_input, offset=offset)
293
+ k_input = k_input + sinusoidal_emb(k_input)
294
+
295
+ q = self.to_q(q_input)
296
+ k = self.to_k(k_input)
297
+ v = self.to_v(v_input)
298
+
299
+ q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))
300
+
301
+ input_mask = None
302
+ if any(map(exists, (mask, context_mask))):
303
+ q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool())
304
+ k_mask = q_mask if not exists(context) else context_mask
305
+ k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool())
306
+ q_mask = rearrange(q_mask, 'b i -> b () i ()')
307
+ k_mask = rearrange(k_mask, 'b j -> b () () j')
308
+ input_mask = q_mask * k_mask
309
+
310
+ if self.num_mem_kv > 0:
311
+ mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v))
312
+ k = torch.cat((mem_k, k), dim=-2)
313
+ v = torch.cat((mem_v, v), dim=-2)
314
+ if exists(input_mask):
315
+ input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True)
316
+
317
+ dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
318
+ mask_value = max_neg_value(dots)
319
+
320
+ if exists(prev_attn):
321
+ dots = dots + prev_attn
322
+
323
+ pre_softmax_attn = dots
324
+
325
+ if talking_heads:
326
+ dots = einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous()
327
+
328
+ if exists(rel_pos):
329
+ dots = rel_pos(dots)
330
+
331
+ if exists(input_mask):
332
+ dots.masked_fill_(~input_mask, mask_value)
333
+ del input_mask
334
+
335
+ if self.causal:
336
+ i, j = dots.shape[-2:]
337
+ r = torch.arange(i, device=device)
338
+ mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j')
339
+ mask = F.pad(mask, (j - i, 0), value=False)
340
+ dots.masked_fill_(mask, mask_value)
341
+ del mask
342
+
343
+ if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]:
344
+ top, _ = dots.topk(self.sparse_topk, dim=-1)
345
+ vk = top[..., -1].unsqueeze(-1).expand_as(dots)
346
+ mask = dots < vk
347
+ dots.masked_fill_(mask, mask_value)
348
+ del mask
349
+
350
+ attn = self.attn_fn(dots, dim=-1)
351
+ post_softmax_attn = attn
352
+
353
+ attn = self.dropout(attn)
354
+
355
+ if talking_heads:
356
+ attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous()
357
+
358
+ out = einsum('b h i j, b h j d -> b h i d', attn, v)
359
+ out = rearrange(out, 'b h n d -> b n (h d)')
360
+
361
+ intermediates = Intermediates(
362
+ pre_softmax_attn=pre_softmax_attn,
363
+ post_softmax_attn=post_softmax_attn
364
+ )
365
+
366
+ return self.to_out(out), intermediates
367
+
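A standalone sketch of the Attention module above (hypothetical sizes): forward returns the projected output together with an Intermediates namedtuple holding the pre- and post-softmax attention maps.

    import torch
    from lvdm.modules.x_transformer import Attention

    attn = Attention(dim=128, dim_head=32, heads=4)
    x = torch.randn(2, 10, 128)
    out, inter = attn(x)
    print(out.shape)                       # torch.Size([2, 10, 128])
    print(inter.post_softmax_attn.shape)   # torch.Size([2, 4, 10, 10])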
368
+
369
+ class AttentionLayers(nn.Module):
370
+ def __init__(
371
+ self,
372
+ dim,
373
+ depth,
374
+ heads=8,
375
+ causal=False,
376
+ cross_attend=False,
377
+ only_cross=False,
378
+ use_scalenorm=False,
379
+ use_rmsnorm=False,
380
+ use_rezero=False,
381
+ rel_pos_num_buckets=32,
382
+ rel_pos_max_distance=128,
383
+ position_infused_attn=False,
384
+ custom_layers=None,
385
+ sandwich_coef=None,
386
+ par_ratio=None,
387
+ residual_attn=False,
388
+ cross_residual_attn=False,
389
+ macaron=False,
390
+ pre_norm=True,
391
+ gate_residual=False,
392
+ **kwargs
393
+ ):
394
+ super().__init__()
395
+ ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
396
+ attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs)
397
+
398
+ dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
399
+
400
+ self.dim = dim
401
+ self.depth = depth
402
+ self.layers = nn.ModuleList([])
403
+
404
+ self.has_pos_emb = position_infused_attn
405
+ self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None
406
+ self.rotary_pos_emb = always(None)
407
+
408
+ assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
409
+ self.rel_pos = None
410
+
411
+ self.pre_norm = pre_norm
412
+
413
+ self.residual_attn = residual_attn
414
+ self.cross_residual_attn = cross_residual_attn
415
+
416
+ norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm
417
+ norm_class = RMSNorm if use_rmsnorm else norm_class
418
+ norm_fn = partial(norm_class, dim)
419
+
420
+ norm_fn = nn.Identity if use_rezero else norm_fn
421
+ branch_fn = Rezero if use_rezero else None
422
+
423
+ if cross_attend and not only_cross:
424
+ default_block = ('a', 'c', 'f')
425
+ elif cross_attend and only_cross:
426
+ default_block = ('c', 'f')
427
+ else:
428
+ default_block = ('a', 'f')
429
+
430
+ if macaron:
431
+ default_block = ('f',) + default_block
432
+
433
+ if exists(custom_layers):
434
+ layer_types = custom_layers
435
+ elif exists(par_ratio):
436
+ par_depth = depth * len(default_block)
437
+ assert 1 < par_ratio <= par_depth, 'par ratio out of range'
438
+ default_block = tuple(filter(not_equals('f'), default_block))
439
+ par_attn = par_depth // par_ratio
440
+ depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
441
+ par_width = (depth_cut + depth_cut // par_attn) // par_attn
442
+ assert len(default_block) <= par_width, 'default block is too large for par_ratio'
443
+ par_block = default_block + ('f',) * (par_width - len(default_block))
444
+ par_head = par_block * par_attn
445
+ layer_types = par_head + ('f',) * (par_depth - len(par_head))
446
+ elif exists(sandwich_coef):
447
+ assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth'
448
+ layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
449
+ else:
450
+ layer_types = default_block * depth
451
+
452
+ self.layer_types = layer_types
453
+ self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
454
+
455
+ for layer_type in self.layer_types:
456
+ if layer_type == 'a':
457
+ layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs)
458
+ elif layer_type == 'c':
459
+ layer = Attention(dim, heads=heads, **attn_kwargs)
460
+ elif layer_type == 'f':
461
+ layer = FeedForward(dim, **ff_kwargs)
462
+ layer = layer if not macaron else Scale(0.5, layer)
463
+ else:
464
+ raise Exception(f'invalid layer type {layer_type}')
465
+
466
+ if isinstance(layer, Attention) and exists(branch_fn):
467
+ layer = branch_fn(layer)
468
+
469
+ if gate_residual:
470
+ residual_fn = GRUGating(dim)
471
+ else:
472
+ residual_fn = Residual()
473
+
474
+ self.layers.append(nn.ModuleList([
475
+ norm_fn(),
476
+ layer,
477
+ residual_fn
478
+ ]))
479
+
480
+ def forward(
481
+ self,
482
+ x,
483
+ context=None,
484
+ mask=None,
485
+ context_mask=None,
486
+ mems=None,
487
+ return_hiddens=False
488
+ ):
489
+ hiddens = []
490
+ intermediates = []
491
+ prev_attn = None
492
+ prev_cross_attn = None
493
+
494
+ mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
495
+
496
+ for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)):
497
+ is_last = ind == (len(self.layers) - 1)
498
+
499
+ if layer_type == 'a':
500
+ hiddens.append(x)
501
+ layer_mem = mems.pop(0)
502
+
503
+ residual = x
504
+
505
+ if self.pre_norm:
506
+ x = norm(x)
507
+
508
+ if layer_type == 'a':
509
+ out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos,
510
+ prev_attn=prev_attn, mem=layer_mem)
511
+ elif layer_type == 'c':
512
+ out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn)
513
+ elif layer_type == 'f':
514
+ out = block(x)
515
+
516
+ x = residual_fn(out, residual)
517
+
518
+ if layer_type in ('a', 'c'):
519
+ intermediates.append(inter)
520
+
521
+ if layer_type == 'a' and self.residual_attn:
522
+ prev_attn = inter.pre_softmax_attn
523
+ elif layer_type == 'c' and self.cross_residual_attn:
524
+ prev_cross_attn = inter.pre_softmax_attn
525
+
526
+ if not self.pre_norm and not is_last:
527
+ x = norm(x)
528
+
529
+ if return_hiddens:
530
+ intermediates = LayerIntermediates(
531
+ hiddens=hiddens,
532
+ attn_intermediates=intermediates
533
+ )
534
+
535
+ return x, intermediates
536
+
537
+ return x
538
+
539
+
540
+ class Encoder(AttentionLayers):
541
+ def __init__(self, **kwargs):
542
+ assert 'causal' not in kwargs, 'cannot set causality on encoder'
543
+ super().__init__(causal=False, **kwargs)
544
+
545
+
546
+
547
+ class TransformerWrapper(nn.Module):
548
+ def __init__(
549
+ self,
550
+ *,
551
+ num_tokens,
552
+ max_seq_len,
553
+ attn_layers,
554
+ emb_dim=None,
555
+ max_mem_len=0.,
556
+ emb_dropout=0.,
557
+ num_memory_tokens=None,
558
+ tie_embedding=False,
559
+ use_pos_emb=True
560
+ ):
561
+ super().__init__()
562
+ assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
563
+
564
+ dim = attn_layers.dim
565
+ emb_dim = default(emb_dim, dim)
566
+
567
+ self.max_seq_len = max_seq_len
568
+ self.max_mem_len = max_mem_len
569
+ self.num_tokens = num_tokens
570
+
571
+ self.token_emb = nn.Embedding(num_tokens, emb_dim)
572
+ self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if (
573
+ use_pos_emb and not attn_layers.has_pos_emb) else always(0)
574
+ self.emb_dropout = nn.Dropout(emb_dropout)
575
+
576
+ self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
577
+ self.attn_layers = attn_layers
578
+ self.norm = nn.LayerNorm(dim)
579
+
580
+ self.init_()
581
+
582
+ self.to_logits = nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t()
583
+
584
+ # memory tokens (like [cls]) from Memory Transformers paper
585
+ num_memory_tokens = default(num_memory_tokens, 0)
586
+ self.num_memory_tokens = num_memory_tokens
587
+ if num_memory_tokens > 0:
588
+ self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
589
+
590
+ # let funnel encoder know number of memory tokens, if specified
591
+ if hasattr(attn_layers, 'num_memory_tokens'):
592
+ attn_layers.num_memory_tokens = num_memory_tokens
593
+
594
+ def init_(self):
595
+ nn.init.normal_(self.token_emb.weight, std=0.02)
596
+
597
+ def forward(
598
+ self,
599
+ x,
600
+ return_embeddings=False,
601
+ mask=None,
602
+ return_mems=False,
603
+ return_attn=False,
604
+ mems=None,
605
+ **kwargs
606
+ ):
607
+ b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens
608
+ x = self.token_emb(x)
609
+ x += self.pos_emb(x)
610
+ x = self.emb_dropout(x)
611
+
612
+ x = self.project_emb(x)
613
+
614
+ if num_mem > 0:
615
+ mem = repeat(self.memory_tokens, 'n d -> b n d', b=b)
616
+ x = torch.cat((mem, x), dim=1)
617
+
618
+ # auto-handle masking after appending memory tokens
619
+ if exists(mask):
620
+ mask = F.pad(mask, (num_mem, 0), value=True)
621
+
622
+ x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs)
623
+ x = self.norm(x)
624
+
625
+ mem, x = x[:, :num_mem], x[:, num_mem:]
626
+
627
+ out = self.to_logits(x) if not return_embeddings else x
628
+
629
+ if return_mems:
630
+ hiddens = intermediates.hiddens
631
+ new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens
632
+ new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
633
+ return out, new_mems
634
+
635
+ if return_attn:
636
+ attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
637
+ return out, attn_maps
638
+
639
+ return out
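An end-to-end sketch for the wrapper above (hypothetical sizes; both classes are assumed importable from lvdm.modules.x_transformer): an Encoder stack is wrapped to map token ids to logits over the vocabulary.

    import torch
    from lvdm.modules.x_transformer import Encoder, TransformerWrapper

    model = TransformerWrapper(
        num_tokens=1000, max_seq_len=64,
        attn_layers=Encoder(dim=128, depth=2, heads=4),
    )
    tokens = torch.randint(0, 1000, (2, 16))
    logits = model(tokens)
    print(logits.shape)   # torch.Size([2, 16, 1000])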
prompts/512_interp/smile_01.png ADDED
prompts/512_interp/smile_02.png ADDED