fffiloni commited on
Commit
cdcfdd8
1 Parent(s): 4d7b04e

Upload 15 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ assets/demo_video.mp4 filter=lfs diff=lfs merge=lfs -text
LICENSE.txt ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
allegro/models/transformers/block.py ADDED
@@ -0,0 +1,1195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Adapted from Open-Sora-Plan
2
+
3
+ # This source code is licensed under the license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ # --------------------------------------------------------
6
+ # References:
7
+ # Open-Sora-Plan: https://github.com/PKU-YuanGroup/Open-Sora-Plan
8
+ # --------------------------------------------------------
9
+
10
+
11
+ from importlib import import_module
12
+ from typing import Any, Callable, Dict, Optional, Tuple
13
+
14
+ import numpy as np
15
+ import torch
16
+ import collections
17
+ import torch.nn.functional as F
18
+ from torch.nn.attention import SDPBackend, sdpa_kernel
19
+ from diffusers.models.activations import GEGLU, GELU, ApproximateGELU
20
+ from diffusers.models.attention_processor import (
21
+ AttnAddedKVProcessor,
22
+ AttnAddedKVProcessor2_0,
23
+ AttnProcessor,
24
+ CustomDiffusionAttnProcessor,
25
+ CustomDiffusionAttnProcessor2_0,
26
+ CustomDiffusionXFormersAttnProcessor,
27
+ LoRAAttnAddedKVProcessor,
28
+ LoRAAttnProcessor,
29
+ LoRAAttnProcessor2_0,
30
+ LoRAXFormersAttnProcessor,
31
+ SlicedAttnAddedKVProcessor,
32
+ SlicedAttnProcessor,
33
+ SpatialNorm,
34
+ XFormersAttnAddedKVProcessor,
35
+ XFormersAttnProcessor,
36
+ )
37
+ from diffusers.models.embeddings import SinusoidalPositionalEmbedding
38
+ from diffusers.models.normalization import AdaLayerNorm, AdaLayerNormZero
39
+ from diffusers.utils import USE_PEFT_BACKEND, deprecate, is_xformers_available
40
+ from diffusers.utils.torch_utils import maybe_allow_in_graph
41
+ from torch import nn
42
+
43
+ from allegro.models.transformers.rope import RoPE3D, PositionGetter3D
44
+ from allegro.models.transformers.embedding import CombinedTimestepSizeEmbeddings
45
+
46
+ if is_xformers_available():
47
+ import xformers
48
+ import xformers.ops
49
+ else:
50
+ xformers = None
51
+
52
+ from diffusers.utils import logging
53
+
54
+ logger = logging.get_logger(__name__)
55
+
56
+
57
+ def to_2tuple(x):
58
+ if isinstance(x, collections.abc.Iterable):
59
+ return x
60
+ return (x, x)
61
+
62
+
63
+ @maybe_allow_in_graph
64
+ class Attention(nn.Module):
65
+ r"""
66
+ A cross attention layer.
67
+
68
+ Parameters:
69
+ query_dim (`int`):
70
+ The number of channels in the query.
71
+ cross_attention_dim (`int`, *optional*):
72
+ The number of channels in the encoder_hidden_states. If not given, defaults to `query_dim`.
73
+ heads (`int`, *optional*, defaults to 8):
74
+ The number of heads to use for multi-head attention.
75
+ dim_head (`int`, *optional*, defaults to 64):
76
+ The number of channels in each head.
77
+ dropout (`float`, *optional*, defaults to 0.0):
78
+ The dropout probability to use.
79
+ bias (`bool`, *optional*, defaults to False):
80
+ Set to `True` for the query, key, and value linear layers to contain a bias parameter.
81
+ upcast_attention (`bool`, *optional*, defaults to False):
82
+ Set to `True` to upcast the attention computation to `float32`.
83
+ upcast_softmax (`bool`, *optional*, defaults to False):
84
+ Set to `True` to upcast the softmax computation to `float32`.
85
+ cross_attention_norm (`str`, *optional*, defaults to `None`):
86
+ The type of normalization to use for the cross attention. Can be `None`, `layer_norm`, or `group_norm`.
87
+ cross_attention_norm_num_groups (`int`, *optional*, defaults to 32):
88
+ The number of groups to use for the group norm in the cross attention.
89
+ added_kv_proj_dim (`int`, *optional*, defaults to `None`):
90
+ The number of channels to use for the added key and value projections. If `None`, no projection is used.
91
+ norm_num_groups (`int`, *optional*, defaults to `None`):
92
+ The number of groups to use for the group norm in the attention.
93
+ spatial_norm_dim (`int`, *optional*, defaults to `None`):
94
+ The number of channels to use for the spatial normalization.
95
+ out_bias (`bool`, *optional*, defaults to `True`):
96
+ Set to `True` to use a bias in the output linear layer.
97
+ scale_qk (`bool`, *optional*, defaults to `True`):
98
+ Set to `True` to scale the query and key by `1 / sqrt(dim_head)`.
99
+ only_cross_attention (`bool`, *optional*, defaults to `False`):
100
+ Set to `True` to only use cross attention and not added_kv_proj_dim. Can only be set to `True` if
101
+ `added_kv_proj_dim` is not `None`.
102
+ eps (`float`, *optional*, defaults to 1e-5):
103
+ An additional value added to the denominator in group normalization that is used for numerical stability.
104
+ rescale_output_factor (`float`, *optional*, defaults to 1.0):
105
+ A factor to rescale the output by dividing it with this value.
106
+ residual_connection (`bool`, *optional*, defaults to `False`):
107
+ Set to `True` to add the residual connection to the output.
108
+ _from_deprecated_attn_block (`bool`, *optional*, defaults to `False`):
109
+ Set to `True` if the attention block is loaded from a deprecated state dict.
110
+ processor (`AttnProcessor`, *optional*, defaults to `None`):
111
+ The attention processor to use. If `None`, defaults to `AttnProcessor2_0` if `torch 2.x` is used and
112
+ `AttnProcessor` otherwise.
113
+ """
114
+
115
+ def __init__(
116
+ self,
117
+ query_dim: int,
118
+ cross_attention_dim: Optional[int] = None,
119
+ heads: int = 8,
120
+ dim_head: int = 64,
121
+ dropout: float = 0.0,
122
+ bias: bool = False,
123
+ upcast_attention: bool = False,
124
+ upcast_softmax: bool = False,
125
+ cross_attention_norm: Optional[str] = None,
126
+ cross_attention_norm_num_groups: int = 32,
127
+ added_kv_proj_dim: Optional[int] = None,
128
+ norm_num_groups: Optional[int] = None,
129
+ spatial_norm_dim: Optional[int] = None,
130
+ out_bias: bool = True,
131
+ scale_qk: bool = True,
132
+ only_cross_attention: bool = False,
133
+ eps: float = 1e-5,
134
+ rescale_output_factor: float = 1.0,
135
+ residual_connection: bool = False,
136
+ _from_deprecated_attn_block: bool = False,
137
+ processor: Optional["AttnProcessor"] = None,
138
+ attention_mode: str = "xformers",
139
+ use_rope: bool = False,
140
+ interpolation_scale_thw=None,
141
+ ):
142
+ super().__init__()
143
+ self.inner_dim = dim_head * heads
144
+ self.cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim
145
+ self.upcast_attention = upcast_attention
146
+ self.upcast_softmax = upcast_softmax
147
+ self.rescale_output_factor = rescale_output_factor
148
+ self.residual_connection = residual_connection
149
+ self.dropout = dropout
150
+ self.use_rope = use_rope
151
+
152
+ # we make use of this private variable to know whether this class is loaded
153
+ # with an deprecated state dict so that we can convert it on the fly
154
+ self._from_deprecated_attn_block = _from_deprecated_attn_block
155
+
156
+ self.scale_qk = scale_qk
157
+ self.scale = dim_head**-0.5 if self.scale_qk else 1.0
158
+
159
+ self.heads = heads
160
+ # for slice_size > 0 the attention score computation
161
+ # is split across the batch axis to save memory
162
+ # You can set slice_size with `set_attention_slice`
163
+ self.sliceable_head_dim = heads
164
+
165
+ self.added_kv_proj_dim = added_kv_proj_dim
166
+ self.only_cross_attention = only_cross_attention
167
+
168
+ if self.added_kv_proj_dim is None and self.only_cross_attention:
169
+ raise ValueError(
170
+ "`only_cross_attention` can only be set to True if `added_kv_proj_dim` is not None. Make sure to set either `only_cross_attention=False` or define `added_kv_proj_dim`."
171
+ )
172
+
173
+ if norm_num_groups is not None:
174
+ self.group_norm = nn.GroupNorm(num_channels=query_dim, num_groups=norm_num_groups, eps=eps, affine=True)
175
+ else:
176
+ self.group_norm = None
177
+
178
+ if spatial_norm_dim is not None:
179
+ self.spatial_norm = SpatialNorm(f_channels=query_dim, zq_channels=spatial_norm_dim)
180
+ else:
181
+ self.spatial_norm = None
182
+
183
+ if cross_attention_norm is None:
184
+ self.norm_cross = None
185
+ elif cross_attention_norm == "layer_norm":
186
+ self.norm_cross = nn.LayerNorm(self.cross_attention_dim)
187
+ elif cross_attention_norm == "group_norm":
188
+ if self.added_kv_proj_dim is not None:
189
+ # The given `encoder_hidden_states` are initially of shape
190
+ # (batch_size, seq_len, added_kv_proj_dim) before being projected
191
+ # to (batch_size, seq_len, cross_attention_dim). The norm is applied
192
+ # before the projection, so we need to use `added_kv_proj_dim` as
193
+ # the number of channels for the group norm.
194
+ norm_cross_num_channels = added_kv_proj_dim
195
+ else:
196
+ norm_cross_num_channels = self.cross_attention_dim
197
+
198
+ self.norm_cross = nn.GroupNorm(
199
+ num_channels=norm_cross_num_channels, num_groups=cross_attention_norm_num_groups, eps=1e-5, affine=True
200
+ )
201
+ else:
202
+ raise ValueError(
203
+ f"unknown cross_attention_norm: {cross_attention_norm}. Should be None, 'layer_norm' or 'group_norm'"
204
+ )
205
+
206
+ linear_cls = nn.Linear
207
+
208
+
209
+ self.to_q = linear_cls(query_dim, self.inner_dim, bias=bias)
210
+
211
+ if not self.only_cross_attention:
212
+ # only relevant for the `AddedKVProcessor` classes
213
+ self.to_k = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias)
214
+ self.to_v = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias)
215
+ else:
216
+ self.to_k = None
217
+ self.to_v = None
218
+
219
+ if self.added_kv_proj_dim is not None:
220
+ self.add_k_proj = linear_cls(added_kv_proj_dim, self.inner_dim)
221
+ self.add_v_proj = linear_cls(added_kv_proj_dim, self.inner_dim)
222
+
223
+ self.to_out = nn.ModuleList([])
224
+ self.to_out.append(linear_cls(self.inner_dim, query_dim, bias=out_bias))
225
+ self.to_out.append(nn.Dropout(dropout))
226
+
227
+ # set attention processor
228
+ # We use the AttnProcessor2_0 by default when torch 2.x is used which uses
229
+ # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention
230
+ # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
231
+ if processor is None:
232
+ processor = (
233
+ AttnProcessor2_0(
234
+ attention_mode,
235
+ use_rope,
236
+ interpolation_scale_thw=interpolation_scale_thw,
237
+ )
238
+ if hasattr(F, "scaled_dot_product_attention") and self.scale_qk
239
+ else AttnProcessor()
240
+ )
241
+ self.set_processor(processor)
242
+
243
+ def set_use_memory_efficient_attention_xformers(
244
+ self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable] = None
245
+ ) -> None:
246
+ r"""
247
+ Set whether to use memory efficient attention from `xformers` or not.
248
+
249
+ Args:
250
+ use_memory_efficient_attention_xformers (`bool`):
251
+ Whether to use memory efficient attention from `xformers` or not.
252
+ attention_op (`Callable`, *optional*):
253
+ The attention operation to use. Defaults to `None` which uses the default attention operation from
254
+ `xformers`.
255
+ """
256
+ is_lora = hasattr(self, "processor")
257
+ is_custom_diffusion = hasattr(self, "processor") and isinstance(
258
+ self.processor,
259
+ (CustomDiffusionAttnProcessor, CustomDiffusionXFormersAttnProcessor, CustomDiffusionAttnProcessor2_0),
260
+ )
261
+ is_added_kv_processor = hasattr(self, "processor") and isinstance(
262
+ self.processor,
263
+ (
264
+ AttnAddedKVProcessor,
265
+ AttnAddedKVProcessor2_0,
266
+ SlicedAttnAddedKVProcessor,
267
+ XFormersAttnAddedKVProcessor,
268
+ LoRAAttnAddedKVProcessor,
269
+ ),
270
+ )
271
+
272
+ if use_memory_efficient_attention_xformers:
273
+ if is_added_kv_processor and (is_lora or is_custom_diffusion):
274
+ raise NotImplementedError(
275
+ f"Memory efficient attention is currently not supported for LoRA or custom diffusion for attention processor type {self.processor}"
276
+ )
277
+ if not is_xformers_available():
278
+ raise ModuleNotFoundError(
279
+ (
280
+ "Refer to https://github.com/facebookresearch/xformers for more information on how to install"
281
+ " xformers"
282
+ ),
283
+ name="xformers",
284
+ )
285
+ elif not torch.cuda.is_available():
286
+ raise ValueError(
287
+ "torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is"
288
+ " only available for GPU "
289
+ )
290
+ else:
291
+ try:
292
+ # Make sure we can run the memory efficient attention
293
+ _ = xformers.ops.memory_efficient_attention(
294
+ torch.randn((1, 2, 40), device="cuda"),
295
+ torch.randn((1, 2, 40), device="cuda"),
296
+ torch.randn((1, 2, 40), device="cuda"),
297
+ )
298
+ except Exception as e:
299
+ raise e
300
+
301
+ if is_lora:
302
+ # TODO (sayakpaul): should we throw a warning if someone wants to use the xformers
303
+ # variant when using PT 2.0 now that we have LoRAAttnProcessor2_0?
304
+ processor = LoRAXFormersAttnProcessor(
305
+ hidden_size=self.processor.hidden_size,
306
+ cross_attention_dim=self.processor.cross_attention_dim,
307
+ rank=self.processor.rank,
308
+ attention_op=attention_op,
309
+ )
310
+ processor.load_state_dict(self.processor.state_dict())
311
+ processor.to(self.processor.to_q_lora.up.weight.device)
312
+ elif is_custom_diffusion:
313
+ processor = CustomDiffusionXFormersAttnProcessor(
314
+ train_kv=self.processor.train_kv,
315
+ train_q_out=self.processor.train_q_out,
316
+ hidden_size=self.processor.hidden_size,
317
+ cross_attention_dim=self.processor.cross_attention_dim,
318
+ attention_op=attention_op,
319
+ )
320
+ processor.load_state_dict(self.processor.state_dict())
321
+ if hasattr(self.processor, "to_k_custom_diffusion"):
322
+ processor.to(self.processor.to_k_custom_diffusion.weight.device)
323
+ elif is_added_kv_processor:
324
+ # TODO(Patrick, Suraj, William) - currently xformers doesn't work for UnCLIP
325
+ # which uses this type of cross attention ONLY because the attention mask of format
326
+ # [0, ..., -10.000, ..., 0, ...,] is not supported
327
+ # throw warning
328
+ logger.info(
329
+ "Memory efficient attention with `xformers` might currently not work correctly if an attention mask is required for the attention operation."
330
+ )
331
+ processor = XFormersAttnAddedKVProcessor(attention_op=attention_op)
332
+ else:
333
+ processor = XFormersAttnProcessor(attention_op=attention_op)
334
+ else:
335
+ if is_lora:
336
+ attn_processor_class = (
337
+ LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor
338
+ )
339
+ processor = attn_processor_class(
340
+ hidden_size=self.processor.hidden_size,
341
+ cross_attention_dim=self.processor.cross_attention_dim,
342
+ rank=self.processor.rank,
343
+ )
344
+ processor.load_state_dict(self.processor.state_dict())
345
+ processor.to(self.processor.to_q_lora.up.weight.device)
346
+ elif is_custom_diffusion:
347
+ attn_processor_class = (
348
+ CustomDiffusionAttnProcessor2_0
349
+ if hasattr(F, "scaled_dot_product_attention")
350
+ else CustomDiffusionAttnProcessor
351
+ )
352
+ processor = attn_processor_class(
353
+ train_kv=self.processor.train_kv,
354
+ train_q_out=self.processor.train_q_out,
355
+ hidden_size=self.processor.hidden_size,
356
+ cross_attention_dim=self.processor.cross_attention_dim,
357
+ )
358
+ processor.load_state_dict(self.processor.state_dict())
359
+ if hasattr(self.processor, "to_k_custom_diffusion"):
360
+ processor.to(self.processor.to_k_custom_diffusion.weight.device)
361
+ else:
362
+ # set attention processor
363
+ # We use the AttnProcessor2_0 by default when torch 2.x is used which uses
364
+ # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention
365
+ # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
366
+ processor = (
367
+ AttnProcessor2_0()
368
+ if hasattr(F, "scaled_dot_product_attention") and self.scale_qk
369
+ else AttnProcessor()
370
+ )
371
+
372
+ self.set_processor(processor)
373
+
374
+ def set_attention_slice(self, slice_size: int) -> None:
375
+ r"""
376
+ Set the slice size for attention computation.
377
+
378
+ Args:
379
+ slice_size (`int`):
380
+ The slice size for attention computation.
381
+ """
382
+ if slice_size is not None and slice_size > self.sliceable_head_dim:
383
+ raise ValueError(f"slice_size {slice_size} has to be smaller or equal to {self.sliceable_head_dim}.")
384
+
385
+ if slice_size is not None and self.added_kv_proj_dim is not None:
386
+ processor = SlicedAttnAddedKVProcessor(slice_size)
387
+ elif slice_size is not None:
388
+ processor = SlicedAttnProcessor(slice_size)
389
+ elif self.added_kv_proj_dim is not None:
390
+ processor = AttnAddedKVProcessor()
391
+ else:
392
+ # set attention processor
393
+ # We use the AttnProcessor2_0 by default when torch 2.x is used which uses
394
+ # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention
395
+ # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
396
+ processor = (
397
+ AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") and self.scale_qk else AttnProcessor()
398
+ )
399
+
400
+ self.set_processor(processor)
401
+
402
+ def set_processor(self, processor: "AttnProcessor", _remove_lora: bool = False) -> None:
403
+ r"""
404
+ Set the attention processor to use.
405
+
406
+ Args:
407
+ processor (`AttnProcessor`):
408
+ The attention processor to use.
409
+ _remove_lora (`bool`, *optional*, defaults to `False`):
410
+ Set to `True` to remove LoRA layers from the model.
411
+ """
412
+ if not USE_PEFT_BACKEND and hasattr(self, "processor") and _remove_lora and self.to_q.lora_layer is not None:
413
+ deprecate(
414
+ "set_processor to offload LoRA",
415
+ "0.26.0",
416
+ "In detail, removing LoRA layers via calling `set_default_attn_processor` is deprecated. Please make sure to call `pipe.unload_lora_weights()` instead.",
417
+ )
418
+ # TODO(Patrick, Sayak) - this can be deprecated once PEFT LoRA integration is complete
419
+ # We need to remove all LoRA layers
420
+ # Don't forget to remove ALL `_remove_lora` from the codebase
421
+ for module in self.modules():
422
+ if hasattr(module, "set_lora_layer"):
423
+ module.set_lora_layer(None)
424
+
425
+ # if current processor is in `self._modules` and if passed `processor` is not, we need to
426
+ # pop `processor` from `self._modules`
427
+ if (
428
+ hasattr(self, "processor")
429
+ and isinstance(self.processor, torch.nn.Module)
430
+ and not isinstance(processor, torch.nn.Module)
431
+ ):
432
+ logger.info(f"You are removing possibly trained weights of {self.processor} with {processor}")
433
+ self._modules.pop("processor")
434
+
435
+ self.processor = processor
436
+
437
+ def get_processor(self, return_deprecated_lora: bool = False):
438
+ r"""
439
+ Get the attention processor in use.
440
+
441
+ Args:
442
+ return_deprecated_lora (`bool`, *optional*, defaults to `False`):
443
+ Set to `True` to return the deprecated LoRA attention processor.
444
+
445
+ Returns:
446
+ "AttentionProcessor": The attention processor in use.
447
+ """
448
+ if not return_deprecated_lora:
449
+ return self.processor
450
+
451
+ # TODO(Sayak, Patrick). The rest of the function is needed to ensure backwards compatible
452
+ # serialization format for LoRA Attention Processors. It should be deleted once the integration
453
+ # with PEFT is completed.
454
+ is_lora_activated = {
455
+ name: module.lora_layer is not None
456
+ for name, module in self.named_modules()
457
+ if hasattr(module, "lora_layer")
458
+ }
459
+
460
+ # 1. if no layer has a LoRA activated we can return the processor as usual
461
+ if not any(is_lora_activated.values()):
462
+ return self.processor
463
+
464
+ # If doesn't apply LoRA do `add_k_proj` or `add_v_proj`
465
+ is_lora_activated.pop("add_k_proj", None)
466
+ is_lora_activated.pop("add_v_proj", None)
467
+ # 2. else it is not posssible that only some layers have LoRA activated
468
+ if not all(is_lora_activated.values()):
469
+ raise ValueError(
470
+ f"Make sure that either all layers or no layers have LoRA activated, but have {is_lora_activated}"
471
+ )
472
+
473
+ # 3. And we need to merge the current LoRA layers into the corresponding LoRA attention processor
474
+ non_lora_processor_cls_name = self.processor.__class__.__name__
475
+ lora_processor_cls = getattr(import_module(__name__), "LoRA" + non_lora_processor_cls_name)
476
+
477
+ hidden_size = self.inner_dim
478
+
479
+ # now create a LoRA attention processor from the LoRA layers
480
+ if lora_processor_cls in [LoRAAttnProcessor, LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor]:
481
+ kwargs = {
482
+ "cross_attention_dim": self.cross_attention_dim,
483
+ "rank": self.to_q.lora_layer.rank,
484
+ "network_alpha": self.to_q.lora_layer.network_alpha,
485
+ "q_rank": self.to_q.lora_layer.rank,
486
+ "q_hidden_size": self.to_q.lora_layer.out_features,
487
+ "k_rank": self.to_k.lora_layer.rank,
488
+ "k_hidden_size": self.to_k.lora_layer.out_features,
489
+ "v_rank": self.to_v.lora_layer.rank,
490
+ "v_hidden_size": self.to_v.lora_layer.out_features,
491
+ "out_rank": self.to_out[0].lora_layer.rank,
492
+ "out_hidden_size": self.to_out[0].lora_layer.out_features,
493
+ }
494
+
495
+ if hasattr(self.processor, "attention_op"):
496
+ kwargs["attention_op"] = self.processor.attention_op
497
+
498
+ lora_processor = lora_processor_cls(hidden_size, **kwargs)
499
+ lora_processor.to_q_lora.load_state_dict(self.to_q.lora_layer.state_dict())
500
+ lora_processor.to_k_lora.load_state_dict(self.to_k.lora_layer.state_dict())
501
+ lora_processor.to_v_lora.load_state_dict(self.to_v.lora_layer.state_dict())
502
+ lora_processor.to_out_lora.load_state_dict(self.to_out[0].lora_layer.state_dict())
503
+ elif lora_processor_cls == LoRAAttnAddedKVProcessor:
504
+ lora_processor = lora_processor_cls(
505
+ hidden_size,
506
+ cross_attention_dim=self.add_k_proj.weight.shape[0],
507
+ rank=self.to_q.lora_layer.rank,
508
+ network_alpha=self.to_q.lora_layer.network_alpha,
509
+ )
510
+ lora_processor.to_q_lora.load_state_dict(self.to_q.lora_layer.state_dict())
511
+ lora_processor.to_k_lora.load_state_dict(self.to_k.lora_layer.state_dict())
512
+ lora_processor.to_v_lora.load_state_dict(self.to_v.lora_layer.state_dict())
513
+ lora_processor.to_out_lora.load_state_dict(self.to_out[0].lora_layer.state_dict())
514
+
515
+ # only save if used
516
+ if self.add_k_proj.lora_layer is not None:
517
+ lora_processor.add_k_proj_lora.load_state_dict(self.add_k_proj.lora_layer.state_dict())
518
+ lora_processor.add_v_proj_lora.load_state_dict(self.add_v_proj.lora_layer.state_dict())
519
+ else:
520
+ lora_processor.add_k_proj_lora = None
521
+ lora_processor.add_v_proj_lora = None
522
+ else:
523
+ raise ValueError(f"{lora_processor_cls} does not exist.")
524
+
525
+ return lora_processor
526
+
527
+ def forward(
528
+ self,
529
+ hidden_states: torch.FloatTensor,
530
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
531
+ attention_mask: Optional[torch.FloatTensor] = None,
532
+ **cross_attention_kwargs,
533
+ ) -> torch.Tensor:
534
+ r"""
535
+ The forward method of the `Attention` class.
536
+
537
+ Args:
538
+ hidden_states (`torch.Tensor`):
539
+ The hidden states of the query.
540
+ encoder_hidden_states (`torch.Tensor`, *optional*):
541
+ The hidden states of the encoder.
542
+ attention_mask (`torch.Tensor`, *optional*):
543
+ The attention mask to use. If `None`, no mask is applied.
544
+ **cross_attention_kwargs:
545
+ Additional keyword arguments to pass along to the cross attention.
546
+
547
+ Returns:
548
+ `torch.Tensor`: The output of the attention layer.
549
+ """
550
+ # The `Attention` class can call different attention processors / attention functions
551
+ # here we simply pass along all tensors to the selected processor class
552
+ # For standard processors that are defined here, `**cross_attention_kwargs` is empty
553
+ return self.processor(
554
+ self,
555
+ hidden_states,
556
+ encoder_hidden_states=encoder_hidden_states,
557
+ attention_mask=attention_mask,
558
+ **cross_attention_kwargs,
559
+ )
560
+
561
+ def batch_to_head_dim(self, tensor: torch.Tensor) -> torch.Tensor:
562
+ r"""
563
+ Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size // heads, seq_len, dim * heads]`. `heads`
564
+ is the number of heads initialized while constructing the `Attention` class.
565
+
566
+ Args:
567
+ tensor (`torch.Tensor`): The tensor to reshape.
568
+
569
+ Returns:
570
+ `torch.Tensor`: The reshaped tensor.
571
+ """
572
+ head_size = self.heads
573
+ batch_size, seq_len, dim = tensor.shape
574
+ tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
575
+ tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size)
576
+ return tensor
577
+
578
+ def head_to_batch_dim(self, tensor: torch.Tensor, out_dim: int = 3) -> torch.Tensor:
579
+ r"""
580
+ Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size, seq_len, heads, dim // heads]` `heads` is
581
+ the number of heads initialized while constructing the `Attention` class.
582
+
583
+ Args:
584
+ tensor (`torch.Tensor`): The tensor to reshape.
585
+ out_dim (`int`, *optional*, defaults to `3`): The output dimension of the tensor. If `3`, the tensor is
586
+ reshaped to `[batch_size * heads, seq_len, dim // heads]`.
587
+
588
+ Returns:
589
+ `torch.Tensor`: The reshaped tensor.
590
+ """
591
+ head_size = self.heads
592
+ batch_size, seq_len, dim = tensor.shape
593
+ tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)
594
+ tensor = tensor.permute(0, 2, 1, 3)
595
+
596
+ if out_dim == 3:
597
+ tensor = tensor.reshape(batch_size * head_size, seq_len, dim // head_size)
598
+
599
+ return tensor
600
+
601
+ def get_attention_scores(
602
+ self, query: torch.Tensor, key: torch.Tensor, attention_mask: torch.Tensor = None
603
+ ) -> torch.Tensor:
604
+ r"""
605
+ Compute the attention scores.
606
+
607
+ Args:
608
+ query (`torch.Tensor`): The query tensor.
609
+ key (`torch.Tensor`): The key tensor.
610
+ attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied.
611
+
612
+ Returns:
613
+ `torch.Tensor`: The attention probabilities/scores.
614
+ """
615
+ dtype = query.dtype
616
+ if self.upcast_attention:
617
+ query = query.float()
618
+ key = key.float()
619
+
620
+ if attention_mask is None:
621
+ baddbmm_input = torch.empty(
622
+ query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device
623
+ )
624
+ beta = 0
625
+ else:
626
+ baddbmm_input = attention_mask
627
+ beta = 1
628
+
629
+ attention_scores = torch.baddbmm(
630
+ baddbmm_input,
631
+ query,
632
+ key.transpose(-1, -2),
633
+ beta=beta,
634
+ alpha=self.scale,
635
+ )
636
+ del baddbmm_input
637
+
638
+ if self.upcast_softmax:
639
+ attention_scores = attention_scores.float()
640
+
641
+ attention_probs = attention_scores.softmax(dim=-1)
642
+ del attention_scores
643
+
644
+ attention_probs = attention_probs.to(dtype)
645
+
646
+ return attention_probs
647
+
648
+ def prepare_attention_mask(
649
+ self, attention_mask: torch.Tensor, target_length: int, batch_size: int, out_dim: int = 3, head_size = None,
650
+ ) -> torch.Tensor:
651
+ r"""
652
+ Prepare the attention mask for the attention computation.
653
+
654
+ Args:
655
+ attention_mask (`torch.Tensor`):
656
+ The attention mask to prepare.
657
+ target_length (`int`):
658
+ The target length of the attention mask. This is the length of the attention mask after padding.
659
+ batch_size (`int`):
660
+ The batch size, which is used to repeat the attention mask.
661
+ out_dim (`int`, *optional*, defaults to `3`):
662
+ The output dimension of the attention mask. Can be either `3` or `4`.
663
+
664
+ Returns:
665
+ `torch.Tensor`: The prepared attention mask.
666
+ """
667
+ head_size = head_size if head_size is not None else self.heads
668
+ if attention_mask is None:
669
+ return attention_mask
670
+
671
+ current_length: int = attention_mask.shape[-1]
672
+ if current_length != target_length:
673
+ if attention_mask.device.type == "mps":
674
+ # HACK: MPS: Does not support padding by greater than dimension of input tensor.
675
+ # Instead, we can manually construct the padding tensor.
676
+ padding_shape = (attention_mask.shape[0], attention_mask.shape[1], target_length)
677
+ padding = torch.zeros(padding_shape, dtype=attention_mask.dtype, device=attention_mask.device)
678
+ attention_mask = torch.cat([attention_mask, padding], dim=2)
679
+ else:
680
+ # TODO: for pipelines such as stable-diffusion, padding cross-attn mask:
681
+ # we want to instead pad by (0, remaining_length), where remaining_length is:
682
+ # remaining_length: int = target_length - current_length
683
+ # TODO: re-enable tests/models/test_models_unet_2d_condition.py#test_model_xattn_padding
684
+ attention_mask = F.pad(attention_mask, (0, target_length), value=0.0)
685
+
686
+ if out_dim == 3:
687
+ if attention_mask.shape[0] < batch_size * head_size:
688
+ attention_mask = attention_mask.repeat_interleave(head_size, dim=0)
689
+ elif out_dim == 4:
690
+ attention_mask = attention_mask.unsqueeze(1)
691
+ attention_mask = attention_mask.repeat_interleave(head_size, dim=1)
692
+
693
+ return attention_mask
694
+
695
+ def norm_encoder_hidden_states(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
696
+ r"""
697
+ Normalize the encoder hidden states. Requires `self.norm_cross` to be specified when constructing the
698
+ `Attention` class.
699
+
700
+ Args:
701
+ encoder_hidden_states (`torch.Tensor`): Hidden states of the encoder.
702
+
703
+ Returns:
704
+ `torch.Tensor`: The normalized encoder hidden states.
705
+ """
706
+ assert self.norm_cross is not None, "self.norm_cross must be defined to call self.norm_encoder_hidden_states"
707
+
708
+ if isinstance(self.norm_cross, nn.LayerNorm):
709
+ encoder_hidden_states = self.norm_cross(encoder_hidden_states)
710
+ elif isinstance(self.norm_cross, nn.GroupNorm):
711
+ # Group norm norms along the channels dimension and expects
712
+ # input to be in the shape of (N, C, *). In this case, we want
713
+ # to norm along the hidden dimension, so we need to move
714
+ # (batch_size, sequence_length, hidden_size) ->
715
+ # (batch_size, hidden_size, sequence_length)
716
+ encoder_hidden_states = encoder_hidden_states.transpose(1, 2)
717
+ encoder_hidden_states = self.norm_cross(encoder_hidden_states)
718
+ encoder_hidden_states = encoder_hidden_states.transpose(1, 2)
719
+ else:
720
+ assert False
721
+
722
+ return encoder_hidden_states
723
+
724
+ def _init_compress(self):
725
+ self.sr.bias.data.zero_()
726
+ self.norm = nn.LayerNorm(self.inner_dim)
727
+
728
+
729
+ class AttnProcessor2_0(nn.Module):
730
+ r"""
731
+ Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
732
+ """
733
+
734
+ def __init__(self, attention_mode="xformers", use_rope=False, interpolation_scale_thw=None):
735
+ super().__init__()
736
+ self.attention_mode = attention_mode
737
+ self.use_rope = use_rope
738
+ self.interpolation_scale_thw = interpolation_scale_thw
739
+
740
+ if self.use_rope:
741
+ self._init_rope(interpolation_scale_thw)
742
+
743
+ if not hasattr(F, "scaled_dot_product_attention"):
744
+ raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
745
+
746
+ def _init_rope(self, interpolation_scale_thw):
747
+ self.rope = RoPE3D(interpolation_scale_thw=interpolation_scale_thw)
748
+ self.position_getter = PositionGetter3D()
749
+
750
+ def __call__(
751
+ self,
752
+ attn: Attention,
753
+ hidden_states: torch.FloatTensor,
754
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
755
+ attention_mask: Optional[torch.FloatTensor] = None,
756
+ temb: Optional[torch.FloatTensor] = None,
757
+ frame: int = 8,
758
+ height: int = 16,
759
+ width: int = 16,
760
+ ) -> torch.FloatTensor:
761
+
762
+ residual = hidden_states
763
+
764
+ if attn.spatial_norm is not None:
765
+ hidden_states = attn.spatial_norm(hidden_states, temb)
766
+
767
+ input_ndim = hidden_states.ndim
768
+
769
+ if input_ndim == 4:
770
+ batch_size, channel, height, width = hidden_states.shape
771
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
772
+
773
+
774
+ batch_size, sequence_length, _ = (
775
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
776
+ )
777
+
778
+ if attention_mask is not None and self.attention_mode == 'xformers':
779
+ attention_heads = attn.heads
780
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size, head_size=attention_heads)
781
+ attention_mask = attention_mask.view(batch_size, attention_heads, -1, attention_mask.shape[-1])
782
+ else:
783
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
784
+ # scaled_dot_product_attention expects attention_mask shape to be
785
+ # (batch, heads, source_length, target_length)
786
+ attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
787
+
788
+ if attn.group_norm is not None:
789
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
790
+
791
+ query = attn.to_q(hidden_states)
792
+
793
+ if encoder_hidden_states is None:
794
+ encoder_hidden_states = hidden_states
795
+ elif attn.norm_cross:
796
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
797
+
798
+ key = attn.to_k(encoder_hidden_states)
799
+ value = attn.to_v(encoder_hidden_states)
800
+
801
+
802
+
803
+ attn_heads = attn.heads
804
+
805
+ inner_dim = key.shape[-1]
806
+ head_dim = inner_dim // attn_heads
807
+
808
+ query = query.view(batch_size, -1, attn_heads, head_dim).transpose(1, 2)
809
+ key = key.view(batch_size, -1, attn_heads, head_dim).transpose(1, 2)
810
+ value = value.view(batch_size, -1, attn_heads, head_dim).transpose(1, 2)
811
+
812
+
813
+ if self.use_rope:
814
+ # require the shape of (batch_size x nheads x ntokens x dim)
815
+ pos_thw = self.position_getter(batch_size, t=frame, h=height, w=width, device=query.device)
816
+ query = self.rope(query, pos_thw)
817
+ key = self.rope(key, pos_thw)
818
+
819
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
820
+ # TODO: add support for attn.scale when we move to Torch 2.1
821
+ if self.attention_mode == 'flash':
822
+ # assert attention_mask is None, 'flash-attn do not support attention_mask'
823
+ with sdpa_kernel(SDPBackend.FLASH_ATTENTION):
824
+ hidden_states = F.scaled_dot_product_attention(
825
+ query, key, value, dropout_p=0.0, is_causal=False
826
+ )
827
+ elif self.attention_mode == 'xformers':
828
+ with sdpa_kernel(SDPBackend.EFFICIENT_ATTENTION):
829
+ hidden_states = F.scaled_dot_product_attention(
830
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
831
+ )
832
+
833
+
834
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn_heads * head_dim)
835
+ hidden_states = hidden_states.to(query.dtype)
836
+
837
+ # linear proj
838
+ hidden_states = attn.to_out[0](hidden_states)
839
+ # dropout
840
+ hidden_states = attn.to_out[1](hidden_states)
841
+
842
+ if input_ndim == 4:
843
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
844
+
845
+ if attn.residual_connection:
846
+ hidden_states = hidden_states + residual
847
+
848
+ hidden_states = hidden_states / attn.rescale_output_factor
849
+
850
+ return hidden_states
851
+
852
+ class FeedForward(nn.Module):
853
+ r"""
854
+ A feed-forward layer.
855
+
856
+ Parameters:
857
+ dim (`int`): The number of channels in the input.
858
+ dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
859
+ mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
860
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
861
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
862
+ final_dropout (`bool` *optional*, defaults to False): Apply a final dropout.
863
+ """
864
+
865
+ def __init__(
866
+ self,
867
+ dim: int,
868
+ dim_out: Optional[int] = None,
869
+ mult: int = 4,
870
+ dropout: float = 0.0,
871
+ activation_fn: str = "geglu",
872
+ final_dropout: bool = False,
873
+ ):
874
+ super().__init__()
875
+ inner_dim = int(dim * mult)
876
+ dim_out = dim_out if dim_out is not None else dim
877
+ linear_cls = nn.Linear
878
+
879
+ if activation_fn == "gelu":
880
+ act_fn = GELU(dim, inner_dim)
881
+ if activation_fn == "gelu-approximate":
882
+ act_fn = GELU(dim, inner_dim, approximate="tanh")
883
+ elif activation_fn == "geglu":
884
+ act_fn = GEGLU(dim, inner_dim)
885
+ elif activation_fn == "geglu-approximate":
886
+ act_fn = ApproximateGELU(dim, inner_dim)
887
+
888
+ self.net = nn.ModuleList([])
889
+ # project in
890
+ self.net.append(act_fn)
891
+ # project dropout
892
+ self.net.append(nn.Dropout(dropout))
893
+ # project out
894
+ self.net.append(linear_cls(inner_dim, dim_out))
895
+ # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
896
+ if final_dropout:
897
+ self.net.append(nn.Dropout(dropout))
898
+
899
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
900
+ for module in self.net:
901
+ hidden_states = module(hidden_states)
902
+ return hidden_states
903
+
904
+
905
+ @maybe_allow_in_graph
906
+ class BasicTransformerBlock(nn.Module):
907
+ r"""
908
+ A basic Transformer block.
909
+
910
+ Parameters:
911
+ dim (`int`): The number of channels in the input and output.
912
+ num_attention_heads (`int`): The number of heads to use for multi-head attention.
913
+ attention_head_dim (`int`): The number of channels in each head.
914
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
915
+ cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
916
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
917
+ num_embeds_ada_norm (:
918
+ obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`.
919
+ attention_bias (:
920
+ obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter.
921
+ only_cross_attention (`bool`, *optional*):
922
+ Whether to use only cross-attention layers. In this case two cross attention layers are used.
923
+ double_self_attention (`bool`, *optional*):
924
+ Whether to use two self-attention layers. In this case no cross attention layers are used.
925
+ upcast_attention (`bool`, *optional*):
926
+ Whether to upcast the attention computation to float32. This is useful for mixed precision training.
927
+ norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
928
+ Whether to use learnable elementwise affine parameters for normalization.
929
+ norm_type (`str`, *optional*, defaults to `"layer_norm"`):
930
+ The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`.
931
+ final_dropout (`bool` *optional*, defaults to False):
932
+ Whether to apply a final dropout after the last feed-forward layer.
933
+ positional_embeddings (`str`, *optional*, defaults to `None`):
934
+ The type of positional embeddings to apply to.
935
+ num_positional_embeddings (`int`, *optional*, defaults to `None`):
936
+ The maximum number of positional embeddings to apply.
937
+ """
938
+
939
+ def __init__(
940
+ self,
941
+ dim: int,
942
+ num_attention_heads: int,
943
+ attention_head_dim: int,
944
+ dropout=0.0,
945
+ cross_attention_dim: Optional[int] = None,
946
+ activation_fn: str = "geglu",
947
+ num_embeds_ada_norm: Optional[int] = None,
948
+ attention_bias: bool = False,
949
+ only_cross_attention: bool = False,
950
+ double_self_attention: bool = False,
951
+ upcast_attention: bool = False,
952
+ norm_elementwise_affine: bool = True,
953
+ norm_type: str = "layer_norm", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single'
954
+ norm_eps: float = 1e-5,
955
+ final_dropout: bool = False,
956
+ positional_embeddings: Optional[str] = None,
957
+ num_positional_embeddings: Optional[int] = None,
958
+ sa_attention_mode: str = "flash",
959
+ ca_attention_mode: str = "xformers",
960
+ use_rope: bool = False,
961
+ interpolation_scale_thw: Tuple[int] = (1, 1, 1),
962
+ block_idx: Optional[int] = None,
963
+ ):
964
+ super().__init__()
965
+ self.only_cross_attention = only_cross_attention
966
+
967
+ self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
968
+ self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
969
+ self.use_ada_layer_norm_single = norm_type == "ada_norm_single"
970
+ self.use_layer_norm = norm_type == "layer_norm"
971
+
972
+ if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
973
+ raise ValueError(
974
+ f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
975
+ f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
976
+ )
977
+
978
+ if positional_embeddings and (num_positional_embeddings is None):
979
+ raise ValueError(
980
+ "If `positional_embedding` type is defined, `num_positition_embeddings` must also be defined."
981
+ )
982
+
983
+ if positional_embeddings == "sinusoidal":
984
+ self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings)
985
+ else:
986
+ self.pos_embed = None
987
+
988
+ # Define 3 blocks. Each block has its own normalization layer.
989
+ # 1. Self-Attn
990
+ if self.use_ada_layer_norm:
991
+ self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
992
+ elif self.use_ada_layer_norm_zero:
993
+ self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
994
+ else:
995
+ self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)
996
+
997
+ self.attn1 = Attention(
998
+ query_dim=dim,
999
+ heads=num_attention_heads,
1000
+ dim_head=attention_head_dim,
1001
+ dropout=dropout,
1002
+ bias=attention_bias,
1003
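+ # attn1 is self-attention by default; when `only_cross_attention` is set it attends to `encoder_hidden_states` instead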
+ cross_attention_dim=cross_attention_dim if only_cross_attention else None,
1004
+ upcast_attention=upcast_attention,
1005
+ attention_mode=sa_attention_mode,
1006
+ use_rope=use_rope,
1007
+ interpolation_scale_thw=interpolation_scale_thw,
1008
+ )
1009
+
1010
+ # 2. Cross-Attn
1011
+ if cross_attention_dim is not None or double_self_attention:
1012
+ # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
1013
+ # I.e. the number of returned modulation chunks from AdaLayerNormZero would not make sense if returned during
1014
+ # the second cross attention block.
1015
+ self.norm2 = (
1016
+ AdaLayerNorm(dim, num_embeds_ada_norm)
1017
+ if self.use_ada_layer_norm
1018
+ else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)
1019
+ )
1020
+ self.attn2 = Attention(
1021
+ query_dim=dim,
1022
+ cross_attention_dim=cross_attention_dim if not double_self_attention else None,
1023
+ heads=num_attention_heads,
1024
+ dim_head=attention_head_dim,
1025
+ dropout=dropout,
1026
+ bias=attention_bias,
1027
+ upcast_attention=upcast_attention,
1028
+ attention_mode=ca_attention_mode, # only xformers support attention_mask
1029
+ use_rope=False, # do not use RoPE positional embeddings in cross attention
1030
+ interpolation_scale_thw=interpolation_scale_thw,
1031
+ ) # is self-attn if encoder_hidden_states is none
1032
+ else:
1033
+ self.norm2 = None
1034
+ self.attn2 = None
1035
+
1036
+ # 3. Feed-forward
1037
+
1038
+ if not self.use_ada_layer_norm_single:
1039
+ self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)
1040
+
1041
+ self.ff = FeedForward(
1042
+ dim,
1043
+ dropout=dropout,
1044
+ activation_fn=activation_fn,
1045
+ final_dropout=final_dropout,
1046
+ )
1047
+
1048
+ # 4. Scale-shift for PixArt-Alpha.
1049
+ if self.use_ada_layer_norm_single:
1050
+ self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5)
1051
+
1052
+
1053
+ def forward(
1054
+ self,
1055
+ hidden_states: torch.FloatTensor,
1056
+ attention_mask: Optional[torch.FloatTensor] = None,
1057
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1058
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1059
+ timestep: Optional[torch.LongTensor] = None,
1060
+ cross_attention_kwargs: Dict[str, Any] = None,
1061
+ class_labels: Optional[torch.LongTensor] = None,
1062
+ frame: int = None,
1063
+ height: int = None,
1064
+ width: int = None,
1065
+ ) -> torch.FloatTensor:
1066
+ # Notice that normalization is always applied before the real computation in the following blocks.
1067
+ cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
1068
+
1069
+ # 0. Self-Attention
1070
+ batch_size = hidden_states.shape[0]
1071
+
1072
+ if self.use_ada_layer_norm:
1073
+ norm_hidden_states = self.norm1(hidden_states, timestep)
1074
+ elif self.use_ada_layer_norm_zero:
1075
+ norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
1076
+ hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
1077
+ )
1078
+ elif self.use_layer_norm:
1079
+ norm_hidden_states = self.norm1(hidden_states)
1080
+ elif self.use_ada_layer_norm_single:
1081
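+ # adaLN-single (PixArt-Alpha): a shared scale_shift_table plus the projected timestep yields the six
+ # modulation tensors (shift/scale/gate for self-attention and for the feed-forward)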
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
1082
+ self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1)
1083
+ ).chunk(6, dim=1)
1084
+ norm_hidden_states = self.norm1(hidden_states)
1085
+ norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
1086
+ norm_hidden_states = norm_hidden_states.squeeze(1)
1087
+ else:
1088
+ raise ValueError("Incorrect norm used")
1089
+
1090
+ if self.pos_embed is not None:
1091
+ norm_hidden_states = self.pos_embed(norm_hidden_states)
1092
+
1093
+ attn_output = self.attn1(
1094
+ norm_hidden_states,
1095
+ encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
1096
+ attention_mask=attention_mask,
1097
+ frame=frame,
1098
+ height=height,
1099
+ width=width,
1100
+ **cross_attention_kwargs,
1101
+ )
1102
+ if self.use_ada_layer_norm_zero:
1103
+ attn_output = gate_msa.unsqueeze(1) * attn_output
1104
+ elif self.use_ada_layer_norm_single:
1105
+ attn_output = gate_msa * attn_output
1106
+
1107
+ hidden_states = attn_output + hidden_states
1108
+ if hidden_states.ndim == 4:
1109
+ hidden_states = hidden_states.squeeze(1)
1110
+
1111
+ # 1. Cross-Attention
1112
+ if self.attn2 is not None:
1113
+
1114
+ if self.use_ada_layer_norm:
1115
+ norm_hidden_states = self.norm2(hidden_states, timestep)
1116
+ elif self.use_ada_layer_norm_zero or self.use_layer_norm:
1117
+ norm_hidden_states = self.norm2(hidden_states)
1118
+ elif self.use_ada_layer_norm_single:
1119
+ # For PixArt norm2 isn't applied here:
1120
+ # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103
1121
+ norm_hidden_states = hidden_states
1122
+ else:
1123
+ raise ValueError("Incorrect norm")
1124
+
1125
+ if self.pos_embed is not None and self.use_ada_layer_norm_single is False:
1126
+ norm_hidden_states = self.pos_embed(norm_hidden_states)
1127
+
1128
+ attn_output = self.attn2(
1129
+ norm_hidden_states,
1130
+ encoder_hidden_states=encoder_hidden_states,
1131
+ attention_mask=encoder_attention_mask,
1132
+ **cross_attention_kwargs,
1133
+ )
1134
+ hidden_states = attn_output + hidden_states
1135
+
1136
+
1137
+ # 2. Feed-forward
1138
+ if not self.use_ada_layer_norm_single:
1139
+ norm_hidden_states = self.norm3(hidden_states)
1140
+
1141
+ if self.use_ada_layer_norm_zero:
1142
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
1143
+
1144
+ if self.use_ada_layer_norm_single:
1145
+ norm_hidden_states = self.norm2(hidden_states)
1146
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp
1147
+
1148
+ ff_output = self.ff(norm_hidden_states)
1149
+
1150
+ if self.use_ada_layer_norm_zero:
1151
+ ff_output = gate_mlp.unsqueeze(1) * ff_output
1152
+ elif self.use_ada_layer_norm_single:
1153
+ ff_output = gate_mlp * ff_output
1154
+
1155
+
1156
+ hidden_states = ff_output + hidden_states
1157
+ if hidden_states.ndim == 4:
1158
+ hidden_states = hidden_states.squeeze(1)
1159
+
1160
+ return hidden_states
1161
+
1162
+
1163
+ class AdaLayerNormSingle(nn.Module):
1164
+ r"""
1165
+ Norm layer adaptive layer norm single (adaLN-single).
1166
+
1167
+ As proposed in PixArt-Alpha (see: https://arxiv.org/abs/2310.00426; Section 2.3).
1168
+
1169
+ Parameters:
1170
+ embedding_dim (`int`): The size of each embedding vector.
1171
+ use_additional_conditions (`bool`): To use additional conditions for normalization or not.
1172
+ """
1173
+
1174
+ def __init__(self, embedding_dim: int, use_additional_conditions: bool = False):
1175
+ super().__init__()
1176
+
1177
+ self.emb = CombinedTimestepSizeEmbeddings(
1178
+ embedding_dim, size_emb_dim=embedding_dim // 3, use_additional_conditions=use_additional_conditions
1179
+ )
1180
+
1181
+ self.silu = nn.SiLU()
1182
+ self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
1183
+
1184
+ def forward(
1185
+ self,
1186
+ timestep: torch.Tensor,
1187
+ added_cond_kwargs: Dict[str, torch.Tensor] = None,
1188
+ batch_size: int = None,
1189
+ hidden_dtype: Optional[torch.dtype] = None,
1190
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
1191
+ # No modulation happening here.
1192
+ embedded_timestep = self.emb(
1193
+ timestep, batch_size=batch_size, hidden_dtype=hidden_dtype, resolution=None, aspect_ratio=None
1194
+ )
1195
+ return self.linear(self.silu(embedded_timestep)), embedded_timestep
allegro/models/transformers/embedding.py ADDED
@@ -0,0 +1,128 @@
1
+ # Adapted from Open-Sora-Plan
2
+
3
+ # This source code is licensed under the license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ # --------------------------------------------------------
6
+ # References:
7
+ # Open-Sora-Plan: https://github.com/PKU-YuanGroup/Open-Sora-Plan
8
+ # --------------------------------------------------------
9
+
10
+ import torch
11
+ import collections
12
+ from diffusers.models.embeddings import TimestepEmbedding, Timesteps
13
+
14
+ from einops import rearrange
15
+ from torch import nn
16
+
17
+
18
+ from diffusers.utils import logging
19
+
20
+ logger = logging.get_logger(__name__)
21
+
22
+
23
+ class CombinedTimestepSizeEmbeddings(nn.Module):
24
+ """
25
+ For PixArt-Alpha.
26
+
27
+ Reference:
28
+ https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L164C9-L168C29
29
+ """
30
+
31
+ def __init__(self, embedding_dim, size_emb_dim, use_additional_conditions: bool = False):
32
+ super().__init__()
33
+
34
+ self.outdim = size_emb_dim
35
+ self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
36
+ self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
37
+
38
+ self.use_additional_conditions = use_additional_conditions
39
+ if use_additional_conditions:
40
+ self.use_additional_conditions = True
41
+ self.additional_condition_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
42
+ self.resolution_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=size_emb_dim)
43
+ self.aspect_ratio_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=size_emb_dim)
44
+
45
+ def apply_condition(self, size: torch.Tensor, batch_size: int, embedder: nn.Module):
46
+ if size.ndim == 1:
47
+ size = size[:, None]
48
+
49
+ if size.shape[0] != batch_size:
50
+ size = size.repeat(batch_size // size.shape[0], 1)
51
+ if size.shape[0] != batch_size:
52
+ raise ValueError(f"`batch_size` should be {size.shape[0]} but found {batch_size}.")
53
+
54
+ current_batch_size, dims = size.shape[0], size.shape[1]
55
+ size = size.reshape(-1)
56
+ size_freq = self.additional_condition_proj(size).to(size.dtype)
57
+
58
+ size_emb = embedder(size_freq)
59
+ size_emb = size_emb.reshape(current_batch_size, dims * self.outdim)
60
+ return size_emb
61
+
62
+ def forward(self, timestep, resolution, aspect_ratio, batch_size, hidden_dtype):
63
+ timesteps_proj = self.time_proj(timestep)
64
+ timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_dtype)) # (N, D)
65
+
66
+ if self.use_additional_conditions:
67
+ resolution = self.apply_condition(resolution, batch_size=batch_size, embedder=self.resolution_embedder)
68
+ aspect_ratio = self.apply_condition(
69
+ aspect_ratio, batch_size=batch_size, embedder=self.aspect_ratio_embedder
70
+ )
71
+ conditioning = timesteps_emb + torch.cat([resolution, aspect_ratio], dim=1)
72
+ else:
73
+ conditioning = timesteps_emb
74
+
75
+ return conditioning
76
+
77
+ class PatchEmbed2D(nn.Module):
78
+ """2D Image to Patch Embedding"""
79
+
80
+ def __init__(
81
+ self,
82
+ num_frames=1,
83
+ height=224,
84
+ width=224,
85
+ patch_size_t=1,
86
+ patch_size=16,
87
+ in_channels=3,
88
+ embed_dim=768,
89
+ layer_norm=False,
90
+ flatten=True,
91
+ bias=True,
92
+ interpolation_scale=(1, 1),
93
+ interpolation_scale_t=1,
94
+ use_abs_pos=False,
95
+ ):
96
+ super().__init__()
97
+ self.use_abs_pos = use_abs_pos
98
+ self.flatten = flatten
99
+ self.layer_norm = layer_norm
100
+
101
+ self.proj = nn.Conv2d(
102
+ in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=(patch_size, patch_size), bias=bias
103
+ )
104
+ if layer_norm:
105
+ self.norm = nn.LayerNorm(embed_dim, elementwise_affine=False, eps=1e-6)
106
+ else:
107
+ self.norm = None
108
+
109
+ self.patch_size_t = patch_size_t
110
+ self.patch_size = patch_size
111
+
112
+ def forward(self, latent):
113
+ b, _, _, _, _ = latent.shape
114
+ video_latent = None
115
+
116
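+ # fold time into the batch dimension so the 2D conv patchifies each frame independently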
+ latent = rearrange(latent, 'b c t h w -> (b t) c h w')
117
+
118
+ latent = self.proj(latent)
119
+ if self.flatten:
120
+ latent = latent.flatten(2).transpose(1, 2) # BT C H W -> BT N C
121
+ if self.layer_norm:
122
+ latent = self.norm(latent)
123
+
124
+ latent = rearrange(latent, '(b t) n c -> b (t n) c', b=b)
125
+ video_latent = latent
126
+
127
+ return video_latent
128
+
allegro/models/transformers/rope.py ADDED
@@ -0,0 +1,90 @@
1
+ # Adapted from Diffusers and Open-Sora-Plan
2
+
3
+ import torch
4
+
5
+
6
+ from diffusers.utils import logging
7
+
8
+ logger = logging.get_logger(__name__)
9
+
10
+
11
+ class PositionGetter3D(object):
12
+ """ return positions of patches """
13
+
14
+ def __init__(self, ):
15
+ self.cache_positions = {}
16
+
17
+ def __call__(self, b, t, h, w, device):
18
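+ # compute the (t, y, x) integer position of every patch once per (b, t, h, w) grid and cache it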
+ if (b, t, h, w) not in self.cache_positions:
19
+ x = torch.arange(w, device=device)
20
+ y = torch.arange(h, device=device)
21
+ z = torch.arange(t, device=device)
22
+ pos = torch.cartesian_prod(z, y, x)
23
+
24
+ pos = pos.reshape(t * h * w, 3).transpose(0, 1).reshape(3, 1, -1).contiguous().expand(3, b, -1).clone()
25
+ poses = (pos[0].contiguous(), pos[1].contiguous(), pos[2].contiguous())
26
+ max_poses = (int(poses[0].max()), int(poses[1].max()), int(poses[2].max()))
27
+
28
+ self.cache_positions[b, t, h, w] = (poses, max_poses)
29
+ pos = self.cache_positions[b, t, h, w]
30
+
31
+ return pos
32
+
33
+
34
+ class RoPE3D(torch.nn.Module):
35
+
36
+ def __init__(self, freq=10000.0, F0=1.0, interpolation_scale_thw=(1, 1, 1)):
37
+ super().__init__()
38
+ self.base = freq
39
+ self.F0 = F0
40
+ self.interpolation_scale_t = interpolation_scale_thw[0]
41
+ self.interpolation_scale_h = interpolation_scale_thw[1]
42
+ self.interpolation_scale_w = interpolation_scale_thw[2]
43
+ self.cache = {}
44
+
45
+ def get_cos_sin(self, D, seq_len, device, dtype, interpolation_scale=1):
46
+ if (D, seq_len, device, dtype) not in self.cache:
47
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, D, 2).float().to(device) / D))
48
+ t = torch.arange(seq_len, device=device, dtype=inv_freq.dtype) / interpolation_scale
49
+ freqs = torch.einsum("i,j->ij", t, inv_freq).to(dtype)
50
+ freqs = torch.cat((freqs, freqs), dim=-1)
51
+ cos = freqs.cos() # (Seq, Dim)
52
+ sin = freqs.sin()
53
+ self.cache[D, seq_len, device, dtype] = (cos, sin)
54
+ return self.cache[D, seq_len, device, dtype]
55
+
56
+ @staticmethod
57
+ def rotate_half(x):
58
+ x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
59
+ return torch.cat((-x2, x1), dim=-1)
60
+
61
+ def apply_rope1d(self, tokens, pos1d, cos, sin):
62
+ assert pos1d.ndim == 2
63
+
64
+ # for (batch_size x ntokens x nheads x dim)
65
+ cos = torch.nn.functional.embedding(pos1d, cos)[:, None, :, :]
66
+ sin = torch.nn.functional.embedding(pos1d, sin)[:, None, :, :]
67
+ return (tokens * cos) + (self.rotate_half(tokens) * sin)
68
+
69
+ def forward(self, tokens, positions):
70
+ """
71
+ input:
72
+ * tokens: batch_size x nheads x ntokens x dim
73
+ * positions: batch_size x ntokens x 3 (t, y and x position of each token)
74
+ output:
75
+ * tokens after applying RoPE3D (batch_size x nheads x ntokens x dim)
76
+ """
77
+ assert tokens.size(3) % 3 == 0, "number of dimensions should be a multiple of three"
78
+ D = tokens.size(3) // 3
79
+ poses, max_poses = positions
80
+ assert len(poses) == 3 and poses[0].ndim == 2  # Batch, Seq, 3
81
+ cos_t, sin_t = self.get_cos_sin(D, max_poses[0] + 1, tokens.device, tokens.dtype, self.interpolation_scale_t)
82
+ cos_y, sin_y = self.get_cos_sin(D, max_poses[1] + 1, tokens.device, tokens.dtype, self.interpolation_scale_h)
83
+ cos_x, sin_x = self.get_cos_sin(D, max_poses[2] + 1, tokens.device, tokens.dtype, self.interpolation_scale_w)
84
+ # split features into three along the feature dimension, and apply rope1d to each chunk
85
+ t, y, x = tokens.chunk(3, dim=-1)
86
+ t = self.apply_rope1d(t, poses[0], cos_t, sin_t)
87
+ y = self.apply_rope1d(y, poses[1], cos_y, sin_y)
88
+ x = self.apply_rope1d(x, poses[2], cos_x, sin_x)
89
+ tokens = torch.cat((t, y, x), dim=-1)
90
+ return tokens
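+ # Minimal usage sketch, assuming query/key tokens of shape (batch, heads, t*h*w, dim) with dim divisible by 3:
+ #     get_pos = PositionGetter3D()
+ #     rope = RoPE3D(interpolation_scale_thw=(1, 1, 1))
+ #     positions = get_pos(batch, t, h, w, tokens.device)
+ #     tokens = rope(tokens, positions)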
allegro/models/transformers/transformer_3d_allegro.py ADDED
@@ -0,0 +1,416 @@
1
+ # Adapted from Open-Sora-Plan
2
+
3
+ # This source code is licensed under the license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ # --------------------------------------------------------
6
+ # References:
7
+ # Open-Sora-Plan: https://github.com/PKU-YuanGroup/Open-Sora-Plan
8
+ # --------------------------------------------------------
9
+
10
+ from dataclasses import dataclass
11
+ from typing import Any, Dict, Optional
12
+
13
+ import torch
14
+ import torch.nn.functional as F
15
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
16
+
17
+
18
+ from diffusers.models.modeling_utils import ModelMixin
19
+
20
+ from diffusers.utils import BaseOutput, is_xformers_available
21
+ from einops import rearrange
22
+ from torch import nn
23
+ from diffusers.models.embeddings import PixArtAlphaTextProjection
24
+
25
+ from allegro.models.transformers.block import to_2tuple, BasicTransformerBlock, AdaLayerNormSingle
26
+ from allegro.models.transformers.embedding import PatchEmbed2D
27
+
28
+ from diffusers.utils import logging
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+ @dataclass
33
+ class Transformer3DModelOutput(BaseOutput):
34
+ """
35
+ The output of [`AllegroTransformer3DModel`].
36
+
37
+ Args:
38
+ sample (`torch.FloatTensor` of shape `(batch_size, num_channels, num_frames, height, width)`):
39
+ The hidden states output conditioned on the `encoder_hidden_states` input.
41
+ """
42
+
43
+ sample: torch.FloatTensor
44
+
45
+
46
+ class AllegroTransformer3DModel(ModelMixin, ConfigMixin):
47
+ _supports_gradient_checkpointing = True
48
+
49
+ """
50
+ A 3D Transformer model for video-like data.
51
+
52
+ Parameters:
53
+ num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
54
+ attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
55
+ in_channels (`int`, *optional*):
56
+ The number of channels in the input and output (specify if the input is **continuous**).
57
+ num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
58
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
59
+ cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.
60
+ sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).
61
+ This is fixed during training since it is used to learn a number of position embeddings.
62
+ num_vector_embeds (`int`, *optional*):
63
+ The number of classes of the vector embeddings of the latent pixels (specify if the input is **discrete**).
64
+ Includes the class for the masked latent pixel.
65
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward.
66
+ num_embeds_ada_norm ( `int`, *optional*):
67
+ The number of diffusion steps used during training. Pass if at least one of the norm_layers is
68
+ `AdaLayerNorm`. This is fixed during training since it is used to learn a number of embeddings that are
69
+ added to the hidden states.
70
+
71
+ During inference, you can denoise for up to but not more steps than `num_embeds_ada_norm`.
72
+ attention_bias (`bool`, *optional*):
73
+ Configure if the `TransformerBlocks` attention should contain a bias parameter.
74
+ """
75
+
76
+ @register_to_config
77
+ def __init__(
78
+ self,
79
+ num_attention_heads: int = 16,
80
+ attention_head_dim: int = 88,
81
+ in_channels: Optional[int] = None,
82
+ out_channels: Optional[int] = None,
83
+ num_layers: int = 1,
84
+ dropout: float = 0.0,
85
+ cross_attention_dim: Optional[int] = None,
86
+ attention_bias: bool = False,
87
+ sample_size: Optional[int] = None,
88
+ sample_size_t: Optional[int] = None,
89
+ patch_size: Optional[int] = None,
90
+ patch_size_t: Optional[int] = None,
91
+ activation_fn: str = "geglu",
92
+ num_embeds_ada_norm: Optional[int] = None,
93
+ use_linear_projection: bool = False,
94
+ only_cross_attention: bool = False,
95
+ double_self_attention: bool = False,
96
+ upcast_attention: bool = False,
97
+ norm_type: str = "ada_norm",
98
+ norm_elementwise_affine: bool = True,
99
+ norm_eps: float = 1e-5,
100
+ caption_channels: int = None,
101
+ interpolation_scale_h: float = None,
102
+ interpolation_scale_w: float = None,
103
+ interpolation_scale_t: float = None,
104
+ use_additional_conditions: Optional[bool] = None,
105
+ sa_attention_mode: str = "flash",
106
+ ca_attention_mode: str = 'xformers',
107
+ downsampler: str = None,
108
+ use_rope: bool = False,
109
+ model_max_length: int = 300,
110
+ ):
111
+ super().__init__()
112
+ self.use_linear_projection = use_linear_projection
113
+ self.interpolation_scale_t = interpolation_scale_t
114
+ self.interpolation_scale_h = interpolation_scale_h
115
+ self.interpolation_scale_w = interpolation_scale_w
116
+ self.downsampler = downsampler
117
+ self.caption_channels = caption_channels
118
+ self.num_attention_heads = num_attention_heads
119
+ self.attention_head_dim = attention_head_dim
120
+ inner_dim = num_attention_heads * attention_head_dim
121
+ self.inner_dim = inner_dim
122
+ self.in_channels = in_channels
123
+ self.out_channels = in_channels if out_channels is None else out_channels
124
+ self.use_rope = use_rope
125
+ self.model_max_length = model_max_length
126
+ self.num_layers = num_layers
127
+ self.config.hidden_size = inner_dim
128
+
129
+
130
+ # 1. AllegroTransformer3DModel operates on patched continuous video latents of shape `(batch_size, num_channels, num_frames, height, width)`
131
+ # (there is no discrete / vector-quantized input path in this model)
132
+ assert in_channels is not None and patch_size is not None
133
+
134
+ # 2. Initialize the right blocks.
135
+ # Initialize the output blocks and other projection blocks when necessary.
136
+
137
+ assert self.config.sample_size_t is not None, "AllegroTransformer3DModel over patched input must provide sample_size_t"
138
+ assert self.config.sample_size is not None, "AllegroTransformer3DModel over patched input must provide sample_size"
139
+ # assert not (self.config.sample_size_t == 1 and self.config.patch_size_t == 2), "Images do not need patchify in t-dim"
140
+
141
+ self.num_frames = self.config.sample_size_t
142
+ self.config.sample_size = to_2tuple(self.config.sample_size)
143
+ self.height = self.config.sample_size[0]
144
+ self.width = self.config.sample_size[1]
145
+ self.patch_size_t = self.config.patch_size_t
146
+ self.patch_size = self.config.patch_size
147
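+ # default interpolation scales are derived from a base latent size of 16 x 30 x 40 (t, h, w)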
+ interpolation_scale_t = ((self.config.sample_size_t - 1) // 16 + 1) if self.config.sample_size_t % 2 == 1 else self.config.sample_size_t / 16
148
+ interpolation_scale_t = (
149
+ self.config.interpolation_scale_t if self.config.interpolation_scale_t is not None else interpolation_scale_t
150
+ )
151
+ interpolation_scale = (
152
+ self.config.interpolation_scale_h if self.config.interpolation_scale_h is not None else self.config.sample_size[0] / 30,
153
+ self.config.interpolation_scale_w if self.config.interpolation_scale_w is not None else self.config.sample_size[1] / 40,
154
+ )
155
+ self.pos_embed = PatchEmbed2D(
156
+ num_frames=self.config.sample_size_t,
157
+ height=self.config.sample_size[0],
158
+ width=self.config.sample_size[1],
159
+ patch_size_t=self.config.patch_size_t,
160
+ patch_size=self.config.patch_size,
161
+ in_channels=self.in_channels,
162
+ embed_dim=self.inner_dim,
163
+ interpolation_scale=interpolation_scale,
164
+ interpolation_scale_t=interpolation_scale_t,
165
+ use_abs_pos=not self.config.use_rope,
166
+ )
167
+ interpolation_scale_thw = (interpolation_scale_t, *interpolation_scale)
168
+
169
+ # 3. Define transformers blocks, spatial attention
170
+ self.transformer_blocks = nn.ModuleList(
171
+ [
172
+ BasicTransformerBlock(
173
+ inner_dim,
174
+ num_attention_heads,
175
+ attention_head_dim,
176
+ dropout=dropout,
177
+ cross_attention_dim=cross_attention_dim,
178
+ activation_fn=activation_fn,
179
+ num_embeds_ada_norm=num_embeds_ada_norm,
180
+ attention_bias=attention_bias,
181
+ only_cross_attention=only_cross_attention,
182
+ double_self_attention=double_self_attention,
183
+ upcast_attention=upcast_attention,
184
+ norm_type=norm_type,
185
+ norm_elementwise_affine=norm_elementwise_affine,
186
+ norm_eps=norm_eps,
187
+ sa_attention_mode=sa_attention_mode,
188
+ ca_attention_mode=ca_attention_mode,
189
+ use_rope=use_rope,
190
+ interpolation_scale_thw=interpolation_scale_thw,
191
+ block_idx=d,
192
+ )
193
+ for d in range(num_layers)
194
+ ]
195
+ )
196
+
197
+ # 4. Define output layers
198
+
199
+ if norm_type != "ada_norm_single":
200
+ self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6)
201
+ self.proj_out_1 = nn.Linear(inner_dim, 2 * inner_dim)
202
+ self.proj_out_2 = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels)
203
+ elif norm_type == "ada_norm_single":
204
+ self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6)
205
+ self.scale_shift_table = nn.Parameter(torch.randn(2, inner_dim) / inner_dim**0.5)
206
+ self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels)
207
+
208
+ # 5. PixArt-Alpha blocks.
209
+ self.adaln_single = None
210
+ self.use_additional_conditions = False
211
+ if norm_type == "ada_norm_single":
212
+ # self.use_additional_conditions = self.config.sample_size[0] == 128 # False, 128 -> 1024
213
+ # TODO(Sayak, PVP) clean this, for now we use sample size to determine whether to use
214
+ # additional conditions until we find better name
215
+ self.adaln_single = AdaLayerNormSingle(inner_dim, use_additional_conditions=self.use_additional_conditions)
216
+
217
+ self.caption_projection = None
218
+ if caption_channels is not None:
219
+ self.caption_projection = PixArtAlphaTextProjection(
220
+ in_features=caption_channels, hidden_size=inner_dim
221
+ )
222
+
223
+ self.gradient_checkpointing = False
224
+
225
+ def _set_gradient_checkpointing(self, module, value=False):
226
+ self.gradient_checkpointing = value
227
+
228
+
229
+ def forward(
230
+ self,
231
+ hidden_states: torch.Tensor,
232
+ timestep: Optional[torch.LongTensor] = None,
233
+ encoder_hidden_states: Optional[torch.Tensor] = None,
234
+ added_cond_kwargs: Dict[str, torch.Tensor] = None,
235
+ class_labels: Optional[torch.LongTensor] = None,
236
+ cross_attention_kwargs: Dict[str, Any] = None,
237
+ attention_mask: Optional[torch.Tensor] = None,
238
+ encoder_attention_mask: Optional[torch.Tensor] = None,
239
+ return_dict: bool = True,
240
+ ):
241
+ """
242
+ The [`AllegroTransformer3DModel`] forward method.
243
+
244
+ Args:
245
+ hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, num_frames, height, width)`):
246
+ Input `hidden_states`.
247
+ encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
248
+ Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
249
+ self-attention.
250
+ timestep ( `torch.LongTensor`, *optional*):
251
+ Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
252
+ class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
253
+ Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
254
+ `AdaLayerNormZero`.
255
+ added_cond_kwargs ( `Dict[str, Any]`, *optional*):
256
+ A kwargs dictionary that if specified is passed along to the `AdaLayerNormSingle`
257
+ cross_attention_kwargs ( `Dict[str, Any]`, *optional*):
258
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
259
+ `self.processor` in
260
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
261
+ attention_mask ( `torch.Tensor`, *optional*):
262
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
263
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
264
+ negative values to the attention scores corresponding to "discard" tokens.
265
+ encoder_attention_mask ( `torch.Tensor`, *optional*):
266
+ Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
267
+
268
+ * Mask `(batch, sequence_length)` True = keep, False = discard.
269
+ * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
270
+
271
+ If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
272
+ above. This bias will be added to the cross-attention scores.
273
+ return_dict (`bool`, *optional*, defaults to `True`):
274
+ Whether or not to return a [`Transformer3DModelOutput`] instead of a plain
275
+ tuple.
276
+
277
+ Returns:
278
+ If `return_dict` is True, a [`Transformer3DModelOutput`] is returned, otherwise a
279
+ `tuple` where the first element is the sample tensor.
280
+ """
281
+ batch_size, c, frame, h, w = hidden_states.shape
282
+
283
+ # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
284
+ # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
285
+ # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
286
+ # expects mask of shape:
287
+ # [batch, key_tokens]
288
+ # adds singleton query_tokens dimension:
289
+ # [batch, 1, key_tokens]
290
+ # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
291
+ # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
292
+ # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
+ attention_mask_vid, attention_mask_img = None, None
293
+ if attention_mask is not None and attention_mask.ndim == 4:
294
+ # assume that mask is expressed as:
295
+ # (1 = keep, 0 = discard)
296
+ # convert mask into a bias that can be added to attention scores:
297
+ # (keep = +0, discard = -10000.0)
298
+ # b, frame+use_image_num, h, w -> a video with images
299
+ # b, 1, h, w -> only images
300
+ attention_mask = attention_mask.to(self.dtype)
301
+ attention_mask_vid = attention_mask[:, :frame] # b, frame, h, w
302
+
303
+ if attention_mask_vid.numel() > 0:
304
+ attention_mask_vid = attention_mask_vid.unsqueeze(1) # b 1 t h w
305
+ attention_mask_vid = F.max_pool3d(attention_mask_vid, kernel_size=(self.patch_size_t, self.patch_size, self.patch_size),
306
+ stride=(self.patch_size_t, self.patch_size, self.patch_size))
307
+ attention_mask_vid = rearrange(attention_mask_vid, 'b 1 t h w -> (b 1) 1 (t h w)')
308
+
309
+ attention_mask_vid = (1 - attention_mask_vid.bool().to(self.dtype)) * -10000.0 if attention_mask_vid.numel() > 0 else None
310
+
311
+ # convert encoder_attention_mask to a bias the same way we do for attention_mask
312
+ if encoder_attention_mask is not None and encoder_attention_mask.ndim == 3:
313
+ # b, 1+use_image_num, l -> a video with images
314
+ # b, 1, l -> only images
315
+ encoder_attention_mask = (1 - encoder_attention_mask.to(self.dtype)) * -10000.0
316
+ encoder_attention_mask_vid = rearrange(encoder_attention_mask, 'b 1 l -> (b 1) 1 l') if encoder_attention_mask.numel() > 0 else None
317
+
318
+ # 1. Input
319
+ frame = frame // self.patch_size_t # patchify
320
+ # print('frame', frame)
321
+ height, width = hidden_states.shape[-2] // self.patch_size, hidden_states.shape[-1] // self.patch_size
322
+
323
+ added_cond_kwargs = {"resolution": None, "aspect_ratio": None} if added_cond_kwargs is None else added_cond_kwargs
324
+ hidden_states, encoder_hidden_states_vid, \
325
+ timestep_vid, embedded_timestep_vid = self._operate_on_patched_inputs(
326
+ hidden_states, encoder_hidden_states, timestep, added_cond_kwargs, batch_size,
327
+ )
328
+
329
+
330
+ for _, block in enumerate(self.transformer_blocks):
331
+ hidden_states = block(
332
+ hidden_states,
333
+ attention_mask_vid,
334
+ encoder_hidden_states_vid,
335
+ encoder_attention_mask_vid,
336
+ timestep_vid,
337
+ cross_attention_kwargs,
338
+ class_labels,
339
+ frame=frame,
340
+ height=height,
341
+ width=width,
342
+ )
343
+
344
+ # 3. Output
345
+ output = None
346
+ if hidden_states is not None:
347
+ output = self._get_output_for_patched_inputs(
348
+ hidden_states=hidden_states,
349
+ timestep=timestep_vid,
350
+ class_labels=class_labels,
351
+ embedded_timestep=embedded_timestep_vid,
352
+ num_frames=frame,
353
+ height=height,
354
+ width=width,
355
+ ) # b c t h w
356
+
357
+ if not return_dict:
358
+ return (output,)
359
+
360
+ return Transformer3DModelOutput(sample=output)
361
+
362
+ def _operate_on_patched_inputs(self, hidden_states, encoder_hidden_states, timestep, added_cond_kwargs, batch_size):
363
+ # batch_size = hidden_states.shape[0]
364
+ hidden_states_vid = self.pos_embed(hidden_states.to(self.dtype))
365
+ timestep_vid = None
366
+ embedded_timestep_vid = None
367
+ encoder_hidden_states_vid = None
368
+
369
+ if self.adaln_single is not None:
370
+ if self.use_additional_conditions and added_cond_kwargs is None:
371
+ raise ValueError(
372
+ "`added_cond_kwargs` cannot be None when using additional conditions for `adaln_single`."
373
+ )
374
+ timestep, embedded_timestep = self.adaln_single(
375
+ timestep, added_cond_kwargs, batch_size=batch_size, hidden_dtype=self.dtype
376
+ ) # b 6d, b d
377
+
378
+ timestep_vid = timestep
379
+ embedded_timestep_vid = embedded_timestep
380
+
381
+ if self.caption_projection is not None:
382
+ encoder_hidden_states = self.caption_projection(encoder_hidden_states) # b, 1+use_image_num, l, d or b, 1, l, d
383
+ encoder_hidden_states_vid = rearrange(encoder_hidden_states[:, :1], 'b 1 l d -> (b 1) l d')
384
+
385
+ return hidden_states_vid, encoder_hidden_states_vid, timestep_vid, embedded_timestep_vid
386
+
387
+ def _get_output_for_patched_inputs(
388
+ self, hidden_states, timestep, class_labels, embedded_timestep, num_frames, height=None, width=None
389
+ ):
390
+ # import ipdb;ipdb.set_trace()
391
+ if self.config.norm_type != "ada_norm_single":
392
+ conditioning = self.transformer_blocks[0].norm1.emb(
393
+ timestep, class_labels, hidden_dtype=self.dtype
394
+ )
395
+ shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1)
396
+ hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None]
397
+ hidden_states = self.proj_out_2(hidden_states)
398
+ elif self.config.norm_type == "ada_norm_single":
399
+ shift, scale = (self.scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1)
400
+ hidden_states = self.norm_out(hidden_states)
401
+ # Modulation
402
+ hidden_states = hidden_states * (1 + scale) + shift
403
+ hidden_states = self.proj_out(hidden_states)
404
+ hidden_states = hidden_states.squeeze(1)
405
+
406
+ # unpatchify
407
+ if self.adaln_single is None:
408
+ height = width = int(hidden_states.shape[1] ** 0.5)
409
+ hidden_states = hidden_states.reshape(
410
+ shape=(-1, num_frames, height, width, self.patch_size_t, self.patch_size, self.patch_size, self.out_channels)
411
+ )
412
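+ # dims: n=batch, t/h/w=patch grid, o=patch_size_t, p=q=patch_size, c=channels; fold the patch dims back into t, h, w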
+ hidden_states = torch.einsum("nthwopqc->nctohpwq", hidden_states)
413
+ output = hidden_states.reshape(
414
+ shape=(-1, self.out_channels, num_frames * self.patch_size_t, height * self.patch_size, width * self.patch_size)
415
+ )
416
+ return output
allegro/models/vae/modules.py ADDED
@@ -0,0 +1,415 @@
1
+ from typing import Optional, Tuple, Union
2
+ from einops import rearrange
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from diffusers.models.attention_processor import Attention
7
+ from diffusers.models.resnet import ResnetBlock2D
8
+ from diffusers.models.upsampling import Upsample2D
9
+ from diffusers.models.downsampling import Downsample2D
10
+
11
+
12
+ class TemporalConvBlock(nn.Module):
13
+ """
14
+ Temporal convolutional layer that can be used for video (sequence of images) input. Code mostly copied from:
15
+ https://github.com/modelscope/modelscope/blob/1509fdb973e5871f37148a4b5e5964cafd43e64d/modelscope/models/multi_modal/video_synthesis/unet_sd.py#L1016
16
+ """
17
+
18
+ def __init__(self, in_dim, out_dim=None, dropout=0.0, up_sample=False, down_sample=False, spa_stride=1):
19
+ super().__init__()
20
+ out_dim = out_dim or in_dim
21
+ self.in_dim = in_dim
22
+ self.out_dim = out_dim
23
+ spa_pad = int((spa_stride-1)*0.5)
24
+ temp_pad = 0
25
+ self.temp_pad = temp_pad
26
+
27
+ if down_sample:
28
+ self.conv1 = nn.Sequential(
29
+ nn.GroupNorm(32, in_dim),
30
+ nn.SiLU(),
31
+ nn.Conv3d(in_dim, out_dim, (2, spa_stride, spa_stride), stride=(2,1,1), padding=(0, spa_pad, spa_pad))
32
+ )
33
+ elif up_sample:
34
+ self.conv1 = nn.Sequential(
35
+ nn.GroupNorm(32, in_dim),
36
+ nn.SiLU(),
37
+ nn.Conv3d(in_dim, out_dim*2, (1, spa_stride, spa_stride), padding=(0, spa_pad, spa_pad))
38
+ )
39
+ else:
40
+ self.conv1 = nn.Sequential(
41
+ nn.GroupNorm(32, in_dim),
42
+ nn.SiLU(),
43
+ nn.Conv3d(in_dim, out_dim, (3, spa_stride, spa_stride), padding=(temp_pad, spa_pad, spa_pad))
44
+ )
45
+ self.conv2 = nn.Sequential(
46
+ nn.GroupNorm(32, out_dim),
47
+ nn.SiLU(),
48
+ nn.Dropout(dropout),
49
+ nn.Conv3d(out_dim, in_dim, (3, spa_stride, spa_stride), padding=(temp_pad, spa_pad, spa_pad)),
50
+ )
51
+ self.conv3 = nn.Sequential(
52
+ nn.GroupNorm(32, out_dim),
53
+ nn.SiLU(),
54
+ nn.Dropout(dropout),
55
+ nn.Conv3d(out_dim, in_dim, (3, spa_stride, spa_stride), padding=(temp_pad, spa_pad, spa_pad)),
56
+ )
57
+ self.conv4 = nn.Sequential(
58
+ nn.GroupNorm(32, out_dim),
59
+ nn.SiLU(),
60
+ nn.Conv3d(out_dim, in_dim, (3, spa_stride, spa_stride), padding=(temp_pad, spa_pad, spa_pad)),
61
+ )
62
+
63
+ # zero out the last layer params, so the conv block is an identity at initialization
64
+ nn.init.zeros_(self.conv4[-1].weight)
65
+ nn.init.zeros_(self.conv4[-1].bias)
66
+
67
+ self.down_sample = down_sample
68
+ self.up_sample = up_sample
69
+
70
+
71
+ def forward(self, hidden_states):
72
+ identity = hidden_states
73
+
74
+ if self.down_sample:
75
+ identity = identity[:,:,::2]
76
+ elif self.up_sample:
77
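+ # temporal 2x upsampling: interleave duplicated frames to build the doubled-length identity branch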
+ hidden_states_new = torch.cat((hidden_states,hidden_states),dim=2)
78
+ hidden_states_new[:, :, 0::2] = hidden_states
79
+ hidden_states_new[:, :, 1::2] = hidden_states
80
+ identity = hidden_states_new
81
+ del hidden_states_new
82
+
83
+ if self.down_sample or self.up_sample:
84
+ hidden_states = self.conv1(hidden_states)
85
+ else:
86
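+ # pad the time axis by replicating the first and last frames (conv1 uses a temporal kernel of 3 with no temporal padding)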
+ hidden_states = torch.cat((hidden_states[:,:,0:1], hidden_states), dim=2)
87
+ hidden_states = torch.cat((hidden_states,hidden_states[:,:,-1:]), dim=2)
88
+ hidden_states = self.conv1(hidden_states)
89
+
90
+
91
+ if self.up_sample:
92
+ hidden_states = rearrange(hidden_states, 'b (d c) f h w -> b c (f d) h w', d=2)
93
+
94
+ hidden_states = torch.cat((hidden_states[:,:,0:1], hidden_states), dim=2)
95
+ hidden_states = torch.cat((hidden_states,hidden_states[:,:,-1:]), dim=2)
96
+ hidden_states = self.conv2(hidden_states)
97
+ hidden_states = torch.cat((hidden_states[:,:,0:1], hidden_states), dim=2)
98
+ hidden_states = torch.cat((hidden_states,hidden_states[:,:,-1:]), dim=2)
99
+ hidden_states = self.conv3(hidden_states)
100
+ hidden_states = torch.cat((hidden_states[:,:,0:1], hidden_states), dim=2)
101
+ hidden_states = torch.cat((hidden_states,hidden_states[:,:,-1:]), dim=2)
102
+ hidden_states = self.conv4(hidden_states)
103
+
104
+ hidden_states = identity + hidden_states
105
+
106
+ return hidden_states
107
+
108
+
109
+ class DownEncoderBlock3D(nn.Module):
110
+ def __init__(
111
+ self,
112
+ in_channels: int,
113
+ out_channels: int,
114
+ dropout: float = 0.0,
115
+ num_layers: int = 1,
116
+ resnet_eps: float = 1e-6,
117
+ resnet_time_scale_shift: str = "default",
118
+ resnet_act_fn: str = "swish",
119
+ resnet_groups: int = 32,
120
+ resnet_pre_norm: bool = True,
121
+ output_scale_factor=1.0,
122
+ add_downsample=True,
123
+ add_temp_downsample=False,
124
+ downsample_padding=1,
125
+ ):
126
+ super().__init__()
127
+ resnets = []
128
+ temp_convs = []
129
+
130
+ for i in range(num_layers):
131
+ in_channels = in_channels if i == 0 else out_channels
132
+ resnets.append(
133
+ ResnetBlock2D(
134
+ in_channels=in_channels,
135
+ out_channels=out_channels,
136
+ temb_channels=None,
137
+ eps=resnet_eps,
138
+ groups=resnet_groups,
139
+ dropout=dropout,
140
+ time_embedding_norm=resnet_time_scale_shift,
141
+ non_linearity=resnet_act_fn,
142
+ output_scale_factor=output_scale_factor,
143
+ pre_norm=resnet_pre_norm,
144
+ )
145
+ )
146
+ temp_convs.append(
147
+ TemporalConvBlock(
148
+ out_channels,
149
+ out_channels,
150
+ dropout=0.1,
151
+ )
152
+ )
153
+
154
+ self.resnets = nn.ModuleList(resnets)
155
+ self.temp_convs = nn.ModuleList(temp_convs)
156
+
157
+ if add_temp_downsample:
158
+ self.temp_convs_down = TemporalConvBlock(
159
+ out_channels,
160
+ out_channels,
161
+ dropout=0.1,
162
+ down_sample=True,
163
+ spa_stride=3
164
+ )
165
+ self.add_temp_downsample = add_temp_downsample
166
+
167
+ if add_downsample:
168
+ self.downsamplers = nn.ModuleList(
169
+ [
170
+ Downsample2D(
171
+ out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
172
+ )
173
+ ]
174
+ )
175
+ else:
176
+ self.downsamplers = None
177
+
178
+ def _set_partial_grad(self):
179
+ for temp_conv in self.temp_convs:
180
+ temp_conv.requires_grad_(True)
181
+ if self.downsamplers:
182
+ for down_layer in self.downsamplers:
183
+ down_layer.requires_grad_(True)
184
+
185
+ def forward(self, hidden_states):
186
+ bz = hidden_states.shape[0]
187
+
188
+ for resnet, temp_conv in zip(self.resnets, self.temp_convs):
189
+ hidden_states = rearrange(hidden_states, 'b c n h w -> (b n) c h w')
190
+ hidden_states = resnet(hidden_states, temb=None)
191
+ hidden_states = rearrange(hidden_states, '(b n) c h w -> b c n h w', b=bz)
192
+ hidden_states = temp_conv(hidden_states)
193
+ if self.add_temp_downsample:
194
+ hidden_states = self.temp_convs_down(hidden_states)
195
+
196
+ if self.downsamplers is not None:
197
+ hidden_states = rearrange(hidden_states, 'b c n h w -> (b n) c h w')
198
+ for downsampler in self.downsamplers:
199
+ hidden_states = downsampler(hidden_states)
200
+ hidden_states = rearrange(hidden_states, '(b n) c h w -> b c n h w', b=bz)
201
+ return hidden_states
202
+
203
+
204
+ class UpDecoderBlock3D(nn.Module):
205
+ def __init__(
206
+ self,
207
+ in_channels: int,
208
+ out_channels: int,
209
+ dropout: float = 0.0,
210
+ num_layers: int = 1,
211
+ resnet_eps: float = 1e-6,
212
+ resnet_time_scale_shift: str = "default", # default, spatial
213
+ resnet_act_fn: str = "swish",
214
+ resnet_groups: int = 32,
215
+ resnet_pre_norm: bool = True,
216
+ output_scale_factor=1.0,
217
+ add_upsample=True,
218
+ add_temp_upsample=False,
219
+ temb_channels=None,
220
+ ):
221
+ super().__init__()
222
+ self.add_upsample = add_upsample
223
+
224
+ resnets = []
225
+ temp_convs = []
226
+
227
+ for i in range(num_layers):
228
+ input_channels = in_channels if i == 0 else out_channels
229
+
230
+ resnets.append(
231
+ ResnetBlock2D(
232
+ in_channels=input_channels,
233
+ out_channels=out_channels,
234
+ temb_channels=temb_channels,
235
+ eps=resnet_eps,
236
+ groups=resnet_groups,
237
+ dropout=dropout,
238
+ time_embedding_norm=resnet_time_scale_shift,
239
+ non_linearity=resnet_act_fn,
240
+ output_scale_factor=output_scale_factor,
241
+ pre_norm=resnet_pre_norm,
242
+ )
243
+ )
244
+ temp_convs.append(
245
+ TemporalConvBlock(
246
+ out_channels,
247
+ out_channels,
248
+ dropout=0.1,
249
+ )
250
+ )
251
+
252
+ self.resnets = nn.ModuleList(resnets)
253
+ self.temp_convs = nn.ModuleList(temp_convs)
254
+
255
+ self.add_temp_upsample = add_temp_upsample
256
+ if add_temp_upsample:
257
+ self.temp_conv_up = TemporalConvBlock(
258
+ out_channels,
259
+ out_channels,
260
+ dropout=0.1,
261
+ up_sample=True,
262
+ spa_stride=3
263
+ )
264
+
265
+
266
+ if self.add_upsample:
267
+ # self.upsamplers = nn.ModuleList([PSUpsample2D(out_channels, use_conv=True, use_pixel_shuffle=True, out_channels=out_channels)])
268
+ self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
269
+ else:
270
+ self.upsamplers = None
271
+
272
+ def _set_partial_grad(self):
273
+ for temp_conv in self.temp_convs:
274
+ temp_conv.requires_grad_(True)
275
+ if self.add_upsample:
276
+ self.upsamplers.requires_grad_(True)
277
+
278
+ def forward(self, hidden_states):
279
+ bz = hidden_states.shape[0]
280
+
281
+ for resnet, temp_conv in zip(self.resnets, self.temp_convs):
282
+ hidden_states = rearrange(hidden_states, 'b c n h w -> (b n) c h w')
283
+ hidden_states = resnet(hidden_states, temb=None)
284
+ hidden_states = rearrange(hidden_states, '(b n) c h w -> b c n h w', b=bz)
285
+ hidden_states = temp_conv(hidden_states)
286
+ if self.add_temp_upsample:
287
+ hidden_states = self.temp_conv_up(hidden_states)
288
+
289
+ if self.upsamplers is not None:
290
+ hidden_states = rearrange(hidden_states, 'b c n h w -> (b n) c h w')
291
+ for upsampler in self.upsamplers:
292
+ hidden_states = upsampler(hidden_states)
293
+ hidden_states = rearrange(hidden_states, '(b n) c h w -> b c n h w', b=bz)
294
+ return hidden_states
295
+
296
+
297
+ class UNetMidBlock3DConv(nn.Module):
298
+ def __init__(
299
+ self,
300
+ in_channels: int,
301
+ temb_channels: int,
302
+ dropout: float = 0.0,
303
+ num_layers: int = 1,
304
+ resnet_eps: float = 1e-6,
305
+ resnet_time_scale_shift: str = "default", # default, spatial
306
+ resnet_act_fn: str = "swish",
307
+ resnet_groups: int = 32,
308
+ resnet_pre_norm: bool = True,
309
+ add_attention: bool = True,
310
+ attention_head_dim=1,
311
+ output_scale_factor=1.0,
312
+ ):
313
+ super().__init__()
314
+ resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
315
+ self.add_attention = add_attention
316
+
317
+ # there is always at least one resnet
318
+ resnets = [
319
+ ResnetBlock2D(
320
+ in_channels=in_channels,
321
+ out_channels=in_channels,
322
+ temb_channels=temb_channels,
323
+ eps=resnet_eps,
324
+ groups=resnet_groups,
325
+ dropout=dropout,
326
+ time_embedding_norm=resnet_time_scale_shift,
327
+ non_linearity=resnet_act_fn,
328
+ output_scale_factor=output_scale_factor,
329
+ pre_norm=resnet_pre_norm,
330
+ )
331
+ ]
332
+ temp_convs = [
333
+ TemporalConvBlock(
334
+ in_channels,
335
+ in_channels,
336
+ dropout=0.1,
337
+ )
338
+ ]
339
+ attentions = []
340
+
341
+ if attention_head_dim is None:
342
+ attention_head_dim = in_channels
343
+
344
+ for _ in range(num_layers):
345
+ if self.add_attention:
346
+ attentions.append(
347
+ Attention(
348
+ in_channels,
349
+ heads=in_channels // attention_head_dim,
350
+ dim_head=attention_head_dim,
351
+ rescale_output_factor=output_scale_factor,
352
+ eps=resnet_eps,
353
+ norm_num_groups=resnet_groups if resnet_time_scale_shift == "default" else None,
354
+ spatial_norm_dim=temb_channels if resnet_time_scale_shift == "spatial" else None,
355
+ residual_connection=True,
356
+ bias=True,
357
+ upcast_softmax=True,
358
+ _from_deprecated_attn_block=True,
359
+ )
360
+ )
361
+ else:
362
+ attentions.append(None)
363
+
364
+ resnets.append(
365
+ ResnetBlock2D(
366
+ in_channels=in_channels,
367
+ out_channels=in_channels,
368
+ temb_channels=temb_channels,
369
+ eps=resnet_eps,
370
+ groups=resnet_groups,
371
+ dropout=dropout,
372
+ time_embedding_norm=resnet_time_scale_shift,
373
+ non_linearity=resnet_act_fn,
374
+ output_scale_factor=output_scale_factor,
375
+ pre_norm=resnet_pre_norm,
376
+ )
377
+ )
378
+
379
+ temp_convs.append(
380
+ TemporalConvBlock(
381
+ in_channels,
382
+ in_channels,
383
+ dropout=0.1,
384
+ )
385
+ )
386
+
387
+ self.resnets = nn.ModuleList(resnets)
388
+ self.temp_convs = nn.ModuleList(temp_convs)
389
+ self.attentions = nn.ModuleList(attentions)
390
+
391
+ def _set_partial_grad(self):
392
+ for temp_conv in self.temp_convs:
393
+ temp_conv.requires_grad_(True)
394
+
395
+ def forward(
396
+ self,
397
+ hidden_states,
398
+ ):
399
+ bz = hidden_states.shape[0]
400
+ hidden_states = rearrange(hidden_states, 'b c n h w -> (b n) c h w')
401
+
402
+ hidden_states = self.resnets[0](hidden_states, temb=None)
403
+ hidden_states = rearrange(hidden_states, '(b n) c h w -> b c n h w', b=bz)
404
+ hidden_states = self.temp_convs[0](hidden_states)
405
+ hidden_states = rearrange(hidden_states, 'b c n h w -> (b n) c h w')
406
+
407
+ for attn, resnet, temp_conv in zip(
408
+ self.attentions, self.resnets[1:], self.temp_convs[1:]
409
+ ):
410
+ hidden_states = attn(hidden_states)
411
+ hidden_states = resnet(hidden_states, temb=None)
412
+ hidden_states = rearrange(hidden_states, '(b n) c h w -> b c n h w', b=bz)
413
+ hidden_states = temp_conv(hidden_states)
414
+ return hidden_states
415
+
allegro/models/vae/vae_allegro.py ADDED
@@ -0,0 +1,568 @@
1
+ import math
2
+ import os
3
+ from typing import Optional, Tuple, Union
4
+ from einops import rearrange
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+
9
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
10
+ from diffusers.models.modeling_utils import ModelMixin
11
+ from diffusers.models.modeling_outputs import AutoencoderKLOutput
12
+ from diffusers.models.autoencoders.vae import DecoderOutput, DiagonalGaussianDistribution
13
+ from diffusers.models.attention_processor import SpatialNorm
14
+
15
+ from allegro.models.vae.modules import DownEncoderBlock3D, UNetMidBlock3DConv, UpDecoderBlock3D
16
+
17
+
18
+ class Encoder3D(nn.Module):
19
+ def __init__(
20
+ self,
21
+ in_channels=3,
22
+ out_channels=3,
23
+ num_blocks=4,
24
+ blocks_temp_li=[False, False, False, False],
25
+ block_out_channels=(64,),
26
+ layers_per_block=2,
27
+ norm_num_groups=32,
28
+ act_fn="silu",
29
+ double_z=True,
30
+ ):
31
+ super().__init__()
32
+ self.layers_per_block = layers_per_block
33
+ self.blocks_temp_li = blocks_temp_li
34
+
35
+ self.conv_in = nn.Conv2d(
36
+ in_channels,
37
+ block_out_channels[0],
38
+ kernel_size=3,
39
+ stride=1,
40
+ padding=1,
41
+ )
42
+
43
+ self.temp_conv_in = nn.Conv3d(
44
+ block_out_channels[0],
45
+ block_out_channels[0],
46
+ (3,1,1),
47
+ padding = (1, 0, 0)
48
+ )
49
+
50
+ self.mid_block = None
51
+ self.down_blocks = nn.ModuleList([])
52
+
53
+ # down
54
+ output_channel = block_out_channels[0]
55
+ for i in range(num_blocks):
56
+ input_channel = output_channel
57
+ output_channel = block_out_channels[i]
58
+ is_final_block = i == len(block_out_channels) - 1
59
+
60
+ down_block = DownEncoderBlock3D(
61
+ num_layers=self.layers_per_block,
62
+ in_channels=input_channel,
63
+ out_channels=output_channel,
64
+ add_downsample=not is_final_block,
65
+ add_temp_downsample=blocks_temp_li[i],
66
+ resnet_eps=1e-6,
67
+ downsample_padding=0,
68
+ resnet_act_fn=act_fn,
69
+ resnet_groups=norm_num_groups,
70
+ )
71
+ self.down_blocks.append(down_block)
72
+
73
+ # mid
74
+ self.mid_block = UNetMidBlock3DConv(
75
+ in_channels=block_out_channels[-1],
76
+ resnet_eps=1e-6,
77
+ resnet_act_fn=act_fn,
78
+ output_scale_factor=1,
79
+ resnet_time_scale_shift="default",
80
+ attention_head_dim=block_out_channels[-1],
81
+ resnet_groups=norm_num_groups,
82
+ temb_channels=None,
83
+ )
84
+
85
+ # out
86
+ self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
87
+ self.conv_act = nn.SiLU()
88
+
89
+ conv_out_channels = 2 * out_channels if double_z else out_channels
90
+
91
+ self.temp_conv_out = nn.Conv3d(block_out_channels[-1], block_out_channels[-1], (3,1,1), padding = (1, 0, 0))
92
+
93
+ self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
94
+
95
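+ # zero-init the temporal in/out convs so these residual 3D paths start as identity mappings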
+ nn.init.zeros_(self.temp_conv_in.weight)
96
+ nn.init.zeros_(self.temp_conv_in.bias)
97
+ nn.init.zeros_(self.temp_conv_out.weight)
98
+ nn.init.zeros_(self.temp_conv_out.bias)
99
+
100
+ self.gradient_checkpointing = False
101
+
102
+ def forward(self, x):
103
+ '''
104
+ x: [b, c, (tb f), h, w]
105
+ '''
106
+ bz = x.shape[0]
107
+ sample = rearrange(x, 'b c n h w -> (b n) c h w')
108
+ sample = self.conv_in(sample)
109
+
110
+ sample = rearrange(sample, '(b n) c h w -> b c n h w', b=bz)
111
+ temp_sample = sample
112
+ sample = self.temp_conv_in(sample)
113
+ sample = sample+temp_sample
114
+ # down
115
+ for b_id, down_block in enumerate(self.down_blocks):
116
+ sample = down_block(sample)
117
+ # middle
118
+ sample = self.mid_block(sample)
119
+
120
+ # post-process
121
+ sample = rearrange(sample, 'b c n h w -> (b n) c h w')
122
+ sample = self.conv_norm_out(sample)
123
+ sample = self.conv_act(sample)
124
+ sample = rearrange(sample, '(b n) c h w -> b c n h w', b=bz)
125
+
126
+ temp_sample = sample
127
+ sample = self.temp_conv_out(sample)
128
+ sample = sample+temp_sample
129
+ sample = rearrange(sample, 'b c n h w -> (b n) c h w')
130
+
131
+ sample = self.conv_out(sample)
132
+ sample = rearrange(sample, '(b n) c h w -> b c n h w', b=bz)
133
+ return sample
134
+
135
+ class Decoder3D(nn.Module):
136
+ def __init__(
137
+ self,
138
+ in_channels=4,
139
+ out_channels=3,
140
+ num_blocks=4,
141
+ blocks_temp_li=[False, False, False, False],
142
+ block_out_channels=(64,),
143
+ layers_per_block=2,
144
+ norm_num_groups=32,
145
+ act_fn="silu",
146
+ norm_type="group", # group, spatial
147
+ ):
148
+ super().__init__()
149
+ self.layers_per_block = layers_per_block
150
+ self.blocks_temp_li = blocks_temp_li
151
+
152
+ self.conv_in = nn.Conv2d(
153
+ in_channels,
154
+ block_out_channels[-1],
155
+ kernel_size=3,
156
+ stride=1,
157
+ padding=1,
158
+ )
159
+
160
+ self.temp_conv_in = nn.Conv3d(
161
+ block_out_channels[-1],
162
+ block_out_channels[-1],
163
+ (3,1,1),
164
+ padding = (1, 0, 0)
165
+ )
166
+
167
+ self.mid_block = None
168
+ self.up_blocks = nn.ModuleList([])
169
+
170
+ temb_channels = in_channels if norm_type == "spatial" else None
171
+
172
+ # mid
173
+ self.mid_block = UNetMidBlock3DConv(
174
+ in_channels=block_out_channels[-1],
175
+ resnet_eps=1e-6,
176
+ resnet_act_fn=act_fn,
177
+ output_scale_factor=1,
178
+ resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
179
+ attention_head_dim=block_out_channels[-1],
180
+ resnet_groups=norm_num_groups,
181
+ temb_channels=temb_channels,
182
+ )
183
+
184
+ # up
185
+ reversed_block_out_channels = list(reversed(block_out_channels))
186
+ output_channel = reversed_block_out_channels[0]
187
+ for i in range(num_blocks):
188
+ prev_output_channel = output_channel
189
+ output_channel = reversed_block_out_channels[i]
190
+
191
+ is_final_block = i == len(block_out_channels) - 1
192
+
193
+ up_block = UpDecoderBlock3D(
194
+ num_layers=self.layers_per_block + 1,
195
+ in_channels=prev_output_channel,
196
+ out_channels=output_channel,
197
+ add_upsample=not is_final_block,
198
+ add_temp_upsample=blocks_temp_li[i],
199
+ resnet_eps=1e-6,
200
+ resnet_act_fn=act_fn,
201
+ resnet_groups=norm_num_groups,
202
+ temb_channels=temb_channels,
203
+ resnet_time_scale_shift=norm_type,
204
+ )
205
+ self.up_blocks.append(up_block)
206
+ prev_output_channel = output_channel
207
+
208
+ # out
209
+ if norm_type == "spatial":
210
+ self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
211
+ else:
212
+ self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
213
+ self.conv_act = nn.SiLU()
214
+
215
+ self.temp_conv_out = nn.Conv3d(block_out_channels[0], block_out_channels[0], (3,1,1), padding = (1, 0, 0))
216
+ self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
217
+
218
+ nn.init.zeros_(self.temp_conv_in.weight)
219
+ nn.init.zeros_(self.temp_conv_in.bias)
220
+ nn.init.zeros_(self.temp_conv_out.weight)
221
+ nn.init.zeros_(self.temp_conv_out.bias)
222
+
223
+ self.gradient_checkpointing = False
224
+
225
+ def forward(self, z):
226
+ bz = z.shape[0]
227
+ sample = rearrange(z, 'b c n h w -> (b n) c h w')
228
+ sample = self.conv_in(sample)
229
+
230
+ sample = rearrange(sample, '(b n) c h w -> b c n h w', b=bz)
231
+ temp_sample = sample
232
+ sample = self.temp_conv_in(sample)
233
+ sample = sample+temp_sample
234
+
235
+ upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
236
+ # middle
237
+ sample = self.mid_block(sample)
238
+ sample = sample.to(upscale_dtype)
239
+
240
+ # up
241
+ for b_id, up_block in enumerate(self.up_blocks):
242
+ sample = up_block(sample)
243
+
244
+ # post-process
245
+ sample = rearrange(sample, 'b c n h w -> (b n) c h w')
246
+ sample = self.conv_norm_out(sample)
247
+ sample = self.conv_act(sample)
248
+
249
+ sample = rearrange(sample, '(b n) c h w -> b c n h w', b=bz)
250
+ temp_sample = sample
251
+ sample = self.temp_conv_out(sample)
252
+ sample = sample+temp_sample
253
+ sample = rearrange(sample, 'b c n h w -> (b n) c h w')
254
+
255
+ sample = self.conv_out(sample)
256
+ sample = rearrange(sample, '(b n) c h w -> b c n h w', b=bz)
257
+ return sample
258
+
259
+
260
+
261
+ class AllegroAutoencoderKL3D(ModelMixin, ConfigMixin):
262
+ r"""
263
+ A VAE model with KL loss for encoding videos into latents and decoding latent representations back into videos.
264
+
265
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
266
+ for all models (such as downloading or saving).
267
+
268
+ Parameters:
269
+ in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
270
+ out_channels (int, *optional*, defaults to 3): Number of channels in the output.
271
+ down_block_num (`int`, *optional*, defaults to 4):
272
+ Number of downsampling blocks in the encoder.
273
+ up_block_num (`int`, *optional*, defaults to 4):
274
+ Number of upsampling blocks in the decoder.
275
+ block_out_channels (`Tuple[int]`, *optional*, defaults to `(128, 256, 512, 512)`):
276
+ Tuple of block output channels.
277
+ act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
278
+ latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space.
279
+ sample_size (`int`, *optional*, defaults to `320`): Spatial Tiling Size.
280
+ tile_overlap (`tuple`, *optional*, defaults to `(120, 80)`): Spatial overlapping size while tiling (height, width)
281
+ chunk_len (`int`, *optional*, defaults to `24`): Temporal Tiling Size.
282
+ t_over (`int`, *optional*, defaults to `8`): Temporal overlapping size while tiling
283
+ scale_factor (`float`, *optional*, defaults to 0.13235):
284
+ The component-wise standard deviation of the trained latent space computed using the first batch of the
285
+ training set. This is used to scale the latent space to have unit variance when training the diffusion
286
+ model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
287
+ diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
288
+ / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
289
+ Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
290
+ force_upcast (`bool`, *optional*, defaults to `True`):
291
+ If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE
292
+ can be fine-tuned / trained to a lower range without losing too much precision, in which case
293
+ `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
294
+ blocks_tempdown_li (`List`, *optional*, defaults to `[True, True, False, False]`): Each item indicates whether each TemporalBlock in the Encoder performs temporal downsampling.
295
+ blocks_tempup_li (`List`, *optional*, defaults to `[False, True, True, False]`): Each item indicates whether each TemporalBlock in the Decoder performs temporal upsampling.
296
+ load_mode (`str`, *optional*, defaults to `full`): Load mode for the model. Can be one of `full`, `encoder_only`, `decoder_only`, which corresponds to loading the full model state dicts, only the encoder state dicts, or only the decoder state dicts.
297
+ """
298
+
299
+ _supports_gradient_checkpointing = True
300
+
301
+ @register_to_config
302
+ def __init__(
303
+ self,
304
+ in_channels: int = 3,
305
+ out_channels: int = 3,
306
+ down_block_num: int = 4,
307
+ up_block_num: int = 4,
308
+ block_out_channels: Tuple[int] = (128,256,512,512),
309
+ layers_per_block: int = 2,
310
+ act_fn: str = "silu",
311
+ latent_channels: int = 4,
312
+ norm_num_groups: int = 32,
313
+ sample_size: int = 320,
314
+ tile_overlap: tuple = (120, 80),
315
+ force_upcast: bool = True,
316
+ chunk_len: int = 24,
317
+ t_over: int = 8,
318
+ scale_factor: float = 0.13235,
319
+ blocks_tempdown_li=[True, True, False, False],
320
+ blocks_tempup_li=[False, True, True, False],
321
+ load_mode = 'full',
322
+ ):
323
+ super().__init__()
324
+
325
+ self.blocks_tempdown_li = blocks_tempdown_li
326
+ self.blocks_tempup_li = blocks_tempup_li
327
+ # pass init params to Encoder
328
+ self.load_mode = load_mode
329
+ if load_mode in ['full', 'encoder_only']:
330
+ self.encoder = Encoder3D(
331
+ in_channels=in_channels,
332
+ out_channels=latent_channels,
333
+ num_blocks=down_block_num,
334
+ blocks_temp_li=blocks_tempdown_li,
335
+ block_out_channels=block_out_channels,
336
+ layers_per_block=layers_per_block,
337
+ act_fn=act_fn,
338
+ norm_num_groups=norm_num_groups,
339
+ double_z=True,
340
+ )
341
+ self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
342
+
343
+ if load_mode in ['full', 'decoder_only']:
344
+ # pass init params to Decoder
345
+ self.decoder = Decoder3D(
346
+ in_channels=latent_channels,
347
+ out_channels=out_channels,
348
+ num_blocks=up_block_num,
349
+ blocks_temp_li=blocks_tempup_li,
350
+ block_out_channels=block_out_channels,
351
+ layers_per_block=layers_per_block,
352
+ norm_num_groups=norm_num_groups,
353
+ act_fn=act_fn,
354
+ )
355
+ self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
356
+
357
+
358
+ # only relevant if vae tiling is enabled
359
+ sample_size = (
360
+ sample_size[0]
361
+ if isinstance(sample_size, (list, tuple))
362
+ else sample_size
363
+ )
364
+ self.tile_overlap = tile_overlap
365
+ self.vae_scale_factor=[4, 8, 8]
366
+ self.scale_factor = scale_factor
367
+ self.sample_size = sample_size
368
+ self.chunk_len = chunk_len
369
+ self.t_over = t_over
370
+
371
+ self.latent_chunk_len = self.chunk_len//4
372
+ self.latent_t_over = self.t_over//4
373
+ self.kernel = (self.chunk_len, self.sample_size, self.sample_size)  # (24, 320, 320) with the default config
374
+ self.stride = (self.chunk_len - self.t_over, self.sample_size-self.tile_overlap[0], self.sample_size-self.tile_overlap[1])  # (16, 200, 240) with the default config
375
+
376
+
377
+ def encode(self, input_imgs: torch.Tensor, return_dict: bool = True, local_batch_size=1) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
378
+ KERNEL = self.kernel
379
+ STRIDE = self.stride
380
+ LOCAL_BS = local_batch_size
381
+ OUT_C = 8
382
+
383
+ B, C, N, H, W = input_imgs.shape
384
+
385
+
386
+ out_n = math.floor((N - KERNEL[0]) / STRIDE[0]) + 1
387
+ out_h = math.floor((H - KERNEL[1]) / STRIDE[1]) + 1
388
+ out_w = math.floor((W - KERNEL[2]) / STRIDE[2]) + 1
389
+
390
+ ## cut video into overlapped small cubes and batch forward
391
+ num = 0
392
+
393
+ out_latent = torch.zeros((out_n*out_h*out_w, OUT_C, KERNEL[0]//4, KERNEL[1]//8, KERNEL[2]//8), device=input_imgs.device, dtype=input_imgs.dtype)
394
+ vae_batch_input = torch.zeros((LOCAL_BS, C, KERNEL[0], KERNEL[1], KERNEL[2]), device=input_imgs.device, dtype=input_imgs.dtype)
395
+
396
+ for i in range(out_n):
397
+ for j in range(out_h):
398
+ for k in range(out_w):
399
+ n_start, n_end = i * STRIDE[0], i * STRIDE[0] + KERNEL[0]
400
+ h_start, h_end = j * STRIDE[1], j * STRIDE[1] + KERNEL[1]
401
+ w_start, w_end = k * STRIDE[2], k * STRIDE[2] + KERNEL[2]
402
+ video_cube = input_imgs[:, :, n_start:n_end, h_start:h_end, w_start:w_end]
403
+ vae_batch_input[num%LOCAL_BS] = video_cube
404
+
405
+ if num%LOCAL_BS == LOCAL_BS-1 or num == out_n*out_h*out_w-1:
406
+ latent = self.encoder(vae_batch_input)
407
+
408
+ if num == out_n*out_h*out_w-1 and num%LOCAL_BS != LOCAL_BS-1:
409
+ out_latent[num-num%LOCAL_BS:] = latent[:num%LOCAL_BS+1]
410
+ else:
411
+ out_latent[num-LOCAL_BS+1:num+1] = latent
412
+ vae_batch_input = torch.zeros((LOCAL_BS, C, KERNEL[0], KERNEL[1], KERNEL[2]), device=input_imgs.device, dtype=input_imgs.dtype)
413
+ num+=1
414
+
415
+ ## flatten the batched out latent to videos and supress the overlapped parts
416
+ B, C, N, H, W = input_imgs.shape
417
+
418
+ out_video_cube = torch.zeros((B, OUT_C, N//4, H//8, W//8), device=input_imgs.device, dtype=input_imgs.dtype)
419
+ OUT_KERNEL = KERNEL[0]//4, KERNEL[1]//8, KERNEL[2]//8
420
+ OUT_STRIDE = STRIDE[0]//4, STRIDE[1]//8, STRIDE[2]//8
421
+ OVERLAP = OUT_KERNEL[0]-OUT_STRIDE[0], OUT_KERNEL[1]-OUT_STRIDE[1], OUT_KERNEL[2]-OUT_STRIDE[2]
422
+
423
+ for i in range(out_n):
424
+ n_start, n_end = i * OUT_STRIDE[0], i * OUT_STRIDE[0] + OUT_KERNEL[0]
425
+ for j in range(out_h):
426
+ h_start, h_end = j * OUT_STRIDE[1], j * OUT_STRIDE[1] + OUT_KERNEL[1]
427
+ for k in range(out_w):
428
+ w_start, w_end = k * OUT_STRIDE[2], k * OUT_STRIDE[2] + OUT_KERNEL[2]
429
+ latent_mean_blend = prepare_for_blend((i, out_n, OVERLAP[0]), (j, out_h, OVERLAP[1]), (k, out_w, OVERLAP[2]), out_latent[i*out_h*out_w+j*out_w+k].unsqueeze(0))
430
+ out_video_cube[:, :, n_start:n_end, h_start:h_end, w_start:w_end] += latent_mean_blend
431
+
432
+ ## final conv
433
+ out_video_cube = rearrange(out_video_cube, 'b c n h w -> (b n) c h w')
434
+ out_video_cube = self.quant_conv(out_video_cube)
435
+ out_video_cube = rearrange(out_video_cube, '(b n) c h w -> b c n h w', b=B)
436
+
437
+ posterior = DiagonalGaussianDistribution(out_video_cube)
438
+
439
+ if not return_dict:
440
+ return (posterior,)
441
+
442
+ return AutoencoderKLOutput(latent_dist=posterior)
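To get a feel for how much work the tiled `encode` pass above does, it helps to count the overlapping cubes it batches through the encoder. The sketch below is an illustrative editor's note rather than repository code; it assumes the default `AllegroAutoencoderKL3D` config (`chunk_len=24`, `t_over=8`, `sample_size=320`, `tile_overlap=(120, 80)`) and an 88-frame 720x1280 input.

```py
# Illustrative sketch (not repository code): tile count for the tiled encode pass,
# assuming the default AllegroAutoencoderKL3D config and an 88x720x1280 video.
import math

kernel = (24, 320, 320)                  # (frames, height, width) per tile
stride = (24 - 8, 320 - 120, 320 - 80)   # kernel minus overlap -> (16, 200, 240)
N, H, W = 88, 720, 1280                  # frames, height, width of the input video

out_n = math.floor((N - kernel[0]) / stride[0]) + 1   # 5 temporal tiles
out_h = math.floor((H - kernel[1]) / stride[1]) + 1   # 3 tiles along height
out_w = math.floor((W - kernel[2]) / stride[2]) + 1   # 5 tiles along width
print(out_n * out_h * out_w)                          # 75 tiles encoded per video
```

For this particular input the last tile in each dimension ends exactly on the video boundary; sizes where `(size - kernel)` is not a multiple of the stride would leave a trailing strip that the loops above never visit.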
443
+
444
+
445
+ def decode(self, input_latents: torch.Tensor, return_dict: bool = True, local_batch_size=1) -> Union[DecoderOutput, torch.Tensor]:
446
+ KERNEL = self.kernel
447
+ STRIDE = self.stride
448
+
449
+ LOCAL_BS = local_batch_size
450
+ OUT_C = 3
451
+ IN_KERNEL = KERNEL[0]//4, KERNEL[1]//8, KERNEL[2]//8
452
+ IN_STRIDE = STRIDE[0]//4, STRIDE[1]//8, STRIDE[2]//8
453
+
454
+ B, C, N, H, W = input_latents.shape
455
+
456
+ ## post quant conv (a mapping)
457
+ input_latents = rearrange(input_latents, 'b c n h w -> (b n) c h w')
458
+ input_latents = self.post_quant_conv(input_latents)
459
+ input_latents = rearrange(input_latents, '(b n) c h w -> b c n h w', b=B)
460
+
461
+ ## out tensor shape
462
+ out_n = math.floor((N - IN_KERNEL[0]) / IN_STRIDE[0]) + 1
463
+ out_h = math.floor((H - IN_KERNEL[1]) / IN_STRIDE[1]) + 1
464
+ out_w = math.floor((W - IN_KERNEL[2]) / IN_STRIDE[2]) + 1
465
+
466
+ ## cut latent into overlapped small cubes and batch forward
467
+ num = 0
468
+ decoded_cube = torch.zeros((out_n*out_h*out_w, OUT_C, KERNEL[0], KERNEL[1], KERNEL[2]), device=input_latents.device, dtype=input_latents.dtype)
469
+ vae_batch_input = torch.zeros((LOCAL_BS, C, IN_KERNEL[0], IN_KERNEL[1], IN_KERNEL[2]), device=input_latents.device, dtype=input_latents.dtype)
470
+ for i in range(out_n):
471
+ for j in range(out_h):
472
+ for k in range(out_w):
473
+ n_start, n_end = i * IN_STRIDE[0], i * IN_STRIDE[0] + IN_KERNEL[0]
474
+ h_start, h_end = j * IN_STRIDE[1], j * IN_STRIDE[1] + IN_KERNEL[1]
475
+ w_start, w_end = k * IN_STRIDE[2], k * IN_STRIDE[2] + IN_KERNEL[2]
476
+ latent_cube = input_latents[:, :, n_start:n_end, h_start:h_end, w_start:w_end]
477
+ vae_batch_input[num%LOCAL_BS] = latent_cube
478
+ if num%LOCAL_BS == LOCAL_BS-1 or num == out_n*out_h*out_w-1:
479
+
480
+ latent = self.decoder(vae_batch_input)
481
+
482
+ if num == out_n*out_h*out_w-1 and num%LOCAL_BS != LOCAL_BS-1:
483
+ decoded_cube[num-num%LOCAL_BS:] = latent[:num%LOCAL_BS+1]
484
+ else:
485
+ decoded_cube[num-LOCAL_BS+1:num+1] = latent
486
+ vae_batch_input = torch.zeros((LOCAL_BS, C, IN_KERNEL[0], IN_KERNEL[1], IN_KERNEL[2]), device=input_latents.device, dtype=input_latents.dtype)
487
+ num+=1
488
+ B, C, N, H, W = input_latents.shape
489
+
490
+ out_video = torch.zeros((B, OUT_C, N*4, H*8, W*8), device=input_latents.device, dtype=input_latents.dtype)
491
+ OVERLAP = KERNEL[0]-STRIDE[0], KERNEL[1]-STRIDE[1], KERNEL[2]-STRIDE[2]
492
+ for i in range(out_n):
493
+ n_start, n_end = i * STRIDE[0], i * STRIDE[0] + KERNEL[0]
494
+ for j in range(out_h):
495
+ h_start, h_end = j * STRIDE[1], j * STRIDE[1] + KERNEL[1]
496
+ for k in range(out_w):
497
+ w_start, w_end = k * STRIDE[2], k * STRIDE[2] + KERNEL[2]
498
+ out_video_blend = prepare_for_blend((i, out_n, OVERLAP[0]), (j, out_h, OVERLAP[1]), (k, out_w, OVERLAP[2]), decoded_cube[i*out_h*out_w+j*out_w+k].unsqueeze(0))
499
+ out_video[:, :, n_start:n_end, h_start:h_end, w_start:w_end] += out_video_blend
500
+
501
+ out_video = rearrange(out_video, 'b c t h w -> b t c h w').contiguous()
502
+
503
+ decoded = out_video
504
+ if not return_dict:
505
+ return (decoded,)
506
+
507
+ return DecoderOutput(sample=decoded)
508
+
509
+ def forward(
510
+ self,
511
+ sample: torch.Tensor,
512
+ sample_posterior: bool = False,
513
+ return_dict: bool = True,
514
+ generator: Optional[torch.Generator] = None,
515
+ encoder_local_batch_size: int = 2,
516
+ decoder_local_batch_size: int = 2,
517
+ ) -> Union[DecoderOutput, torch.Tensor]:
518
+ r"""
519
+ Args:
520
+ sample (`torch.Tensor`): Input sample.
521
+ sample_posterior (`bool`, *optional*, defaults to `False`):
522
+ Whether to sample from the posterior.
523
+ return_dict (`bool`, *optional*, defaults to `True`):
524
+ Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
525
+ generator (`torch.Generator`, *optional*):
526
+ PyTorch random number generator.
527
+ encoder_local_batch_size (`int`, *optional*, defaults to 2):
528
+ Local batch size for the encoder's batch inference.
529
+ decoder_local_batch_size (`int`, *optional*, defaults to 2):
530
+ Local batch size for the decoder's batch inference.
531
+ """
532
+ x = sample
533
+ posterior = self.encode(x, local_batch_size=encoder_local_batch_size).latent_dist
534
+ if sample_posterior:
535
+ z = posterior.sample(generator=generator)
536
+ else:
537
+ z = posterior.mode()
538
+ dec = self.decode(z, local_batch_size=decoder_local_batch_size).sample
539
+
540
+ if not return_dict:
541
+ return (dec,)
542
+
543
+ return DecoderOutput(sample=dec)
544
+
545
+ @classmethod
546
+ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
547
+ kwargs["torch_type"] = torch.float32
548
+ return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
549
+
550
+
551
+ def prepare_for_blend(n_param, h_param, w_param, x):
552
+ n, n_max, overlap_n = n_param
553
+ h, h_max, overlap_h = h_param
554
+ w, w_max, overlap_w = w_param
555
+ if overlap_n > 0:
556
+ if n > 0: # the head overlap part ramps up from 0 to 1
557
+ x[:,:,0:overlap_n,:,:] = x[:,:,0:overlap_n,:,:] * (torch.arange(0, overlap_n).float().to(x.device) / overlap_n).reshape(overlap_n,1,1)
558
+ if n < n_max-1: # the tail overlap part decays from 1 to 0
559
+ x[:,:,-overlap_n:,:,:] = x[:,:,-overlap_n:,:,:] * (1 - torch.arange(0, overlap_n).float().to(x.device) / overlap_n).reshape(overlap_n,1,1)
560
+ if h > 0:
561
+ x[:,:,:,0:overlap_h,:] = x[:,:,:,0:overlap_h,:] * (torch.arange(0, overlap_h).float().to(x.device) / overlap_h).reshape(overlap_h,1)
562
+ if h < h_max-1:
563
+ x[:,:,:,-overlap_h:,:] = x[:,:,:,-overlap_h:,:] * (1 - torch.arange(0, overlap_h).float().to(x.device) / overlap_h).reshape(overlap_h,1)
564
+ if w > 0:
565
+ x[:,:,:,:,0:overlap_w] = x[:,:,:,:,0:overlap_w] * (torch.arange(0, overlap_w).float().to(x.device) / overlap_w)
566
+ if w < w_max-1:
567
+ x[:,:,:,:,-overlap_w:] = x[:,:,:,:,-overlap_w:] * (1 - torch.arange(0, overlap_w).float().to(x.device) / overlap_w)
568
+ return x
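`prepare_for_blend` weights each tile with a linear ramp along every overlapped dimension, so that summing the shifted tiles cross-fades the overlaps instead of double-counting them. A minimal sketch of that invariant (an editor's illustration, not repository code):

```py
# Illustrative sketch (not repository code): the head ramp of a later tile and the
# tail ramp of an earlier tile, as built in prepare_for_blend, sum to 1 across the
# whole overlapped region, so adding the weighted tiles preserves the overall scale.
import torch

overlap = 8                                            # hypothetical overlap length
ramp_up = torch.arange(0, overlap).float() / overlap   # weight on the later tile's head
ramp_down = 1.0 - ramp_up                              # weight on the earlier tile's tail

assert torch.allclose(ramp_up + ramp_down, torch.ones(overlap))
```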
allegro/pipelines/pipeline_allegro.py ADDED
@@ -0,0 +1,822 @@
1
+ # Adapted from Open-Sora-Plan
2
+
3
+ # This source code is licensed under the license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ # --------------------------------------------------------
6
+ # References:
7
+ # Open-Sora-Plan: https://github.com/PKU-YuanGroup/Open-Sora-Plan
8
+ # --------------------------------------------------------
9
+
10
+ import html
11
+ import inspect
12
+ import math
13
+ import re
14
+ import urllib.parse as ul
15
+ from typing import Callable, List, Optional, Tuple, Union
16
+ from einops import rearrange
17
+ import ftfy
18
+ import torch
19
+ from dataclasses import dataclass
20
+ import tqdm
21
+ from bs4 import BeautifulSoup
22
+
23
+ from diffusers import DiffusionPipeline
24
+ from diffusers.schedulers import EulerAncestralDiscreteScheduler
25
+ from diffusers.utils import (
26
+ BACKENDS_MAPPING,
27
+ is_bs4_available,
28
+ is_ftfy_available,
29
+ logging,
30
+ replace_example_docstring,
31
+ BaseOutput
32
+ )
33
+ from diffusers.utils.torch_utils import randn_tensor
34
+ from transformers import T5EncoderModel, T5Tokenizer
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+ from allegro.models.transformers.transformer_3d_allegro import AllegroTransformer3DModel
39
+ from allegro.models.vae.vae_allegro import AllegroAutoencoderKL3D
40
+
41
+ @dataclass
42
+ class AllegroPipelineOutput(BaseOutput):
43
+ r"""
44
+ Output class for Allegro pipelines.
45
+
46
+ Args:
47
+ video (`torch.Tensor`):
48
+ Torch tensor with shape `(batch_size, num_frames, channels, height, width)`.
49
+ """
50
+ video: torch.Tensor
51
+
52
+
53
+ EXAMPLE_DOC_STRING = """
54
+ Examples:
55
+ ```py
56
+ >>> import torch
57
+
58
+ >>> # Replace "your_path_to_model" with your own checkpoint path.
59
+ >>> pipe = AllegroPipeline.from_pretrained("your_path_to_model", torch_dtype=torch.float16, trust_remote_code=True)
60
+
61
+ >>> prompt = "A small cactus with a happy face in the Sahara desert."
62
+ >>> video = pipe(prompt).video[0]
63
+ ```
64
+ """
65
+
66
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
67
+ def retrieve_timesteps(
68
+ scheduler,
69
+ num_inference_steps: Optional[int] = None,
70
+ device: Optional[Union[str, torch.device]] = None,
71
+ timesteps: Optional[List[int]] = None,
72
+ **kwargs,
73
+ ):
74
+ """
75
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
76
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
77
+
78
+ Args:
79
+ scheduler (`SchedulerMixin`):
80
+ The scheduler to get timesteps from.
81
+ num_inference_steps (`int`):
82
+ The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
83
+ must be `None`.
84
+ device (`str` or `torch.device`, *optional*):
85
+ The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
86
+ timesteps (`List[int]`, *optional*):
87
+ Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
88
+ timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
89
+ must be `None`.
90
+
91
+ Returns:
92
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
93
+ second element is the number of inference steps.
94
+ """
95
+ if timesteps is not None:
96
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
97
+ if not accepts_timesteps:
98
+ raise ValueError(
99
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
100
+ f" timestep schedules. Please check whether you are using the correct scheduler."
101
+ )
102
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
103
+ timesteps = scheduler.timesteps
104
+ num_inference_steps = len(timesteps)
105
+ else:
106
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
107
+ timesteps = scheduler.timesteps
108
+ return timesteps, num_inference_steps
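When the scheduler's `set_timesteps` does not take a `timesteps` argument (as with `EulerAncestralDiscreteScheduler`, the scheduler this repository pairs with the pipeline), `retrieve_timesteps` simply defers to `set_timesteps` and returns the resulting schedule. A small usage sketch, assuming the package is importable the same way `single_inference.py` imports it:

```py
# Illustrative sketch (not repository code): fetch a 20-step schedule via retrieve_timesteps.
from diffusers.schedulers import EulerAncestralDiscreteScheduler
from allegro.pipelines.pipeline_allegro import retrieve_timesteps

scheduler = EulerAncestralDiscreteScheduler()
timesteps, num_steps = retrieve_timesteps(scheduler, num_inference_steps=20, device="cpu")
print(num_steps, len(timesteps))  # 20 20
```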
109
+
110
+
111
+ class AllegroPipeline(DiffusionPipeline):
112
+ r"""
113
+ Pipeline for text-to-video generation using Allegro.
114
+
115
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
116
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
117
+
118
+ Args:
119
+ vae ([`AllegroAutoencoderKL3D`]):
120
+ Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
121
+ text_encoder ([`T5EncoderModel`]):
122
+ Frozen text-encoder. PixArt-Alpha uses
123
+ [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the
124
+ [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant.
125
+ tokenizer (`T5Tokenizer`):
126
+ Tokenizer of class
127
+ [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer).
128
+ transformer ([`AllegroTransformer3DModel`]):
129
+ A text conditioned `AllegroTransformer3DModel` to denoise the encoded image latents.
130
+ scheduler ([`SchedulerMixin`]):
131
+ A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
132
+ """
133
+ bad_punct_regex = re.compile(
134
+ r"[" + "#®•©™&@·º½¾¿¡§~" + "\)" + "\(" + "\]" + "\[" + "\}" + "\{" + "\|" + "\\" + "\/" + "\*" + r"]{1,}"
135
+ ) # noqa
136
+
137
+ _optional_components = ["tokenizer", "text_encoder", "vae", "transformer", "scheduler"]
138
+ model_cpu_offload_seq = "text_encoder->transformer->vae"
139
+
140
+ def __init__(
141
+ self,
142
+ tokenizer: Optional[T5Tokenizer] = None,
143
+ text_encoder: Optional[T5EncoderModel] = None,
144
+ vae: Optional[AllegroAutoencoderKL3D] = None,
145
+ transformer: Optional[AllegroTransformer3DModel] = None,
146
+ scheduler: Optional[EulerAncestralDiscreteScheduler] = None,
147
+ device: torch.device = torch.device("cuda"),
148
+ dtype: torch.dtype = torch.float16,
149
+ ):
150
+ super().__init__()
151
+
152
+ self.register_modules(
153
+ tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
154
+ )
155
+
156
+
157
+ # Adapted from diffusers.pipelines.deepfloyd_if.pipeline_if.encode_prompt
158
+ def encode_prompt(
159
+ self,
160
+ prompt: Union[str, List[str]],
161
+ do_classifier_free_guidance: bool = True,
162
+ negative_prompt: str = "",
163
+ num_images_per_prompt: int = 1,
164
+ device: Optional[torch.device] = None,
165
+ prompt_embeds: Optional[torch.FloatTensor] = None,
166
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
167
+ prompt_attention_mask: Optional[torch.FloatTensor] = None,
168
+ negative_prompt_attention_mask: Optional[torch.FloatTensor] = None,
169
+ clean_caption: bool = False,
170
+ max_sequence_length: int = 120,
171
+ **kwargs,
172
+ ):
173
+ r"""
174
+ Encodes the prompt into text encoder hidden states.
175
+
176
+ Args:
177
+ prompt (`str` or `List[str]`, *optional*):
178
+ prompt to be encoded
179
+ negative_prompt (`str` or `List[str]`, *optional*):
180
+ The prompt not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds`
181
+ instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). For
182
+ PixArt-Alpha, this should be "".
183
+ do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
184
+ whether to use classifier free guidance or not
185
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
186
+ number of images that should be generated per prompt
187
+ device: (`torch.device`, *optional*):
188
+ torch device to place the resulting embeddings on
189
+ prompt_embeds (`torch.FloatTensor`, *optional*):
190
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
191
+ provided, text embeddings will be generated from `prompt` input argument.
192
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
193
+ Pre-generated negative text embeddings. For PixArt-Alpha, this should be the embeddings of the ""
194
+ string.
195
+ clean_caption (`bool`, defaults to `False`):
196
+ If `True`, the function will preprocess and clean the provided caption before encoding.
197
+ max_sequence_length (`int`, defaults to 120): Maximum sequence length to use for the prompt.
198
+ """
199
+ embeds_initially_provided = prompt_embeds is not None and negative_prompt_embeds is not None
200
+
201
+ if device is None:
202
+ device = self._execution_device
203
+
204
+ if prompt is not None and isinstance(prompt, str):
205
+ batch_size = 1
206
+ elif prompt is not None and isinstance(prompt, list):
207
+ batch_size = len(prompt)
208
+ else:
209
+ batch_size = prompt_embeds.shape[0]
210
+
211
+ # See Section 3.1. of the paper.
212
+ max_length = max_sequence_length
213
+
214
+ if prompt_embeds is None:
215
+ prompt = self._text_preprocessing(prompt, clean_caption=clean_caption)
216
+ text_inputs = self.tokenizer(
217
+ prompt,
218
+ padding="max_length",
219
+ max_length=max_length,
220
+ truncation=True,
221
+ add_special_tokens=True,
222
+ return_tensors="pt",
223
+ )
224
+ text_input_ids = text_inputs.input_ids
225
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
226
+
227
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
228
+ text_input_ids, untruncated_ids
229
+ ):
230
+ removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1])
231
+ logger.warning(
232
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
233
+ f" {max_length} tokens: {removed_text}"
234
+ )
235
+
236
+ prompt_attention_mask = text_inputs.attention_mask
237
+ prompt_attention_mask = prompt_attention_mask.to(device)
238
+
239
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask)
240
+ prompt_embeds = prompt_embeds[0]
241
+
242
+ if self.text_encoder is not None:
243
+ dtype = self.text_encoder.dtype
244
+ elif self.transformer is not None:
245
+ dtype = self.transformer.dtype
246
+ else:
247
+ dtype = None
248
+
249
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
250
+
251
+ bs_embed, seq_len, _ = prompt_embeds.shape
252
+ # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
253
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
254
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
255
+ prompt_attention_mask = prompt_attention_mask.view(bs_embed, -1)
256
+ prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1)
257
+
258
+ # get unconditional embeddings for classifier free guidance
259
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
260
+ uncond_tokens = [negative_prompt] * batch_size
261
+ uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption)
262
+ max_length = prompt_embeds.shape[1]
263
+ uncond_input = self.tokenizer(
264
+ uncond_tokens,
265
+ padding="max_length",
266
+ max_length=max_length,
267
+ truncation=True,
268
+ return_attention_mask=True,
269
+ add_special_tokens=True,
270
+ return_tensors="pt",
271
+ )
272
+ negative_prompt_attention_mask = uncond_input.attention_mask
273
+ negative_prompt_attention_mask = negative_prompt_attention_mask.to(device)
274
+
275
+ negative_prompt_embeds = self.text_encoder(
276
+ uncond_input.input_ids.to(device),
277
+ attention_mask=negative_prompt_attention_mask,
278
+ )
279
+ negative_prompt_embeds = negative_prompt_embeds[0]
280
+
281
+ if do_classifier_free_guidance:
282
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
283
+ seq_len = negative_prompt_embeds.shape[1]
284
+
285
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device)
286
+
287
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
288
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
289
+
290
+ negative_prompt_attention_mask = negative_prompt_attention_mask.view(bs_embed, -1)
291
+ negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1)
292
+ else:
293
+ negative_prompt_embeds = None
294
+ negative_prompt_attention_mask = None
295
+
296
+ return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask
297
+
298
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
299
+ def prepare_extra_step_kwargs(self, generator, eta):
300
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
301
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
302
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
303
+ # and should be between [0, 1]
304
+
305
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
306
+ extra_step_kwargs = {}
307
+ if accepts_eta:
308
+ extra_step_kwargs["eta"] = eta
309
+
310
+ # check if the scheduler accepts generator
311
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
312
+ if accepts_generator:
313
+ extra_step_kwargs["generator"] = generator
314
+ return extra_step_kwargs
315
+
316
+ def check_inputs(
317
+ self,
318
+ prompt,
319
+ num_frames,
320
+ height,
321
+ width,
322
+ negative_prompt,
323
+ callback_steps,
324
+ prompt_embeds=None,
325
+ negative_prompt_embeds=None,
326
+ prompt_attention_mask=None,
327
+ negative_prompt_attention_mask=None,
328
+ ):
329
+
330
+ if num_frames <= 0:
331
+ raise ValueError(f"`num_frames` have to be positive but is {num_frames}.")
332
+ if height % 8 != 0 or width % 8 != 0:
333
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
334
+
335
+ if (callback_steps is None) or (
336
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
337
+ ):
338
+ raise ValueError(
339
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
340
+ f" {type(callback_steps)}."
341
+ )
342
+
343
+ if prompt is not None and prompt_embeds is not None:
344
+ raise ValueError(
345
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
346
+ " only forward one of the two."
347
+ )
348
+ elif prompt is None and prompt_embeds is None:
349
+ raise ValueError(
350
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
351
+ )
352
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
353
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
354
+
355
+ if prompt is not None and negative_prompt_embeds is not None:
356
+ raise ValueError(
357
+ f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:"
358
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
359
+ )
360
+
361
+ if negative_prompt is not None and negative_prompt_embeds is not None:
362
+ raise ValueError(
363
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
364
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
365
+ )
366
+
367
+ if prompt_embeds is not None and prompt_attention_mask is None:
368
+ raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.")
369
+
370
+ if negative_prompt_embeds is not None and negative_prompt_attention_mask is None:
371
+ raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.")
372
+
373
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
374
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
375
+ raise ValueError(
376
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
377
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
378
+ f" {negative_prompt_embeds.shape}."
379
+ )
380
+ if prompt_attention_mask.shape != negative_prompt_attention_mask.shape:
381
+ raise ValueError(
382
+ "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but"
383
+ f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`"
384
+ f" {negative_prompt_attention_mask.shape}."
385
+ )
386
+
387
+
388
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
389
+ def _text_preprocessing(self, text, clean_caption=False):
390
+ if clean_caption and not is_bs4_available():
391
+ logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`"))
392
+ logger.warning("Setting `clean_caption` to False...")
393
+ clean_caption = False
394
+
395
+ if clean_caption and not is_ftfy_available():
396
+ logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`"))
397
+ logger.warning("Setting `clean_caption` to False...")
398
+ clean_caption = False
399
+
400
+ if not isinstance(text, (tuple, list)):
401
+ text = [text]
402
+
403
+ def process(text: str):
404
+ if clean_caption:
405
+ text = self._clean_caption(text)
406
+ text = self._clean_caption(text)
407
+ else:
408
+ text = text.lower().strip()
409
+ return text
410
+
411
+ return [process(t) for t in text]
412
+
413
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption
414
+ def _clean_caption(self, caption):
415
+ caption = str(caption)
416
+ caption = ul.unquote_plus(caption)
417
+ caption = caption.strip().lower()
418
+ caption = re.sub("<person>", "person", caption)
419
+ # urls:
420
+ caption = re.sub(
421
+ r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",
422
+ # noqa
423
+ "",
424
+ caption,
425
+ ) # regex for urls
426
+ caption = re.sub(
427
+ r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",
428
+ # noqa
429
+ "",
430
+ caption,
431
+ ) # regex for urls
432
+ # html:
433
+ caption = BeautifulSoup(caption, features="html.parser").text
434
+
435
+ # @<nickname>
436
+ caption = re.sub(r"@[\w\d]+\b", "", caption)
437
+
438
+ # 31C0—31EF CJK Strokes
439
+ # 31F0—31FF Katakana Phonetic Extensions
440
+ # 3200—32FF Enclosed CJK Letters and Months
441
+ # 3300—33FF CJK Compatibility
442
+ # 3400—4DBF CJK Unified Ideographs Extension A
443
+ # 4DC0—4DFF Yijing Hexagram Symbols
444
+ # 4E00—9FFF CJK Unified Ideographs
445
+ caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
446
+ caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
447
+ caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
448
+ caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
449
+ caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
450
+ caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
451
+ # caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
452
+ #######################################################
453
+
454
+ # все виды тире / all types of dash --> "-"
455
+ caption = re.sub(
456
+ r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+",
457
+ # noqa
458
+ "-",
459
+ caption,
460
+ )
461
+
462
+ # кавычки к одному стандарту
463
+ caption = re.sub(r"[`´«»“”¨]", '"', caption)
464
+ caption = re.sub(r"[‘’]", "'", caption)
465
+
466
+ # &quot;
467
+ caption = re.sub(r"&quot;?", "", caption)
468
+ # &amp
469
+ caption = re.sub(r"&amp", "", caption)
470
+
471
+ # ip addresses:
472
+ caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)
473
+
474
+ # article ids:
475
+ caption = re.sub(r"\d:\d\d\s+$", "", caption)
476
+
477
+ # \n
478
+ caption = re.sub(r"\\n", " ", caption)
479
+
480
+ # "#123"
481
+ caption = re.sub(r"#\d{1,3}\b", "", caption)
482
+ # "#12345.."
483
+ caption = re.sub(r"#\d{5,}\b", "", caption)
484
+ # "123456.."
485
+ caption = re.sub(r"\b\d{6,}\b", "", caption)
486
+ # filenames:
487
+ caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption)
488
+
489
+ #
490
+ caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT"""
491
+ caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT"""
492
+
493
+ caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT
494
+ caption = re.sub(r"\s+\.\s+", r" ", caption) # " . "
495
+
496
+ # this-is-my-cute-cat / this_is_my_cute_cat
497
+ regex2 = re.compile(r"(?:\-|\_)")
498
+ if len(re.findall(regex2, caption)) > 3:
499
+ caption = re.sub(regex2, " ", caption)
500
+
501
+ caption = ftfy.fix_text(caption)
502
+ caption = html.unescape(html.unescape(caption))
503
+
504
+ caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640
505
+ caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc
506
+ caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231
507
+
508
+ caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption)
509
+ caption = re.sub(r"(free\s)?download(\sfree)?", "", caption)
510
+ caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption)
511
+ caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption)
512
+ caption = re.sub(r"\bpage\s+\d+\b", "", caption)
513
+
514
+ caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a...
515
+
516
+ caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption)
517
+
518
+ caption = re.sub(r"\b\s+\:\s+", r": ", caption)
519
+ caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption)
520
+ caption = re.sub(r"\s+", " ", caption)
521
+
522
+ caption = caption.strip()
523
+
524
+ caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption)
525
+ caption = re.sub(r"^[\'\_,\-\:;]", r"", caption)
526
+ caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption)
527
+ caption = re.sub(r"^\.\S+$", "", caption)
528
+ return caption.strip()
529
+
530
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
531
+ def prepare_latents(
532
+ self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None
533
+ ):
534
+ shape = (
535
+ batch_size,
536
+ num_channels_latents,
537
+ (math.ceil((int(num_frames) - 1) / self.vae.vae_scale_factor[0]) + 1)
538
+ if int(num_frames) % 2 == 1
539
+ else math.ceil(int(num_frames) / self.vae.vae_scale_factor[0]),
540
+ math.ceil(int(height) / self.vae.vae_scale_factor[1]),
541
+ math.ceil(int(width) / self.vae.vae_scale_factor[2]),
542
+ )
543
+ if isinstance(generator, list) and len(generator) != batch_size:
544
+ raise ValueError(
545
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
546
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
547
+ )
548
+
549
+ if latents is None:
550
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
551
+ else:
552
+ latents = latents.to(device)
553
+
554
+ # scale the initial noise by the standard deviation required by the scheduler
555
+ latents = latents * self.scheduler.init_noise_sigma
556
+
557
+
558
+ return latents
559
+
560
+ @torch.no_grad()
561
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
562
+ def __call__(
563
+ self,
564
+ prompt: Union[str, List[str]] = None,
565
+ negative_prompt: str = "",
566
+ num_inference_steps: int = 100,
567
+ timesteps: List[int] = None,
568
+ guidance_scale: float = 7.5,
569
+ num_images_per_prompt: Optional[int] = 1,
570
+ num_frames: Optional[int] = None,
571
+ height: Optional[int] = None,
572
+ width: Optional[int] = None,
573
+ eta: float = 0.0,
574
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
575
+ latents: Optional[torch.FloatTensor] = None,
576
+ prompt_embeds: Optional[torch.FloatTensor] = None,
577
+ prompt_attention_mask: Optional[torch.FloatTensor] = None,
578
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
579
+ negative_prompt_attention_mask: Optional[torch.FloatTensor] = None,
580
+ output_type: Optional[str] = "pil",
581
+ return_dict: bool = True,
582
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
583
+ callback_steps: int = 1,
584
+ clean_caption: bool = True,
585
+ max_sequence_length: int = 512,
586
+ verbose: bool = True,
587
+ ) -> Union[AllegroPipelineOutput, Tuple]:
588
+ """
589
+ Function invoked when calling the pipeline for generation.
590
+
591
+ Args:
592
+ prompt (`str` or `List[str]`, *optional*):
593
+ The prompt or prompts to guide the video generation. If not defined, one has to pass `prompt_embeds`
594
+ instead.
595
+ negative_prompt (`str` or `List[str]`, *optional*):
596
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
597
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
598
+ less than `1`).
599
+ num_inference_steps (`int`, *optional*, defaults to 100):
600
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
601
+ expense of slower inference.
602
+ timesteps (`List[int]`, *optional*):
603
+ Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps`
604
+ timesteps are used. Must be in descending order.
605
+ guidance_scale (`float`, *optional*, defaults to 7.5):
606
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
607
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
608
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
609
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
610
+ usually at the expense of lower image quality.
611
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
612
+ The number of images to generate per prompt.
613
+ num_frames: (`int`, *optional*, defaults to 88):
614
+ Controls the number of generated video frames.
615
+ height (`int`, *optional*, defaults to `self.transformer.config.sample_size[0] * vae_scale_factor[1]`):
616
+ The height in pixels of the generated image.
617
+ width (`int`, *optional*, defaults to `self.transformer.config.sample_size[1] * vae_scale_factor[2]`):
618
+ The width in pixels of the generated image.
619
+ eta (`float`, *optional*, defaults to 0.0):
620
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
621
+ [`schedulers.DDIMScheduler`], will be ignored for others.
622
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
623
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
624
+ to make generation deterministic.
625
+ latents (`torch.FloatTensor`, *optional*):
626
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
627
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
628
+ tensor will be generated by sampling using the supplied random `generator`.
629
+ prompt_embeds (`torch.FloatTensor`, *optional*):
630
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
631
+ provided, text embeddings will be generated from `prompt` input argument.
632
+ prompt_attention_mask (`torch.FloatTensor`, *optional*): Pre-generated attention mask for text embeddings.
633
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
634
+ Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not
635
+ provided, negative_prompt_embeds will be generated from `negative_prompt` input argument.
636
+ negative_prompt_attention_mask (`torch.FloatTensor`, *optional*):
637
+ Pre-generated attention mask for negative text embeddings.
638
+ output_type (`str`, *optional*, defaults to `"pil"`):
639
+ The output format of the generated video. Choose between
640
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
641
+ return_dict (`bool`, *optional*, defaults to `True`):
642
+ Whether or not to return an [`AllegroPipelineOutput`] instead of a plain tuple.
643
+ callback (`Callable`, *optional*):
644
+ A function that will be called every `callback_steps` steps during inference. The function will be
645
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
646
+ callback_steps (`int`, *optional*, defaults to 1):
647
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
648
+ called at every step.
649
+ clean_caption (`bool`, *optional*, defaults to `True`):
650
+ Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to
651
+ be installed. If the dependencies are not installed, the embeddings will be created from the raw
652
+ prompt.
653
+ max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`.
654
+
655
+ Examples:
656
+
657
+ Returns:
658
+ [`AllegroPipelineOutput`] or `tuple`:
659
+ If `return_dict` is `True`, [`AllegroPipelineOutput`] is returned, otherwise a `tuple` is
660
+ returned where the first element is a list with the generated videos
661
+ """
662
+ # 1. Check inputs. Raise error if not correct
663
+ num_frames = num_frames or self.transformer.config.sample_size_t * self.vae.vae_scale_factor[0]
664
+ height = height or self.transformer.config.sample_size[0] * self.vae.vae_scale_factor[1]
665
+ width = width or self.transformer.config.sample_size[1] * self.vae.vae_scale_factor[2]
666
+ self.check_inputs(
667
+ prompt,
668
+ num_frames,
669
+ height,
670
+ width,
671
+ negative_prompt,
672
+ callback_steps,
673
+ prompt_embeds,
674
+ negative_prompt_embeds,
675
+ prompt_attention_mask,
676
+ negative_prompt_attention_mask,
677
+ )
678
+
679
+ # 2. Default height and width to transformer
680
+ if prompt is not None and isinstance(prompt, str):
681
+ batch_size = 1
682
+ elif prompt is not None and isinstance(prompt, list):
683
+ batch_size = len(prompt)
684
+ else:
685
+ batch_size = prompt_embeds.shape[0]
686
+
687
+ device = self._execution_device
688
+
689
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
690
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
691
+ # corresponds to doing no classifier free guidance.
692
+ do_classifier_free_guidance = guidance_scale > 1.0
693
+
694
+ # 3. Encode input prompt
695
+ (
696
+ prompt_embeds,
697
+ prompt_attention_mask,
698
+ negative_prompt_embeds,
699
+ negative_prompt_attention_mask,
700
+ ) = self.encode_prompt(
701
+ prompt,
702
+ do_classifier_free_guidance,
703
+ negative_prompt=negative_prompt,
704
+ num_images_per_prompt=num_images_per_prompt,
705
+ device=device,
706
+ prompt_embeds=prompt_embeds,
707
+ negative_prompt_embeds=negative_prompt_embeds,
708
+ prompt_attention_mask=prompt_attention_mask,
709
+ negative_prompt_attention_mask=negative_prompt_attention_mask,
710
+ clean_caption=clean_caption,
711
+ max_sequence_length=max_sequence_length,
712
+ )
713
+ if do_classifier_free_guidance:
714
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
715
+ prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0)
716
+
717
+ # 4. Prepare timesteps
718
+ timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
719
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
720
+
721
+ # 5. Prepare latents.
722
+ latent_channels = self.transformer.config.in_channels
723
+ latents = self.prepare_latents(
724
+ batch_size * num_images_per_prompt,
725
+ latent_channels,
726
+ num_frames,
727
+ height,
728
+ width,
729
+ prompt_embeds.dtype,
730
+ device,
731
+ generator,
732
+ latents,
733
+ )
734
+
735
+         # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+         extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+         # 6.1 Prepare micro-conditions.
+         added_cond_kwargs = {"resolution": None, "aspect_ratio": None}
+
+         # 7. Denoising loop
+         num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
+
+         progress_wrap = tqdm.tqdm if verbose else (lambda x: x)
+         for i, t in progress_wrap(list(enumerate(timesteps))):
+
+             latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+             latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+             current_timestep = t
+             if not torch.is_tensor(current_timestep):
+                 # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
+                 # This would be a good case for the `match` statement (Python 3.10+)
+                 is_mps = latent_model_input.device.type == "mps"
+                 if isinstance(current_timestep, float):
+                     dtype = torch.float32 if is_mps else torch.float64
+                 else:
+                     dtype = torch.int32 if is_mps else torch.int64
+                 current_timestep = torch.tensor([current_timestep], dtype=dtype, device=latent_model_input.device)
+             elif len(current_timestep.shape) == 0:
+                 current_timestep = current_timestep[None].to(latent_model_input.device)
+             # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+             current_timestep = current_timestep.expand(latent_model_input.shape[0])
+
+             if prompt_embeds.ndim == 3:
+                 prompt_embeds = prompt_embeds.unsqueeze(1)  # b l d -> b 1 l d
+             if prompt_attention_mask.ndim == 2:
+                 prompt_attention_mask = prompt_attention_mask.unsqueeze(1)  # b l -> b 1 l
+             # prepare attention_mask.
+             # b c t h w -> b t h w
+             attention_mask = torch.ones_like(latent_model_input)[:, 0]
+             # predict noise model_output
+             noise_pred = self.transformer(
+                 latent_model_input,
+                 attention_mask=attention_mask,
+                 encoder_hidden_states=prompt_embeds,
+                 encoder_attention_mask=prompt_attention_mask,
+                 timestep=current_timestep,
+                 added_cond_kwargs=added_cond_kwargs,
+                 return_dict=False,
+             )[0]
+
+             # perform guidance
+             if do_classifier_free_guidance:
+                 noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                 noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+             # learned sigma
+             if self.transformer.config.out_channels // 2 == latent_channels:
+                 noise_pred = noise_pred.chunk(2, dim=1)[0]
+
+             # compute previous image: x_t -> x_t-1
+             latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
+
+             # call the callback, if provided
+             if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+                 if callback is not None and i % callback_steps == 0:
+                     step_idx = i // getattr(self.scheduler, "order", 1)
+                     callback(step_idx, t, latents)
+
+         if not output_type == "latents":
+             video = self.decode_latents(latents)
+             video = video[:, :num_frames, :height, :width]
+         else:
+             video = latents
+             return AllegroPipelineOutput(video=video)
+
+         # Offload all models
+         self.maybe_free_model_hooks()
+
+         if not return_dict:
+             return (video,)
+
+         return AllegroPipelineOutput(video=video)
+
+     def decode_latents(self, latents):
+         video = self.vae.decode(latents.to(self.vae.dtype) / self.vae.scale_factor).sample
+         # b t c h w -> b t h w c
+         video = ((video / 2.0 + 0.5).clamp(0, 1) * 255).to(dtype=torch.uint8).cpu().permute(0, 1, 3, 4, 2).contiguous()
+         return video
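The guidance branch in the loop above is standard classifier-free guidance: the batch is doubled into unconditional and text-conditioned halves, and the two noise predictions are recombined. A minimal, self-contained sketch of that combine step (the helper name cfg_combine is illustrative, not part of the pipeline):

import torch

def cfg_combine(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # noise_pred stacks the unconditional and text-conditioned predictions along dim 0,
    # mirroring torch.cat([latents] * 2) at the top of the loop
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    # push the result away from the unconditional prediction, toward the text-conditioned one
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

# e.g. cfg_combine(torch.randn(2, 4, 8, 8), guidance_scale=7.5) returns a tensor of shape (1, 4, 8, 8)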
assets/Allegro_banner.gif ADDED
assets/banner_white.gif ADDED
assets/demo_video.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3babd856c116886c00f3e2f8349bba8ef5a58833e0451787ec9080efaf9272d
+ size 1439517
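The three lines above are a Git LFS pointer: the actual mp4 lives in LFS storage, and the repository tracks only its sha256 oid and byte size. A small sketch (hypothetical helper, not part of this repo) that reads such a pointer:

def parse_lfs_pointer(text: str) -> dict:
    # each pointer line is "<key> <value>", e.g. "size 1439517"
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {"version": fields["version"], "oid": fields["oid"], "size": int(fields["size"])}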
assets/rhymes_1.png ADDED
requirements.txt ADDED
@@ -0,0 +1,14 @@
+ accelerate==0.33.0
+ diffusers==0.28.0
+ numpy==1.24.4
+ torch==2.4.1
+ tqdm==4.66.2
+ transformers==4.40.1
+ xformers==0.0.28.post1
+ einops==0.7.0
+ decord==0.6.0
+ sentencepiece==0.1.99
+ imageio
+ imageio-ffmpeg
+ ftfy
+ bs4
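The pins above can be sanity-checked against the active environment before running inference; a small sketch using importlib.metadata (the pins dict below only repeats a few entries from requirements.txt and is not part of the repo):

from importlib.metadata import version, PackageNotFoundError

pins = {"accelerate": "0.33.0", "diffusers": "0.28.0", "torch": "2.4.1", "transformers": "4.40.1", "xformers": "0.0.28.post1"}
for name, pinned in pins.items():
    try:
        installed = version(name)
    except PackageNotFoundError:
        installed = None
    print(f"{name}: installed={installed}, pinned={pinned}, {'ok' if installed == pinned else 'mismatch'}")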
single_inference.py ADDED
@@ -0,0 +1,99 @@
+ import torch
+ import imageio
+ import os
+ import argparse
+ from diffusers.schedulers import EulerAncestralDiscreteScheduler
+ from transformers import T5EncoderModel, T5Tokenizer
+ from allegro.pipelines.pipeline_allegro import AllegroPipeline
+ from allegro.models.vae.vae_allegro import AllegroAutoencoderKL3D
+ from allegro.models.transformers.transformer_3d_allegro import AllegroTransformer3DModel
+
+
+ def single_inference(args):
+     dtype = torch.bfloat16
+
+     # the VAE has better performance in float32
+     vae = AllegroAutoencoderKL3D.from_pretrained(args.vae, torch_dtype=torch.float32).cuda()
+     vae.eval()
+
+     text_encoder = T5EncoderModel.from_pretrained(
+         args.text_encoder,
+         torch_dtype=dtype
+     )
+     text_encoder.eval()
+
+     tokenizer = T5Tokenizer.from_pretrained(
+         args.tokenizer,
+     )
+
+     scheduler = EulerAncestralDiscreteScheduler()
+
+     transformer = AllegroTransformer3DModel.from_pretrained(
+         args.dit,
+         torch_dtype=dtype
+     ).cuda()
+     transformer.eval()
+
+     allegro_pipeline = AllegroPipeline(
+         vae=vae,
+         text_encoder=text_encoder,
+         tokenizer=tokenizer,
+         scheduler=scheduler,
+         transformer=transformer
+     ).to("cuda:0")
+
+     positive_prompt = """
+     (masterpiece), (best quality), (ultra-detailed), (unwatermarked),
+     {}
+     emotional, harmonious, vignette, 4k epic detailed, shot on kodak, 35mm photo,
+     sharp focus, high budget, cinemascope, moody, epic, gorgeous
+     """
+
+     negative_prompt = """
+     nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality,
+     low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry.
+     """
+
+     user_prompt = positive_prompt.format(args.user_prompt.lower().strip())
+
+     if args.enable_cpu_offload:
+         allegro_pipeline.enable_sequential_cpu_offload()
+         print("cpu offload enabled")
+
+     out_video = allegro_pipeline(
+         user_prompt,
+         negative_prompt=negative_prompt,
+         num_frames=88,
+         height=720,
+         width=1280,
+         num_inference_steps=args.num_sampling_steps,
+         guidance_scale=args.guidance_scale,
+         max_sequence_length=512,
+         generator=torch.Generator(device="cuda:0").manual_seed(args.seed)
+     ).video[0]
+
+     imageio.mimwrite(args.save_path, out_video, fps=15, quality=8)  # quality ranges from 0 (lowest) to 10 (highest)
+
+
+ if __name__ == "__main__":
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--user_prompt", type=str, default='')
+     parser.add_argument("--vae", type=str, default='')
+     parser.add_argument("--dit", type=str, default='')
+     parser.add_argument("--text_encoder", type=str, default='')
+     parser.add_argument("--tokenizer", type=str, default='')
+     parser.add_argument("--save_path", type=str, default="./output_videos/test_video.mp4")
+     parser.add_argument("--guidance_scale", type=float, default=7.5)
+     parser.add_argument("--num_sampling_steps", type=int, default=100)
+     parser.add_argument("--seed", type=int, default=42)
+     parser.add_argument("--enable_cpu_offload", action='store_true')
+
+     args = parser.parse_args()
+
+     if os.path.dirname(args.save_path) != '' and not os.path.exists(os.path.dirname(args.save_path)):
+         os.makedirs(os.path.dirname(args.save_path))
+
+     single_inference(args)
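single_inference can also be called directly from Python instead of through the argparse CLI; a sketch, where every path and the example prompt below are placeholders for your own values:

import os
from argparse import Namespace
from single_inference import single_inference

args = Namespace(
    user_prompt="A seaside carnival at dusk, slow dolly shot",  # example prompt, not from the repo
    vae="/path/to/Allegro/vae",                   # placeholder checkpoint paths
    dit="/path/to/Allegro/transformer",
    text_encoder="/path/to/Allegro/text_encoder",
    tokenizer="/path/to/Allegro/tokenizer",
    save_path="./output_videos/test_video.mp4",
    guidance_scale=7.5,
    num_sampling_steps=100,
    seed=42,
    enable_cpu_offload=False,
)
os.makedirs(os.path.dirname(args.save_path), exist_ok=True)
single_inference(args)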
vae_inference.py ADDED
@@ -0,0 +1,50 @@
+ from einops import rearrange
+ import torch
+ import imageio
+ import os
+ import argparse
+ from allegro.models.vae.vae_allegro import AllegroAutoencoderKL3D
+
+ from decord import VideoReader
+
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.backends.cudnn.allow_tf32 = True
+
+ def vae_inference(args):
+
+     # the VAE has better performance in float32
+     vae = AllegroAutoencoderKL3D.from_pretrained(args.vae, torch_dtype=torch.float32).cuda()
+     vae.eval()
+
+     vr = VideoReader(args.input_video)
+
+     frames = vr.get_batch(range(len(vr))).asnumpy()
+     frames = torch.from_numpy(frames).float() / 255.0
+     frames = frames * 2.0 - 1.0  # scale to [-1, 1]
+     frames = rearrange(frames, 'f h w c -> 1 c f h w')
+     frames = frames[:, :, :88]  # keep at most 88 frames
+
+     frames = frames.cuda().to(torch.float32)
+     with torch.no_grad():
+         out_video = vae(frames, encoder_local_batch_size=args.local_batch_size, decoder_local_batch_size=args.local_batch_size).sample
+     out_video = ((out_video / 2.0 + 0.5).clamp(0, 1) * 255).to(dtype=torch.uint8).cpu().permute(0, 1, 3, 4, 2).contiguous()
+
+     imageio.mimwrite(f"{args.save_path}/test_vae.mp4", out_video[0], fps=15, quality=8)  # quality ranges from 0 (lowest) to 10 (highest)
+
+
+ if __name__ == "__main__":
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--vae", type=str, default='')
+     parser.add_argument("--input_video", type=str, default="resources/demo_video.mp4")
+     parser.add_argument("--save_path", type=str, default="./output_videos")
+     parser.add_argument("--local_batch_size", type=int, default=2)
+
+     args = parser.parse_args()
+     if not os.path.exists(args.save_path):
+         os.makedirs(args.save_path)
+
+     vae_inference(args)
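Likewise, the VAE round-trip check can be driven without the CLI; a sketch with placeholder paths (the VAE path must point at downloaded Allegro VAE weights):

import os
from argparse import Namespace
from vae_inference import vae_inference

args = Namespace(
    vae="/path/to/Allegro/vae",              # placeholder path to the VAE weights
    input_video="resources/demo_video.mp4",  # default demo clip from the repo
    save_path="./output_videos",
    local_batch_size=2,
)
os.makedirs(args.save_path, exist_ok=True)
vae_inference(args)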